//===----------X86InstrFragments - X86 Pattern fragments. --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// X86-specific DAG node.
def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                         SDTCisSameAs<1, 2>]>;
def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>]>;

def SDTX86Cmov : SDTypeProfile<1, 4,
                               [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;

// Unary and binary operator instructions that set EFLAGS as a side-effect.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
                                           [SDTCisSameAs<0, 2>,
                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;
// RES1, RES2, FLAGS = op LHS, RHS
def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
                                                   [SDTCisSameAs<0, 1>,
                                                    SDTCisSameAs<0, 2>,
                                                    SDTCisSameAs<0, 3>,
                                                    SDTCisInt<0>,
                                                    SDTCisVT<1, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
                                 [SDTCisVT<0, OtherVT>,
                                  SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

def SDTX86SetCC : SDTypeProfile<1, 2,
                                [SDTCisVT<0, i8>,
                                 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC_C : SDTypeProfile<1, 2,
                                  [SDTCisInt<0>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                        SDTCisVT<2, i32>]>;

def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
                                     SDTCisVT<2, i8>]>;
def SDTX86cas8pair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86cas16pair : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i64>]>;

def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                                       SDTCisPtrTy<1>,
                                                       SDTCisInt<2>]>;

def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
                                                      SDTCisPtrTy<1>]>;

def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;

def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                          SDTCisVT<1, i32>]>;
def SDT_X86CallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                        SDTCisVT<1, i32>]>;

def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
                                                         SDTCisPtrTy<1>]>;

def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
                                         SDTCisPtrTy<1>,
                                         SDTCisVT<2, i32>,
                                         SDTCisVT<3, i8>,
                                         SDTCisVT<4, i32>]>;

def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void : SDTypeProfile<0, 0, []>;

def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86DYN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                         SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;

def SDT_X86AESENCDECKL : SDTypeProfile<2, 2, [SDTCisVT<0, v2i64>,
                                              SDTCisVT<1, i32>,
                                              SDTCisVT<2, v2i64>,
                                              SDTCisPtrTy<3>]>;

def SDTX86Cmpccxadd : SDTypeProfile<1, 4, [SDTCisSameAs<0, 2>,
                                           SDTCisPtrTy<1>, SDTCisSameAs<2, 3>,
                                           SDTCisVT<4, i8>]>;

def X86MFence : SDNode<"X86ISD::MFENCE", SDTNone, [SDNPHasChain]>;

def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;

def X86cmp  : SDNode<"X86ISD::CMP", SDTX86CmpTest>;
def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;

def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
                       [SDNPHasChain]>;
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                    [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                     SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8pair,
                     [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                      SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86cas16pair,
                      [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                       SDNPMayLoad, SDNPMemOperand]>;

def X86retglue : SDNode<"X86ISD::RET_GLUE", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                     [SDNPHasChain, SDNPOptInGlue]>;

def X86vastart_save_xmm_regs :
    SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
           SDT_X86VASTART_SAVE_XMM_REGS,
           [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPVariadic]>;
def X86vaarg64 :
    SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
           [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
            SDNPMemOperand]>;
def X86vaargx32 :
    SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
           [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
            SDNPMemOperand]>;
def X86callseq_start :
    SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
           [SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
    SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
           [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
                     [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                      SDNPVariadic]>;

def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call,
                              [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                               SDNPVariadic]>;

def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
                            [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                             SDNPVariadic]>;
def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
                             [SDNPHasChain]>;

def X86rep_stos : SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                         [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs : SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
                         [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                          SDNPMayLoad]>;

def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;

def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;

def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                      [SDNPHasChain]>;

def X86eh_sjlj_setjmp  : SDNode<"X86ISD::EH_SJLJ_SETJMP",
                                SDTypeProfile<1, 1, [SDTCisInt<0>,
                                                     SDTCisPtrTy<1>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def X86add_flag  : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86sub_flag  : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86adc_flag  : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag  : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;

def X86or_flag  : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;

def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_or  : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;

def X86bextr  : SDNode<"X86ISD::BEXTR",  SDTIntBinOp>;
def X86bextri : SDNode<"X86ISD::BEXTRI", SDTIntBinOp>;

def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;

def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;

def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

def X86DynAlloca : SDNode<"X86ISD::DYN_ALLOCA", SDT_X86DYN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;

def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
                          [SDNPHasChain]>;

def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
                             [SDNPHasChain]>;

def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86enqcmd  : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
                        [SDNPHasChain, SDNPSideEffect]>;
def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
                        [SDNPHasChain, SDNPSideEffect]>;
def X86testui : SDNode<"X86ISD::TESTUI",
                       SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86aesenc128kl : SDNode<"X86ISD::AESENC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec128kl : SDNode<"X86ISD::AESDEC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesenc256kl : SDNode<"X86ISD::AESENC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec256kl : SDNode<"X86ISD::AESDEC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;

def X86cmpccxadd : SDNode<"X86ISD::CMPCCXADD", SDTX86Cmpccxadd,
                          [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                           SDNPMemOperand]>;

// Define X86-specific addressing mode.
def addr      : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, xor,
                                frameindex],
                               []>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
                                  [add, sub, mul, X86mul_imm, shl, or, xor,
                                   frameindex, X86WrapperRIP],
                                  []>;

def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, xor,
                                frameindex, X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [], [SDNPWantParent]>;

// A relocatable immediate is an operand that can be relocated by the linker to
// an immediate, such as a regular symbol in non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
                              [X86Wrapper], [], 0>;

// X86 specific condition code. These correspond to CondCode in
// X86InstrInfo.h. They must be kept in synch.
def X86_COND_O  : PatLeaf<(i8 0)>;
def X86_COND_NO : PatLeaf<(i8 1)>;
def X86_COND_B  : PatLeaf<(i8 2)>;  // alt. COND_C
def X86_COND_AE : PatLeaf<(i8 3)>;  // alt. COND_NC
def X86_COND_E  : PatLeaf<(i8 4)>;  // alt. COND_Z
def X86_COND_NE : PatLeaf<(i8 5)>;  // alt. COND_NZ
def X86_COND_BE : PatLeaf<(i8 6)>;  // alt. COND_NA
def X86_COND_A  : PatLeaf<(i8 7)>;  // alt. COND_NBE
def X86_COND_S  : PatLeaf<(i8 8)>;
def X86_COND_NS : PatLeaf<(i8 9)>;
def X86_COND_P  : PatLeaf<(i8 10)>; // alt. COND_PE
def X86_COND_NP : PatLeaf<(i8 11)>; // alt. COND_PO
def X86_COND_L  : PatLeaf<(i8 12)>; // alt. COND_NGE
def X86_COND_GE : PatLeaf<(i8 13)>; // alt. COND_NL
def X86_COND_LE : PatLeaf<(i8 14)>; // alt. COND_NG
def X86_COND_G  : PatLeaf<(i8 15)>; // alt. COND_NLE

def i16immSExt8   : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
def i32immSExt8   : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
def i64immSExt8   : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
def i64immSExt32  : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
def i64timmSExt32 : TImmLeaf<i64, [{ return isInt<32>(Imm); }]>;

def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
  return isSExtAbsoluteSymbolRef(32, N);
}]>;

// If we have multiple users of an immediate, it's much smaller to reuse
// the register, rather than encode the immediate in every instruction.
// This has the risk of increasing register pressure from stretched live
// ranges, however, the immediates should be trivial to rematerialize by
// the RA in the event of high register pressure.
// TODO : This is currently enabled for stores and binary ops. There are more
// cases for which this can be enabled, though this catches the bulk of the
// issues.
// TODO2 : This should really also be enabled under O2, but there's currently
// an issue with RA where we don't pull the constants into their users
// when we rematerialize them. I'll follow-up on enabling O2 after we fix that
// issue.
// TODO3 : This is currently limited to single basic blocks (DAG creation
// pulls block immediates to the top and merges them if necessary).
// Eventually, it would be nice to allow ConstantHoisting to merge constants
// globally for potentially added savings.
//
def imm_su : PatLeaf<(imm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def relocImm8_su : PatLeaf<(i8 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def relocImm16_su : PatLeaf<(i16 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def relocImm32_su : PatLeaf<(i32 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// unsigned field.
def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;

def i64immZExt32SExt8 : ImmLeaf<i64, [{
  return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
}]>;

// Helper fragments for loads.

// It's safe to fold a zextload/extload from i1 as a regular i8 load. The
// upper bits are guaranteed to be zero and we were going to emit a MOV8rm
// which might get folded during peephole anyway.
def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
         ExtType == ISD::ZEXTLOAD;
}]>;

// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType == ISD::NON_EXTLOAD)
    return true;
  if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
    return LD->getAlign() >= 2 && LD->isSimple();
  return false;
}]>;

def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType == ISD::NON_EXTLOAD)
    return true;
  if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
    return LD->getAlign() >= 4 && LD->isSimple();
  return false;
}]>;

def loadi64  : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
def loadf16  : PatFrag<(ops node:$ptr), (f16 (load node:$ptr))>;
def loadf32  : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
def loadf64  : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
def loadf80  : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
}]>;
def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  return Subtarget->hasSSEUnalignedMem() ||
         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
}]>;

def sextloadi16i8  : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
def sextloadi32i8  : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi8i1   : PatFrag<(ops node:$ptr), (i8  (zextloadi1 node:$ptr))>;
def zextloadi16i1  : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
def zextloadi32i1  : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
def zextloadi16i8  : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
def zextloadi32i8  : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi8i1   : PatFrag<(ops node:$ptr), (i8  (extloadi1 node:$ptr))>;
def extloadi16i1  : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
def extloadi32i1  : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
def extloadi16i8  : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
def extloadi32i8  : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
def extloadi64i1  : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8  : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;

// We can treat an i8/i16 extending load to i64 as a 32 bit load if it's known
// to be 4 byte aligned or better.
def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::EXTLOAD)
    return false;
  if (LD->getMemoryVT() == MVT::i32)
    return true;

  return LD->getAlign() >= 4 && LD->isSimple();
}]>;

// binary op with only one user
class binop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A, node:$B),
              (operator node:$A, node:$B), [{
  return N->hasOneUse();
}]>;

def add_su : binop_oneuse<add>;
def and_su : binop_oneuse<and>;
def srl_su : binop_oneuse<srl>;

// unary op with only one user
class unop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A),
              (operator node:$A), [{
  return N->hasOneUse();
}]>;

def ineg_su  : unop_oneuse<ineg>;
def trunc_su : unop_oneuse<trunc>;

def X86add_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86add_flag node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 1));
}]>;

def X86sub_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86sub_flag node:$lhs, node:$rhs), [{
  // Only use DEC if the result is used.
  return !SDValue(N, 0).use_empty() && hasNoCarryFlagUses(SDValue(N, 1));
}]>;

def X86testpat : PatFrag<(ops node:$lhs, node:$rhs),
                         (X86cmp (and_su node:$lhs, node:$rhs), 0)>;

def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
                           [(X86strict_fcmp node:$lhs, node:$rhs),
                            (X86fcmp node:$lhs, node:$rhs)]>;

// If PREFETCHWT1 is supported we want to use it for everything but T0.
def PrefetchWLevel : PatFrag<(ops), (i32 timm), [{
  return N->getSExtValue() == 3 || !Subtarget->hasPREFETCHWT1();
}]>;

// Use PREFETCHWT1 for NTA, T2, T1.
def PrefetchWT1Level : TImmLeaf<i32, [{
  return Imm < 3;
}]>;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def X86tcret_1reg : PatFrag<(ops node:$ptr, node:$off),
                            (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 1;
  const SDValue& BasePtr = cast<LoadSDNode>(N->getOperand(1))->getBasePtr();
  if (isa<FrameIndexSDNode>(BasePtr))
    NumRegs = 3;
  else if (BasePtr->getNumOperands() && isa<GlobalAddressSDNode>(BasePtr->getOperand(0)))
    NumRegs = 3;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ( NumRegs-- == 0))
      return false;
  return true;
}]>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;

// Any instruction that defines a 32-bit result leaves the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. AssertSext/AssertZext/AssertAlign aren't saying
// anything about the upper 32 bits, they're probably just qualifying a
// CopyFromReg. FREEZE may be coming from a truncate. Any other 32-bit
// operation will zero-extend up to 64 bits.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext &&
         N->getOpcode() != ISD::AssertAlign &&
         N->getOpcode() != ISD::FREEZE;
}]>;

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;

//===----------------------------------------------------------------------===//
// Pattern fragments to auto generate BMI instructions.
//===----------------------------------------------------------------------===//

def or_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                           (X86or_flag node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 1));
}]>;

def xor_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                            (X86xor_flag node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 1));
}]>;

def and_flag_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                            (X86and_flag node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 1));
}]>;

//===----------------------------------------------------------------------===//
// FPStack specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86Fld  : SDTypeProfile<1, 1, [SDTCisFP<0>,
                                      SDTCisPtrTy<1>]>;
def SDTX86Fst  : SDTypeProfile<0, 2, [SDTCisFP<0>,
                                      SDTCisPtrTy<1>]>;
def SDTX86Fild : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisPtrTy<1>]>;
def SDTX86Fist : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisPtrTy<1>]>;

def SDTX86CwdStore : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86CwdLoad  : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86FPEnv    : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;

def X86fp80_add : SDNode<"X86ISD::FP80_ADD", SDTFPBinOp, [SDNPCommutative]>;
def X86strict_fp80_add : SDNode<"X86ISD::STRICT_FP80_ADD", SDTFPBinOp,
                                [SDNPHasChain,SDNPCommutative]>;
def any_X86fp80_add : PatFrags<(ops node:$lhs, node:$rhs),
                               [(X86strict_fp80_add node:$lhs, node:$rhs),
                                (X86fp80_add node:$lhs, node:$rhs)]>;

def X86fld : SDNode<"X86ISD::FLD", SDTX86Fld,
                    [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fst : SDNode<"X86ISD::FST", SDTX86Fst,
                    [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fild : SDNode<"X86ISD::FILD", SDTX86Fild,
                     [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86fist : SDNode<"X86ISD::FIST", SDTX86Fist,
                     [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_to_mem : SDNode<"X86ISD::FP_TO_INT_IN_MEM", SDTX86Fst,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def X86fp_cwd_get16 : SDNode<"X86ISD::FNSTCW16m", SDTX86CwdStore,
                             [SDNPHasChain, SDNPMayStore, SDNPSideEffect,
                              SDNPMemOperand]>;
def X86fp_cwd_set16 : SDNode<"X86ISD::FLDCW16m", SDTX86CwdLoad,
                             [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                              SDNPMemOperand]>;
def X86fpenv_get : SDNode<"X86ISD::FNSTENVm", SDTX86FPEnv,
                          [SDNPHasChain, SDNPMayStore, SDNPSideEffect,
                           SDNPMemOperand]>;
def X86fpenv_set : SDNode<"X86ISD::FLDENVm", SDTX86FPEnv,
                          [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                           SDNPMemOperand]>;

def X86fstf32 : PatFrag<(ops node:$val, node:$ptr),
                        (X86fst node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def X86fstf64 : PatFrag<(ops node:$val, node:$ptr),
                        (X86fst node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
def X86fstf80 : PatFrag<(ops node:$val, node:$ptr),
                        (X86fst node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f80;
}]>;

def X86fldf32 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def X86fldf64 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
def X86fldf80 : PatFrag<(ops node:$ptr), (X86fld node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::f80;
}]>;

def X86fild16 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def X86fild32 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def X86fild64 : PatFrag<(ops node:$ptr), (X86fild node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def X86fist32 : PatFrag<(ops node:$val, node:$ptr),
                        (X86fist node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def X86fist64 : PatFrag<(ops node:$val, node:$ptr),
                        (X86fist node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def X86fp_to_i16mem : PatFrag<(ops node:$val, node:$ptr),
                              (X86fp_to_mem node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def X86fp_to_i32mem : PatFrag<(ops node:$val, node:$ptr),
                              (X86fp_to_mem node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def X86fp_to_i64mem : PatFrag<(ops node:$val, node:$ptr),
                              (X86fp_to_mem node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

//===----------------------------------------------------------------------===//
// FPStack pattern fragments
//===----------------------------------------------------------------------===//

def fpimm0 : FPImmLeaf<fAny, [{
  return Imm.isExactlyValue(+0.0);
}]>;

def fpimmneg0 : FPImmLeaf<fAny, [{
  return Imm.isExactlyValue(-0.0);
}]>;

def fpimm1 : FPImmLeaf<fAny, [{
  return Imm.isExactlyValue(+1.0);
}]>;

def fpimmneg1 : FPImmLeaf<fAny, [{
  return Imm.isExactlyValue(-1.0);
}]>;