//===-- X86InstrInfo.td - Main X86 Instruction Properties --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 properties of the instructions which are needed
// for code generation, machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// X86 specific DAG Nodes.
//

def SDTX86CmpTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                         SDTCisSameAs<1, 2>]>;
def SDTX86FCmp : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>]>;

def SDTX86Cmov : SDTypeProfile<1, 4,
                               [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>,
                                SDTCisVT<3, i8>, SDTCisVT<4, i32>]>;

// Unary and binary operator instructions that set EFLAGS as a side-effect.
def SDTUnaryArithWithFlags : SDTypeProfile<2, 1,
                                           [SDTCisSameAs<0, 2>,
                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                            [SDTCisSameAs<0, 2>,
                                             SDTCisSameAs<0, 3>,
                                             SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;
// RES1, RES2, FLAGS = op LHS, RHS
def SDT2ResultBinaryArithWithFlags : SDTypeProfile<3, 2,
                                                   [SDTCisSameAs<0, 1>,
                                                    SDTCisSameAs<0, 2>,
                                                    SDTCisSameAs<0, 3>,
                                                    SDTCisInt<0>, SDTCisVT<1, i32>]>;
def SDTX86BrCond : SDTypeProfile<0, 3,
                                 [SDTCisVT<0, OtherVT>,
                                  SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

def SDTX86SetCC : SDTypeProfile<1, 2,
                                [SDTCisVT<0, i8>,
                                 SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;
def SDTX86SetCC_C : SDTypeProfile<1, 2,
                                  [SDTCisInt<0>,
                                   SDTCisVT<1, i8>, SDTCisVT<2, i32>]>;

def SDTX86sahf : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i8>]>;

def SDTX86rdrand : SDTypeProfile<2, 0, [SDTCisInt<0>, SDTCisVT<1, i32>]>;

def SDTX86rdpkru : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def SDTX86wrpkru : SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                        SDTCisVT<2, i32>]>;

def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>,
                                     SDTCisVT<2, i8>]>;
def SDTX86cas8pair : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
def SDTX86cas16pair : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i64>]>;

def SDTLockBinaryArithWithFlags : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                                       SDTCisPtrTy<1>,
                                                       SDTCisInt<2>]>;

def SDTLockUnaryArithWithFlags : SDTypeProfile<1, 1, [SDTCisVT<0, i32>,
                                                      SDTCisPtrTy<1>]>;

def SDTX86Ret : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;

def SDT_X86CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                          SDTCisVT<1, i32>]>;
def SDT_X86CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                      SDTCisVT<1, i32>]>;

def SDT_X86Call : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86NtBrind : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;

def SDT_X86VASTART_SAVE_XMM_REGS : SDTypeProfile<0, -1, [SDTCisVT<0, i8>,
                                                         SDTCisPtrTy<1>]>;

def SDT_X86VAARG : SDTypeProfile<1, -1, [SDTCisPtrTy<0>,
                                         SDTCisPtrTy<1>,
                                         SDTCisVT<2, i32>,
                                         SDTCisVT<3, i8>,
                                         SDTCisVT<4, i32>]>;

def SDTX86RepStr : SDTypeProfile<0, 1, [SDTCisVT<0, OtherVT>]>;

def SDTX86Void : SDTypeProfile<0, 0, []>;

def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSBASEADDR : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TLSCALL : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86DYN_ALLOCA : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;

def SDT_X86SEG_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86PROBED_ALLOCA : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>;

def SDT_X86ENQCMD : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                         SDTCisPtrTy<1>, SDTCisSameAs<1, 2>]>;

def SDT_X86AESENCDECKL : SDTypeProfile<2, 2, [SDTCisVT<0, v2i64>,
                                              SDTCisVT<1, i32>,
                                              SDTCisVT<2, v2i64>,
                                              SDTCisPtrTy<3>]>;

def SDTX86Cmpccxadd : SDTypeProfile<1, 4, [SDTCisSameAs<0, 2>,
                                           SDTCisPtrTy<1>, SDTCisSameAs<2, 3>,
                                           SDTCisVT<4, i8>]>;

def X86MFence : SDNode<"X86ISD::MFENCE", SDTNone, [SDNPHasChain]>;


def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>;
def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>;
def X86fshl : SDNode<"X86ISD::FSHL", SDTIntShiftDOp>;
def X86fshr : SDNode<"X86ISD::FSHR", SDTIntShiftDOp>;

def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
def X86fcmp : SDNode<"X86ISD::FCMP", SDTX86FCmp>;
def X86strict_fcmp : SDNode<"X86ISD::STRICT_FCMP", SDTX86FCmp, [SDNPHasChain]>;
def X86strict_fcmps : SDNode<"X86ISD::STRICT_FCMPS", SDTX86FCmp, [SDNPHasChain]>;
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;

def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond,
                       [SDNPHasChain]>;
def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>;
def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>;

def X86rdrand : SDNode<"X86ISD::RDRAND", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdseed : SDNode<"X86ISD::RDSEED", SDTX86rdrand,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86rdpkru : SDNode<"X86ISD::RDPKRU", SDTX86rdpkru,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86wrpkru : SDNode<"X86ISD::WRPKRU", SDTX86wrpkru,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas,
                    [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                     SDNPMayLoad, SDNPMemOperand]>;
def X86cas8 : SDNode<"X86ISD::LCMPXCHG8_DAG", SDTX86cas8pair,
                     [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                      SDNPMayLoad, SDNPMemOperand]>;
def X86cas16 : SDNode<"X86ISD::LCMPXCHG16_DAG", SDTX86cas16pair,
                      [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                       SDNPMayLoad, SDNPMemOperand]>;

def X86retglue : SDNode<"X86ISD::RET_GLUE", SDTX86Ret,
                        [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def X86iret : SDNode<"X86ISD::IRET", SDTX86Ret,
                     [SDNPHasChain, SDNPOptInGlue]>;

def X86vastart_save_xmm_regs :
                 SDNode<"X86ISD::VASTART_SAVE_XMM_REGS",
                        SDT_X86VASTART_SAVE_XMM_REGS,
                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPVariadic]>;
def X86vaarg64 :
                 SDNode<"X86ISD::VAARG_64", SDT_X86VAARG,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;
def X86vaargx32 :
                 SDNode<"X86ISD::VAARG_X32", SDT_X86VAARG,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;
def X86callseq_start :
                 SDNode<"ISD::CALLSEQ_START", SDT_X86CallSeqStart,
                        [SDNPHasChain, SDNPOutGlue]>;
def X86callseq_end :
                 SDNode<"ISD::CALLSEQ_END", SDT_X86CallSeqEnd,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86call : SDNode<"X86ISD::CALL", SDT_X86Call,
                     [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                      SDNPVariadic]>;

def X86call_rvmarker : SDNode<"X86ISD::CALL_RVMARKER", SDT_X86Call,
                              [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                               SDNPVariadic]>;


def X86NoTrackCall : SDNode<"X86ISD::NT_CALL", SDT_X86Call,
                            [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                             SDNPVariadic]>;
def X86NoTrackBrind : SDNode<"X86ISD::NT_BRIND", SDT_X86NtBrind,
                             [SDNPHasChain]>;

def X86rep_stos: SDNode<"X86ISD::REP_STOS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore]>;
def X86rep_movs: SDNode<"X86ISD::REP_MOVS", SDTX86RepStr,
                        [SDNPHasChain, SDNPInGlue, SDNPOutGlue, SDNPMayStore,
                         SDNPMayLoad]>;

def X86Wrapper : SDNode<"X86ISD::Wrapper", SDTX86Wrapper>;
def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;

def X86RecoverFrameAlloc : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;

def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
                            [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                      [SDNPHasChain]>;

def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP",
                               SDTypeProfile<1, 1, [SDTCisInt<0>,
                                                    SDTCisPtrTy<1>]>,
                               [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_setup_dispatch : SDNode<"X86ISD::EH_SJLJ_SETUP_DISPATCH",
                                       SDTypeProfile<0, 0, []>,
                                       [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def X86add_flag : SDNode<"X86ISD::ADD", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86sub_flag : SDNode<"X86ISD::SUB", SDTBinaryArithWithFlags>;
def X86smul_flag : SDNode<"X86ISD::SMUL", SDTBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86umul_flag : SDNode<"X86ISD::UMUL", SDT2ResultBinaryArithWithFlags,
                          [SDNPCommutative]>;
def X86adc_flag : SDNode<"X86ISD::ADC", SDTBinaryArithWithFlagsInOut>;
def X86sbb_flag : SDNode<"X86ISD::SBB", SDTBinaryArithWithFlagsInOut>;

def X86or_flag : SDNode<"X86ISD::OR", SDTBinaryArithWithFlags,
                        [SDNPCommutative]>;
def X86xor_flag : SDNode<"X86ISD::XOR", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;
def X86and_flag : SDNode<"X86ISD::AND", SDTBinaryArithWithFlags,
                         [SDNPCommutative]>;

def X86lock_add : SDNode<"X86ISD::LADD", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_sub : SDNode<"X86ISD::LSUB", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_or : SDNode<"X86ISD::LOR", SDTLockBinaryArithWithFlags,
                        [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                         SDNPMemOperand]>;
def X86lock_xor : SDNode<"X86ISD::LXOR", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;
def X86lock_and : SDNode<"X86ISD::LAND", SDTLockBinaryArithWithFlags,
                         [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
                          SDNPMemOperand]>;

def X86bextr : SDNode<"X86ISD::BEXTR", SDTIntBinOp>;
def X86bextri : SDNode<"X86ISD::BEXTRI", SDTIntBinOp>;

def X86bzhi : SDNode<"X86ISD::BZHI", SDTIntBinOp>;

def X86pdep : SDNode<"X86ISD::PDEP", SDTIntBinOp>;
def X86pext : SDNode<"X86ISD::PEXT", SDTIntBinOp>;

def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;

def X86DynAlloca : SDNode<"X86ISD::DYN_ALLOCA", SDT_X86DYN_ALLOCA,
                          [SDNPHasChain, SDNPOutGlue]>;

def X86SegAlloca : SDNode<"X86ISD::SEG_ALLOCA", SDT_X86SEG_ALLOCA,
                          [SDNPHasChain]>;

def X86ProbedAlloca : SDNode<"X86ISD::PROBED_ALLOCA", SDT_X86PROBED_ALLOCA,
                             [SDNPHasChain]>;

def X86TLSCall : SDNode<"X86ISD::TLSCALL", SDT_X86TLSCALL,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

def X86lwpins : SDNode<"X86ISD::LWPINS",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPSideEffect]>;

def X86umwait : SDNode<"X86ISD::UMWAIT",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86tpause : SDNode<"X86ISD::TPAUSE",
                       SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                            SDTCisVT<2, i32>, SDTCisVT<3, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86enqcmd : SDNode<"X86ISD::ENQCMD", SDT_X86ENQCMD,
                       [SDNPHasChain, SDNPSideEffect]>;
def X86enqcmds : SDNode<"X86ISD::ENQCMDS", SDT_X86ENQCMD,
                        [SDNPHasChain, SDNPSideEffect]>;
def X86testui : SDNode<"X86ISD::TESTUI",
                       SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>,
                       [SDNPHasChain, SDNPSideEffect]>;

def X86aesenc128kl : SDNode<"X86ISD::AESENC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec128kl : SDNode<"X86ISD::AESDEC128KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesenc256kl : SDNode<"X86ISD::AESENC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;
def X86aesdec256kl : SDNode<"X86ISD::AESDEC256KL", SDT_X86AESENCDECKL,
                            [SDNPHasChain, SDNPMayLoad, SDNPSideEffect,
                             SDNPMemOperand]>;

def X86cmpccxadd : SDNode<"X86ISD::CMPCCXADD", SDTX86Cmpccxadd,
                          [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                           SDNPMemOperand]>;

//===----------------------------------------------------------------------===//
// X86 Operand Definitions.
//

// A version of ptr_rc which excludes SP, ESP, and RSP. This is used for
// the index operand of an address, to conform to x86 encoding restrictions.
def ptr_rc_nosp : PointerLikeRegClass<1>;

// *mem - Operand definitions for the funky X86 addressing mode operands.
//
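// Each *mem operand below expands to the same five MI operands, which together
// encode one x86 memory reference: base register, scale immediate, index
// register, displacement, and segment register (see the MIOperandInfo of
// X86MemOperand further down).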
def X86MemAsmOperand : AsmOperandClass {
  let Name = "Mem";
}
let RenderMethod = "addMemOperands", SuperClasses = [X86MemAsmOperand] in {
  def X86Mem8AsmOperand : AsmOperandClass { let Name = "Mem8"; }
  def X86Mem16AsmOperand : AsmOperandClass { let Name = "Mem16"; }
  def X86Mem32AsmOperand : AsmOperandClass { let Name = "Mem32"; }
  def X86Mem64AsmOperand : AsmOperandClass { let Name = "Mem64"; }
  def X86Mem80AsmOperand : AsmOperandClass { let Name = "Mem80"; }
  def X86Mem128AsmOperand : AsmOperandClass { let Name = "Mem128"; }
  def X86Mem256AsmOperand : AsmOperandClass { let Name = "Mem256"; }
  def X86Mem512AsmOperand : AsmOperandClass { let Name = "Mem512"; }
  // Gather mem operands
  def X86Mem64_RC128Operand : AsmOperandClass { let Name = "Mem64_RC128"; }
  def X86Mem128_RC128Operand : AsmOperandClass { let Name = "Mem128_RC128"; }
  def X86Mem256_RC128Operand : AsmOperandClass { let Name = "Mem256_RC128"; }
  def X86Mem128_RC256Operand : AsmOperandClass { let Name = "Mem128_RC256"; }
  def X86Mem256_RC256Operand : AsmOperandClass { let Name = "Mem256_RC256"; }

  def X86Mem64_RC128XOperand : AsmOperandClass { let Name = "Mem64_RC128X"; }
  def X86Mem128_RC128XOperand : AsmOperandClass { let Name = "Mem128_RC128X"; }
  def X86Mem256_RC128XOperand : AsmOperandClass { let Name = "Mem256_RC128X"; }
  def X86Mem128_RC256XOperand : AsmOperandClass { let Name = "Mem128_RC256X"; }
  def X86Mem256_RC256XOperand : AsmOperandClass { let Name = "Mem256_RC256X"; }
  def X86Mem512_RC256XOperand : AsmOperandClass { let Name = "Mem512_RC256X"; }
  def X86Mem256_RC512Operand : AsmOperandClass { let Name = "Mem256_RC512"; }
  def X86Mem512_RC512Operand : AsmOperandClass { let Name = "Mem512_RC512"; }
  def X86Mem512_GR16Operand : AsmOperandClass { let Name = "Mem512_GR16"; }
  def X86Mem512_GR32Operand : AsmOperandClass { let Name = "Mem512_GR32"; }
  def X86Mem512_GR64Operand : AsmOperandClass { let Name = "Mem512_GR64"; }

  def X86SibMemOperand : AsmOperandClass { let Name = "SibMem"; }
}

def X86AbsMemAsmOperand : AsmOperandClass {
  let Name = "AbsMem";
  let SuperClasses = [X86MemAsmOperand];
}

class X86MemOperand<string printMethod,
                    AsmOperandClass parserMatchClass = X86MemAsmOperand,
                    int size = 0> : Operand<iPTR> {
  let PrintMethod = printMethod;
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, SEGMENT_REG);
  let ParserMatchClass = parserMatchClass;
  let OperandType = "OPERAND_MEMORY";
  int Size = size;
}

// Gather mem operands
class X86VMemOperand<RegisterClass RC, string printMethod,
                     AsmOperandClass parserMatchClass, int size = 0>
    : X86MemOperand<printMethod, parserMatchClass, size> {
  let MIOperandInfo = (ops ptr_rc, i8imm, RC, i32imm, SEGMENT_REG);
}

def anymem : X86MemOperand<"printMemReference">;
def X86any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
                           [(X86strict_fcmp node:$lhs, node:$rhs),
                            (X86fcmp node:$lhs, node:$rhs)]>;

// FIXME: Right now we allow any size during parsing, but we might want to
// restrict to only unsized memory.
def opaquemem : X86MemOperand<"printMemReference">;

def sibmem: X86MemOperand<"printMemReference", X86SibMemOperand>;

def i8mem  : X86MemOperand<"printbytemem",   X86Mem8AsmOperand, 8>;
def i16mem : X86MemOperand<"printwordmem",   X86Mem16AsmOperand, 16>;
def i32mem : X86MemOperand<"printdwordmem",  X86Mem32AsmOperand, 32>;
def i64mem : X86MemOperand<"printqwordmem",  X86Mem64AsmOperand, 64>;
def i128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand, 128>;
def i256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand, 256>;
def i512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand, 512>;
def f16mem : X86MemOperand<"printwordmem",   X86Mem16AsmOperand, 16>;
def f32mem : X86MemOperand<"printdwordmem",  X86Mem32AsmOperand, 32>;
def f64mem : X86MemOperand<"printqwordmem",  X86Mem64AsmOperand, 64>;
def f80mem : X86MemOperand<"printtbytemem",  X86Mem80AsmOperand, 80>;
def f128mem : X86MemOperand<"printxmmwordmem", X86Mem128AsmOperand, 128>;
def f256mem : X86MemOperand<"printymmwordmem", X86Mem256AsmOperand, 256>;
def f512mem : X86MemOperand<"printzmmwordmem", X86Mem512AsmOperand, 512>;

// 32/64 mode specific mem operands
def i512mem_GR16 : X86MemOperand<"printzmmwordmem", X86Mem512_GR16Operand, 512>;
def i512mem_GR32 : X86MemOperand<"printzmmwordmem", X86Mem512_GR32Operand, 512>;
def i512mem_GR64 : X86MemOperand<"printzmmwordmem", X86Mem512_GR64Operand, 512>;

// Gather mem operands
def vx64mem  : X86VMemOperand<VR128,  "printqwordmem",   X86Mem64_RC128Operand, 64>;
def vx128mem : X86VMemOperand<VR128,  "printxmmwordmem", X86Mem128_RC128Operand, 128>;
def vx256mem : X86VMemOperand<VR128,  "printymmwordmem", X86Mem256_RC128Operand, 256>;
def vy128mem : X86VMemOperand<VR256,  "printxmmwordmem", X86Mem128_RC256Operand, 128>;
def vy256mem : X86VMemOperand<VR256,  "printymmwordmem", X86Mem256_RC256Operand, 256>;

def vx64xmem  : X86VMemOperand<VR128X, "printqwordmem",   X86Mem64_RC128XOperand, 64>;
def vx128xmem : X86VMemOperand<VR128X, "printxmmwordmem", X86Mem128_RC128XOperand, 128>;
def vx256xmem : X86VMemOperand<VR128X, "printymmwordmem", X86Mem256_RC128XOperand, 256>;
def vy128xmem : X86VMemOperand<VR256X, "printxmmwordmem", X86Mem128_RC256XOperand, 128>;
def vy256xmem : X86VMemOperand<VR256X, "printymmwordmem", X86Mem256_RC256XOperand, 256>;
def vy512xmem : X86VMemOperand<VR256X, "printzmmwordmem", X86Mem512_RC256XOperand, 512>;
def vz256mem  : X86VMemOperand<VR512,  "printymmwordmem", X86Mem256_RC512Operand, 256>;
def vz512mem  : X86VMemOperand<VR512,  "printzmmwordmem", X86Mem512_RC512Operand, 512>;

// A version of i8mem for use on x86-64 and x32 that uses a NOREX GPR instead
// of a plain GPR, so that it doesn't potentially require a REX prefix.
def ptr_rc_norex : PointerLikeRegClass<2>;
def ptr_rc_norex_nosp : PointerLikeRegClass<3>;

def i8mem_NOREX : X86MemOperand<"printbytemem", X86Mem8AsmOperand, 8> {
  let MIOperandInfo = (ops ptr_rc_norex, i8imm, ptr_rc_norex_nosp, i32imm,
                       SEGMENT_REG);
}

// GPRs available for tailcall.
// It represents GR32_TC, GR64_TC or GR64_TCW64.
def ptr_rc_tailcall : PointerLikeRegClass<4>;

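// The integer argument to each PointerLikeRegClass here is an opaque kind;
// X86RegisterInfo::getPointerRegClass() is expected to map it to a concrete
// register class (plain, no-SP, no-REX, or tail-call GPRs) once the pointer
// width of the subtarget is known.
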
// Special i32mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i32mem_TC : X86MemOperand<"printdwordmem", X86Mem32AsmOperand, 32> {
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm, ptr_rc_tailcall,
                       i32imm, SEGMENT_REG);
}

// Special i64mem for addresses of load folding tail calls. These are not
// allowed to use callee-saved registers since they must be scheduled
// after callee-saved registers are popped.
def i64mem_TC : X86MemOperand<"printqwordmem", X86Mem64AsmOperand, 64> {
  let MIOperandInfo = (ops ptr_rc_tailcall, i8imm,
                       ptr_rc_tailcall, i32imm, SEGMENT_REG);
}

// Special parser to detect 16-bit mode to select 16-bit displacement.
def X86AbsMem16AsmOperand : AsmOperandClass {
  let Name = "AbsMem16";
  let RenderMethod = "addAbsMemOperands";
  let SuperClasses = [X86AbsMemAsmOperand];
}

// Branch targets print as pc-relative values.
class BranchTargetOperand<ValueType ty> : Operand<ty> {
  let OperandType = "OPERAND_PCREL";
  let PrintMethod = "printPCRelImm";
  let ParserMatchClass = X86AbsMemAsmOperand;
}

def i32imm_brtarget : BranchTargetOperand<i32>;
def i16imm_brtarget : BranchTargetOperand<i16>;

// 64-bits but only 32 bits are significant, and those bits are treated as being
// pc relative.
def i64i32imm_brtarget : BranchTargetOperand<i64>;

def brtarget : BranchTargetOperand<OtherVT>;
def brtarget8 : BranchTargetOperand<OtherVT>;
def brtarget16 : BranchTargetOperand<OtherVT> {
  let ParserMatchClass = X86AbsMem16AsmOperand;
}
def brtarget32 : BranchTargetOperand<OtherVT>;

let RenderMethod = "addSrcIdxOperands" in {
  def X86SrcIdx8Operand : AsmOperandClass {
    let Name = "SrcIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86SrcIdx16Operand : AsmOperandClass {
    let Name = "SrcIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86SrcIdx32Operand : AsmOperandClass {
    let Name = "SrcIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86SrcIdx64Operand : AsmOperandClass {
    let Name = "SrcIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addSrcIdxOperands"

let RenderMethod = "addDstIdxOperands" in {
  def X86DstIdx8Operand : AsmOperandClass {
    let Name = "DstIdx8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86DstIdx16Operand : AsmOperandClass {
    let Name = "DstIdx16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86DstIdx32Operand : AsmOperandClass {
    let Name = "DstIdx32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86DstIdx64Operand : AsmOperandClass {
    let Name = "DstIdx64";
    let SuperClasses = [X86Mem64AsmOperand];
  }
} // RenderMethod = "addDstIdxOperands"

let RenderMethod = "addMemOffsOperands" in {
  def X86MemOffs16_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs16_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs16_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs16_32";
    let SuperClasses = [X86Mem32AsmOperand];
  }
  def X86MemOffs32_8AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_8";
    let SuperClasses = [X86Mem8AsmOperand];
  }
  def X86MemOffs32_16AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_16";
    let SuperClasses = [X86Mem16AsmOperand];
  }
  def X86MemOffs32_32AsmOperand : AsmOperandClass {
    let Name = "MemOffs32_32";
"MemOffs32_32"; 578 let SuperClasses = [X86Mem32AsmOperand]; 579 } 580 def X86MemOffs32_64AsmOperand : AsmOperandClass { 581 let Name = "MemOffs32_64"; 582 let SuperClasses = [X86Mem64AsmOperand]; 583 } 584 def X86MemOffs64_8AsmOperand : AsmOperandClass { 585 let Name = "MemOffs64_8"; 586 let SuperClasses = [X86Mem8AsmOperand]; 587 } 588 def X86MemOffs64_16AsmOperand : AsmOperandClass { 589 let Name = "MemOffs64_16"; 590 let SuperClasses = [X86Mem16AsmOperand]; 591 } 592 def X86MemOffs64_32AsmOperand : AsmOperandClass { 593 let Name = "MemOffs64_32"; 594 let SuperClasses = [X86Mem32AsmOperand]; 595 } 596 def X86MemOffs64_64AsmOperand : AsmOperandClass { 597 let Name = "MemOffs64_64"; 598 let SuperClasses = [X86Mem64AsmOperand]; 599 } 600} // RenderMethod = "addMemOffsOperands" 601 602class X86SrcIdxOperand<string printMethod, AsmOperandClass parserMatchClass> 603 : X86MemOperand<printMethod, parserMatchClass> { 604 let MIOperandInfo = (ops ptr_rc, SEGMENT_REG); 605} 606 607class X86DstIdxOperand<string printMethod, AsmOperandClass parserMatchClass> 608 : X86MemOperand<printMethod, parserMatchClass> { 609 let MIOperandInfo = (ops ptr_rc); 610} 611 612def srcidx8 : X86SrcIdxOperand<"printSrcIdx8", X86SrcIdx8Operand>; 613def srcidx16 : X86SrcIdxOperand<"printSrcIdx16", X86SrcIdx16Operand>; 614def srcidx32 : X86SrcIdxOperand<"printSrcIdx32", X86SrcIdx32Operand>; 615def srcidx64 : X86SrcIdxOperand<"printSrcIdx64", X86SrcIdx64Operand>; 616def dstidx8 : X86DstIdxOperand<"printDstIdx8", X86DstIdx8Operand>; 617def dstidx16 : X86DstIdxOperand<"printDstIdx16", X86DstIdx16Operand>; 618def dstidx32 : X86DstIdxOperand<"printDstIdx32", X86DstIdx32Operand>; 619def dstidx64 : X86DstIdxOperand<"printDstIdx64", X86DstIdx64Operand>; 620 621class X86MemOffsOperand<Operand immOperand, string printMethod, 622 AsmOperandClass parserMatchClass> 623 : X86MemOperand<printMethod, parserMatchClass> { 624 let MIOperandInfo = (ops immOperand, SEGMENT_REG); 625} 626 627def offset16_8 : X86MemOffsOperand<i16imm, "printMemOffs8", 628 X86MemOffs16_8AsmOperand>; 629def offset16_16 : X86MemOffsOperand<i16imm, "printMemOffs16", 630 X86MemOffs16_16AsmOperand>; 631def offset16_32 : X86MemOffsOperand<i16imm, "printMemOffs32", 632 X86MemOffs16_32AsmOperand>; 633def offset32_8 : X86MemOffsOperand<i32imm, "printMemOffs8", 634 X86MemOffs32_8AsmOperand>; 635def offset32_16 : X86MemOffsOperand<i32imm, "printMemOffs16", 636 X86MemOffs32_16AsmOperand>; 637def offset32_32 : X86MemOffsOperand<i32imm, "printMemOffs32", 638 X86MemOffs32_32AsmOperand>; 639def offset32_64 : X86MemOffsOperand<i32imm, "printMemOffs64", 640 X86MemOffs32_64AsmOperand>; 641def offset64_8 : X86MemOffsOperand<i64imm, "printMemOffs8", 642 X86MemOffs64_8AsmOperand>; 643def offset64_16 : X86MemOffsOperand<i64imm, "printMemOffs16", 644 X86MemOffs64_16AsmOperand>; 645def offset64_32 : X86MemOffsOperand<i64imm, "printMemOffs32", 646 X86MemOffs64_32AsmOperand>; 647def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64", 648 X86MemOffs64_64AsmOperand>; 649 650def ccode : Operand<i8> { 651 let PrintMethod = "printCondCode"; 652 let OperandNamespace = "X86"; 653 let OperandType = "OPERAND_COND_CODE"; 654} 655 656class ImmSExtAsmOperandClass : AsmOperandClass { 657 let SuperClasses = [ImmAsmOperand]; 658 let RenderMethod = "addImmOperands"; 659} 660 661def X86GR32orGR64AsmOperand : AsmOperandClass { 662 let Name = "GR32orGR64"; 663} 664def GR32orGR64 : RegisterOperand<GR32> { 665 let ParserMatchClass = X86GR32orGR64AsmOperand; 666} 667 668def X86GR16orGR32orGR64AsmOperand 
  let Name = "GR16orGR32orGR64";
}
def GR16orGR32orGR64 : RegisterOperand<GR16> {
  let ParserMatchClass = X86GR16orGR32orGR64AsmOperand;
}

def AVX512RCOperand : AsmOperandClass {
  let Name = "AVX512RC";
}
def AVX512RC : Operand<i32> {
  let PrintMethod = "printRoundingControl";
  let OperandNamespace = "X86";
  let OperandType = "OPERAND_ROUNDING_CONTROL";
  let ParserMatchClass = AVX512RCOperand;
}

// Sign-extended immediate classes. We don't need to define the full lattice
// here because there is no instruction with an ambiguity between ImmSExti64i32
// and ImmSExti32i8.
//
// The strange ranges come from the fact that the assembler always works with
// 64-bit immediates, but for a 16-bit target value we want to accept both "-1"
// (which will be a -1ULL), and "0xFFFF" (-1 in 16-bits).

// [0, 0x7FFFFFFF] |
// [0xFFFFFFFF80000000, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i32AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i32";
}

// [0, 0x0000007F] | [0x000000000000FF80, 0x000000000000FFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti16i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti16i8";
  let SuperClasses = [ImmSExti64i32AsmOperand];
}

// [0, 0x0000007F] | [0x00000000FFFFFF80, 0x00000000FFFFFFFF] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti32i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti32i8";
}

// [0, 0x0000007F] |
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmSExti64i8AsmOperand : ImmSExtAsmOperandClass {
  let Name = "ImmSExti64i8";
  let SuperClasses = [ImmSExti16i8AsmOperand, ImmSExti32i8AsmOperand,
                      ImmSExti64i32AsmOperand];
}

// 4-bit immediate used by some XOP instructions
// [0, 0xF]
def ImmUnsignedi4AsmOperand : AsmOperandClass {
  let Name = "ImmUnsignedi4";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidImmUnsignedi4";
}

// Unsigned immediate used by SSE/AVX instructions
// [0, 0xFF]
// [0xFFFFFFFFFFFFFF80, 0xFFFFFFFFFFFFFFFF]
def ImmUnsignedi8AsmOperand : AsmOperandClass {
  let Name = "ImmUnsignedi8";
  let RenderMethod = "addImmOperands";
}

// A couple of more descriptive operand definitions.
// 16-bits but only 8 bits are significant.
def i16i8imm : Operand<i16> {
  let ParserMatchClass = ImmSExti16i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}
// 32-bits but only 8 bits are significant.
def i32i8imm : Operand<i32> {
  let ParserMatchClass = ImmSExti32i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 32 bits are significant.
def i64i32imm : Operand<i64> {
  let ParserMatchClass = ImmSExti64i32AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bits but only 8 bits are significant.
def i64i8imm : Operand<i64> {
  let ParserMatchClass = ImmSExti64i8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// Unsigned 4-bit immediate used by some XOP instructions.
def u4imm : Operand<i8> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi4AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// Unsigned 8-bit immediate used by SSE/AVX instructions.
def u8imm : Operand<i8> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 16-bit immediate but only 8-bits are significant and they are unsigned.
// Used by BT instructions.
def i16u8imm : Operand<i16> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 32-bit immediate but only 8-bits are significant and they are unsigned.
// Used by some SSE/AVX instructions that use intrinsics.
def i32u8imm : Operand<i32> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

// 64-bit immediate but only 8-bits are significant and they are unsigned.
// Used by BT instructions.
def i64u8imm : Operand<i64> {
  let PrintMethod = "printU8Imm";
  let ParserMatchClass = ImmUnsignedi8AsmOperand;
  let OperandType = "OPERAND_IMMEDIATE";
}

def lea64_32mem : Operand<i32> {
  let PrintMethod = "printMemReference";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}

// Memory operands that use 64-bit pointers in both ILP32 and LP64.
def lea64mem : Operand<i64> {
  let PrintMethod = "printMemReference";
  let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
  let ParserMatchClass = X86MemAsmOperand;
}

let RenderMethod = "addMaskPairOperands" in {
  def VK1PairAsmOperand : AsmOperandClass { let Name = "VK1Pair"; }
  def VK2PairAsmOperand : AsmOperandClass { let Name = "VK2Pair"; }
  def VK4PairAsmOperand : AsmOperandClass { let Name = "VK4Pair"; }
  def VK8PairAsmOperand : AsmOperandClass { let Name = "VK8Pair"; }
  def VK16PairAsmOperand : AsmOperandClass { let Name = "VK16Pair"; }
}

def VK1Pair : RegisterOperand<VK1PAIR, "printVKPair"> {
  let ParserMatchClass = VK1PairAsmOperand;
}

def VK2Pair : RegisterOperand<VK2PAIR, "printVKPair"> {
  let ParserMatchClass = VK2PairAsmOperand;
}

def VK4Pair : RegisterOperand<VK4PAIR, "printVKPair"> {
  let ParserMatchClass = VK4PairAsmOperand;
}

def VK8Pair : RegisterOperand<VK8PAIR, "printVKPair"> {
  let ParserMatchClass = VK8PairAsmOperand;
}

def VK16Pair : RegisterOperand<VK16PAIR, "printVKPair"> {
  let ParserMatchClass = VK16PairAsmOperand;
}

//===----------------------------------------------------------------------===//
// X86 Complex Pattern Definitions.
//

// Define X86-specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "selectAddr", [], [SDNPWantParent]>;
def lea32addr : ComplexPattern<i32, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, xor, frameindex],
                               []>;
// In 64-bit mode 32-bit LEAs can use RIP-relative addressing.
def lea64_32addr : ComplexPattern<i32, 5, "selectLEA64_32Addr",
                                  [add, sub, mul, X86mul_imm, shl, or, xor,
                                   frameindex, X86WrapperRIP],
                                  []>;

def tls32addr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls32baseaddr : ComplexPattern<i32, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def lea64addr : ComplexPattern<i64, 5, "selectLEAAddr",
                               [add, sub, mul, X86mul_imm, shl, or, xor, frameindex,
                                X86WrapperRIP], []>;

def tls64addr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                               [tglobaltlsaddr], []>;

def tls64baseaddr : ComplexPattern<i64, 5, "selectTLSADDRAddr",
                                   [tglobaltlsaddr], []>;

def vectoraddr : ComplexPattern<iPTR, 5, "selectVectorAddr", [],[SDNPWantParent]>;

// A relocatable immediate is an operand that can be relocated by the linker to
// an immediate, such as a regular symbol in non-PIC code.
def relocImm : ComplexPattern<iAny, 1, "selectRelocImm",
                              [X86Wrapper], [], 0>;

//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
def TruePredicate : Predicate<"true">;

def HasCMOV : Predicate<"Subtarget->canUseCMOV()">;
def NoCMOV : Predicate<"!Subtarget->canUseCMOV()">;

def HasNOPL : Predicate<"Subtarget->hasNOPL()">;
def HasMMX : Predicate<"Subtarget->hasMMX()">;
def Has3DNow : Predicate<"Subtarget->hasThreeDNow()">;
def Has3DNowA : Predicate<"Subtarget->hasThreeDNowA()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
def HasSSE3 : Predicate<"Subtarget->hasSSE3()">;
def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">;
def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">;
def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">;
def HasSSE41 : Predicate<"Subtarget->hasSSE41()">;
def NoSSE41 : Predicate<"!Subtarget->hasSSE41()">;
def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">;
def HasSSE42 : Predicate<"Subtarget->hasSSE42()">;
def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">;
def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
def NoAVX : Predicate<"!Subtarget->hasAVX()">;
def HasAVX : Predicate<"Subtarget->hasAVX()">;
def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">;
def HasAVX512 : Predicate<"Subtarget->hasAVX512()">;
def UseAVX : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX512()">;
def UseAVX2 : Predicate<"Subtarget->hasAVX2() && !Subtarget->hasAVX512()">;
def NoAVX512 : Predicate<"!Subtarget->hasAVX512()">;
def HasCDI : Predicate<"Subtarget->hasCDI()">;
def HasVPOPCNTDQ : Predicate<"Subtarget->hasVPOPCNTDQ()">;
def HasPFI : Predicate<"Subtarget->hasPFI()">;
def HasERI : Predicate<"Subtarget->hasERI()">;
def HasDQI : Predicate<"Subtarget->hasDQI()">;
def NoDQI : Predicate<"!Subtarget->hasDQI()">;
def HasBWI : Predicate<"Subtarget->hasBWI()">;
def NoBWI : Predicate<"!Subtarget->hasBWI()">;
def HasVLX : Predicate<"Subtarget->hasVLX()">;
def NoVLX : Predicate<"!Subtarget->hasVLX()">;
def NoVLX_Or_NoBWI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasBWI()">;
def NoVLX_Or_NoDQI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">;
Predicate<"!Subtarget->hasVLX() || !Subtarget->hasDQI()">; 922def HasPKU : Predicate<"Subtarget->hasPKU()">; 923def HasVNNI : Predicate<"Subtarget->hasVNNI()">; 924def HasVP2INTERSECT : Predicate<"Subtarget->hasVP2INTERSECT()">; 925def HasBF16 : Predicate<"Subtarget->hasBF16()">; 926def HasFP16 : Predicate<"Subtarget->hasFP16()">; 927def HasAVXVNNIINT16 : Predicate<"Subtarget->hasAVXVNNIINT16()">; 928def HasAVXVNNIINT8 : Predicate<"Subtarget->hasAVXVNNIINT8()">; 929def HasAVXVNNI : Predicate <"Subtarget->hasAVXVNNI()">; 930def NoVLX_Or_NoVNNI : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVNNI()">; 931 932def HasBITALG : Predicate<"Subtarget->hasBITALG()">; 933def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">; 934def HasAES : Predicate<"Subtarget->hasAES()">; 935def HasVAES : Predicate<"Subtarget->hasVAES()">; 936def NoVLX_Or_NoVAES : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVAES()">; 937def HasFXSR : Predicate<"Subtarget->hasFXSR()">; 938def HasX87 : Predicate<"Subtarget->hasX87()">; 939def HasXSAVE : Predicate<"Subtarget->hasXSAVE()">; 940def HasXSAVEOPT : Predicate<"Subtarget->hasXSAVEOPT()">; 941def HasXSAVEC : Predicate<"Subtarget->hasXSAVEC()">; 942def HasXSAVES : Predicate<"Subtarget->hasXSAVES()">; 943def HasPCLMUL : Predicate<"Subtarget->hasPCLMUL()">; 944def NoVLX_Or_NoVPCLMULQDQ : 945 Predicate<"!Subtarget->hasVLX() || !Subtarget->hasVPCLMULQDQ()">; 946def HasVPCLMULQDQ : Predicate<"Subtarget->hasVPCLMULQDQ()">; 947def HasGFNI : Predicate<"Subtarget->hasGFNI()">; 948def HasFMA : Predicate<"Subtarget->hasFMA()">; 949def HasFMA4 : Predicate<"Subtarget->hasFMA4()">; 950def NoFMA4 : Predicate<"!Subtarget->hasFMA4()">; 951def HasXOP : Predicate<"Subtarget->hasXOP()">; 952def HasTBM : Predicate<"Subtarget->hasTBM()">; 953def NoTBM : Predicate<"!Subtarget->hasTBM()">; 954def HasLWP : Predicate<"Subtarget->hasLWP()">; 955def HasMOVBE : Predicate<"Subtarget->hasMOVBE()">; 956def HasRDRAND : Predicate<"Subtarget->hasRDRAND()">; 957def HasF16C : Predicate<"Subtarget->hasF16C()">; 958def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">; 959def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">; 960def HasBMI : Predicate<"Subtarget->hasBMI()">; 961def HasBMI2 : Predicate<"Subtarget->hasBMI2()">; 962def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">; 963def HasVBMI : Predicate<"Subtarget->hasVBMI()">; 964def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">; 965def HasIFMA : Predicate<"Subtarget->hasIFMA()">; 966def HasAVXIFMA : Predicate<"Subtarget->hasAVXIFMA()">; 967def NoVLX_Or_NoIFMA : Predicate<"!Subtarget->hasVLX() || !Subtarget->hasIFMA()">; 968def HasRTM : Predicate<"Subtarget->hasRTM()">; 969def HasADX : Predicate<"Subtarget->hasADX()">; 970def HasSHA : Predicate<"Subtarget->hasSHA()">; 971def HasSHA512 : Predicate<"Subtarget->hasSHA512()">; 972def HasSGX : Predicate<"Subtarget->hasSGX()">; 973def HasSM3 : Predicate<"Subtarget->hasSM3()">; 974def HasRDSEED : Predicate<"Subtarget->hasRDSEED()">; 975def HasSSEPrefetch : Predicate<"Subtarget->hasSSEPrefetch()">; 976def NoSSEPrefetch : Predicate<"!Subtarget->hasSSEPrefetch()">; 977def HasPRFCHW : Predicate<"Subtarget->hasPRFCHW()">; 978def HasPREFETCHI : Predicate<"Subtarget->hasPREFETCHI()">; 979def HasPrefetchW : Predicate<"Subtarget->hasPrefetchW()">; 980def HasPREFETCHWT1 : Predicate<"Subtarget->hasPREFETCHWT1()">; 981def HasLAHFSAHF : Predicate<"Subtarget->hasLAHFSAHF()">; 982def HasLAHFSAHF64 : Predicate<"Subtarget->hasLAHFSAHF64()">; 983def HasMWAITX : Predicate<"Subtarget->hasMWAITX()">; 984def HasCLZERO : 
Predicate<"Subtarget->hasCLZERO()">; 985def HasCLDEMOTE : Predicate<"Subtarget->hasCLDEMOTE()">; 986def HasMOVDIRI : Predicate<"Subtarget->hasMOVDIRI()">; 987def HasMOVDIR64B : Predicate<"Subtarget->hasMOVDIR64B()">; 988def HasPTWRITE : Predicate<"Subtarget->hasPTWRITE()">; 989def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">; 990def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">; 991def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">; 992def HasSM4 : Predicate<"Subtarget->hasSM4()">; 993def HasCLFLUSH : Predicate<"Subtarget->hasCLFLUSH()">; 994def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">; 995def HasCLWB : Predicate<"Subtarget->hasCLWB()">; 996def HasWBNOINVD : Predicate<"Subtarget->hasWBNOINVD()">; 997def HasRDPID : Predicate<"Subtarget->hasRDPID()">; 998def HasRDPRU : Predicate<"Subtarget->hasRDPRU()">; 999def HasWAITPKG : Predicate<"Subtarget->hasWAITPKG()">; 1000def HasINVPCID : Predicate<"Subtarget->hasINVPCID()">; 1001def HasCX8 : Predicate<"Subtarget->hasCX8()">; 1002def HasCX16 : Predicate<"Subtarget->hasCX16()">; 1003def HasPCONFIG : Predicate<"Subtarget->hasPCONFIG()">; 1004def HasENQCMD : Predicate<"Subtarget->hasENQCMD()">; 1005def HasAMXFP16 : Predicate<"Subtarget->hasAMXFP16()">; 1006def HasCMPCCXADD : Predicate<"Subtarget->hasCMPCCXADD()">; 1007def HasAVXNECONVERT : Predicate<"Subtarget->hasAVXNECONVERT()">; 1008def HasKL : Predicate<"Subtarget->hasKL()">; 1009def HasRAOINT : Predicate<"Subtarget->hasRAOINT()">; 1010def HasWIDEKL : Predicate<"Subtarget->hasWIDEKL()">; 1011def HasHRESET : Predicate<"Subtarget->hasHRESET()">; 1012def HasSERIALIZE : Predicate<"Subtarget->hasSERIALIZE()">; 1013def HasTSXLDTRK : Predicate<"Subtarget->hasTSXLDTRK()">; 1014def HasAMXTILE : Predicate<"Subtarget->hasAMXTILE()">; 1015def HasAMXBF16 : Predicate<"Subtarget->hasAMXBF16()">; 1016def HasAMXINT8 : Predicate<"Subtarget->hasAMXINT8()">; 1017def HasAMXCOMPLEX : Predicate<"Subtarget->hasAMXCOMPLEX()">; 1018def HasUINTR : Predicate<"Subtarget->hasUINTR()">; 1019def HasCRC32 : Predicate<"Subtarget->hasCRC32()">; 1020 1021def HasX86_64 : Predicate<"Subtarget->hasX86_64()">; 1022def Not64BitMode : Predicate<"!Subtarget->is64Bit()">, 1023 AssemblerPredicate<(all_of (not Is64Bit)), "Not 64-bit mode">; 1024def In64BitMode : Predicate<"Subtarget->is64Bit()">, 1025 AssemblerPredicate<(all_of Is64Bit), "64-bit mode">; 1026def IsLP64 : Predicate<"Subtarget->isTarget64BitLP64()">; 1027def NotLP64 : Predicate<"!Subtarget->isTarget64BitLP64()">; 1028def In16BitMode : Predicate<"Subtarget->is16Bit()">, 1029 AssemblerPredicate<(all_of Is16Bit), "16-bit mode">; 1030def Not16BitMode : Predicate<"!Subtarget->is16Bit()">, 1031 AssemblerPredicate<(all_of (not Is16Bit)), "Not 16-bit mode">; 1032def In32BitMode : Predicate<"Subtarget->is32Bit()">, 1033 AssemblerPredicate<(all_of Is32Bit), "32-bit mode">; 1034def IsWin64 : Predicate<"Subtarget->isTargetWin64()">; 1035def NotWin64 : Predicate<"!Subtarget->isTargetWin64()">; 1036def NotWin64WithoutFP : Predicate<"!Subtarget->isTargetWin64() ||" 1037 "Subtarget->getFrameLowering()->hasFP(*MF)"> { 1038 let RecomputePerFunction = 1; 1039} 1040def IsPS : Predicate<"Subtarget->isTargetPS()">; 1041def NotPS : Predicate<"!Subtarget->isTargetPS()">; 1042def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">; 1043def NotNaCl : Predicate<"!Subtarget->isTargetNaCl()">; 1044def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">; 1045def KernelCode : Predicate<"TM.getCodeModel() == CodeModel::Kernel">; 1046def NearData : Predicate<"TM.getCodeModel() == 
                         "TM.getCodeModel() == CodeModel::Kernel">;
def IsNotPIC : Predicate<"!TM.isPositionIndependent()">;

// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def OptForSize : Predicate<"shouldOptForSize(MF)">;
  def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">;
  def OptForSpeed : Predicate<"!shouldOptForSize(MF)">;
  def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
                            "shouldOptForSize(MF)">;
  def NoSSE41_Or_OptForSize : Predicate<"shouldOptForSize(MF) || "
                                        "!Subtarget->hasSSE41()">;
}

def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
def FavorMemIndirectCall : Predicate<"!Subtarget->slowTwoMemOps()">;
def HasFastMem32 : Predicate<"!Subtarget->isUnalignedMem32Slow()">;
def HasFastLZCNT : Predicate<"Subtarget->hasFastLZCNT()">;
def HasFastSHLDRotate : Predicate<"Subtarget->hasFastSHLDRotate()">;
def HasERMSB : Predicate<"Subtarget->hasERMSB()">;
def HasFSRM : Predicate<"Subtarget->hasFSRM()">;
def HasMFence : Predicate<"Subtarget->hasMFence()">;
def UseIndirectThunkCalls : Predicate<"Subtarget->useIndirectThunkCalls()">;
def NotUseIndirectThunkCalls : Predicate<"!Subtarget->useIndirectThunkCalls()">;

//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
//

include "X86InstrFormats.td"

//===----------------------------------------------------------------------===//
// Pattern fragments.
//

// X86 specific condition code. These correspond to CondCode in
// X86InstrInfo.h. They must be kept in synch.
def X86_COND_O   : PatLeaf<(i8 0)>;
def X86_COND_NO  : PatLeaf<(i8 1)>;
def X86_COND_B   : PatLeaf<(i8 2)>;  // alt. COND_C
def X86_COND_AE  : PatLeaf<(i8 3)>;  // alt. COND_NC
def X86_COND_E   : PatLeaf<(i8 4)>;  // alt. COND_Z
def X86_COND_NE  : PatLeaf<(i8 5)>;  // alt. COND_NZ
def X86_COND_BE  : PatLeaf<(i8 6)>;  // alt. COND_NA
def X86_COND_A   : PatLeaf<(i8 7)>;  // alt. COND_NBE
def X86_COND_S   : PatLeaf<(i8 8)>;
def X86_COND_NS  : PatLeaf<(i8 9)>;
def X86_COND_P   : PatLeaf<(i8 10)>; // alt. COND_PE
def X86_COND_NP  : PatLeaf<(i8 11)>; // alt. COND_PO
def X86_COND_L   : PatLeaf<(i8 12)>; // alt. COND_NGE
def X86_COND_GE  : PatLeaf<(i8 13)>; // alt. COND_NL
def X86_COND_LE  : PatLeaf<(i8 14)>; // alt. COND_NG
def X86_COND_G   : PatLeaf<(i8 15)>; // alt. COND_NLE

def i16immSExt8  : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
def i32immSExt8  : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
def i64immSExt8  : ImmLeaf<i64, [{ return isInt<8>(Imm); }]>;
def i64immSExt32 : ImmLeaf<i64, [{ return isInt<32>(Imm); }]>;
def i64timmSExt32 : TImmLeaf<i64, [{ return isInt<32>(Imm); }]>;

def i16relocImmSExt8 : PatLeaf<(i16 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i32relocImmSExt8 : PatLeaf<(i32 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i64relocImmSExt8 : PatLeaf<(i64 relocImm), [{
  return isSExtAbsoluteSymbolRef(8, N);
}]>;
def i64relocImmSExt32 : PatLeaf<(i64 relocImm), [{
  return isSExtAbsoluteSymbolRef(32, N);
}]>;

// If we have multiple users of an immediate, it's much smaller to reuse
// the register, rather than encode the immediate in every instruction.
// This has the risk of increasing register pressure from stretched live
// ranges; however, the immediates should be trivial to rematerialize by
// the RA in the event of high register pressure.
// TODO : This is currently enabled for stores and binary ops. There are more
// cases for which this can be enabled, though this catches the bulk of the
// issues.
// TODO2 : This should really also be enabled under O2, but there's currently
// an issue with RA where we don't pull the constants into their users
// when we rematerialize them. I'll follow up on enabling O2 after we fix that
// issue.
// TODO3 : This is currently limited to single basic blocks (DAG creation
// pulls block immediates to the top and merges them if necessary).
// Eventually, it would be nice to allow ConstantHoisting to merge constants
// globally for potentially added savings.
//
def imm_su : PatLeaf<(imm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt32_su : PatLeaf<(i64immSExt32), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def relocImm8_su : PatLeaf<(i8 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def relocImm16_su : PatLeaf<(i16 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def relocImm32_su : PatLeaf<(i32 relocImm), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def i16relocImmSExt8_su : PatLeaf<(i16relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i32relocImmSExt8_su : PatLeaf<(i32relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

def i16immSExt8_su : PatLeaf<(i16immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i32immSExt8_su : PatLeaf<(i32immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;
def i64immSExt8_su : PatLeaf<(i64immSExt8), [{
  return !shouldAvoidImmediateInstFormsForSize(N);
}]>;

// i64immZExt32 predicate - True if the 64-bit immediate fits in a 32-bit
// unsigned field.
def i64immZExt32 : ImmLeaf<i64, [{ return isUInt<32>(Imm); }]>;

def i64immZExt32SExt8 : ImmLeaf<i64, [{
  return isUInt<32>(Imm) && isInt<8>(static_cast<int32_t>(Imm));
}]>;

// Helper fragments for loads.

// It's safe to fold a zextload/extload from i1 as a regular i8 load. The
// upper bits are guaranteed to be zero and we were going to emit a MOV8rm
// which might get folded during peephole anyway.
def loadi8 : PatFrag<(ops node:$ptr), (i8 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  return ExtType == ISD::NON_EXTLOAD || ExtType == ISD::EXTLOAD ||
         ExtType == ISD::ZEXTLOAD;
}]>;

// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType == ISD::NON_EXTLOAD)
    return true;
  if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
    return LD->getAlign() >= 2 && LD->isSimple();
  return false;
}]>;

def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType == ISD::NON_EXTLOAD)
    return true;
  if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
    return LD->getAlign() >= 4 && LD->isSimple();
  return false;
}]>;

def loadi64  : PatFrag<(ops node:$ptr), (i64 (load node:$ptr))>;
def loadf16  : PatFrag<(ops node:$ptr), (f16 (load node:$ptr))>;
def loadf32  : PatFrag<(ops node:$ptr), (f32 (load node:$ptr))>;
def loadf64  : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
def loadf80  : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
}]>;
def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  return Subtarget->hasSSEUnalignedMem() ||
         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
}]>;

def sextloadi16i8  : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
def sextloadi32i8  : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;

def zextloadi8i1   : PatFrag<(ops node:$ptr), (i8  (zextloadi1 node:$ptr))>;
def zextloadi16i1  : PatFrag<(ops node:$ptr), (i16 (zextloadi1 node:$ptr))>;
def zextloadi32i1  : PatFrag<(ops node:$ptr), (i32 (zextloadi1 node:$ptr))>;
def zextloadi16i8  : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
def zextloadi32i8  : PatFrag<(ops node:$ptr), (i32 (zextloadi8 node:$ptr))>;
def zextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (zextloadi16 node:$ptr))>;
def zextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (zextloadi1 node:$ptr))>;
def zextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (zextloadi8 node:$ptr))>;
def zextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (zextloadi16 node:$ptr))>;
def zextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (zextloadi32 node:$ptr))>;

def extloadi8i1    : PatFrag<(ops node:$ptr), (i8  (extloadi1 node:$ptr))>;
def extloadi16i1   : PatFrag<(ops node:$ptr), (i16 (extloadi1 node:$ptr))>;
def extloadi32i1   : PatFrag<(ops node:$ptr), (i32 (extloadi1 node:$ptr))>;
def extloadi16i8   : PatFrag<(ops node:$ptr), (i16 (extloadi8 node:$ptr))>;
def extloadi32i8   : PatFrag<(ops node:$ptr), (i32 (extloadi8 node:$ptr))>;
def extloadi32i16  : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
def extloadi64i1   : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
def extloadi64i8   : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
def extloadi64i16  : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;

// We can treat an i8/i16 extending load to i64 as a 32 bit load if it's known
// to be 4 byte aligned or better.
def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::EXTLOAD)
    return false;
  if (LD->getMemoryVT() == MVT::i32)
    return true;

  return LD->getAlign() >= 4 && LD->isSimple();
}]>;

// binary op with only one user
class binop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A, node:$B),
              (operator node:$A, node:$B), [{
  return N->hasOneUse();
}]>;

def add_su : binop_oneuse<add>;
def and_su : binop_oneuse<and>;
def srl_su : binop_oneuse<srl>;

// unary op with only one user
class unop_oneuse<SDPatternOperator operator>
    : PatFrag<(ops node:$A),
              (operator node:$A), [{
  return N->hasOneUse();
}]>;


def ineg_su : unop_oneuse<ineg>;
def trunc_su : unop_oneuse<trunc>;

//===----------------------------------------------------------------------===//
// X86 Type information definitions
//===----------------------------------------------------------------------===//

/// X86TypeInfo - This is a bunch of information that describes relevant X86
/// information about value types. For example, it can tell you what register
/// class and preferred load to use.
class X86TypeInfo<ValueType vt, string instrsuffix, RegisterClass regclass,
                  PatFrag loadnode, X86MemOperand memoperand, ImmType immkind,
                  Operand immoperand, SDPatternOperator immoperator,
                  SDPatternOperator immnosuoperator, Operand imm8operand,
                  SDPatternOperator imm8operator, SDPatternOperator imm8nosuoperator,
                  bit hasOddOpcode, OperandSize opSize,
                  bit hasREX_W> {
  /// VT - This is the value type itself.
  ValueType VT = vt;

  /// InstrSuffix - This is the suffix used on instructions with this type. For
  /// example, i8 -> "b", i16 -> "w", i32 -> "l", i64 -> "q".
  string InstrSuffix = instrsuffix;

  /// RegClass - This is the register class associated with this type. For
  /// example, i8 -> GR8, i16 -> GR16, i32 -> GR32, i64 -> GR64.
  RegisterClass RegClass = regclass;

  /// LoadNode - This is the load node associated with this type. For
  /// example, i8 -> loadi8, i16 -> loadi16, i32 -> loadi32, i64 -> loadi64.
  PatFrag LoadNode = loadnode;

  /// MemOperand - This is the memory operand associated with this type. For
  /// example, i8 -> i8mem, i16 -> i16mem, i32 -> i32mem, i64 -> i64mem.
  X86MemOperand MemOperand = memoperand;

  /// ImmEncoding - This is the encoding of an immediate of this type. For
  /// example, i8 -> Imm8, i16 -> Imm16, i32 -> Imm32. Note that i64 -> Imm32
  /// since the immediate field of i64 instructions is a 32-bit sign-extended
  /// value.
  ImmType ImmEncoding = immkind;

  /// ImmOperand - This is the operand kind of an immediate of this type. For
  /// example, i8 -> i8imm, i16 -> i16imm, i32 -> i32imm. Note that i64 ->
  /// i64i32imm since the immediate field of i64 instructions is a 32-bit sign-
  /// extended value.
  Operand ImmOperand = immoperand;

  /// ImmOperator - This is the operator that should be used to match an
  /// immediate of this kind in a pattern (e.g. imm, or i64immSExt32).
  SDPatternOperator ImmOperator = immoperator;

  SDPatternOperator ImmNoSuOperator = immnosuoperator;

  /// Imm8Operand - This is the operand kind to use for an imm8 of this type.
  /// For example, i8 -> <invalid>, i16 -> i16i8imm, i32 -> i32i8imm. This is
  /// only used for instructions that have a sign-extended imm8 field form.
  Operand Imm8Operand = imm8operand;

  /// Imm8Operator - This is the operator that should be used to match an 8-bit
  /// sign extended immediate of this kind in a pattern (e.g. i16immSExt8).
  SDPatternOperator Imm8Operator = imm8operator;

  SDPatternOperator Imm8NoSuOperator = imm8nosuoperator;

  /// HasOddOpcode - This bit is true if the instruction should have an odd (as
  /// opposed to even) opcode. Operations on i8 are usually even, operations on
  /// other datatypes are odd.
  bit HasOddOpcode = hasOddOpcode;

  /// OpSize - Selects whether the instruction needs a 0x66 prefix based on
  /// 16-bit vs 32-bit mode. i8/i64 set this to OpSizeFixed. i16 sets this
  /// to OpSize16. i32 sets this to OpSize32.
  OperandSize OpSize = opSize;

  /// HasREX_W - This bit is set to true if the instruction should have
  /// the REX.W prefix. This is set for i64 types.
  bit HasREX_W = hasREX_W;
}

def invalid_node : SDNode<"<<invalid_node>>", SDTIntLeaf,[],"<<invalid_node>>">;

def Xi8  : X86TypeInfo<i8, "b", GR8, loadi8, i8mem, Imm8, i8imm,
                       imm_su, imm, i8imm, invalid_node, invalid_node,
                       0, OpSizeFixed, 0>;
def Xi16 : X86TypeInfo<i16, "w", GR16, loadi16, i16mem, Imm16, i16imm,
                       imm_su, imm, i16i8imm, i16immSExt8_su, i16immSExt8,
                       1, OpSize16, 0>;
def Xi32 : X86TypeInfo<i32, "l", GR32, loadi32, i32mem, Imm32, i32imm,
                       imm_su, imm, i32i8imm, i32immSExt8_su, i32immSExt8,
                       1, OpSize32, 0>;
def Xi64 : X86TypeInfo<i64, "q", GR64, loadi64, i64mem, Imm32S, i64i32imm,
                       i64immSExt32_su, i64immSExt32, i64i8imm, i64immSExt8_su,
                       i64immSExt8, 1, OpSizeFixed, 1>;

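// Reading Xi32 as an example: 32-bit operations use the "l" mnemonic suffix,
// the GR32 register class, loadi32/i32mem for memory forms, a plain Imm32
// immediate (i32imm, matched by imm_su/imm), the i32i8imm sign-extended imm8
// form, an odd opcode low bit, and OpSize32 operand-size prefix handling.
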
/// ITy - This instruction base class takes the type info for the instruction.
/// Using this, it:
/// 1. Concatenates together the instruction mnemonic with the appropriate
///    suffix letter, a tab, and the arguments.
/// 2. Infers whether the instruction should have a 0x66 prefix byte.
/// 3. Infers whether the instruction should have a REX.W prefix.
/// 4. Infers whether the low bit of the opcode should be 0 (for i8 operations)
///    or 1 (for i16,i32,i64 operations).
class ITy<bits<8> opcode, Format f, X86TypeInfo typeinfo, dag outs, dag ins,
          string mnemonic, string args, list<dag> pattern>
  : I<{opcode{7}, opcode{6}, opcode{5}, opcode{4},
       opcode{3}, opcode{2}, opcode{1}, typeinfo.HasOddOpcode },
      f, outs, ins,
      !strconcat(mnemonic, "{", typeinfo.InstrSuffix, "}\t", args), pattern> {

  // Infer instruction prefixes from type info.
  let OpSize = typeinfo.OpSize;
  let hasREX_W = typeinfo.HasREX_W;
}

//===----------------------------------------------------------------------===//
// Subsystems.
//===----------------------------------------------------------------------===//

include "X86InstrMisc.td"
include "X86InstrTBM.td"
include "X86InstrArithmetic.td"
include "X86InstrCMovSetCC.td"
include "X86InstrExtension.td"
include "X86InstrControl.td"
include "X86InstrShiftRotate.td"

// X87 Floating Point Stack.
include "X86InstrFPStack.td"

// SIMD support (SSE, MMX and AVX)
include "X86InstrFragmentsSIMD.td"

// FMA - Fused Multiply-Add support (requires FMA)
include "X86InstrFMA.td"

// XOP
include "X86InstrXOP.td"

// SSE, MMX and 3DNow! vector support.
include "X86InstrSSE.td"
include "X86InstrAVX512.td"
include "X86InstrMMX.td"
include "X86Instr3DNow.td"

include "X86InstrVMX.td"
include "X86InstrSVM.td"
include "X86InstrSNP.td"

include "X86InstrTSX.td"
include "X86InstrSGX.td"

include "X86InstrTDX.td"

// Key Locker instructions
include "X86InstrKL.td"

// AMX instructions
include "X86InstrAMX.td"

// RAO-INT instructions
include "X86InstrRAOINT.td"

// System instructions.
include "X86InstrSystem.td"

// Compiler Pseudo Instructions and Pat Patterns
include "X86InstrCompiler.td"
include "X86InstrVecCompiler.td"

// Assembler mnemonic/instruction aliases
include "X86InstrAsmAlias.td"