//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;


// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
          (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let usesCustomInserter = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al,
                                   i64imm:$regsavefi, i64imm:$offset,
                                   variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi, $offset",
                              [(X86vastart_save_xmm_regs GR8:$al,
                                                          imm:$regsavefi,
                                                          imm:$offset),
                               (implicit EFLAGS)]>;

// The VAARG_64 pseudo-instruction takes the address of the va_list,
// and places the address of the next argument into a register.
let Defs = [EFLAGS] in
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, imm:$size, imm:$mode, imm:$align)),
                  (implicit EFLAGS)]>;


// When using segmented stacks these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise memory is allocated from
// the heap.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                         "# variable sized alloca with probing",
                         [(set GR32:$dst,
                            (X86ProbedAlloca GR32:$size))]>,
                       Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                         "# variable sized alloca with probing",
                         [(set GR64:$dst,
                            (X86ProbedAlloca GR64:$size))]>,
                       Requires<[In64BitMode]>;
}

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more than
// 4k bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls) like the stack pointer change.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def WIN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR32:$size)]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                      "# dynamic stack allocation",
                      [(X86WinAlloca GR64:$size)]>,
                    Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
  def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                   "xorl\t$$FP, $src", []>,
                 Requires<[NotLP64]>, Sched<[WriteALU]>;
  def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                   "xorq\t$$FP, $src", []>,
                 Requires<[In64BitMode]>, Sched<[WriteALU]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                      "ret\t#eh_return, addr: $addr",
                      [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET", [(cleanupret)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//
let isPseudo = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                      "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                      "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                         "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                         "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                       "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                        "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                          "#SEH_EndPrologue", []>;
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                       "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                    [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                     [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                     Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
} // Uses = [EFLAGS], Defs = [EFLAGS]

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                     [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                     [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                     [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                      [(X86rep_movs i64)]>, REP, AdSize32,
                    Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                     "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                     [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                     [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                     "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                     [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                      "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                      [(X86rep_movs i64)]>, REP, AdSize64,
                    Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize32,
                      Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize64,
                     Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                     Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                     Requires<[IsLP64]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize64,
                      Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addr32",
                   [(X86tlsaddr tls32addr:$sym)]>,
                 Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                        "# TLS_base_addr32",
                        [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                      Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                   [(X86tlsaddr tls64addr:$sym)]>,
                 Requires<[In64BitMode]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                        "# TLS_base_addr64",
                        [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                      Requires<[In64BitMode]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack, on return the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLSCall_32",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi.  The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax.  All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLSCall_64",
                   [(X86TLSCall addr:$sym)]>,
                 Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a customInserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
  // however that requires promoting the operands, and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMov] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMov]

  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
  // SSE1/SSE2.
  let Predicates = [FPStackf32] in
    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;

  let Predicates = [FPStackf64] in
    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;

  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;

  let Predicates = [HasMMX] in
    defm _VR64 : CMOVrr_PSEUDO<VR64, x86mmx>;

  let Predicates = [HasSSE1,NoAVX512] in
    defm _FR32 : CMOVrr_PSEUDO<FR32, f32>;
  let Predicates = [HasSSE2,NoAVX512] in
    defm _FR64 : CMOVrr_PSEUDO<FR64, f64>;
  let Predicates = [HasAVX512] in {
    defm _FR32X : CMOVrr_PSEUDO<FR32X, f32>;
    defm _FR64X : CMOVrr_PSEUDO<FR64X, f64>;
  }
  let Predicates = [NoVLX] in {
    defm _VR128 : CMOVrr_PSEUDO<VR128, v2i64>;
    defm _VR256 : CMOVrr_PSEUDO<VR256, v4i64>;
  }
  let Predicates = [HasVLX] in {
    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
  }
  defm _VR512  : CMOVrr_PSEUDO<VR512, v8i64>;
  defm _VK1    : CMOVrr_PSEUDO<VK1, v1i1>;
  defm _VK2    : CMOVrr_PSEUDO<VK2, v2i1>;
  defm _VK4    : CMOVrr_PSEUDO<VK4, v4i1>;
  defm _VK8    : CMOVrr_PSEUDO<VK8, v8i1>;
  defm _VK16   : CMOVrr_PSEUDO<VK16, v16i1>;
  defm _VK32   : CMOVrr_PSEUDO<VK32, v32i1>;
  defm _VK64   : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]

def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

let Predicates = [NoVLX] in {
  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}
let Predicates = [HasVLX] in {
  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;

  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}

def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;

//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
//===----------------------------------------------------------------------===//

// FIXME: Use normal instructions and add lock prefix dynamically.

// Memory barriers

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                        "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                    Requires<[Not64BitMode]>, OpSize32, LOCK,
                    Sched<[WriteALURMW]>;

let hasSideEffects = 1 in
def Int_MemBarrier : I<0, Pseudo, (outs), (ins),
                       "#MEMBARRIER",
                       [(X86MemBarrier)]>, Sched<[WriteLoad]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                 MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                 !strconcat(mnemonic, "{b}\t",
                            "{$src2, $dst|$dst, $src2}"),
                 [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                  !strconcat(mnemonic, "{w}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                  OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                  MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                  !strconcat(mnemonic, "{l}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                  OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                   !strconcat(mnemonic, "{q}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order specific, we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                     OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                     OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                      !strconcat(mnemonic, "{q}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                      LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                   ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                   !strconcat(mnemonic, "{b}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                     !strconcat(mnemonic, "{w}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                     OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                     ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                     !strconcat(mnemonic, "{l}\t",
                                "{$src2, $dst|$dst, $src2}"),
                     [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                     OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                         ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                         !strconcat(mnemonic, "{q}\t",
                                    "{$src2, $dst|$dst, $src2}"),
                         [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                         LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

def X86lock_add_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_add node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

def X86lock_sub_nocf : PatFrag<(ops node:$lhs, node:$rhs),
                               (X86lock_sub node:$lhs, node:$rhs), [{
  return hasNoCarryFlagUses(SDValue(N, 0));
}]>;

let Predicates = [UseIncDec] in {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }

  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8  -1)), (LOCK_DEC8m  addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8  -1)), (LOCK_INC8m  addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

// Atomic compare and swap.
multiclass LCMPXCHG_UnOp<bits<8> Opc, Format Form, string mnemonic,
                         SDPatternOperator frag, X86MemOperand x86memop> {
let isCodeGenOnly = 1, usesCustomInserter = 1 in {
  def NAME : I<Opc, Form, (outs), (ins x86memop:$ptr),
               !strconcat(mnemonic, "\t$ptr"),
               [(frag addr:$ptr)]>, TB, LOCK;
}
}

multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW] in {
defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b", X86cas8, i64mem>;
}

// This pseudo must be used when the frame uses RBX as
// the base pointer. Indeed, in such situation RBX is a reserved
// register and the register allocator will ignore any use/def of
// it. In other words, the register will not fix the clobbering of
// RBX that will happen when setting the arguments for the instruction.
//
// Unlike the actual related instruction, we mark that this one
// defines EBX (instead of using EBX).
// The rationale is that we will define RBX during the expansion of
// the pseudo. The argument feeding EBX is ebx_input.
//
// The additional argument, $ebx_save, is a temporary register used to
// save the value of RBX across the actual instruction.
//
// To make sure the register assigned to $ebx_save does not interfere with
// the definition of the actual instruction, we use a definition $dst which
// is tied to $ebx_save. That way, the live-range of $ebx_save spans across
// the instruction and we are sure we will have a valid register to restore
// the value of RBX.
let Defs = [EAX, EDX, EBX, EFLAGS], Uses = [EAX, ECX, EDX],
    Predicates = [HasCmpxchg8b], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$ebx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG8B_SAVE_EBX :
    I<0, Pseudo, (outs GR32:$dst),
      (ins i64mem:$ptr, GR32:$ebx_input, GR32:$ebx_save),
      !strconcat("cmpxchg8b", "\t$ptr"),
      [(set GR32:$dst, (X86cas8save_ebx addr:$ptr, GR32:$ebx_input,
                                        GR32:$ebx_save))]>;
}


let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW] in {
defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b",
                                 X86cas16, i128mem>, REX_W;
}

// Same as LCMPXCHG8B_SAVE_EBX but for the 16 Bytes variant.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCmpxchg16b,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst",
    usesCustomInserter = 1 in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save),
      !strconcat("cmpxchg16b", "\t$ptr"),
      [(set GR64:$dst, (X86cas16save_rbx addr:$ptr, GR64:$rbx_input,
                                         GR64:$rbx_save))]>;
}

defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_LOAD_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                             string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1,
      SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;

  def : Pat<(atomic_store_8 addr:$dst,
             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 addr:$dst,
             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 addr:$dst,
             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 addr:$dst,
             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR",  or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseSSE2]>;
  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
// FIXME: Add fsub, fmul, fdiv, ...
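// Added illustration (not part of the original source): roughly, these
// patterns let a relaxed/monotonic atomic load feed an SSE scalar op
// directly, so C++ along the lines of
//   std::atomic<float> a;  float r = x + a.load(std::memory_order_relaxed);
// can select an "addss (mem), %xmm" form instead of an integer MOV of the
// value, a GPR-to-XMM move, and a register-register ADDSS.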

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 addr:$dst, dag8),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 addr:$dst, dag16),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 addr:$dst, dag32),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 addr:$dst, dag64),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
      (add (atomic_load_8  addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
      (add (atomic_load_8  addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
    (ineg (i8 (atomic_load_8  addr:$dst))),
    (ineg (i16 (atomic_load_16 addr:$dst))),
    (ineg (i32 (atomic_load_32 addr:$dst))),
    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
    (not (i8 (atomic_load_8  addr:$dst))),
    (not (i16 (atomic_load_16 addr:$dst))),
    (not (i32 (atomic_load_32 addr:$dst))),
    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8  (atomic_load_8 addr:$src)),  (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;

// Floating point loads/stores.
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 addr:$dst, (i32 (bitconvert (f32 FR32:$src)))),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 addr:$dst, (i64 (bitconvert (f64 FR64:$src)))),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi8 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi8 addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi8 addr:$dst, -1)>;
}

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates.  FIXME: This is really a hack, the 'imm' predicate
// for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// tls has some funny stuff here...
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses less than 7 volatile registers.
def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off),
                             (X86tcret node:$ptr, node:$off), [{
  // X86tcret args: (*chain, ptr, imm, regs..., glue)
  unsigned NumRegs = 0;
  for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i)
    if (isa<RegisterSDNode>(N->getOperand(i)) && ++NumRegs > 6)
      return false;
  return true;
}]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
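// (Added note, an assumption about the rationale rather than original text:
// the folded load in TCRETURNmi is executed after the epilogue has already
// restored callee-saved registers, so an address mode built on a callee-saved
// PIC base register would no longer hold the expected value at that point.)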
def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, imm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
          Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
          Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// zextload bool -> zextload byte
// i1 stored in one byte in zero-extended form.
// Upper bits cleanup should be executed before Store.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8 addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

// Except for i16 -> i32 since isel expect i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8 GR8 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

// If this is an anyext of the remainder of an 8-bit sdivrem, use a MOVSX
// instead of a MOVZX. The sdivrem lowering will emit a MOVSX to move
// %ah to the lower byte of a register. By using a MOVSX here we allow a
// post-isel peephole to merge the two MOVSX instructions into one.
def anyext_sdiv : PatFrag<(ops node:$lhs), (anyext node:$lhs),[{
  return (N->getOperand(0).getOpcode() == ISD::SDIVREM &&
          N->getOperand(0).getResNo() == 1);
}]>;
def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// Any instruction that defines a 32-bit result zeroes the high half of the
// 64-bit register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. Any other 32-bit operation will zero-extend
// up to 64 bits. AssertSext/AssertZext aren't saying anything about the upper
// 32 bits, they're probably just qualifying a CopyFromReg.
def def32 : PatLeaf<(i32 GR32:$src), [{
  return N->getOpcode() != ISD::TRUNCATE &&
         N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
         N->getOpcode() != ISD::CopyFromReg &&
         N->getOpcode() != ISD::AssertSext &&
         N->getOpcode() != ISD::AssertZext;
}]>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
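// (Added illustration, not original text: for IR such as
//    %y = add i32 %a, %b
//    %z = zext i32 %y to i64
// the 32-bit ADD already zeroed bits 63:32 of the destination register, so the
// zext needs no instructions; SUBREG_TO_REG below just re-labels the value as
// an i64.)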
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to eventually emit these instructions as an 'or' at the end of the
// code generator to make the generated code easier to read. To do this, we
// select into "disjoint bits" pseudo ops.

// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
    return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());

  KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0);
  KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0);
  return (~Known0.Zero & ~Known1.Zero) == 0;
}]>;

// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB  : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                   "", // orb/addb REG, REG
                   [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                   "", // orw/addw REG, REG
                   [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                   "", // orl/addl REG, REG
                   [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                   "", // orq/addq REG, REG
                   [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

// NOTE: These are order specific, we want the ri8 forms to be listed
// first so that they are slightly preferred to the ri forms.
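// For example, "addl $16, %ecx" encodes with a sign-extended imm8
// (83 C1 10, 3 bytes) rather than a full imm32 (81 C1 10 00 00 00, 6 bytes),
// so listing the ri8 pseudos first tends to give smaller code once these
// pseudos are re-expanded into or/add.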

def ADD8ri_DB   : I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri8_DB : I<0, Pseudo,
                    (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2),
                    "", // orw/addw REG, imm8
                    [(set GR16:$dst, (or_is_add GR16:$src1, i16immSExt8:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;

def ADD32ri8_DB : I<0, Pseudo,
                    (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2),
                    "", // orl/addl REG, imm8
                    [(set GR32:$dst, (or_is_add GR32:$src1, i32immSExt8:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;

def ADD64ri8_DB : I<0, Pseudo,
                    (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2),
                    "", // orq/addq REG, imm8
                    [(set GR64:$dst, (or_is_add GR64:$src1,
                                                i64immSExt8:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
} // isConvertibleToThreeAddress
} // SchedRW

//===----------------------------------------------------------------------===//
// Pattern match SUB as XOR
//===----------------------------------------------------------------------===//

// An immediate in the LHS of a subtract can't be encoded in the instruction.
// If there is no possibility of a borrow we can use an XOR instead of a SUB
// to enable the immediate to be folded.
// TODO: Move this to a DAG combine?

def sub_is_xor : PatFrag<(ops node:$lhs, node:$rhs), (sub node:$lhs, node:$rhs),[{
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(1));

    // If all possible ones in the RHS are set in the LHS then there can't be
    // a borrow and we can use xor.
    return (~Known.Zero).isSubsetOf(CN->getAPIntValue());
  }

  return false;
}]>;

let AddedComplexity = 5 in {
def : Pat<(sub_is_xor imm:$src2, GR8:$src1),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i16immSExt8:$src2, GR16:$src1),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR16:$src1),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i32immSExt8:$src2, GR32:$src1),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub_is_xor imm:$src2, GR32:$src1),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub_is_xor i64immSExt8:$src2, GR64:$src1),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub_is_xor i64immSExt32:$src2, GR64:$src1),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;
}

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
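// For example, "addl $128, %eax" needs a 32-bit immediate (05 80 00 00 00,
// 5 bytes) because +128 is not representable as a sign-extended imm8, while
// the equivalent "subl $-128, %eax" encodes as 83 E8 80 (3 bytes).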
def : Pat<(add GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi8 addr:$dst, -128)>;

def : Pat<(add GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi8 addr:$dst, -128)>;

def : Pat<(add GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi8 addr:$dst, -128)>;

def : Pat<(X86add_flag_nocf GR16:$src1, 128),
          (SUB16ri8 GR16:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR32:$src1, 128),
          (SUB32ri8 GR32:$src1, -128)>;
def : Pat<(X86add_flag_nocf GR64:$src1, 128),
          (SUB64ri8 GR64:$src1, -128)>;

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;

def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
          (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
// represented with a sign extension of an 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
def : Pat<(and GR64:$src, i64immZExt32SExt8:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri8
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;

def : Pat<(and GR64:$src, i64immZExt32:$imm),
          (SUBREG_TO_REG
            (i64 0),
            (AND32ri
              (EXTRACT_SUBREG GR64:$src, sub_32bit),
              (i32 (GetLo32XForm imm:$imm))),
            sub_32bit)>;
} // AddedComplexity = 1

// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGIsel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
          (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
                          sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                         sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1

// Try to use BTS/BTR/BTC for single bit operations on the upper 32-bits.
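// For example, under optsize, setting bit 40 with "orq $(1 << 40), %rax" would
// require materializing the mask with a 10-byte movabsq plus an orq, whereas
// "btsq $40, %rax" sets the same bit in a single 5-byte instruction.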

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingOnes(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countTrailingZeros(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
  def : Pat<(and GR64:$src1, BTRMask64:$mask),
            (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
  def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
            (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
  def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
            (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}

// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
          (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
                          sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def : Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def : Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def : Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

def immff00_ffff : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;

def shiftMask8 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 3);
}]>;

def shiftMask16 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 4);
}]>;

def shiftMask32 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 5);
}]>;

def shiftMask64 : PatFrag<(ops node:$lhs), (and node:$lhs, imm), [{
  return isUnneededShiftMask(N, 6);
}]>;

// Shift amount is implicitly masked.
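// For example, IR like "shl i32 %x, (and i32 %n, 31)" can drop the mask
// entirely: the hardware SHL/SHR/SAR already use only the low 5 bits of CL
// for 32-bit operands (low 6 bits for 64-bit operands), which is what the
// patterns below exploit.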
multiclass MaskedShiftAmountPats<SDNode frag, string name> {
  // (shift x (and y, 31)) ==> (shift x, y)
  def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (shift x (and y, 63)) ==> (shift x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedShiftAmountPats<shl, "SHL">;
defm : MaskedShiftAmountPats<srl, "SHR">;
defm : MaskedShiftAmountPats<sra, "SAR">;

// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag, string name> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
            (!cast<Instruction>(name # "8rCL") GR8:$src1)>;
  def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
            (!cast<Instruction>(name # "16rCL") GR16:$src1)>;
  def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
            (!cast<Instruction>(name # "32rCL") GR32:$src1)>;
  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(name # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(name # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(name # "32mCL") addr:$dst)>;

  // (rot x (and y, 63)) ==> (rot x, y)
  def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
            (!cast<Instruction>(name # "64rCL") GR64:$src1)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(name # "64mCL") addr:$dst)>;
}

defm : MaskedRotateAmountPats<rotl, "ROL">;
defm : MaskedRotateAmountPats<rotr, "ROR">;

// Double "funnel" shift amount is implicitly masked.
// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
          (SHLD16rrCL GR16:$src1, GR16:$src2)>;
def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
          (SHRD16rrCL GR16:$src1, GR16:$src2)>;

// (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
          (SHLD32rrCL GR32:$src1, GR32:$src2)>;
def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
          (SHRD32rrCL GR32:$src1, GR32:$src2)>;

// (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
          (SHLD64rrCL GR64:$src1, GR64:$src2)>;
def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
          (SHRD64rrCL GR64:$src1, GR64:$src2)>;

let Predicates = [HasBMI2] in {
  let AddedComplexity = 1 in {
    def : Pat<(sra GR32:$src1, (shiftMask32 GR8:$src2)),
              (SARX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(sra GR64:$src1, (shiftMask64 GR8:$src2)),
              (SARX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(srl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHRX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(srl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHRX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

    def : Pat<(shl GR32:$src1, (shiftMask32 GR8:$src2)),
              (SHLX32rr GR32:$src1,
                        (INSERT_SUBREG
                          (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
    def : Pat<(shl GR64:$src1, (shiftMask64 GR8:$src2)),
              (SHLX64rr GR64:$src1,
                        (INSERT_SUBREG
                          (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  }

  def : Pat<(sra (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SARX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(sra (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SARX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(srl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHRX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(srl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHRX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  def : Pat<(shl (loadi32 addr:$src1), (shiftMask32 GR8:$src2)),
            (SHLX32rm addr:$src1,
                      (INSERT_SUBREG
                        (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(shl (loadi64 addr:$src1), (shiftMask64 GR8:$src2)),
            (SHLX64rm addr:$src1,
                      (INSERT_SUBREG
                        (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
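// For example, "x & ~(1 << n)" can be selected as "btr %ecx, %eax",
// "x | (1 << n)" as "bts %ecx, %eax", and "x ^ (1 << n)" as "btc %ecx, %eax"
// (bit index in ECX, value in EAX), avoiding a separate shift to build the
// mask.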
multiclass one_bit_patterns<RegisterClass RC, ValueType VT, Instruction BTR,
                            Instruction BTS, Instruction BTC,
                            PatFrag ShiftMask> {
  def : Pat<(and RC:$src1, (rotl -2, GR8:$src2)),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, GR8:$src2)),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, GR8:$src2)),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and RC:$src1, (rotl -2, (ShiftMask GR8:$src2))),
            (BTR RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTS RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor RC:$src1, (shl 1, (ShiftMask GR8:$src2))),
            (BTC RC:$src1,
                 (INSERT_SUBREG (VT (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : one_bit_patterns<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : one_bit_patterns<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : one_bit_patterns<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

// add reg, reg
def : Pat<(add GR8 :$src1, GR8 :$src2), (ADD8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(add GR16:$src1, GR16:$src2), (ADD16rr GR16:$src1, GR16:$src2)>;
def : Pat<(add GR32:$src1, GR32:$src2), (ADD32rr GR32:$src1, GR32:$src2)>;
def : Pat<(add GR64:$src1, GR64:$src2), (ADD64rr GR64:$src1, GR64:$src2)>;

// add reg, mem
def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
          (ADD8rm GR8:$src1, addr:$src2)>;
def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
          (ADD16rm GR16:$src1, addr:$src2)>;
def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
          (ADD32rm GR32:$src1, addr:$src2)>;
def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
          (ADD64rm GR64:$src1, addr:$src2)>;

// add reg, imm
def : Pat<(add GR8 :$src1, imm:$src2), (ADD8ri  GR8:$src1,  imm:$src2)>;
def : Pat<(add GR16:$src1, imm:$src2), (ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(add GR32:$src1, imm:$src2), (ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(add GR16:$src1, i16immSExt8:$src2),
          (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(add GR32:$src1, i32immSExt8:$src2),
          (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt8:$src2),
          (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(add GR64:$src1, i64immSExt32:$src2),
          (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub reg, reg
def : Pat<(sub GR8 :$src1, GR8 :$src2), (SUB8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(sub GR16:$src1, GR16:$src2), (SUB16rr GR16:$src1, GR16:$src2)>;
def : Pat<(sub GR32:$src1, GR32:$src2), (SUB32rr GR32:$src1, GR32:$src2)>;
def : Pat<(sub GR64:$src1, GR64:$src2), (SUB64rr GR64:$src1, GR64:$src2)>;

// sub reg, mem
def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
          (SUB8rm GR8:$src1, addr:$src2)>;
def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
          (SUB16rm GR16:$src1, addr:$src2)>;
def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
          (SUB32rm GR32:$src1, addr:$src2)>;
def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
          (SUB64rm GR64:$src1, addr:$src2)>;

// sub reg, imm
def : Pat<(sub GR8:$src1, imm:$src2),
          (SUB8ri GR8:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, imm:$src2),
          (SUB16ri GR16:$src1, imm:$src2)>;
def : Pat<(sub GR32:$src1, imm:$src2),
          (SUB32ri GR32:$src1, imm:$src2)>;
def : Pat<(sub GR16:$src1, i16immSExt8:$src2),
          (SUB16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(sub GR32:$src1, i32immSExt8:$src2),
          (SUB32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt8:$src2),
          (SUB64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
          (SUB64ri32 GR64:$src1, i64immSExt32:$src2)>;

// sub 0, reg
def : Pat<(X86sub_flag 0, GR8 :$src), (NEG8r  GR8 :$src)>;
def : Pat<(X86sub_flag 0, GR16:$src), (NEG16r GR16:$src)>;
def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>;
def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>;

// mul reg, reg
def : Pat<(mul GR16:$src1, GR16:$src2),
          (IMUL16rr GR16:$src1, GR16:$src2)>;
def : Pat<(mul GR32:$src1, GR32:$src2),
          (IMUL32rr GR32:$src1, GR32:$src2)>;
def : Pat<(mul GR64:$src1, GR64:$src2),
          (IMUL64rr GR64:$src1, GR64:$src2)>;

// mul reg, mem
def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
          (IMUL16rm GR16:$src1, addr:$src2)>;
def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
          (IMUL32rm GR32:$src1, addr:$src2)>;
def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
          (IMUL64rm GR64:$src1, addr:$src2)>;

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR16:$src1, i16immSExt8:$src2),
          (IMUL16rri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(mul GR32:$src1, i32immSExt8:$src2),
          (IMUL32rri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt8:$src2),
          (IMUL64rri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi16 addr:$src1), i16immSExt8:$src2),
          (IMUL16rmi8 addr:$src1, i16immSExt8:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), i32immSExt8:$src2),
          (IMUL32rmi8 addr:$src1, i32immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt8:$src2),
          (IMUL64rmi8 addr:$src1, i64immSExt8:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Increment/Decrement reg.
// Do not form INC/DEC if they are slow.
let Predicates = [UseIncDec] in {
  def : Pat<(add GR8:$src, 1),   (INC8r GR8:$src)>;
  def : Pat<(add GR16:$src, 1),  (INC16r GR16:$src)>;
  def : Pat<(add GR32:$src, 1),  (INC32r GR32:$src)>;
  def : Pat<(add GR64:$src, 1),  (INC64r GR64:$src)>;
  def : Pat<(add GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(add GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(add GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(add GR64:$src, -1), (DEC64r GR64:$src)>;

  def : Pat<(X86add_flag_nocf GR8:$src, -1),  (DEC8r GR8:$src)>;
  def : Pat<(X86add_flag_nocf GR16:$src, -1), (DEC16r GR16:$src)>;
  def : Pat<(X86add_flag_nocf GR32:$src, -1), (DEC32r GR32:$src)>;
  def : Pat<(X86add_flag_nocf GR64:$src, -1), (DEC64r GR64:$src)>;
  def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (INC8r GR8:$src)>;
  def : Pat<(X86sub_flag_nocf GR16:$src, -1), (INC16r GR16:$src)>;
  def : Pat<(X86sub_flag_nocf GR32:$src, -1), (INC32r GR32:$src)>;
  def : Pat<(X86sub_flag_nocf GR64:$src, -1), (INC64r GR64:$src)>;
}

// or reg/reg.
def : Pat<(or GR8 :$src1, GR8 :$src2), (OR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(or GR16:$src1, GR16:$src2), (OR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(or GR32:$src1, GR32:$src2), (OR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(or GR64:$src1, GR64:$src2), (OR64rr GR64:$src1, GR64:$src2)>;

// or reg/mem
def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
          (OR8rm GR8:$src1, addr:$src2)>;
def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
          (OR16rm GR16:$src1, addr:$src2)>;
def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
          (OR32rm GR32:$src1, addr:$src2)>;
def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
          (OR64rm GR64:$src1, addr:$src2)>;

// or reg/imm
def : Pat<(or GR8:$src1,  imm:$src2), (OR8ri  GR8 :$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, imm:$src2), (OR16ri GR16:$src1, imm:$src2)>;
def : Pat<(or GR32:$src1, imm:$src2), (OR32ri GR32:$src1, imm:$src2)>;
def : Pat<(or GR16:$src1, i16immSExt8:$src2),
          (OR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or GR32:$src1, i32immSExt8:$src2),
          (OR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt8:$src2),
          (OR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(or GR64:$src1, i64immSExt32:$src2),
          (OR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// xor reg/reg
def : Pat<(xor GR8 :$src1, GR8 :$src2), (XOR8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(xor GR16:$src1, GR16:$src2), (XOR16rr GR16:$src1, GR16:$src2)>;
def : Pat<(xor GR32:$src1, GR32:$src2), (XOR32rr GR32:$src1, GR32:$src2)>;
def : Pat<(xor GR64:$src1, GR64:$src2), (XOR64rr GR64:$src1, GR64:$src2)>;

// xor reg/mem
def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
          (XOR8rm GR8:$src1, addr:$src2)>;
def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
          (XOR16rm GR16:$src1, addr:$src2)>;
def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
          (XOR32rm GR32:$src1, addr:$src2)>;
def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
          (XOR64rm GR64:$src1, addr:$src2)>;

// xor reg/imm
def : Pat<(xor GR8:$src1, imm:$src2),
          (XOR8ri GR8:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, imm:$src2),
          (XOR16ri GR16:$src1, imm:$src2)>;
def : Pat<(xor GR32:$src1, imm:$src2),
          (XOR32ri GR32:$src1, imm:$src2)>;
def : Pat<(xor GR16:$src1, i16immSExt8:$src2),
          (XOR16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(xor GR32:$src1, i32immSExt8:$src2),
          (XOR32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt8:$src2),
          (XOR64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
          (XOR64ri32 GR64:$src1, i64immSExt32:$src2)>;

// and reg/reg
def : Pat<(and GR8 :$src1, GR8 :$src2), (AND8rr  GR8 :$src1, GR8 :$src2)>;
def : Pat<(and GR16:$src1, GR16:$src2), (AND16rr GR16:$src1, GR16:$src2)>;
def : Pat<(and GR32:$src1, GR32:$src2), (AND32rr GR32:$src1, GR32:$src2)>;
def : Pat<(and GR64:$src1, GR64:$src2), (AND64rr GR64:$src1, GR64:$src2)>;

// and reg/mem
def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
          (AND8rm GR8:$src1, addr:$src2)>;
def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
          (AND16rm GR16:$src1, addr:$src2)>;
def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
          (AND32rm GR32:$src1, addr:$src2)>;
def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
          (AND64rm GR64:$src1, addr:$src2)>;

// and reg/imm
def : Pat<(and GR8:$src1, imm:$src2),
          (AND8ri GR8:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, imm:$src2),
          (AND16ri GR16:$src1, imm:$src2)>;
def : Pat<(and GR32:$src1, imm:$src2),
          (AND32ri GR32:$src1, imm:$src2)>;
def : Pat<(and GR16:$src1, i16immSExt8:$src2),
          (AND16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(and GR32:$src1, i32immSExt8:$src2),
          (AND32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm addr:$src)>;

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}