Lines matching full:x86 in X86FastISel.cpp
1 //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
9 // This file defines the X86-specific support for the FastISel class. Much
15 #include "X86.h"
161 bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
219 X86::AddrIndexReg); in addFullAddress()
225 bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, in foldX86XALUIntrinsic()
245 X86::CondCode TmpCC; in foldX86XALUIntrinsic()
251 case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break; in foldX86XALUIntrinsic()
253 case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break; in foldX86XALUIntrinsic()
306 // We only handle legal types. For example, on x86-32 the instruction in isTypeLegal()
307 // selector contains all of the 64-bit instructions from x86-64, in isTypeLegal()
337 Opc = X86::MOV8rm; in X86FastEmitLoad()
340 Opc = X86::MOV16rm; in X86FastEmitLoad()
343 Opc = X86::MOV32rm; in X86FastEmitLoad()
346 // Must be in x86-64 mode. in X86FastEmitLoad()
347 Opc = X86::MOV64rm; in X86FastEmitLoad()
350 Opc = HasAVX512 ? X86::VMOVSSZrm_alt in X86FastEmitLoad()
351 : HasAVX ? X86::VMOVSSrm_alt in X86FastEmitLoad()
352 : HasSSE1 ? X86::MOVSSrm_alt in X86FastEmitLoad()
353 : X86::LD_Fp32m; in X86FastEmitLoad()
356 Opc = HasAVX512 ? X86::VMOVSDZrm_alt in X86FastEmitLoad()
357 : HasAVX ? X86::VMOVSDrm_alt in X86FastEmitLoad()
358 : HasSSE2 ? X86::MOVSDrm_alt in X86FastEmitLoad()
359 : X86::LD_Fp64m; in X86FastEmitLoad()
366 Opc = HasVLX ? X86::VMOVNTDQAZ128rm : in X86FastEmitLoad()
367 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm; in X86FastEmitLoad()
369 Opc = HasVLX ? X86::VMOVAPSZ128rm : in X86FastEmitLoad()
370 HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm; in X86FastEmitLoad()
372 Opc = HasVLX ? X86::VMOVUPSZ128rm : in X86FastEmitLoad()
373 HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm; in X86FastEmitLoad()
377 Opc = HasVLX ? X86::VMOVNTDQAZ128rm : in X86FastEmitLoad()
378 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm; in X86FastEmitLoad()
380 Opc = HasVLX ? X86::VMOVAPDZ128rm : in X86FastEmitLoad()
381 HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm; in X86FastEmitLoad()
383 Opc = HasVLX ? X86::VMOVUPDZ128rm : in X86FastEmitLoad()
384 HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm; in X86FastEmitLoad()
391 Opc = HasVLX ? X86::VMOVNTDQAZ128rm : in X86FastEmitLoad()
392 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm; in X86FastEmitLoad()
394 Opc = HasVLX ? X86::VMOVDQA64Z128rm : in X86FastEmitLoad()
395 HasAVX ? X86::VMOVDQArm : X86::MOVDQArm; in X86FastEmitLoad()
397 Opc = HasVLX ? X86::VMOVDQU64Z128rm : in X86FastEmitLoad()
398 HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm; in X86FastEmitLoad()
403 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm; in X86FastEmitLoad()
405 return false; // Force split for X86::VMOVNTDQArm in X86FastEmitLoad()
407 Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm; in X86FastEmitLoad()
409 Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm; in X86FastEmitLoad()
414 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm; in X86FastEmitLoad()
416 return false; // Force split for X86::VMOVNTDQArm in X86FastEmitLoad()
418 Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm; in X86FastEmitLoad()
420 Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm; in X86FastEmitLoad()
428 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm; in X86FastEmitLoad()
430 return false; // Force split for X86::VMOVNTDQArm in X86FastEmitLoad()
432 Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm; in X86FastEmitLoad()
434 Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm; in X86FastEmitLoad()
439 Opc = X86::VMOVNTDQAZrm; in X86FastEmitLoad()
441 Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm; in X86FastEmitLoad()
446 Opc = X86::VMOVNTDQAZrm; in X86FastEmitLoad()
448 Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm; in X86FastEmitLoad()
458 Opc = X86::VMOVNTDQAZrm; in X86FastEmitLoad()
460 Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm; in X86FastEmitLoad()
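The load-opcode chains above follow a fixed feature ladder: prefer the EVEX (AVX-512/VLX) form, then the VEX (AVX) form, then legacy SSE, falling back to x87 for scalar FP, with alignment and non-temporal hints deciding between the aligned, unaligned and MOVNTDQA variants. A minimal standalone sketch of that ladder for the f32 case (illustrative only, not LLVM code; the Features struct and function name are invented here):

    #include <cstdio>

    // Hypothetical stand-in for the subtarget feature queries used above.
    struct Features { bool hasAVX512, hasAVX, hasSSE1; };

    // Mirrors the HasAVX512 ? VMOVSSZrm_alt : HasAVX ? VMOVSSrm_alt : ... chain:
    // take the widest available encoding, fall back to x87 if there is no SSE1.
    const char *chooseF32LoadOpc(const Features &F) {
      if (F.hasAVX512) return "VMOVSSZrm_alt"; // EVEX-encoded scalar load
      if (F.hasAVX)    return "VMOVSSrm_alt";  // VEX-encoded scalar load
      if (F.hasSSE1)   return "MOVSSrm_alt";   // legacy SSE scalar load
      return "LD_Fp32m";                       // x87 fallback
    }

    int main() {
      Features F{false, true, true};
      std::printf("%s\n", chooseF32LoadOpc(F)); // prints VMOVSSrm_alt
    }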
496 Register AndResult = createResultReg(&X86::GR8RegClass); in X86FastEmitStore()
498 TII.get(X86::AND8ri), AndResult) in X86FastEmitStore()
503 case MVT::i8: Opc = X86::MOV8mr; break; in X86FastEmitStore()
504 case MVT::i16: Opc = X86::MOV16mr; break; in X86FastEmitStore()
506 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr; in X86FastEmitStore()
509 // Must be in x86-64 mode. in X86FastEmitStore()
510 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr; in X86FastEmitStore()
515 Opc = X86::MOVNTSS; in X86FastEmitStore()
517 Opc = HasAVX512 ? X86::VMOVSSZmr : in X86FastEmitStore()
518 HasAVX ? X86::VMOVSSmr : X86::MOVSSmr; in X86FastEmitStore()
520 Opc = X86::ST_Fp32m; in X86FastEmitStore()
525 Opc = X86::MOVNTSD; in X86FastEmitStore()
527 Opc = HasAVX512 ? X86::VMOVSDZmr : in X86FastEmitStore()
528 HasAVX ? X86::VMOVSDmr : X86::MOVSDmr; in X86FastEmitStore()
530 Opc = X86::ST_Fp64m; in X86FastEmitStore()
533 Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr; in X86FastEmitStore()
538 Opc = HasVLX ? X86::VMOVNTPSZ128mr : in X86FastEmitStore()
539 HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr; in X86FastEmitStore()
541 Opc = HasVLX ? X86::VMOVAPSZ128mr : in X86FastEmitStore()
542 HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr; in X86FastEmitStore()
544 Opc = HasVLX ? X86::VMOVUPSZ128mr : in X86FastEmitStore()
545 HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr; in X86FastEmitStore()
550 Opc = HasVLX ? X86::VMOVNTPDZ128mr : in X86FastEmitStore()
551 HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr; in X86FastEmitStore()
553 Opc = HasVLX ? X86::VMOVAPDZ128mr : in X86FastEmitStore()
554 HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr; in X86FastEmitStore()
556 Opc = HasVLX ? X86::VMOVUPDZ128mr : in X86FastEmitStore()
557 HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr; in X86FastEmitStore()
565 Opc = HasVLX ? X86::VMOVNTDQZ128mr : in X86FastEmitStore()
566 HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr; in X86FastEmitStore()
568 Opc = HasVLX ? X86::VMOVDQA64Z128mr : in X86FastEmitStore()
569 HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr; in X86FastEmitStore()
571 Opc = HasVLX ? X86::VMOVDQU64Z128mr : in X86FastEmitStore()
572 HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr; in X86FastEmitStore()
578 Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr; in X86FastEmitStore()
580 Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr; in X86FastEmitStore()
582 Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr; in X86FastEmitStore()
588 Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr; in X86FastEmitStore()
590 Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr; in X86FastEmitStore()
592 Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr; in X86FastEmitStore()
601 Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr; in X86FastEmitStore()
603 Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr; in X86FastEmitStore()
605 Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr; in X86FastEmitStore()
610 Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr; in X86FastEmitStore()
612 Opc = X86::VMOVUPSZmr; in X86FastEmitStore()
617 Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr; in X86FastEmitStore()
619 Opc = X86::VMOVUPDZmr; in X86FastEmitStore()
629 Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr; in X86FastEmitStore()
631 Opc = X86::VMOVDQU64Zmr; in X86FastEmitStore()
668 case MVT::i8: Opc = X86::MOV8mi; break; in X86FastEmitStore()
669 case MVT::i16: Opc = X86::MOV16mi; break; in X86FastEmitStore()
670 case MVT::i32: Opc = X86::MOV32mi; break; in X86FastEmitStore()
674 Opc = X86::MOV64mi32; in X86FastEmitStore()
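The store side mirrors the same tiering, with the extra choice between non-temporal, aligned and unaligned forms. A standalone sketch of the v4f32 store decision (not LLVM code; it assumes, as is conventional for MOVNTPS, that the non-temporal form is only picked when the store is suitably aligned):

    #include <cstdio>

    struct Features { bool hasVLX, hasAVX; };

    // Sketch of the v4f32 store choice above. The Aligned/NonTemporal gating is
    // an assumption here; the listing only shows the opcode assignments.
    const char *chooseV4F32StoreOpc(const Features &F, bool NonTemporal,
                                    bool Aligned) {
      if (NonTemporal && Aligned)
        return F.hasVLX ? "VMOVNTPSZ128mr" : F.hasAVX ? "VMOVNTPSmr" : "MOVNTPSmr";
      if (Aligned)
        return F.hasVLX ? "VMOVAPSZ128mr"  : F.hasAVX ? "VMOVAPSmr"  : "MOVAPSmr";
      return   F.hasVLX ? "VMOVUPSZ128mr"  : F.hasAVX ? "VMOVUPSmr"  : "MOVUPSmr";
    }

    int main() {
      Features F{false, true};
      std::printf("%s\n", chooseV4F32StoreOpc(F, /*NonTemporal=*/false,
                                              /*Aligned=*/true)); // VMOVAPSmr
    }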
754 AM.Base.Reg = X86::RIP; in handleConstantAddresses()
779 Opc = X86::MOV64rm; in handleConstantAddresses()
780 RC = &X86::GR64RegClass; in handleConstantAddresses()
782 Opc = X86::MOV32rm; in handleConstantAddresses()
783 RC = &X86::GR32RegClass; in handleConstantAddresses()
788 StubAM.Base.Reg = X86::RIP; in handleConstantAddresses()
1014 // method, which is not guaranteed for X86. in X86SelectCallAddress()
1078 AM.Base.Reg = X86::RIP; in X86SelectCallAddress()
1093 Register CopyReg = createResultReg(&X86::GR32RegClass); in X86SelectCallAddress()
1094 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr), in X86SelectCallAddress()
1098 Register ExtReg = createResultReg(&X86::GR64RegClass); in X86SelectCallAddress()
1103 .addImm(X86::sub_32bit); in X86SelectCallAddress()
1241 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) in X86SelectRet()
1285 // All x86 ABIs require that for returning structs by value we copy in X86SelectRet()
1294 unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX; in X86SelectRet()
1304 TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32)) in X86SelectRet()
1308 TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32)); in X86SelectRet()
1366 case MVT::i8: return X86::CMP8rr; in X86ChooseCmpOpcode()
1367 case MVT::i16: return X86::CMP16rr; in X86ChooseCmpOpcode()
1368 case MVT::i32: return X86::CMP32rr; in X86ChooseCmpOpcode()
1369 case MVT::i64: return X86::CMP64rr; in X86ChooseCmpOpcode()
1371 return HasAVX512 ? X86::VUCOMISSZrr in X86ChooseCmpOpcode()
1372 : HasAVX ? X86::VUCOMISSrr in X86ChooseCmpOpcode()
1373 : HasSSE1 ? X86::UCOMISSrr in X86ChooseCmpOpcode()
1376 return HasAVX512 ? X86::VUCOMISDZrr in X86ChooseCmpOpcode()
1377 : HasAVX ? X86::VUCOMISDrr in X86ChooseCmpOpcode()
1378 : HasSSE2 ? X86::UCOMISDrr in X86ChooseCmpOpcode()
1391 return X86::CMP8ri; in X86ChooseCmpImmediateOpcode()
1393 return X86::CMP16ri; in X86ChooseCmpImmediateOpcode()
1395 return X86::CMP32ri; in X86ChooseCmpImmediateOpcode()
1399 return isInt<32>(RHSC->getSExtValue()) ? X86::CMP64ri32 : 0; in X86ChooseCmpImmediateOpcode()
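The 64-bit compare-with-immediate case only exists as CMP64ri32, which sign-extends a 32-bit immediate, so line 1399 bails out (returns 0) for constants that do not fit. A small standalone illustration of that check (a local helper stands in for llvm::isInt<32>):

    #include <cstdint>
    #include <cstdio>

    // Does the value fit in a signed 32-bit immediate, the only immediate
    // form CMP64ri32 accepts?
    static bool fitsInSigned32(int64_t V) {
      return V >= INT32_MIN && V <= INT32_MAX;
    }

    // Sketch of the 64-bit branch of X86ChooseCmpImmediateOpcode: return no
    // opcode when the constant would need a full 64-bit materialization.
    const char *chooseCmp64Imm(int64_t RHS) {
      return fitsInSigned32(RHS) ? "CMP64ri32" : "";
    }

    int main() {
      std::printf("%s\n", chooseCmp64Imm(42));        // CMP64ri32
      std::printf("%s\n", chooseCmp64Imm(1LL << 40)); // empty: no immediate form,
                                                      // compare against a register
    }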
1453 ResultReg = createResultReg(&X86::GR32RegClass); in X86SelectCmp()
1454 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0), in X86SelectCmp()
1456 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit); in X86SelectCmp()
1462 ResultReg = createResultReg(&X86::GR8RegClass); in X86SelectCmp()
1463 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri), in X86SelectCmp()
1488 { X86::COND_E, X86::COND_NP, X86::AND8rr }, in X86SelectCmp()
1489 { X86::COND_NE, X86::COND_P, X86::OR8rr } in X86SelectCmp()
1498 ResultReg = createResultReg(&X86::GR8RegClass); in X86SelectCmp()
1503 Register FlagReg1 = createResultReg(&X86::GR8RegClass); in X86SelectCmp()
1504 Register FlagReg2 = createResultReg(&X86::GR8RegClass); in X86SelectCmp()
1505 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in X86SelectCmp()
1507 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in X86SelectCmp()
1515 X86::CondCode CC; in X86SelectCmp()
1517 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); in X86SelectCmp()
1518 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); in X86SelectCmp()
1527 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in X86SelectCmp()
1558 case MVT::i8: MovInst = X86::MOVZX32rr8; break; in X86SelectZExt()
1559 case MVT::i16: MovInst = X86::MOVZX32rr16; break; in X86SelectZExt()
1560 case MVT::i32: MovInst = X86::MOV32rr; break; in X86SelectZExt()
1564 Register Result32 = createResultReg(&X86::GR32RegClass); in X86SelectZExt()
1568 ResultReg = createResultReg(&X86::GR64RegClass); in X86SelectZExt()
1571 .addImm(0).addReg(Result32).addImm(X86::sub_32bit); in X86SelectZExt()
1575 Register Result32 = createResultReg(&X86::GR32RegClass); in X86SelectZExt()
1576 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8), in X86SelectZExt()
1579 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); in X86SelectZExt()
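The zero-extension path above leans on the x86-64 rule that writing a 32-bit register clears bits 63:32, so an i8/i16 → i64 zext is just a 32-bit MOVZX whose result is then inserted into a 64-bit register (the .addImm(0)...addImm(X86::sub_32bit) operands at line 1571). A tiny sketch of the equivalent arithmetic (illustrative, not LLVM code):

    #include <cstdint>
    #include <cassert>

    // Equivalent of the MOVZX32rr8 + subregister insertion above: the 32-bit
    // zero-extend already yields the correct 64-bit value, because a 32-bit
    // register write implicitly clears the upper half on x86-64.
    uint64_t zext8to64(uint8_t V) {
      uint32_t Lo32 = V;                  // MOVZX32rr8
      return static_cast<uint64_t>(Lo32); // upper 32 bits are known zero
    }

    int main() {
      assert(zext8to64(0xAB) == 0xABu);
      return 0;
    }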
1609 ResultReg = createResultReg(&X86::GR8RegClass); in X86SelectSExt()
1610 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r), in X86SelectSExt()
1619 Register Result32 = createResultReg(&X86::GR32RegClass); in X86SelectSExt()
1620 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8), in X86SelectSExt()
1623 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit); in X86SelectSExt()
1645 X86::CondCode CC; in X86SelectBranch()
1695 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate); in X86SelectBranch()
1696 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); in X86SelectBranch()
1705 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) in X86SelectBranch()
1708 // X86 requires a second branch to handle UNE (and OEQ, which is mapped in X86SelectBranch()
1711 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) in X86SelectBranch()
1712 .addMBB(TrueMBB).addImm(X86::COND_P); in X86SelectBranch()
1727 case MVT::i8: TestOpc = X86::TEST8ri; break; in X86SelectBranch()
1728 case MVT::i16: TestOpc = X86::TEST16ri; break; in X86SelectBranch()
1729 case MVT::i32: TestOpc = X86::TEST32ri; break; in X86SelectBranch()
1730 case MVT::i64: TestOpc = X86::TEST64ri32; break; in X86SelectBranch()
1739 unsigned JmpCond = X86::COND_NE; in X86SelectBranch()
1742 JmpCond = X86::COND_E; in X86SelectBranch()
1745 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) in X86SelectBranch()
1759 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) in X86SelectBranch()
1772 if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) { in X86SelectBranch()
1774 OpReg = createResultReg(&X86::GR32RegClass); in X86SelectBranch()
1778 OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit); in X86SelectBranch()
1780 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) in X86SelectBranch()
1783 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1)) in X86SelectBranch()
1784 .addMBB(TrueMBB).addImm(X86::COND_NE); in X86SelectBranch()
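The two-branch emission above (a JCC with the mapped condition plus a second JCC on COND_P at line 1712) exists because UNE must also be taken when the UCOMIS compare reports "unordered" via the parity flag. A standalone sketch of the flag logic (illustrative only):

    #include <cassert>

    // After UCOMISS/UCOMISD: unordered sets ZF=PF=CF=1, equal sets ZF=1,PF=0,
    // and ordered-not-equal leaves ZF=0,PF=0. UNE ("unordered or not equal")
    // therefore needs two checks, hence the extra branch on COND_P above.
    bool fcmpUNE(bool ZF, bool PF) { return !ZF || PF; } // JNE || JP
    bool fcmpOEQ(bool ZF, bool PF) { return ZF && !PF; } // the complement

    int main() {
      assert(fcmpUNE(/*ZF=*/false, /*PF=*/false)); // ordered, not equal
      assert(fcmpUNE(/*ZF=*/true,  /*PF=*/true));  // unordered (NaN operand)
      assert(!fcmpUNE(/*ZF=*/true, /*PF=*/false)); // ordered and equal
      assert(fcmpOEQ(/*ZF=*/true,  /*PF=*/false));
    }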
1793 CReg = X86::CL; in X86SelectShift()
1794 RC = &X86::GR8RegClass; in X86SelectShift()
1796 case Instruction::LShr: OpReg = X86::SHR8rCL; break; in X86SelectShift()
1797 case Instruction::AShr: OpReg = X86::SAR8rCL; break; in X86SelectShift()
1798 case Instruction::Shl: OpReg = X86::SHL8rCL; break; in X86SelectShift()
1802 CReg = X86::CX; in X86SelectShift()
1803 RC = &X86::GR16RegClass; in X86SelectShift()
1806 case Instruction::LShr: OpReg = X86::SHR16rCL; break; in X86SelectShift()
1807 case Instruction::AShr: OpReg = X86::SAR16rCL; break; in X86SelectShift()
1808 case Instruction::Shl: OpReg = X86::SHL16rCL; break; in X86SelectShift()
1811 CReg = X86::ECX; in X86SelectShift()
1812 RC = &X86::GR32RegClass; in X86SelectShift()
1815 case Instruction::LShr: OpReg = X86::SHR32rCL; break; in X86SelectShift()
1816 case Instruction::AShr: OpReg = X86::SAR32rCL; break; in X86SelectShift()
1817 case Instruction::Shl: OpReg = X86::SHL32rCL; break; in X86SelectShift()
1820 CReg = X86::RCX; in X86SelectShift()
1821 RC = &X86::GR64RegClass; in X86SelectShift()
1824 case Instruction::LShr: OpReg = X86::SHR64rCL; break; in X86SelectShift()
1825 case Instruction::AShr: OpReg = X86::SAR64rCL; break; in X86SelectShift()
1826 case Instruction::Shl: OpReg = X86::SHL64rCL; break; in X86SelectShift()
1844 // The shift instruction uses X86::CL. If we defined a super-register in X86SelectShift()
1845 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here. in X86SelectShift()
1846 if (CReg != X86::CL) in X86SelectShift()
1848 TII.get(TargetOpcode::KILL), X86::CL) in X86SelectShift()
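Variable-count shifts on x86 take their count in CL, which is why the code above copies the shift amount into CL/CX/ECX/RCX, emits the matching *rCL opcode, and adds a KILL of X86::CL when a super-register was written. A mechanical standalone sketch of the opcode choice (illustrative; the enum and function are invented here):

    #include <cstdio>

    enum class ShiftKind { Shl, LShr, AShr };

    // Pick the CL-count shift opcode by operand width, mirroring the switch
    // nests in X86SelectShift above.
    const char *chooseShiftOpc(ShiftKind K, unsigned Bits) {
      switch (Bits) {
      case 8:  return K == ShiftKind::Shl ? "SHL8rCL"
                    : K == ShiftKind::LShr ? "SHR8rCL" : "SAR8rCL";
      case 16: return K == ShiftKind::Shl ? "SHL16rCL"
                    : K == ShiftKind::LShr ? "SHR16rCL" : "SAR16rCL";
      case 32: return K == ShiftKind::Shl ? "SHL32rCL"
                    : K == ShiftKind::LShr ? "SHR32rCL" : "SAR32rCL";
      case 64: return K == ShiftKind::Shl ? "SHL64rCL"
                    : K == ShiftKind::LShr ? "SHR64rCL" : "SAR64rCL";
      default: return "";
      }
    }

    int main() {
      std::printf("%s\n", chooseShiftOpc(ShiftKind::AShr, 32)); // SAR32rCL
    }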
1864 // For the X86 DIV/IDIV instruction, in most cases the dividend in X86SelectDivRem()
1889 { &X86::GR8RegClass, X86::AX, 0, { in X86SelectDivRem()
1890 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv in X86SelectDivRem()
1891 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem in X86SelectDivRem()
1892 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv in X86SelectDivRem()
1893 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem in X86SelectDivRem()
1896 { &X86::GR16RegClass, X86::AX, X86::DX, { in X86SelectDivRem()
1897 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv in X86SelectDivRem()
1898 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem in X86SelectDivRem()
1899 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv in X86SelectDivRem()
1900 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem in X86SelectDivRem()
1903 { &X86::GR32RegClass, X86::EAX, X86::EDX, { in X86SelectDivRem()
1904 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv in X86SelectDivRem()
1905 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem in X86SelectDivRem()
1906 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv in X86SelectDivRem()
1907 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem in X86SelectDivRem()
1910 { &X86::GR64RegClass, X86::RAX, X86::RDX, { in X86SelectDivRem()
1911 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv in X86SelectDivRem()
1912 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem in X86SelectDivRem()
1913 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv in X86SelectDivRem()
1914 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem in X86SelectDivRem()
1961 Register Zero32 = createResultReg(&X86::GR32RegClass); in X86SelectDivRem()
1963 TII.get(X86::MOV32r0), Zero32); in X86SelectDivRem()
1971 .addReg(Zero32, 0, X86::sub_16bit); in X86SelectDivRem()
1979 .addImm(0).addReg(Zero32).addImm(X86::sub_32bit); in X86SelectDivRem()
1997 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) { in X86SelectDivRem()
1998 Register SourceSuperReg = createResultReg(&X86::GR16RegClass); in X86SelectDivRem()
1999 Register ResultSuperReg = createResultReg(&X86::GR16RegClass); in X86SelectDivRem()
2001 TII.get(Copy), SourceSuperReg).addReg(X86::AX); in X86SelectDivRem()
2004 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri), in X86SelectDivRem()
2009 X86::sub_8bit); in X86SelectDivRem()
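The lines around 1997-2009 handle an 8-bit remainder: DIV8r/IDIV8r leave the remainder in AH, which cannot be copied directly in 64-bit mode when a REX prefix would be needed, so the code copies AX, shifts right by 8 (SHR16ri), and extracts the low byte. The equivalent arithmetic, as a standalone sketch:

    #include <cstdint>
    #include <cassert>

    // 8-bit DIV leaves the quotient in AL and the remainder in AH. Instead of
    // reading AH directly, copy AX, shift right by 8, and take the low byte.
    uint8_t remainderFromAX(uint16_t AX) {
      uint16_t Shifted = static_cast<uint16_t>(AX >> 8); // SHR16ri 8
      return static_cast<uint8_t>(Shifted);              // sub_8bit extraction
    }

    int main() {
      // After dividing 263 by 10 with DIV8r: AL = 26 (0x1A), AH = 3, AX = 0x031A.
      assert(remainderFromAX(0x031A) == 0x03);
    }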
2036 X86::CondCode CC = X86::COND_NE; in X86FastEmitCMoveSelect()
2047 { X86::COND_NP, X86::COND_E, X86::TEST8rr }, in X86FastEmitCMoveSelect()
2048 { X86::COND_P, X86::COND_NE, X86::OR8rr } in X86FastEmitCMoveSelect()
2064 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate); in X86FastEmitCMoveSelect()
2065 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); in X86FastEmitCMoveSelect()
2078 Register FlagReg1 = createResultReg(&X86::GR8RegClass); in X86FastEmitCMoveSelect()
2079 Register FlagReg2 = createResultReg(&X86::GR8RegClass); in X86FastEmitCMoveSelect()
2080 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in X86FastEmitCMoveSelect()
2082 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in X86FastEmitCMoveSelect()
2086 Register TmpReg = createResultReg(&X86::GR8RegClass); in X86FastEmitCMoveSelect()
2116 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { in X86FastEmitCMoveSelect()
2118 CondReg = createResultReg(&X86::GR32RegClass); in X86FastEmitCMoveSelect()
2122 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); in X86FastEmitCMoveSelect()
2124 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) in X86FastEmitCMoveSelect()
2138 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC) / 8, false, in X86FastEmitCMoveSelect()
2200 const TargetRegisterClass *VR128X = &X86::VR128XRegClass; in X86FastEmitSSESelect()
2201 const TargetRegisterClass *VK1 = &X86::VK1RegClass; in X86FastEmitSSESelect()
2204 (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri; in X86FastEmitSSESelect()
2217 (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk; in X86FastEmitSSESelect()
2226 const TargetRegisterClass *VR128 = &X86::VR128RegClass; in X86FastEmitSSESelect()
2234 (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri; in X86FastEmitSSESelect()
2236 (RetVT == MVT::f32) ? X86::VBLENDVPSrrr : X86::VBLENDVPDrrr; in X86FastEmitSSESelect()
2248 { X86::CMPSSrri, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr }, in X86FastEmitSSESelect()
2249 { X86::CMPSDrri, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr } in X86FastEmitSSESelect()
2259 const TargetRegisterClass *VR128 = &X86::VR128RegClass; in X86FastEmitSSESelect()
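When AVX's VBLENDV is unavailable, the select above is assembled from the classic CMPSS + ANDPS + ANDNPS + ORPS sequence (lines 2248-2249): the compare yields an all-ones or all-zeros mask that picks between the operands bitwise. A standalone integer sketch of the same idea:

    #include <cstdint>
    #include <cassert>

    // Bitwise select: (mask & true) | (~mask & false), i.e. ANDPS, ANDNPS, ORPS.
    uint32_t selectByMask(uint32_t Mask, uint32_t TrueBits, uint32_t FalseBits) {
      return (Mask & TrueBits) | (~Mask & FalseBits);
    }

    int main() {
      uint32_t AllOnes = 0xFFFFFFFFu, AllZeros = 0; // what CMPSS produces
      assert(selectByMask(AllOnes,  0x11111111u, 0x22222222u) == 0x11111111u);
      assert(selectByMask(AllZeros, 0x11111111u, 0x22222222u) == 0x22222222u);
    }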
2278 case MVT::i8: Opc = X86::CMOV_GR8; break; in X86FastEmitPseudoSelect()
2279 case MVT::i16: Opc = X86::CMOV_GR16; break; in X86FastEmitPseudoSelect()
2280 case MVT::i32: Opc = X86::CMOV_GR32; break; in X86FastEmitPseudoSelect()
2282 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR16X : X86::CMOV_FR16; break; in X86FastEmitPseudoSelect()
2284 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X : X86::CMOV_FR32; break; in X86FastEmitPseudoSelect()
2286 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X : X86::CMOV_FR64; break; in X86FastEmitPseudoSelect()
2290 X86::CondCode CC = X86::COND_NE; in X86FastEmitPseudoSelect()
2298 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate()); in X86FastEmitPseudoSelect()
2299 if (CC > X86::LAST_VALID_COND) in X86FastEmitPseudoSelect()
2317 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) { in X86FastEmitPseudoSelect()
2319 CondReg = createResultReg(&X86::GR32RegClass); in X86FastEmitPseudoSelect()
2323 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit); in X86FastEmitPseudoSelect()
2325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri)) in X86FastEmitPseudoSelect()
2414 { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr }, in X86SelectIntToFP()
2415 { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } }, in X86SelectIntToFP()
2416 { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr }, in X86SelectIntToFP()
2417 { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } }, in X86SelectIntToFP()
2420 { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr }, in X86SelectIntToFP()
2421 { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr }, in X86SelectIntToFP()
2492 HasAVX512 ? X86::VCVTSS2SDZrr in X86SelectFPExt()
2493 : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr; in X86SelectFPExt()
2506 HasAVX512 ? X86::VCVTSD2SSZrr in X86SelectFPTrunc()
2507 : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr; in X86SelectFPTrunc()
2537 X86::sub_8bit); in X86SelectTrunc()
2619 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr in fastLowerIntrinsicCall()
2620 : X86::VCVTPS2PHrr; in fastLowerIntrinsicCall()
2624 Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr in fastLowerIntrinsicCall()
2625 : X86::VMOVPDI2DIrr; in fastLowerIntrinsicCall()
2626 ResultReg = createResultReg(&X86::GR32RegClass); in fastLowerIntrinsicCall()
2631 unsigned RegIdx = X86::sub_16bit; in fastLowerIntrinsicCall()
2642 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr in fastLowerIntrinsicCall()
2643 : X86::VCVTPH2PSrr; in fastLowerIntrinsicCall()
2673 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break; in fastLowerIntrinsicCall()
2674 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break; in fastLowerIntrinsicCall()
2684 assert(((FrameReg == X86::RBP && VT == MVT::i64) || in fastLowerIntrinsicCall()
2685 (FrameReg == X86::EBP && VT == MVT::i32)) && in fastLowerIntrinsicCall()
2786 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP)); in fastLowerIntrinsicCall()
2803 { X86::SQRTSSr, X86::SQRTSDr }, in fastLowerIntrinsicCall()
2804 { X86::VSQRTSSr, X86::VSQRTSDr }, in fastLowerIntrinsicCall()
2805 { X86::VSQRTSSZr, X86::VSQRTSDZr }, in fastLowerIntrinsicCall()
2877 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break; in fastLowerIntrinsicCall()
2879 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break; in fastLowerIntrinsicCall()
2881 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break; in fastLowerIntrinsicCall()
2883 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break; in fastLowerIntrinsicCall()
2885 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break; in fastLowerIntrinsicCall()
2887 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break; in fastLowerIntrinsicCall()
2898 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r }, in fastLowerIntrinsicCall()
2899 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r } in fastLowerIntrinsicCall()
2903 CondCode == X86::COND_O) { in fastLowerIntrinsicCall()
2922 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit in fastLowerIntrinsicCall()
2926 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r }; in fastLowerIntrinsicCall()
2927 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX }; in fastLowerIntrinsicCall()
2929 // the X86::MUL*r instruction. in fastLowerIntrinsicCall()
2937 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr }; in fastLowerIntrinsicCall()
2940 // X86::IMUL8r instruction. in fastLowerIntrinsicCall()
2942 TII.get(TargetOpcode::COPY), X86::AL) in fastLowerIntrinsicCall()
2954 Register ResultReg2 = createResultReg(&X86::GR8RegClass); in fastLowerIntrinsicCall()
2956 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr), in fastLowerIntrinsicCall()
2989 { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr }, in fastLowerIntrinsicCall()
2990 { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } }, in fastLowerIntrinsicCall()
2991 { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr }, in fastLowerIntrinsicCall()
2992 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } }, in fastLowerIntrinsicCall()
2993 { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr }, in fastLowerIntrinsicCall()
2994 { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } }, in fastLowerIntrinsicCall()
3053 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r8); in fastLowerIntrinsicCall()
3054 RC = &X86::GR32RegClass; in fastLowerIntrinsicCall()
3057 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r16); in fastLowerIntrinsicCall()
3058 RC = &X86::GR32RegClass; in fastLowerIntrinsicCall()
3061 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r32); in fastLowerIntrinsicCall()
3062 RC = &X86::GR32RegClass; in fastLowerIntrinsicCall()
3065 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r64r64); in fastLowerIntrinsicCall()
3066 RC = &X86::GR64RegClass; in fastLowerIntrinsicCall()
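For the *.with.overflow intrinsics (lines 2877-2887), the lowering is an ordinary ADD/SUB/IMUL followed by SETCC on COND_B (carry) for the unsigned forms or COND_O (overflow) for the signed forms. A portable standalone sketch of what those two flags mean for 32-bit addition (illustrative, not LLVM code):

    #include <cstdint>
    #include <cassert>

    // CF after a 32-bit add, i.e. the COND_B check used for uadd.with.overflow.
    bool carryAfterAdd32(uint32_t A, uint32_t B) {
      return static_cast<uint32_t>(A + B) < A;
    }

    // OF after a 32-bit add, i.e. the COND_O check used for sadd.with.overflow:
    // the operands share a sign but the result's sign differs.
    bool overflowAfterAdd32(int32_t A, int32_t B) {
      uint32_t UA = static_cast<uint32_t>(A), UB = static_cast<uint32_t>(B);
      uint32_t R = UA + UB;
      return ((~(UA ^ UB)) & (UA ^ R)) >> 31;
    }

    int main() {
      assert(carryAfterAdd32(0xFFFFFFFFu, 1));  // unsigned wrap -> COND_B
      assert(overflowAfterAdd32(INT32_MAX, 1)); // signed overflow -> COND_O
      assert(!overflowAfterAdd32(-1, 1));
    }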
3151 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D in fastLowerArguments()
3154 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9 in fastLowerArguments()
3157 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, in fastLowerArguments()
3158 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 in fastLowerArguments()
3272 // x86-32. Special handling for x86-64 is implemented. in fastLowerCall()
3289 // calling conventions on x86. in fastLowerCall()
3479 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base); in fastLowerCall()
3493 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, in fastLowerCall()
3494 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 in fastLowerCall()
3499 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri), in fastLowerCall()
3500 X86::AL).addImm(NumXMMRegs); in fastLowerCall()
3522 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r; in fastLowerCall()
3542 ? (Is64Bit ? X86::CALL64m : X86::CALL32m) in fastLowerCall()
3543 : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32); in fastLowerCall()
3547 MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0); in fastLowerCall()
3562 MIB.addReg(X86::EBX, RegState::Implicit); in fastLowerCall()
3565 MIB.addReg(X86::AL, RegState::Implicit); in fastLowerCall()
3573 X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg, in fastLowerCall()
3595 // If this is x86-64, and we disabled SSE, we can't return FP values in fastLowerCall()
3603 if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) && in fastLowerCall()
3606 CopyReg = createResultReg(&X86::RFP80RegClass); in fastLowerCall()
3619 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; in fastLowerCall()
3625 Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt; in fastLowerCall()
3732 Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass); in X86MaterializeInt()
3737 return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit); in X86MaterializeInt()
3739 return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit); in X86MaterializeInt()
3743 Register ResultReg = createResultReg(&X86::GR64RegClass); in X86MaterializeInt()
3746 .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit); in X86MaterializeInt()
3758 case MVT::i8: Opc = X86::MOV8ri; break; in X86MaterializeInt()
3759 case MVT::i16: Opc = X86::MOV16ri; break; in X86MaterializeInt()
3760 case MVT::i32: Opc = X86::MOV32ri; break; in X86MaterializeInt()
3763 Opc = X86::MOV32ri64; in X86MaterializeInt()
3765 Opc = X86::MOV64ri32; in X86MaterializeInt()
3767 Opc = X86::MOV64ri; in X86MaterializeInt()
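The 64-bit immediate materialization above picks between MOV32r0 (zero), MOV32ri64, MOV64ri32 and the full MOV64ri, presumably by whether the constant fits an unsigned or signed 32-bit immediate. A standalone sketch of that decision (illustrative; the fit checks are assumptions matching the opcode names):

    #include <cstdint>
    #include <cstdio>

    // Cheapest-first choice for a 64-bit immediate: a zero is MOV32r0, a value
    // that fits an unsigned 32-bit immediate is MOV32ri64 (the 32-bit write
    // zero-extends), a value that fits a signed 32-bit immediate is MOV64ri32
    // (sign-extended), and anything else needs the 10-byte MOV64ri.
    const char *chooseMov64Imm(int64_t V) {
      if (V == 0) return "MOV32r0";
      if (static_cast<uint64_t>(V) <= UINT32_MAX) return "MOV32ri64";
      if (V >= INT32_MIN && V <= INT32_MAX) return "MOV64ri32";
      return "MOV64ri";
    }

    int main() {
      std::printf("%s\n", chooseMov64Imm(0));              // MOV32r0
      std::printf("%s\n", chooseMov64Imm(0x89ABCDEF));     // MOV32ri64
      std::printf("%s\n", chooseMov64Imm(-5));             // MOV64ri32
      std::printf("%s\n", chooseMov64Imm(0x123456789ALL)); // MOV64ri
    }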
3793 Opc = HasAVX512 ? X86::VMOVSSZrm_alt in X86MaterializeFP()
3794 : HasAVX ? X86::VMOVSSrm_alt in X86MaterializeFP()
3795 : HasSSE1 ? X86::MOVSSrm_alt in X86MaterializeFP()
3796 : X86::LD_Fp32m; in X86MaterializeFP()
3799 Opc = HasAVX512 ? X86::VMOVSDZrm_alt in X86MaterializeFP()
3800 : HasAVX ? X86::VMOVSDrm_alt in X86MaterializeFP()
3801 : HasSSE2 ? X86::MOVSDrm_alt in X86MaterializeFP()
3802 : X86::LD_Fp64m; in X86MaterializeFP()
3812 // x86-32 PIC requires a PIC base register for constant pools. in X86MaterializeFP()
3820 PICBase = X86::RIP; in X86MaterializeFP()
3828 Register AddrReg = createResultReg(&X86::GR64RegClass); in X86MaterializeFP()
3829 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri), in X86MaterializeFP()
3870 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri), in X86MaterializeGV()
3876 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r) in X86MaterializeGV()
3877 : X86::LEA64r; in X86MaterializeGV()
3907 Opc = X86::LD_Fp032; in fastMaterializeConstant()
3911 Opc = X86::LD_Fp064; in fastMaterializeConstant()
3914 Opc = X86::LD_Fp080; in fastMaterializeConstant()
3946 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r) in fastMaterializeAlloca()
3947 : X86::LEA64r; in fastMaterializeAlloca()
3968 Opc = HasAVX512 ? X86::AVX512_FsFLD0SH : X86::FsFLD0SH; in fastMaterializeFloatZero()
3971 Opc = HasAVX512 ? X86::AVX512_FsFLD0SS in fastMaterializeFloatZero()
3972 : HasSSE1 ? X86::FsFLD0SS in fastMaterializeFloatZero()
3973 : X86::LD_Fp032; in fastMaterializeFloatZero()
3976 Opc = HasAVX512 ? X86::AVX512_FsFLD0SD in fastMaterializeFloatZero()
3977 : HasSSE2 ? X86::FsFLD0SD in fastMaterializeFloatZero()
3978 : X86::LD_Fp064; in fastMaterializeFloatZero()
4070 FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo, in createFastISel()