Lines Matching refs:Subtarget

71 const X86Subtarget &Subtarget) { in handleMaskRegisterForCallingConv() argument
86 if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall)) in handleMaskRegisterForCallingConv()
89 if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) { in handleMaskRegisterForCallingConv()
90 if (Subtarget.useAVX512Regs()) in handleMaskRegisterForCallingConv()
96 if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) || in handleMaskRegisterForCallingConv()
107 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) { in getRegisterTypeForCallingConv()
113 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget); in getRegisterTypeForCallingConv()
123 if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() && in getRegisterTypeForCallingConv()
124 !Subtarget.hasX87()) in getRegisterTypeForCallingConv()
141 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) { in getNumRegistersForCallingConv()
147 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget); in getNumRegistersForCallingConv()
158 if (!Subtarget.is64Bit() && !Subtarget.hasX87()) { in getNumRegistersForCallingConv()
177 Subtarget.hasAVX512() && in getVectorTypeBreakdownForCallingConv()
179 (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) || in getVectorTypeBreakdownForCallingConv()
188 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() && in getVectorTypeBreakdownForCallingConv()
210 if (Subtarget.hasAVX512()) { in getSetCCResultType()
220 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) { in getSetCCResultType()
225 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32) in getSetCCResultType()
264 if (Subtarget.is64Bit()) { in getByValTypeAlignment()
273 if (Subtarget.hasSSE1()) in getByValTypeAlignment()
286 (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) { in getOptimalMemOpType()
288 if (Op.size() >= 64 && Subtarget.hasAVX512() && Subtarget.hasEVEX512() && in getOptimalMemOpType()
289 (Subtarget.getPreferVectorWidth() >= 512)) { in getOptimalMemOpType()
290 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32; in getOptimalMemOpType()
293 if (Op.size() >= 32 && Subtarget.hasAVX() && in getOptimalMemOpType()
294 Subtarget.useLight256BitInstructions()) { in getOptimalMemOpType()
302 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128)) in getOptimalMemOpType()
306 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) && in getOptimalMemOpType()
307 (Subtarget.getPreferVectorWidth() >= 128)) in getOptimalMemOpType()
310 Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) { in getOptimalMemOpType()
323 if (Subtarget.is64Bit() && Op.size() >= 8) in getOptimalMemOpType()
330 return Subtarget.hasSSE1(); in isSafeMemOpType()
332 return Subtarget.hasSSE2(); in isSafeMemOpType()
348 return !Subtarget.isUnalignedMem16Slow(); in isMemoryAccessFast()
350 return !Subtarget.isUnalignedMem32Slow(); in isMemoryAccessFast()
367 return (Alignment < 16 || !Subtarget.hasSSE41()); in allowsMisalignedMemoryAccesses()
390 if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasSSE41()) in allowsMemoryAccess()
392 if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasSSE2()) in allowsMemoryAccess()
396 if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasAVX2()) in allowsMemoryAccess()
398 if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasAVX()) in allowsMemoryAccess()
402 if (Subtarget.hasAVX512() && Subtarget.hasEVEX512()) in allowsMemoryAccess()
418 if (isPositionIndependent() && Subtarget.isPICStyleGOT()) in getJumpTableEncoding()
422 !Subtarget.isTargetCOFF()) in getJumpTableEncoding()
430 return Subtarget.useSoftFloat(); in useSoftFloat()
437 if (Subtarget.is64Bit()) in markLibCallAttributes()
465 assert(isPositionIndependent() && Subtarget.isPICStyleGOT()); in LowerCustomJumpTableEntry()
475 if (!Subtarget.is64Bit()) in getPICJumpTableRelocBase()
489 if (Subtarget.isPICStyleRIPRel() || in getPICJumpTableRelocBaseExpr()
490 (Subtarget.is64Bit() && in getPICJumpTableRelocBaseExpr()
507 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass; in findRepresentativeClass()
526 if (Subtarget.is64Bit()) in getAddressSpace()
547 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) { in getIRStackGuard()
551 if (Subtarget.isTargetFuchsia()) in getIRStackGuard()
561 Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14; in getIRStackGuard()
574 Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext()) in getIRStackGuard()
579 if (!Subtarget.isTargetDarwin()) in getIRStackGuard()
592 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || in insertSSPDeclarations()
593 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { in insertSSPDeclarations()
613 hasStackGuardSlotTLS(Subtarget.getTargetTriple())) in insertSSPDeclarations()
620 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || in getSDagStackGuard()
621 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { in getSDagStackGuard()
629 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || in getSSPStackGuardCheck()
630 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { in getSSPStackGuardCheck()
641 if (Subtarget.isTargetAndroid()) { in getSafeStackPointerLocation()
644 int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24; in getSafeStackPointerLocation()
649 if (Subtarget.isTargetFuchsia()) { in getSafeStackPointerLocation()
715 CCValAssign &NextVA, const X86Subtarget &Subtarget) { in Passv64i1ArgInRegs() argument
716 assert(Subtarget.hasBWI() && "Expected AVX512BW target!"); in Passv64i1ArgInRegs()
717 assert(Subtarget.is32Bit() && "Expecting 32 bit target"); in Passv64i1ArgInRegs()
789 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) { in LowerReturn()
792 } else if (!Subtarget.hasSSE2() && in LowerReturn()
816 if (Subtarget.is64Bit()) { in LowerReturn()
824 if (!Subtarget.hasSSE2()) in LowerReturn()
835 Subtarget); in LowerReturn()
901 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ? in LowerReturn()
919 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); in LowerReturn()
983 bool Darwin = Subtarget.getTargetTriple().isOSDarwin(); in getTypeForExtReturn()
1008 const SDLoc &DL, const X86Subtarget &Subtarget, in getv64i1Argument() argument
1010 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!"); in getv64i1Argument()
1011 assert(Subtarget.is32Bit() && "Expecting 32 bit target"); in getv64i1Argument()
1099 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); in LowerCallResult()
1121 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) { in LowerCallResult()
1127 } else if (!Subtarget.hasSSE2() && in LowerCallResult()
1142 if (!Subtarget.hasX87()) in LowerCallResult()
1153 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InGlue); in LowerCallResult()
1201 const X86Subtarget &Subtarget) { in hasCalleePopSRet() argument
1209 if (!Subtarget.is32Bit()) in hasCalleePopSRet()
1221 if (Subtarget.getTargetTriple().isOSMSVCRT()) in hasCalleePopSRet()
1225 if (Subtarget.isTargetMCU()) in hasCalleePopSRet()
1395 if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() && in LowerMemArgument()
1412 const X86Subtarget &Subtarget) { in get64BitArgumentGPRs() argument
1413 assert(Subtarget.is64Bit()); in get64BitArgumentGPRs()
1415 if (Subtarget.isCallingConvWin64(CallConv)) { in get64BitArgumentGPRs()
1431 const X86Subtarget &Subtarget) { in get64BitArgumentXMMs() argument
1432 assert(Subtarget.is64Bit()); in get64BitArgumentXMMs()
1433 if (Subtarget.isCallingConvWin64(CallConv)) { in get64BitArgumentXMMs()
1441 bool isSoftFloat = Subtarget.useSoftFloat(); in get64BitArgumentXMMs()
1442 if (isSoftFloat || !Subtarget.hasSSE1()) in get64BitArgumentXMMs()
1468 SelectionDAG &DAG, const X86Subtarget &Subtarget, in VarArgsLoweringHelper() argument
1470 : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget), in VarArgsLoweringHelper()
1474 FrameLowering(*Subtarget.getFrameLowering()), in VarArgsLoweringHelper()
1486 bool is64Bit() const { return Subtarget.is64Bit(); } in is64Bit()
1487 bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); } in isWin64()
1492 const X86Subtarget &Subtarget; member in __anon354e4a110211::VarArgsLoweringHelper
1518 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget); in createVarArgAreaAndStoreRegisters()
1520 get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget); in createVarArgAreaAndStoreRegisters()
1524 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) && in createVarArgAreaAndStoreRegisters()
1619 if (Subtarget.useAVX512Regs() && in forwardMustTailParameters()
1623 else if (Subtarget.hasAVX()) in forwardMustTailParameters()
1625 else if (Subtarget.hasSSE2()) in forwardMustTailParameters()
1678 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() && in LowerFormalArguments()
1683 bool Is64Bit = Subtarget.is64Bit(); in LowerFormalArguments()
1684 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv); in LowerFormalArguments()
1727 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget); in LowerFormalArguments()
1739 RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass; in LowerFormalArguments()
1741 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass; in LowerFormalArguments()
1743 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass; in LowerFormalArguments()
1751 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass; in LowerFormalArguments()
1753 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass; in LowerFormalArguments()
1817 if (X86::isExtendedSwiftAsyncFrameSupported(Subtarget, MF)) in LowerFormalArguments()
1820 int PtrSize = Subtarget.is64Bit() ? 8 : 4; in LowerFormalArguments()
1861 VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo) in LowerFormalArguments()
1875 if (!canGuaranteeTCO(CallConv) && hasCalleePopSRet(Ins, Subtarget)) in LowerFormalArguments()
1937 if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() && in LowerMemOpCallTo()
2007 bool Is64Bit = Subtarget.is64Bit(); in LowerCall()
2008 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv); in LowerCall()
2012 bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget); in LowerCall()
2043 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) { in LowerCall()
2152 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); in LowerCall()
2223 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget); in LowerCall()
2255 if (Subtarget.isPICStyleGOT()) { in LowerCall()
2286 (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) { in LowerCall()
2301 assert((Subtarget.hasSSE1() || !NumXMMRegs) in LowerCall()
2410 } else if (Subtarget.isTarget64BitILP32() && in LowerCall()
2477 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); in LowerCall()
2622 const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign(); in GetAlignedArgumentStackSize()
2623 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize(); in GetAlignedArgumentStackSize()
2756 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC); in IsEligibleForTailCallOptimization()
2757 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC); in IsEligibleForTailCallOptimization()
2778 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); in IsEligibleForTailCallOptimization()
2834 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); in IsEligibleForTailCallOptimization()
2852 const X86InstrInfo *TII = Subtarget.getInstrInfo(); in IsEligibleForTailCallOptimization()
2873 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) && in IsEligibleForTailCallOptimization()
2901 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg, in IsEligibleForTailCallOptimization()