Lines Matching +full:low +full:- +full:vt

1 //===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
20 //===----------------------------------------------------------------------===//
105 ILP, // Scheduling for ILP in low register pressure mode.
128 bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
193 /// This base class for TargetLowering contains the SelectionDAG-independent
215 TypeScalarizeVector, // Replace this one-element vector with its element.
224 // vector, this is non-trivial at SelectionDAG
230 /// in order to type-legalize it.
256 // floating-point to integer type.
259 LLOnly, // Expand the (load) instruction into just a load-linked, which has
262 MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
263 BitTestIntrinsic, // Use a target-specific intrinsic for special bit
265 CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
269 // Rewrite to a non-atomic form for use in a known non-preemptible
373 /// Return the in-memory pointer type for the given address space, defaults to
425 /// and must be at least as large as i32. The EVL is implicitly zero-extended
430 /// target-specific MachineMemOperand flags to them. The default
458   virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
469   virtual bool shouldExpandCttzElements(EVT VT) const { return true; }
479   virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
501   getPreferredVectorAction(MVT VT) const {
503     if (VT.getVectorElementCount().isScalar())
505     // The default action for an odd-width vector is to widen.
506     if (!VT.isPow2VectorType())
518 // Return true if, for soft-promoted half, the half type should be passed
520 // pass as i16. If soft-promoted half is not used, this function is ignored
534   shouldExpandBuildVectorWithShuffles(EVT /* VT */,
543   virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
546   virtual bool hasStandaloneRem(EVT VT) const {
558 Unspecified = -1,
567 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
573 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
579 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
585 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
620 // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
635     // -1 will always result in splitting.
636     return {-1, -1, -1};
650 /// fold (conv (load x)) -> (load (conv*)x)
660 /// (store (y (conv x)), y*)) -> (store x, (x*))
706   virtual bool isCtpopFast(EVT VT) const {
707     return isOperationLegal(ISD::CTPOP, VT);
710 /// Return the maximum number of "x & (x - 1)" operations that can be done
712   virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
752 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
756   virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
765     MVT VT = MVT::getIntegerVT(NumBits);
766     return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
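// Illustrative override (hypothetical, not in this header): a target with a
// fast 128-bit vector compare-and-reduce sequence could extend the scalar
// default shown above, in the spirit of what X86 does for wide memcmp
// expansion.
MVT hasFastEqualityCompare(unsigned NumBits) const override {
  if (NumBits == 128)
    return MVT::v16i8; // compare 16 bytes at once, then reduce the mask
  MVT VT = MVT::getIntegerVT(NumBits);
  return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
}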
770 /// (X & Y) == Y ---> (~X & Y) == 0
771 /// (X & Y) != Y ---> (~X & Y) != 0
773 /// This may be profitable if the target has a bitwise and-not operation that
777 /// Note that the transform will not occur if Y is known to be a power-of-2
780 /// (X & 8) == 8 ---> (X & 8) != 0
785 /// Return true if the target has a bitwise and-not operation:
794 /// Return true if the target has a bit-test instruction:
800 /// There are two ways to clear extreme bits (either low or high):
801 /// Mask: x & (-1 << y) (the instcombine canonical form)
819   /// Should we transform the IR-optimal check for whether given truncation
821 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
849     if (OldShiftOpcode == ISD::SHL && CC->isOne())
853     if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
866 // -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
868 // -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
879     return N->getOpcode() == ISD::FDIV;
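// Standalone demonstration of the identity behind the hook above (plain
// C++20, not LLVM code): for a finite, normal double, multiplying by a power
// of two only changes the exponent field, so it equals an integer add of
// Log2(Pow2) << 52 on the value's bit pattern (52 = double mantissa bits).
#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  double C = 3.14159;
  int Log2Pow2 = 4; // i.e. multiply by 16.0
  uint64_t Bits = std::bit_cast<uint64_t>(C);
  Bits += uint64_t(Log2Pow2) << 52;
  std::printf("%f == %f\n", C * 16.0, std::bit_cast<double>(Bits));
}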
889 // Do we prefer the shift to be shift-right, shift-left, or rotate.
891 // the shift-amt (`C1`) is a power of 2 (including 0).
894       EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
901 /// sub %y, (xor %x, -1)
903   /// The variant with two adds is IR-canonical.
905   virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
910 // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
912   virtual bool preferABDSToABSWithNSW(EVT VT) const {
916 // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
920 // (TruncVT truncate(sext_in_reg(VT X, ExtVT))
921 // -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
922 // Some targets might prefer pre-sextinreg to improve truncation/saturation.
923   virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
958   virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }
967 EVT VT) const;
975 /// For targets without i1 registers, this gives the nature of the high-bits
1026 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
1028 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1044 /// The 'representative' register class is the largest legal super-reg
1048   virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
1049     const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
1055   virtual uint8_t getRepRegClassCostFor(MVT VT) const {
1056     return RepRegClassCostForVT[VT.SimpleTy];
1060   /// \p ExpansionFactor being the recursion depth - how many expansions are needed.
1077   bool isTypeLegal(EVT VT) const {
1078     assert(!VT.isSimple() ||
1079            (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
1080     return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
1084 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
1094     LegalizeTypeAction getTypeAction(MVT VT) const {
1095       return ValueTypeActions[VT.SimpleTy];
1098     void setTypeAction(MVT VT, LegalizeTypeAction Action) {
1099       ValueTypeActions[VT.SimpleTy] = Action;
1108 /// happen to EVT (second) in order to type-legalize it.
1121 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1127   LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
1128     return getTypeConversion(Context, VT).first;
1130   LegalizeTypeAction getTypeAction(MVT VT) const {
1131     return ValueTypeActions.getTypeAction(VT);
1140   virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
1141     return getTypeConversion(Context, VT).second;
1148   EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
1149     assert(!VT.isVector());
1151       switch (getTypeAction(Context, VT)) {
1153         return VT;
1155         VT = getTypeToTransformTo(Context, VT);
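// Usage sketch (assumes LLVM headers and a 32-bit target where i64 is not
// legal): query one legalization step explicitly; getTypeToExpandTo's loop
// above iterates this until it reaches a legal type.
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

static EVT oneExpandStep(const TargetLowering &TLI, LLVMContext &Ctx) {
  EVT VT = MVT::i64;
  if (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeExpandInteger)
    return TLI.getTypeToTransformTo(Ctx, VT); // i32 on such a target
  return VT;
}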
1168 /// This method returns the number of registers needed, and the VT for each
1169 /// register. It also returns the VT and quantity of the intermediate values
1171 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1180       LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1182     return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
1188 EVT memVT; // memory VT
1219 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
1228   virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
1234 /// VT must be a legal type. By default, we optimistically assume most
1236 virtual bool canOpTrap(unsigned Op, EVT VT) const;
1242                                     EVT /*VT*/) const {
1254   LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
1255     // If a target-specific SDNode requires legalization, require the target
1259     if (VT.isExtended())
1261     return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
1267   virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
1276   LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
1278     auto Action = getOperationAction(Op, VT);
1296       Supported = isSupportedFixedPointOperation(Op, VT, Scale);
1303 // If Op is a strict floating-point operation, return the result
1304 // of getOperationAction for the equivalent non-strict operation.
1305   LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
1308     default: llvm_unreachable("Unexpected FP pseudo-opcode");
1316     return getOperationAction(EqOpc, VT);
1320 /// made legal with custom lowering. This is used to help guide high-level
1323 bool isOperationLegalOrCustom(unsigned Op, EVT VT,
1326 return isOperationLegal(Op, VT);
1328 return (VT == MVT::Other || isTypeLegal(VT)) &&
1329 (getOperationAction(Op, VT) == Legal ||
1330 getOperationAction(Op, VT) == Custom);
1334 /// made legal using promotion. This is used to help guide high-level lowering
1337 bool isOperationLegalOrPromote(unsigned Op, EVT VT,
1340 return isOperationLegal(Op, VT);
1342 return (VT == MVT::Other || isTypeLegal(VT)) &&
1343 (getOperationAction(Op, VT) == Legal ||
1344 getOperationAction(Op, VT) == Promote);
1349 /// guide high-level lowering decisions. LegalOnly is an optional convenience
1351 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
1354 return isOperationLegal(Op, VT);
1356 return (VT == MVT::Other || isTypeLegal(VT)) &&
1357 (getOperationAction(Op, VT) == Legal ||
1358 getOperationAction(Op, VT) == Custom ||
1359 getOperationAction(Op, VT) == Promote);
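// Sketch of how a DAG combine typically consults this predicate before
// creating a node (assumes LLVM headers; the combine context is
// hypothetical). LegalOnly is passed once legalization has run, when
// Custom/Promote handling is no longer available.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue tryFormABDS(SDValue X, SDValue Y, EVT VT, SelectionDAG &DAG,
                           const TargetLowering &TLI, bool AfterLegalize) {
  if (!TLI.isOperationLegalOrCustomOrPromote(ISD::ABDS, VT, AfterLegalize))
    return SDValue();
  return DAG.getNode(ISD::ABDS, SDLoc(X), VT, X, Y);
}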
1364   bool isOperationCustom(unsigned Op, EVT VT) const {
1365     return getOperationAction(Op, VT) == Custom;
1370       if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
1377 /// Check whether the range [Low,High] fits in a machine word.
1378   bool rangeFitsInWord(const APInt &Low, const APInt &High,
1382     uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1397 /// clusters which contains \p NumDests unique destinations, \p Low and
1402                              const APInt &Low, const APInt &High,
1411     if (!rangeFitsInWord(Low, High, DL))
1425 /// high-level lowering decisions.
1426   bool isOperationExpand(unsigned Op, EVT VT) const {
1427     return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1431   bool isOperationLegal(unsigned Op, EVT VT) const {
1432     return (VT == MVT::Other || isTypeLegal(VT)) &&
1433            getOperationAction(Op, VT) == Legal;
1521   LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1522     return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1526   bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1527     return VT.isSimple() &&
1528            (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1529             getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1535   LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1536     return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1540   bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1541     return VT.isSimple() &&
1542            (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1543             getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1549   LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1550     return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1554   bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1555     return VT.isSimple() &&
1556            (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1557             getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1563   LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1564     return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1568   bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1569     return VT.isSimple() &&
1570            (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1571             getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1576   virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1600   getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1602            ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1605     uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1606     uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1613   bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1614     return getCondCodeAction(CC, VT) == Legal;
1619   bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1620     return getCondCodeAction(CC, VT) == Legal ||
1621            getCondCodeAction(CC, VT) == Custom;
1626   MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1627     assert(getOperationAction(Op, VT) == Promote &&
1633       PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1634     if (PTTI != PromoteToType.end()) return PTTI->second;
1636     assert((VT.isInteger() || VT.isFloatingPoint()) &&
1639     uint64_t VTBits = VT.getScalarSizeInBits();
1640     MVT NVT = VT;
1643       assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
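// Sketch: legalization asks for the promotion target only when the recorded
// action is Promote; for integer ops the loop above walks upward (i8 -> i16
// -> i32 ...) until it finds a legal type whose action is Legal or Custom.
static MVT promotedCtpopType(const TargetLowering &TLI) {
  // Precondition (asserted above): the action for (ISD::CTPOP, i8) is
  // Promote on this target.
  return TLI.getTypeToPromoteTo(ISD::CTPOP, MVT::i8); // e.g. i32
}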
1663 return getPointerTy(DL, PTy->getAddressSpace());
1666 Type *EltTy = VTy->getElementType();
1669 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1670 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1672 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1673 VTy->getElementCount());
1683 return getPointerMemTy(DL, PTy->getAddressSpace());
1686 Type *EltTy = VTy->getElementType();
1688 EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1689 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1691 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1692 VTy->getElementCount());
1711   MVT getRegisterType(MVT VT) const {
1712     assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1713     return RegisterTypeForVT[VT.SimpleTy];
1717   MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1718     if (VT.isSimple())
1719       return getRegisterType(VT.getSimpleVT());
1720     if (VT.isVector()) {
1724       (void)getVectorTypeBreakdown(Context, VT, VT1,
1728     if (VT.isInteger()) {
1729       return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1746 getNumRegisters(LLVMContext &Context, EVT VT,
1748 if (VT.isSimple()) {
1749 assert((unsigned)VT.getSimpleVT().SimpleTy <
1751 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1753 if (VT.isVector()) {
1757 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1759 if (VT.isInteger()) {
1760 unsigned BitWidth = VT.getSizeInBits();
1761 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1762 return (BitWidth + RegWidth - 1) / RegWidth;
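// The rounding in the line above, as a worked example: parts are whole
// registers, so i128 with 64-bit registers needs (128 + 64 - 1) / 64 = 2
// registers, and i65 needs (65 + 64 - 1) / 64 = 2 as well.
static unsigned numParts(unsigned BitWidth, unsigned RegWidth) {
  return (BitWidth + RegWidth - 1) / RegWidth; // divide, rounding up
}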
1771       CallingConv::ID CC, EVT VT) const {
1772     return getRegisterType(Context, VT);
1780       EVT VT) const {
1781     return getNumRegisters(Context, VT);
1797   /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1802     if (NewVT.isVector() && !Load->hasOneUse())
1815   bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const {
1816     return VT.isScalarInteger() && !VT.isByteSized();
1822   bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1823     return DL.isBigEndian() || VT == MVT::ppcf128;
1921 LLVMContext &Context, const DataLayout &DL, EVT VT,
1931 const DataLayout &DL, EVT VT,
1940 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1949 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1961 /// target-independent logic.
1982   virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
2075 /// Returns the target-specific address of the unsafe stack pointer.
2090 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
2102 //===--------------------------------------------------------------------===//
2111 //===--------------------------------------------------------------------===//
2161 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2162 /// corresponding pointee type. This may entail some non-trivial operations to
2170 /// Perform a store-conditional operation to Addr. Return the status of the
2171 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2177 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2179 /// the backend. The target-specific intrinsic returns the loaded value and
2189   /// Perform an atomicrmw expansion using a target-specific way. This is
2197 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2206 /// target-specific intrinsic. This represents the combined atomic and compare
2213 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2215 /// the backend. The target-specific intrinsic returns the loaded value and
2223 //===--------------------------------------------------------------------===//
2235 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2243 /// This function should either return a nullptr, or a pointer to an IR-level
2264 // expansion of a cmpxchg instruction is such that the store-conditional will
2265 // not execute. This makes it possible to balance out the load-linked with
2271 /// Returns true if arguments should be sign-extended in lib calls.
2282 /// IR-level AtomicExpand pass.
2287 /// Returns how the given (atomic) load should be cast by the IR-level
2290     if (LI->getType()->isFloatingPointTy())
2295 /// Returns how the given (atomic) store should be expanded by the IR-level
2302 /// Returns how the given (atomic) store should be cast by the IR-level
2306     if (SI->getValueOperand()->getType()->isFloatingPointTy())
2311 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2318 /// Returns how the IR-level AtomicExpand pass should expand the given
2321     return RMW->isFloatingPointOperation() ?
2325   /// Returns how the given atomicrmw should be cast by the IR-level
2329     if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2330         (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2331          RMWI->getValOperand()->getType()->isPointerTy()))
2343 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2362 /// sign-extension of the instruction's output, whereas here we are concerned
2363 /// with the sign-extension of the input. For targets with compare-and-swap
2364 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2379                                                EVT VT) const {
2386     LegalizeTypeAction Action = getTypeAction(Context, VT);
2391   virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2395 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2396   virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2401 /// multiplication-by-constant into simpler operations like shifts and adds.
2406                                       EVT VT, SDValue C) const {
2411 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2425 /// conversion operations - canonicalizing the FP source value instead of
2441       Exponent = -Exponent;
2446 //===--------------------------------------------------------------------===//
2447 // TargetLowering Configuration Methods - These methods should be invoked by
2519   void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2520     assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2521     RegClassForVT[VT.SimpleTy] = RC;
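// Typical use in a target's TargetLowering constructor (sketch; "MyTarget",
// its register classes, and the Subtarget accessor are hypothetical). These
// calls must precede computeRegisterProperties(), which derives the
// register/type tables from the entries registered here.
addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
addRegisterClass(MVT::f32, &MyTarget::FPR32RegClass);
computeRegisterProperties(Subtarget.getRegisterInfo());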
2524 /// Return the largest legal super-reg register class of the register class
2527 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2534 /// type and indicate what to do about it. Note that VT may refer to either
2536   void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2538     OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2540   void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2543       setOperationAction(Op, VT, Action);
2547     for (auto VT : VTs)
2548       setOperationAction(Ops, VT, Action);
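// Sketch of the common constructor pattern these overloads support (the ops
// and types are arbitrary examples):
setOperationAction(ISD::SDIV, MVT::i32, Expand);              // expand to libcall/sequence
setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Expand); // no rotate instructions
setOperationAction(ISD::CTPOP, {MVT::i32, MVT::i64}, Custom); // lowered by a C++ hook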
2609   void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2612       setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2617     for (auto VT : VTs)
2618       setIndexedLoadAction(IdxModes, VT, Action);
2626   void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2629       setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2634     for (auto VT : VTs)
2635       setIndexedStoreAction(IdxModes, VT, Action);
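// Sketch: a target with auto-increment addressing marks the modes it
// supports (all indexed modes default to Expand); the modes and types here
// are arbitrary examples.
for (MVT VT : {MVT::i8, MVT::i16, MVT::i32}) {
  setIndexedLoadAction(ISD::PRE_INC, VT, Legal);
  setIndexedStoreAction(ISD::POST_INC, VT, Legal);
}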
2643   void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2645     setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2653   void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2655     setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2660   void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2663       assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2667       /// 32-bit value and the upper 29 bits index into the second dimension of
2668       /// the array to select what 32-bit value to use.
2669       uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2670       CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2671       CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2676     for (auto VT : VTs)
2677       setCondCodeAction(CCs, VT, Action);
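// Sketch: expand predicates the target's compare instructions cannot encode
// directly (arbitrary example predicates/types):
setCondCodeAction({ISD::SETO, ISD::SETUO}, {MVT::f32, MVT::f64}, Expand);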
2718 /// there is a performance benefit to higher-than-minimum alignment
2725 /// override getPrefLoopAlignment to provide per-loop values.
2767 //===--------------------------------------------------------------------===//
2806 /// If the address space cannot be determined, it will be -1.
2875     return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2876            (To->isIntegerTy() || To->isFloatingPointTy());
2926     // These are non-commutative binops.
2951 /// by referencing its sub-register AX.
2991     switch (I->getOpcode()) {
2993       if (isFPExtFree(EVT::getEVT(I->getType()),
2994                       EVT::getEVT(I->getOperand(0)->getType())))
2998       if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
3017     EVT VT = getValueType(DL, Ext->getType());
3018     EVT LoadVT = getValueType(DL, Load->getType());
3022     if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
3023         !isTruncateFree(Ext->getType(), Load->getType()))
3035     return isLoadExtLegal(LType, VT, LoadVT);
3039 /// implicitly zero-extends the value to ToTy in the result register.
3059 /// Return true if zero-extending the specific node Val to type VT2 is free
3060 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
3061 /// because it's folded such as X86 zero-extending loads).
3066 /// Return true if sign-extension from FromTy to ToTy is cheaper than
3067 /// zero-extension.
3110 /// Where addr1 = addr2 +/- sizeof(i32).
3112 /// In other words, unless the target performs a post-isel load combining,
3131 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3145 /// \p SVI is the shufflevector to RE-interleave the stored vector.
3175 /// single-precision floating-point numbers are implicitly extended to
3176 /// double-precision).
3184 /// (for instance, because half-precision floating-point numbers are
3185 /// implicitly extended to float-precision) for an FMA instruction.
3192 /// (for instance, because half-precision floating-point numbers are
3193 /// implicitly extended to float-precision) for an FMA instruction.
3207   virtual bool isFNegFree(EVT VT) const {
3208     assert(VT.isFloatingPoint());
3214   virtual bool isFAbsFree(EVT VT) const {
3215     assert(VT.isFloatingPoint());
3277     assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3278             N->getOpcode() == ISD::FMUL) &&
3280     return isOperationLegal(ISD::FMAD, N->getValueType(0));
3285   virtual bool generateFMAsInMachineCombiner(EVT VT,
3299 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3301                                                        EVT VT) const {
3333 /// be a sub-register rename rather than an actual instruction.
3334   virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3342   virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3354     if (VT.isVector())
3356     return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3372   virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3376 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3378   virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3380     unsigned ShCt = AndMask.getBitWidth() - 1;
3381     return !shouldAvoidTransformToShift(VT, ShCt);
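// Plain C++ demonstration of that fold for a single-bit mask y == (1 << K):
// shift bit K up to the sign position, arithmetic-shift it back across the
// whole word (ShCt == BitWidth - 1 above), and AND with A. Yields A when the
// bit is set and 0 otherwise, i.e. select_cc(seteq (and x, 1<<K), 0, 0, A).
// Assumes arithmetic right shift of negative values (guaranteed in C++20).
#include <cstdint>
static int32_t selectOnBit(int32_t X, unsigned K, int32_t A) {
  int32_t Spread = int32_t(uint32_t(X) << (31 - K)) >> 31; // 0 or -1
  return Spread & A;
}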
3384 /// Does this target require the clearing of high-order bits in a register
3388 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3390   virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3391     return isOperationLegalOrCustom(Op, VT);
3464 //===----------------------------------------------------------------------===//
3466 //===----------------------------------------------------------------------===//
3489 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3498 /// Information about the contents of the high-bits in boolean values held in
3502 /// Information about the contents of the high-bits in boolean values held in
3506 /// Information about the contents of the high-bits in boolean vector values
3562 /// register class is the largest legal super-reg register class of the
3574 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3575 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3576 /// the same type (e.g. i32 -> i32).
3583 /// non-legal value types are not described here.
3588 /// specific value type and extension type. Uses 4-bits to store the action
3623 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3650   void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3652     assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3654     unsigned Ty = (unsigned)VT.SimpleTy;
3659   LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3661     assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3663     unsigned Ty = (unsigned)VT.SimpleTy;
3686 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3687 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3700 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3701 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
3702 /// and one 1-byte store. This only applies to copying a constant array of
3721 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3722 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
3723 /// and one 1-byte load. This only applies to copying a constant array of
3736 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3737 /// with 8-bit alignment would result in nine 1-byte stores. This only
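// Illustrative decomposition in plain C++ (not the actual SelectionDAG
// logic): greedily cover Size bytes with the widest power-of-two access the
// alignment permits, reproducing the examples above (9 bytes @ align 2 ->
// four 2-byte + one 1-byte; 7 bytes @ align 4 -> 4 + 2 + 1; 9 bytes @
// align 1 -> nine 1-byte).
#include <cstdio>
static void decompose(unsigned Size, unsigned AlignBytes) {
  unsigned Chunk = AlignBytes; // widest naturally aligned access
  while (Size) {
    while (Chunk > Size)
      Chunk /= 2;
    std::printf("%u-byte access\n", Chunk);
    Size -= Chunk;
  }
}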
3755   /// Replace/modify any TargetFrameIndex operands with a target-dependent
3767 /// target-specific constructs to SelectionDAG operators.
3787 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3798 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3814 /// pre-indexed load / store address.
3824 /// post-indexed load / store.
3868 /// constraint. -fasm-blocks "__asm call foo" lowers to
3882 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3887 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3923 //===--------------------------------------------------------------------===//
3973 // Target hook to do target-specific const optimization, which is called by
3983 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
4028 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4101 /// typically be inferred from the number of low known 0 bits. However, for a
4102 /// pointer with a non-integral address space, the alignment value may be
4103 /// independent from the known low bits.
4110 /// Default implementation computes low bits based on alignment
4157 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4170 /// Return true if Op can create undef or poison from non-undef & non-poison
4182 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4247 /// Return if \p N is a True value when extended to \p VT.
4248 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4252 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4265 /// target-independent nodes that the target has registered with invoke it
4270 /// SDValue.Val == 0 - No change was made
4271 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
4272 /// otherwise - N should be replaced by the returned Operand.
4282 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4292 /// GlobalISel - return true if it is profitable to move this shift by a
4295 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4305 /// GlobalISel - return true if it's profitable to perform the combine:
4316 // (icmp eq A, C) | (icmp eq A, -C)
4317 // -> (icmp eq and(add(A, C), ~(C + C)), 0)
4318   //      (icmp ne A, C) & (icmp ne A, -C)
4319 // -> (icmp ne and(add(A, C), ~(C + C)), 0)
4321 // (icmp eq A, C) | (icmp eq A, -C)
4322 // -> (icmp eq Abs(A), C)
4323   //      (icmp ne A, C) & (icmp ne A, -C)
4324 // -> (icmp ne Abs(A), C)
4336 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4346   virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4348     return isTypeLegal(VT);
4353 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
4355                                                  EVT /*VT*/) const {
4423 if (Neg->use_empty())
4445 //===--------------------------------------------------------------------===//
4446 // Lowering methods - These methods must be implemented by targets so that
4450 /// Target-specific splitting of values into parts that fit a register
4458 /// Allows the target to handle physreg-carried dependency
4459 /// in target-specific way. Used from the ScheduleDAGSDNodes to decide whether
4461   /// Def - input: Selection DAG node defining physical register
4462 /// User - input: Selection DAG node using physical register
4463 /// Op - input: Number of User operand
4464 /// PhysReg - inout: set to the physical register if the edge is
4466 /// Cost - inout: physical register copy cost.
4475 /// Target-specific combining of register parts into its original value
4486 /// should fill in the InVals array with legal-type argument values, and
4496 /// pointer-authenticating indirect calls. It is equivalent to the "ptrauth"
4528 unsigned NumFixedArgs = -1;
4560 // setCallee with target/module-specific attributes
4599       IsVarArg = FTy->isVarArg();
4608       NumFixedArgs = FTy->getNumParams();
4745 /// array. The implementation should fill in the InVals array with legal-type
4753 /// Target-specific cleanup for formal ByVal parameters.
4758 /// is returned, an sret-demotion is performed.
4797 /// global variables extension. There is no target-independent behaviour
4807 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
4810   virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
4813     return VT.bitsLT(MinVT) ? MinVT : VT;
4852 /// even if a cache-coherent store is performed by another CPU. The default
4916 //===--------------------------------------------------------------------===//
4940 CW_Invalid = -1, // No match.
4946 // Well-known weights.
5037 StringRef Constraint, MVT VT) const;
5075 //===--------------------------------------------------------------------===//
5083 // Build sdiv by power-of-2 with conditional move instructions
5089 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5097 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5119 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5121 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5122 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
5140 /// link-time dependency on libm into a file that originally did not have one.
5146 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5148 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5158 /// Return a target-dependent comparison result if the input operand is
5165 /// Return a target-dependent result if the input operand is not suitable for
5172 //===--------------------------------------------------------------------===//
5176 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
5177 /// respectively, each computing an n/2-bit part of the result.
5179 /// in little-endian order.
5180 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5181 /// if you want to control how low bits are extracted from the LHS.
5183 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
5186 bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
5193 /// the result and one that computes the low bits.
5195 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5196 /// if you want to control how low bits are extracted from the LHS.
5198 /// \param RL Low bits of the RHS of the MUL. See LL for meaning
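// Plain C++ sketch of the schoolbook expansion expandMUL_LOHI performs,
// shown for a 32x32->64 multiply built from 16-bit halves (the LL/RL
// parameters above let callers control how such low halves are extracted):
#include <cstdint>
static void mulLoHi32(uint32_t A, uint32_t B, uint32_t &Lo, uint32_t &Hi) {
  uint32_t AL = A & 0xFFFF, AH = A >> 16; // low/high halves of the LHS
  uint32_t BL = B & 0xFFFF, BH = B >> 16; // low/high halves of the RHS
  uint32_t P0 = AL * BL, P1 = AL * BH, P2 = AH * BL, P3 = AH * BH;
  uint32_t Mid = (P0 >> 16) + (P1 & 0xFFFF) + (P2 & 0xFFFF);
  Lo = (P0 & 0xFFFF) | (Mid << 16);
  Hi = P3 + (P1 >> 16) + (P2 >> 16) + (Mid >> 16);
  // Invariant: (uint64_t(Hi) << 32 | Lo) == uint64_t(A) * B.
}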
5206 /// Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit
5207 /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
5215 /// half of VT.
5216 /// \param LL Low bits of the LHS of the operation. You can use this
5217 /// parameter if you want to control how low bits are extracted from
5238 /// Expand shift-by-parts.
5240 /// \param Lo lower-output-part after conversion
5241 /// \param Hi upper-output-part after conversion
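// Plain C++ sketch of shift-by-parts for SHL of a 64-bit value held as two
// 32-bit parts, mirroring the Lo/Hi outputs described above. Amt is assumed
// already reduced modulo 64; the branches avoid the undefined 32-bit shift
// by 32.
#include <cstdint>
static void shlParts(uint32_t InLo, uint32_t InHi, unsigned Amt,
                     uint32_t &Lo, uint32_t &Hi) {
  if (Amt == 0) {
    Lo = InLo; Hi = InHi;
  } else if (Amt < 32) {
    Lo = InLo << Amt;
    Hi = (InHi << Amt) | (InLo >> (32 - Amt));
  } else { // 32 <= Amt < 64: the low part shifts entirely into the high part
    Lo = 0;
    Hi = InLo << (Amt - 32);
  }
}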
5325 SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
5346 /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
5396 /// Expands an unaligned load to 2 half-size loads for an integer, and
5401 /// Expands an unaligned store to 2 half-size stores for integer values, and
5423 /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
5474 /// forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or
5487 /// sign/zero-extending the operands.
5508 /// temporarily, advance store position, before re-loading the final vector.
5533 bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
5538 //===--------------------------------------------------------------------===//
5581 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
5593 SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5595 SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5603 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0