Lines Matching +full:fsin +full:- +full:enable
1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
87 /// makeVTList - Return an instance of the SDVTList struct initialized with the specified members.
104 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
108 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
113 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G);); in NewSDValueDbgMsg()
116 //===----------------------------------------------------------------------===//
118 //===----------------------------------------------------------------------===//
120 /// isExactlyValue - We don't rely on operator== working on double values, as
121 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
122 /// As such, this method can be used to do an exact bit-for-bit comparison of two floating point values.
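The same bit-for-bit idea can be expressed directly on APFloat; a minimal standalone sketch (illustrative, not the ConstantFPSDNode member itself):

    #include "llvm/ADT/APFloat.h"

    // Exact FP equality via bit patterns: -0.0 and 0.0 compare unequal here,
    // and two NaNs with the same payload compare equal, unlike operator==.
    static bool exactFPEquals(const llvm::APFloat &A, const llvm::APFloat &B) {
      return A.bitwiseIsEqual(B);
    }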
141 //===----------------------------------------------------------------------===//
143 //===----------------------------------------------------------------------===//
146 if (N->getOpcode() == ISD::SPLAT_VECTOR) { in isConstantSplatVector()
148 N->getValueType(0).getVectorElementType().getSizeInBits(); in isConstantSplatVector()
149 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) { in isConstantSplatVector()
150 SplatVal = Op0->getAPIntValue().trunc(EltSize); in isConstantSplatVector()
153 if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) { in isConstantSplatVector()
154 SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize); in isConstantSplatVector()
166 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); in isConstantSplatVector()
172 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs, in isConstantSplatVector()
182 while (N->getOpcode() == ISD::BITCAST) in isConstantSplatVectorAllOnes()
183 N = N->getOperand(0).getNode(); in isConstantSplatVectorAllOnes()
185 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) { in isConstantSplatVectorAllOnes()
190 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; in isConstantSplatVectorAllOnes()
192 unsigned i = 0, e = N->getNumOperands(); in isConstantSplatVectorAllOnes()
195 while (i != e && N->getOperand(i).isUndef()) in isConstantSplatVectorAllOnes()
198 // Do not accept an all-undef vector. in isConstantSplatVectorAllOnes()
201 // Do not accept build_vectors that aren't all constants or which have non-~0 elements. in isConstantSplatVectorAllOnes()
209 SDValue NotZero = N->getOperand(i); in isConstantSplatVectorAllOnes()
210 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); in isConstantSplatVectorAllOnes()
212 if (CN->getAPIntValue().countr_one() < EltSize) in isConstantSplatVectorAllOnes()
215 if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize) in isConstantSplatVectorAllOnes()
224 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef()) in isConstantSplatVectorAllOnes()
231 while (N->getOpcode() == ISD::BITCAST) in isConstantSplatVectorAllZeros()
232 N = N->getOperand(0).getNode(); in isConstantSplatVectorAllZeros()
234 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) { in isConstantSplatVectorAllZeros()
239 if (N->getOpcode() != ISD::BUILD_VECTOR) return false; in isConstantSplatVectorAllZeros()
242 for (const SDValue &Op : N->op_values()) { in isConstantSplatVectorAllZeros()
246 // Do not accept build_vectors that aren't all constants or which have non-0 elements. in isConstantSplatVectorAllZeros()
254 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); in isConstantSplatVectorAllZeros()
256 if (CN->getAPIntValue().countr_zero() < EltSize) in isConstantSplatVectorAllZeros()
259 if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize) in isConstantSplatVectorAllZeros()
265 // Do not accept an all-undef vector. in isConstantSplatVectorAllZeros()
280 if (N->getOpcode() != ISD::BUILD_VECTOR) in isBuildVectorOfConstantSDNodes()
283 for (const SDValue &Op : N->op_values()) { in isBuildVectorOfConstantSDNodes()
293 if (N->getOpcode() != ISD::BUILD_VECTOR) in isBuildVectorOfConstantFPSDNodes()
296 for (const SDValue &Op : N->op_values()) { in isBuildVectorOfConstantFPSDNodes()
307 assert(N->getValueType(0).isVector() && "Expected a vector!"); in isVectorShrinkable()
309 unsigned EltSize = N->getValueType(0).getScalarSizeInBits(); in isVectorShrinkable()
313 if (N->getOpcode() == ISD::ZERO_EXTEND) { in isVectorShrinkable()
314 return (N->getOperand(0).getValueType().getScalarSizeInBits() <= in isVectorShrinkable()
318 if (N->getOpcode() == ISD::SIGN_EXTEND) { in isVectorShrinkable()
319 return (N->getOperand(0).getValueType().getScalarSizeInBits() <= in isVectorShrinkable()
323 if (N->getOpcode() != ISD::BUILD_VECTOR) in isVectorShrinkable()
326 for (const SDValue &Op : N->op_values()) { in isVectorShrinkable()
332 APInt C = Op->getAsAPIntVal().trunc(EltSize); in isVectorShrinkable()
346 if (N->getNumOperands() == 0) in allOperandsUndef()
348 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); }); in allOperandsUndef()
352 return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef(); in isFreezeUndef()
377 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst)) in matchUnaryPredicateImpl()
677 //===----------------------------------------------------------------------===//
679 //===----------------------------------------------------------------------===//
681 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
686 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them solely with their pointer.
692 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
701 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
719 switch (N->getOpcode()) { in AddNodeIDCustom()
728 ID.AddPointer(C->getConstantIntValue()); in AddNodeIDCustom()
729 ID.AddBoolean(C->isOpaque()); in AddNodeIDCustom()
734 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue()); in AddNodeIDCustom()
741 ID.AddPointer(GA->getGlobal()); in AddNodeIDCustom()
742 ID.AddInteger(GA->getOffset()); in AddNodeIDCustom()
743 ID.AddInteger(GA->getTargetFlags()); in AddNodeIDCustom()
747 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock()); in AddNodeIDCustom()
750 ID.AddInteger(cast<RegisterSDNode>(N)->getReg()); in AddNodeIDCustom()
753 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask()); in AddNodeIDCustom()
756 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue()); in AddNodeIDCustom()
760 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex()); in AddNodeIDCustom()
764 if (cast<LifetimeSDNode>(N)->hasOffset()) { in AddNodeIDCustom()
765 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize()); in AddNodeIDCustom()
766 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset()); in AddNodeIDCustom()
770 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid()); in AddNodeIDCustom()
771 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex()); in AddNodeIDCustom()
772 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes()); in AddNodeIDCustom()
776 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex()); in AddNodeIDCustom()
777 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags()); in AddNodeIDCustom()
782 ID.AddInteger(CP->getAlign().value()); in AddNodeIDCustom()
783 ID.AddInteger(CP->getOffset()); in AddNodeIDCustom()
784 if (CP->isMachineConstantPoolEntry()) in AddNodeIDCustom()
785 CP->getMachineCPVal()->addSelectionDAGCSEId(ID); in AddNodeIDCustom()
787 ID.AddPointer(CP->getConstVal()); in AddNodeIDCustom()
788 ID.AddInteger(CP->getTargetFlags()); in AddNodeIDCustom()
793 ID.AddInteger(TI->getIndex()); in AddNodeIDCustom()
794 ID.AddInteger(TI->getOffset()); in AddNodeIDCustom()
795 ID.AddInteger(TI->getTargetFlags()); in AddNodeIDCustom()
800 ID.AddInteger(LD->getMemoryVT().getRawBits()); in AddNodeIDCustom()
801 ID.AddInteger(LD->getRawSubclassData()); in AddNodeIDCustom()
802 ID.AddInteger(LD->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
803 ID.AddInteger(LD->getMemOperand()->getFlags()); in AddNodeIDCustom()
808 ID.AddInteger(ST->getMemoryVT().getRawBits()); in AddNodeIDCustom()
809 ID.AddInteger(ST->getRawSubclassData()); in AddNodeIDCustom()
810 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
811 ID.AddInteger(ST->getMemOperand()->getFlags()); in AddNodeIDCustom()
816 ID.AddInteger(ELD->getMemoryVT().getRawBits()); in AddNodeIDCustom()
817 ID.AddInteger(ELD->getRawSubclassData()); in AddNodeIDCustom()
818 ID.AddInteger(ELD->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
819 ID.AddInteger(ELD->getMemOperand()->getFlags()); in AddNodeIDCustom()
824 ID.AddInteger(EST->getMemoryVT().getRawBits()); in AddNodeIDCustom()
825 ID.AddInteger(EST->getRawSubclassData()); in AddNodeIDCustom()
826 ID.AddInteger(EST->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
827 ID.AddInteger(EST->getMemOperand()->getFlags()); in AddNodeIDCustom()
832 ID.AddInteger(SLD->getMemoryVT().getRawBits()); in AddNodeIDCustom()
833 ID.AddInteger(SLD->getRawSubclassData()); in AddNodeIDCustom()
834 ID.AddInteger(SLD->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
839 ID.AddInteger(SST->getMemoryVT().getRawBits()); in AddNodeIDCustom()
840 ID.AddInteger(SST->getRawSubclassData()); in AddNodeIDCustom()
841 ID.AddInteger(SST->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
846 ID.AddInteger(EG->getMemoryVT().getRawBits()); in AddNodeIDCustom()
847 ID.AddInteger(EG->getRawSubclassData()); in AddNodeIDCustom()
848 ID.AddInteger(EG->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
849 ID.AddInteger(EG->getMemOperand()->getFlags()); in AddNodeIDCustom()
854 ID.AddInteger(ES->getMemoryVT().getRawBits()); in AddNodeIDCustom()
855 ID.AddInteger(ES->getRawSubclassData()); in AddNodeIDCustom()
856 ID.AddInteger(ES->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
857 ID.AddInteger(ES->getMemOperand()->getFlags()); in AddNodeIDCustom()
862 ID.AddInteger(MLD->getMemoryVT().getRawBits()); in AddNodeIDCustom()
863 ID.AddInteger(MLD->getRawSubclassData()); in AddNodeIDCustom()
864 ID.AddInteger(MLD->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
865 ID.AddInteger(MLD->getMemOperand()->getFlags()); in AddNodeIDCustom()
870 ID.AddInteger(MST->getMemoryVT().getRawBits()); in AddNodeIDCustom()
871 ID.AddInteger(MST->getRawSubclassData()); in AddNodeIDCustom()
872 ID.AddInteger(MST->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
873 ID.AddInteger(MST->getMemOperand()->getFlags()); in AddNodeIDCustom()
878 ID.AddInteger(MG->getMemoryVT().getRawBits()); in AddNodeIDCustom()
879 ID.AddInteger(MG->getRawSubclassData()); in AddNodeIDCustom()
880 ID.AddInteger(MG->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
881 ID.AddInteger(MG->getMemOperand()->getFlags()); in AddNodeIDCustom()
886 ID.AddInteger(MS->getMemoryVT().getRawBits()); in AddNodeIDCustom()
887 ID.AddInteger(MS->getRawSubclassData()); in AddNodeIDCustom()
888 ID.AddInteger(MS->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
889 ID.AddInteger(MS->getMemOperand()->getFlags()); in AddNodeIDCustom()
909 ID.AddInteger(AT->getMemoryVT().getRawBits()); in AddNodeIDCustom()
910 ID.AddInteger(AT->getRawSubclassData()); in AddNodeIDCustom()
911 ID.AddInteger(AT->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
912 ID.AddInteger(AT->getMemOperand()->getFlags()); in AddNodeIDCustom()
916 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask(); in AddNodeIDCustom()
924 ID.AddPointer(BA->getBlockAddress()); in AddNodeIDCustom()
925 ID.AddInteger(BA->getOffset()); in AddNodeIDCustom()
926 ID.AddInteger(BA->getTargetFlags()); in AddNodeIDCustom()
930 ID.AddInteger(cast<AssertAlignSDNode>(N)->getAlign().value()); in AddNodeIDCustom()
937 } // end switch (N->getOpcode()) in AddNodeIDCustom()
942 ID.AddInteger(MN->getRawSubclassData()); in AddNodeIDCustom()
943 ID.AddInteger(MN->getPointerInfo().getAddrSpace()); in AddNodeIDCustom()
944 ID.AddInteger(MN->getMemOperand()->getFlags()); in AddNodeIDCustom()
945 ID.AddInteger(MN->getMemoryVT().getRawBits()); in AddNodeIDCustom()
949 /// AddNodeIDNode - Generic routine for adding a node's info to the NodeID data.
952 AddNodeIDOpcode(ID, N->getOpcode()); in AddNodeIDNode()
954 AddNodeIDValueTypes(ID, N->getVTList()); in AddNodeIDNode()
956 AddNodeIDOperands(ID, N->ops()); in AddNodeIDNode()
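Pieced together from the fragments above: a node's CSE key is its opcode, its interned value-type list, and its operands as (node pointer, result number) pairs, followed by the opcode-specific fields AddNodeIDCustom adds. A hedged sketch (profileNodeSketch is a hypothetical name):

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    static void profileNodeSketch(llvm::FoldingSetNodeID &ID, const llvm::SDNode *N) {
      ID.AddInteger(N->getOpcode());      // AddNodeIDOpcode
      ID.AddPointer(N->getVTList().VTs);  // VT lists are interned, so the pointer suffices
      for (const llvm::SDValue &Op : N->op_values()) { // AddNodeIDOperands
        ID.AddPointer(Op.getNode());
        ID.AddInteger(Op.getResNo());
      }
      // ...then AddNodeIDCustom appends per-opcode data (offsets, target
      // flags, memory VTs, etc., as enumerated above).
    }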
962 //===----------------------------------------------------------------------===//
964 //===----------------------------------------------------------------------===//
966 /// doNotCSE - Return true if CSE should not be performed for this node.
968 if (N->getValueType(0) == MVT::Glue) in doNotCSE()
971 switch (N->getOpcode()) { in doNotCSE()
979 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i) in doNotCSE()
980 if (N->getValueType(i) == MVT::Glue) in doNotCSE()
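The rule these fragments implement: a node producing an MVT::Glue result must never be CSE'd, since glue encodes a physical pairing with a specific neighbor. A condensed sketch of the check:

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    static bool producesGlue(const llvm::SDNode *N) {
      // Two structurally identical glue-producing nodes are still not
      // interchangeable, so they are excluded from the CSE maps.
      for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
        if (N->getValueType(i) == llvm::MVT::Glue)
          return true;
      return false;
    }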
986 /// RemoveDeadNodes - This method deletes all unreachable nodes in the SelectionDAG.
995 // Add all obviously-dead nodes to the DeadNodes worklist. in RemoveDeadNodes()
1006 /// RemoveDeadNodes - This method deletes the unreachable nodes in the given list, and any nodes that become unreachable as a result.
1017 if (N->getOpcode() == ISD::DELETED_NODE) in RemoveDeadNodes()
1020 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) in RemoveDeadNodes()
1021 DUL->NodeDeleted(N, nullptr); in RemoveDeadNodes()
1028 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { in RemoveDeadNodes()
1034 if (Operand->use_empty()) in RemoveDeadNodes()
1063 assert(N->getIterator() != AllNodes.begin() && in DeleteNodeNotInCSEMaps()
1065 assert(N->use_empty() && "Cannot delete a node that is not dead!"); in DeleteNodeNotInCSEMaps()
1068 N->DropOperands(); in DeleteNodeNotInCSEMaps()
1074 assert(!(V->isVariadic() && isParameter)); in add()
1079 for (const SDNode *Node : V->getSDNodes()) in add()
1088 for (auto &Val: I->second) in erase()
1089 Val->setIsInvalidated(); in erase()
1103 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType)); in DeallocateNode()
1104 N->NodeType = ISD::DELETED_NODE; in DeallocateNode()
1108 DbgInfo->erase(N); in DeallocateNode()
1115 /// VerifySDNode - Check the given SDNode. Aborts if it is invalid.
1117 switch (N->getOpcode()) { in VerifySDNode()
1119 if (N->getOpcode() > ISD::BUILTIN_OP_END) in VerifySDNode()
1120 TLI->verifyTargetSDNode(N); in VerifySDNode()
1123 EVT VT = N->getValueType(0); in VerifySDNode()
1124 assert(N->getNumValues() == 1 && "Too many results!"); in VerifySDNode()
1127 assert(N->getNumOperands() == 2 && "Wrong number of operands!"); in VerifySDNode()
1128 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() && in VerifySDNode()
1130 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() && in VerifySDNode()
1132 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() && in VerifySDNode()
1137 assert(N->getNumValues() == 1 && "Too many results!"); in VerifySDNode()
1138 assert(N->getValueType(0).isVector() && "Wrong return type!"); in VerifySDNode()
1139 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() && in VerifySDNode()
1141 EVT EltVT = N->getValueType(0).getVectorElementType(); in VerifySDNode()
1142 for (const SDUse &Op : N->ops()) { in VerifySDNode()
1147 assert(Op.getValueType() == N->getOperand(0).getValueType() && in VerifySDNode()
1163 N->PersistentId = NextPersistentId++; in InsertNode()
1166 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) in InsertNode()
1167 DUL->NodeInserted(N); in InsertNode()
1170 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that corresponds to it.
1176 switch (N->getOpcode()) { in RemoveNodeFromCSEMaps()
1179 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] && in RemoveNodeFromCSEMaps()
1181 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr; in RemoveNodeFromCSEMaps()
1182 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr; in RemoveNodeFromCSEMaps()
1185 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol()); in RemoveNodeFromCSEMaps()
1190 ESN->getSymbol(), ESN->getTargetFlags())); in RemoveNodeFromCSEMaps()
1195 Erased = MCSymbols.erase(MCSN->getMCSymbol()); in RemoveNodeFromCSEMaps()
1199 EVT VT = cast<VTSDNode>(N)->getVT(); in RemoveNodeFromCSEMaps()
1210 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!"); in RemoveNodeFromCSEMaps()
1211 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!"); in RemoveNodeFromCSEMaps()
1219 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue && in RemoveNodeFromCSEMaps()
1220 !N->isMachineOpcode() && !doNotCSE(N)) { in RemoveNodeFromCSEMaps()
1221 N->dump(this); in RemoveNodeFromCSEMaps()
1229 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE maps and modified in place.
1243 Existing->intersectFlagsWith(N->getFlags()); in AddModifiedNodeToCSEMaps()
1247 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) in AddModifiedNodeToCSEMaps()
1248 DUL->NodeDeleted(N, Existing); in AddModifiedNodeToCSEMaps()
1255 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next) in AddModifiedNodeToCSEMaps()
1256 DUL->NodeUpdated(N); in AddModifiedNodeToCSEMaps()
1259 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands were replaced with those specified.
1262 /// If a node already exists with these operands, the slot will be non-null.
1270 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); in FindModifiedNodeSlot()
1274 Node->intersectFlagsWith(N->getFlags()); in FindModifiedNodeSlot()
1278 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands were replaced with those specified.
1281 /// If a node already exists with these operands, the slot will be non-null.
1290 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); in FindModifiedNodeSlot()
1294 Node->intersectFlagsWith(N->getFlags()); in FindModifiedNodeSlot()
1298 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands were replaced with those specified.
1301 /// If a node already exists with these operands, the slot will be non-null.
1308 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops); in FindModifiedNodeSlot()
1312 Node->intersectFlagsWith(N->getFlags()); in FindModifiedNodeSlot()
1344 Context = &MF->getFunction().getContext(); in init()
1359 return MF->getFunction().hasOptSize() || in shouldOptForSize()
1360 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI); in shouldOptForSize()
1377 switch (N->getOpcode()) { in FindNodeOrInsertPos()
1392 switch (N->getOpcode()) { in FindNodeOrInsertPos()
1398 if (N->getDebugLoc() != DL.getDebugLoc()) in FindNodeOrInsertPos()
1399 N->setDebugLoc(DebugLoc()); in FindNodeOrInsertPos()
1405 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder()) in FindNodeOrInsertPos()
1406 N->setDebugLoc(DL.getDebugLoc()); in FindNodeOrInsertPos()
1430 DbgInfo->clear(); in clear()
1444 "Strict no-op FP extend/round not allowed."); in getStrictFPExtendOrRound()
1522 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); in getBoolExtOrTrunc()
1523 return getNode(TLI->getExtendForContent(BType), SL, VT, Op); in getBoolExtOrTrunc()
1579 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
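With no dedicated NOT opcode, such a helper reduces to an XOR against an all-ones constant; a hedged sketch over the public SelectionDAG API (buildNot is a hypothetical name):

    #include "llvm/CodeGen/SelectionDAG.h"

    llvm::SDValue buildNot(llvm::SelectionDAG &DAG, const llvm::SDLoc &DL,
                           llvm::SDValue Val, llvm::EVT VT) {
      // NOT x == x ^ ~0; getAllOnesConstant splats for vector types too.
      llvm::SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
      return DAG.getNode(llvm::ISD::XOR, DL, VT, Val, AllOnes);
    }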
1614 switch (TLI->getBooleanContents(OpVT)) { in getBoolConstant()
1649 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == in getConstant()
1651 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); in getConstant()
1653 if (TLI->isSExtCheaperThanZExt(VT.getScalarType(), EltVT)) in getConstant()
1654 NewVal = Elt->getValue().sextOrTrunc(EltVT.getSizeInBits()); in getConstant()
1656 NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); in getConstant()
1661 // the value into n parts and use a vector type with n-times the elements. in getConstant()
1666 TLI->getTypeAction(*getContext(), EltVT) == in getConstant()
1668 const APInt &NewVal = Elt->getValue(); in getConstant()
1669 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); in getConstant()
1674 TLI->isOperationLegal(ISD::SPLAT_VECTOR, VT)) { in getConstant()
1693 // isn't a power-of-2 factor of the requested type size. in getConstant()
1703 // big-endian order then reverse it now. in getConstant()
1723 assert(Elt->getBitWidth() == EltVT.getSizeInBits() && in getConstant()
1752 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); in getIntPtrConstant()
1758 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout()); in getShiftAmountConstant()
1770 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); in getVectorIdxConstant()
1785 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and in getConstantFP()
1833 "Cannot set target flags on target-independent globals"); in getGlobalAddress()
1835 // Truncate (with sign-extension) the offset value to the pointer size. in getGlobalAddress()
1836 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); in getGlobalAddress()
1841 if (GV->isThreadLocal()) in getGlobalAddress()
1882 "Cannot set target flags on target-independent jump tables"); in getJumpTable()
1910 "Cannot set target flags on target-independent globals"); in getConstantPool()
1913 ? getDataLayout().getABITypeAlign(C->getType()) in getConstantPool()
1914 : getDataLayout().getPrefTypeAlign(C->getType()); in getConstantPool()
1919 ID.AddInteger(Alignment->value()); in getConstantPool()
1940 "Cannot set target flags on target-independent globals"); in getConstantPool()
1942 Alignment = getDataLayout().getPrefTypeAlign(C->getType()); in getConstantPool()
1947 ID.AddInteger(Alignment->value()); in getConstantPool()
1949 C->addSelectionDAGCSEId(ID); in getConstantPool()
2043 return getConstant(MulImm * C->getZExtValue(), DL, VT); in getVScale()
2092 // Canonicalize shuffle undef, undef -> undef in getVectorShuffle()
2100 [&](int M) { return M < (NElts * 2) && M >= -1; }) && in getVectorShuffle()
2106 // Canonicalize shuffle v, v -> v, undef in getVectorShuffle()
2110 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; in getVectorShuffle()
2113 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. in getVectorShuffle()
2117 if (TLI->hasVectorBlend()) { in getVectorShuffle()
2119 // that even when this arises during lowering we don't have to re-handle it. in getVectorShuffle()
2122 SDValue Splat = BV->getSplatValue(&UndefElements); in getVectorShuffle()
2131 if (UndefElements[MaskVec[i] - Offset]) { in getVectorShuffle()
2132 MaskVec[i] = -1; in getVectorShuffle()
2136 // If we can blend a non-undef lane, use that instead. in getVectorShuffle()
2147 // Canonicalize all index into lhs, -> shuffle lhs, undef in getVectorShuffle()
2148 // Canonicalize all index into rhs, -> shuffle rhs, undef in getVectorShuffle()
2154 MaskVec[i] = -1; in getVectorShuffle()
2171 // Re-check whether both sides ended up undef. in getVectorShuffle()
2191 V = V->getOperand(0); in getVectorShuffle()
2196 SDValue Splat = BV->getSplatValue(&UndefElements); in getVectorShuffle()
2215 EVT BuildVT = BV->getValueType(0); in getVectorShuffle()
2216 const SDValue &Splatted = BV->getOperand(MaskVec[0]); in getVectorShuffle()
2276 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA); in getRegister()
2404 /// getShiftAmountOperand - Return the specified value, cast to the target's desired shift amount type.
2408 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); in getShiftAmountOperand()
2417 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); in expandVAArg()
2418 EVT VT = Node->getValueType(0); in expandVAArg()
2419 SDValue Tmp1 = Node->getOperand(0); in expandVAArg()
2420 SDValue Tmp2 = Node->getOperand(1); in expandVAArg()
2421 const MaybeAlign MA(Node->getConstantOperandVal(3)); in expandVAArg()
2429 getConstant(MA->value() - 1, dl, VAList.getValueType())); in expandVAArg()
2433 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); in expandVAArg()
2453 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); in expandVACopy()
2454 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); in expandVACopy()
2456 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), in expandVACopy()
2457 Node->getOperand(2), MachinePointerInfo(VS)); in expandVACopy()
2458 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), in expandVACopy()
2467 if (TLI->isTypeLegal(VT) || !VT.isVector()) in getReducedAlign()
2470 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); in getReducedAlign()
2471 const Align StackAlign = TFI->getStackAlign(); in getReducedAlign()
2479 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT, in getReducedAlign()
2491 MachineFrameInfo &MFI = MF->getFrameInfo(); in CreateStackTemporary()
2492 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); in CreateStackTemporary()
2495 StackID = TFI->getStackIDForScalableVectors(); in CreateStackTemporary()
2500 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); in CreateStackTemporary()
2533 TLI->getBooleanContents(OpVT) == in FoldSetCC()
2567 // icmp eq/ne X, undef -> undef. in FoldSetCC()
2573 // icmp undef, undef -> undef. in FoldSetCC()
2577 // icmp X, X -> true/false in FoldSetCC()
2578 // icmp X, undef -> true/false because undef could be X. in FoldSetCC()
2584 const APInt &C2 = N2C->getAPIntValue(); in FoldSetCC()
2586 const APInt &C1 = N1C->getAPIntValue(); in FoldSetCC()
2597 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); in FoldSetCC()
2655 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) in FoldSetCC()
2658 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || in FoldSetCC()
2681 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We use this predicate to simplify operations downstream.
2688 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use this predicate to simplify operations downstream.
2696 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in DemandedElts. We use this predicate to simplify operations downstream.
2705 /// MaskedVectorIsZero - Return true if 'Op' is known to be zero in DemandedElts. We use this predicate to simplify operations downstream.
2712 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
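These predicates bottom out in computeKnownBits: 'V & Mask' is known zero exactly when every bit of Mask is a known-zero bit of V. A minimal sketch of that equivalence:

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/Support/KnownBits.h"

    static bool maskedValueIsZeroSketch(llvm::SelectionDAG &DAG, llvm::SDValue V,
                                        const llvm::APInt &Mask) {
      llvm::KnownBits Known = DAG.computeKnownBits(V);
      return Mask.isSubsetOf(Known.Zero);
    }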
2738 /// isSplatValue - Return true if the vector V has the same value across all DemandedElts.
2788 return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this, in isSplatValue()
2823 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); in isSplatValue()
2835 DemandedRHS.setBit(M - NumElts); in isSplatValue()
2980 // TODO - remove this and rely purely on SelectionDAG::isSplatValue, in getSplatSourceVector()
2983 if (!SVN->isSplat()) in getSplatSourceVector()
2985 int Idx = SVN->getSplatIndex(); in getSplatSourceVector()
3000 if (LegalTypes && !TLI->isTypeLegal(SVT)) { in getSplatValue()
3003 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); in getSplatValue()
3023 const APInt &ShAmt = Cst->getAPIntValue(); in getValidShiftAmountRange()
3031 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { in getValidShiftAmountRange()
3034 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); in getValidShiftAmountRange()
3039 const APInt &ShAmt = SA->getAPIntValue(); in getValidShiftAmountRange()
3042 if (!MinAmt || MinAmt->ugt(ShAmt)) in getValidShiftAmountRange()
3044 if (!MaxAmt || MaxAmt->ult(ShAmt)) in getValidShiftAmountRange()
3070 if (const APInt *ShAmt = AmtRange->getSingleElement()) in getValidShiftAmount()
3071 return ShAmt->getZExtValue(); in getValidShiftAmount()
3092 return AmtRange->getUnsignedMin().getZExtValue(); in getValidMinimumShiftAmount()
3113 return AmtRange->getUnsignedMax().getZExtValue(); in getValidMaximumShiftAmount()
3152 return KnownBits::makeConstant(C->getAPIntValue()); in computeKnownBits()
3156 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt()); in computeKnownBits()
3189 for (auto [I, SrcOp] : enumerate(Op->ops())) { in computeKnownBits()
3214 const APInt MaxValue = (MaxNumElts - 1).umul_ov(Step, Overflow); in computeKnownBits()
3253 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); in computeKnownBits()
3254 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts, in computeKnownBits()
3321 break; // early-out. in computeKnownBits()
3393 unsigned Shifts = IsLE ? i : SubScale - 1 - i; in computeKnownBits()
3413 unsigned Shifts = IsLE ? i : NumElts - 1 - i; in computeKnownBits()
3452 // with itself is non-negative. Only do this if we haven't already computed it another way. in computeKnownBits()
3454 if (Op->getFlags().hasNoSignedWrap() && in computeKnownBits()
3488 Known.Zero.setHighBits(std::min(SignBits0, SignBits1) - 1); in computeKnownBits()
3564 // We know that we have an integer-based boolean since these operations in computeKnownBits()
3566 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == in computeKnownBits()
3575 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; in computeKnownBits()
3577 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == in computeKnownBits()
3587 bool NUW = Op->getFlags().hasNoUnsignedWrap(); in computeKnownBits()
3588 bool NSW = Op->getFlags().hasNoSignedWrap(); in computeKnownBits()
3604 Op->getFlags().hasExact()); in computeKnownBits()
3615 Op->getFlags().hasExact()); in computeKnownBits()
3620 unsigned Amt = C->getAPIntValue().urem(BitWidth); in computeKnownBits()
3622 // For fshl, 0-shift returns the 1st arg. in computeKnownBits()
3623 // For fshr, 0-shift returns the 2nd arg. in computeKnownBits()
3630 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) in computeKnownBits()
3631 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) in computeKnownBits()
3637 Known2.One.lshrInPlace(BitWidth - Amt); in computeKnownBits()
3638 Known2.Zero.lshrInPlace(BitWidth - Amt); in computeKnownBits()
3640 Known.One <<= BitWidth - Amt; in computeKnownBits()
3641 Known.Zero <<= BitWidth - Amt; in computeKnownBits()
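The two identities in the comment above, checked in plain C++ for BW = 8 (illustrative only):

    #include <cstdint>

    static uint8_t fshl8(uint8_t X, uint8_t Y, unsigned Z) {
      unsigned Amt = Z % 8;  // 0-shift returns the 1st arg
      return Amt == 0 ? X : (uint8_t)((X << Amt) | (Y >> (8 - Amt)));
    }
    static uint8_t fshr8(uint8_t X, uint8_t Y, unsigned Z) {
      unsigned Amt = Z % 8;  // 0-shift returns the 2nd arg
      return Amt == 0 ? Y : (uint8_t)((X << (8 - Amt)) | (Y >> Amt));
    }
    // fshl8(0xAB, 0xCD, 4) == fshr8(0xAB, 0xCD, 4) == 0xBC: the middle
    // byte of the 16-bit concatenation 0xABCD.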
3680 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); in computeKnownBits()
3716 const Constant *Cst = TLI->getTargetConstantFromLoad(LD); in computeKnownBits()
3719 Type *CstTy = Cst->getType(); in computeKnownBits()
3720 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits() && in computeKnownBits()
3724 if (CstTy->isVectorTy()) { in computeKnownBits()
3725 if (const Constant *Splat = Cst->getSplatValue()) { in computeKnownBits()
3727 CstTy = Cst->getType(); in computeKnownBits()
3730 // TODO - do we need to handle different bitwidths? in computeKnownBits()
3731 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { in computeKnownBits()
3738 if (Constant *Elt = Cst->getAggregateElement(i)) { in computeKnownBits()
3740 const APInt &Value = CInt->getValue(); in computeKnownBits()
3746 APInt Value = CFP->getValueAPF().bitcastToAPInt(); in computeKnownBits()
3756 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { in computeKnownBits()
3758 Known = KnownBits::makeConstant(CInt->getValue()); in computeKnownBits()
3761 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt()); in computeKnownBits()
3766 KnownBits Known0(!LD->getMemoryVT().isScalableVT() in computeKnownBits()
3767 ? LD->getMemoryVT().getFixedSizeInBits() in computeKnownBits()
3776 if (const MDNode *MD = LD->getRanges()) { in computeKnownBits()
3787 if (LD->getMemoryVT().isVector()) in computeKnownBits()
3788 Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits()); in computeKnownBits()
3856 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); in computeKnownBits()
3864 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign()); in computeKnownBits()
3880 SDNodeFlags Flags = Op.getNode()->getFlags(); in computeKnownBits()
3894 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == in computeKnownBits()
3926 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == in computeKnownBits()
3958 Known = KnownBits::udiv(Known, Known2, Op->getFlags().hasExact()); in computeKnownBits()
3964 Known = KnownBits::sdiv(Known, Known2, Op->getFlags().hasExact()); in computeKnownBits()
3985 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); in computeKnownBits()
3986 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); in computeKnownBits()
4011 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) in computeKnownBits()
4013 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); in computeKnownBits()
4033 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { in computeKnownBits()
4034 unsigned EltIdx = CEltNo->getZExtValue(); in computeKnownBits()
4064 ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) - 1); in computeKnownBits()
4099 const APInt &ValueLow = CstLow->getAPIntValue(); in computeKnownBits()
4100 const APInt &ValueHigh = CstHigh->getAPIntValue(); in computeKnownBits()
4123 // For SMAX, if CstLow is non-negative we know the result will be in computeKnownBits()
4124 // non-negative and thus all sign bits are 0. in computeKnownBits()
4128 const APInt &ValueLow = CstLow->getAPIntValue(); in computeKnownBits()
4151 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); in computeKnownBits()
4159 // We know that we have an integer-based boolean since these operations in computeKnownBits()
4161 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == in computeKnownBits()
4183 cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); in computeKnownBits()
4186 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) in computeKnownBits()
4188 else if (Op->getOpcode() == ISD::ATOMIC_LOAD && in computeKnownBits()
4189 cast<AtomicSDNode>(Op)->getExtensionType() == ISD::ZEXTLOAD) in computeKnownBits()
4196 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), in computeKnownBits()
4213 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); in computeKnownBits()
4274 // X - 0 never overflows in computeOverflowForSignedSub()
4292 // X - 0 never overflows in computeOverflowForUnsignedSub()
4337 // If one of the operands is non-negative, then there's no in computeOverflowForSignedMul()
4355 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); in isKnownToBeAPowerOfTwo()
4359 // A left-shift of a constant one will have exactly one bit set because shifting the bit off the end is undefined. in isKnownToBeAPowerOfTwo()
4363 if (C && C->getAPIntValue() == 1) in isKnownToBeAPowerOfTwo()
4369 // Similarly, a logical right-shift of a constant sign-bit will have exactly one bit set. in isKnownToBeAPowerOfTwo()
4373 if (C && C->getAPIntValue().isSignMask()) in isKnownToBeAPowerOfTwo()
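Both cases reduce to sliding a single set bit, which plain C++ confirms:

    // A shifted 1 and a logically shifted sign mask each keep exactly one bit set.
    static_assert((1u << 5) == 0x20u, "shl of constant one");
    static_assert((0x80000000u >> 7) == 0x01000000u, "srl of sign mask");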
4384 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { in isKnownToBeAPowerOfTwo()
4386 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); in isKnownToBeAPowerOfTwo()
4393 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0))) in isKnownToBeAPowerOfTwo()
4394 if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2()) in isKnownToBeAPowerOfTwo()
4397 // vscale(power-of-two) is a power-of-two for some targets in isKnownToBeAPowerOfTwo()
4412 // Looking for `x & -x` pattern: in isKnownToBeAPowerOfTwo()
4414 // x & -x -> 0 in isKnownToBeAPowerOfTwo()
4416 // x & -x -> non-zero pow2 in isKnownToBeAPowerOfTwo()
4417 // so if we find the pattern return whether we know `x` is non-zero. in isKnownToBeAPowerOfTwo()
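The identity being matched, checked with concrete values in plain C++:

    // x & -x isolates the lowest set bit: a power of two when x != 0, else 0.
    static_assert((12u & (0u - 12u)) == 4u, "lowest set bit of 0b1100");
    static_assert((0u & (0u - 0u)) == 0u, "zero stays zero");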
4432 return C1->getValueAPF().getExactLog2Abs() >= 0; in isKnownToBeAPowerOfTwoFP()
4462 const APInt &Val = C->getAPIntValue(); in ComputeNumSignBits()
4476 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); in ComputeNumSignBits()
4477 return VTBits-Tmp+1; in ComputeNumSignBits()
4479 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); in ComputeNumSignBits()
4480 return VTBits-Tmp; in ComputeNumSignBits()
4488 if (NumSrcSignBits > (NumSrcBits - VTBits)) in ComputeNumSignBits()
4489 return NumSrcSignBits - (NumSrcBits - VTBits); in ComputeNumSignBits()
4503 APInt T = C->getAPIntValue().trunc(VTBits); in ComputeNumSignBits()
4511 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; in ComputeNumSignBits()
4512 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); in ComputeNumSignBits()
4524 assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); in ComputeNumSignBits()
4525 if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts, in ComputeNumSignBits()
4536 // If we don't know anything, early out and try computeKnownBits fall-back. in ComputeNumSignBits()
4568 // Fast case - sign splat can be simply split across the small elements. in ComputeNumSignBits()
4573 // Slow case - determine how far the sign extends into each sub-element. in ComputeNumSignBits()
4578 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); in ComputeNumSignBits()
4582 Tmp2 = std::min(Tmp2, Tmp - SubOffset); in ComputeNumSignBits()
4591 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); in ComputeNumSignBits()
4592 return VTBits - Tmp + 1; in ComputeNumSignBits()
4594 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); in ComputeNumSignBits()
4598 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); in ComputeNumSignBits()
4599 Tmp = VTBits-Tmp+1; in ComputeNumSignBits()
4608 Tmp = VTBits - SrcVT.getScalarSizeInBits(); in ComputeNumSignBits()
4613 // SRA X, C -> adds C sign bits. in ComputeNumSignBits()
4621 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue(); in ComputeNumSignBits()
4622 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue(); in ComputeNumSignBits()
4634 ExtVT.getScalarSizeInBits() - ExtendeeVT.getScalarSizeInBits(); in ComputeNumSignBits()
4639 return Tmp - MaxShAmt; in ComputeNumSignBits()
4645 return Tmp - MaxShAmt; in ComputeNumSignBits()
4687 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { in ComputeNumSignBits()
4688 Tmp = CstLow->getAPIntValue().getNumSignBits(); in ComputeNumSignBits()
4689 Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); in ComputeNumSignBits()
4694 // Fallback - just get the minimum number of sign bits of the operands. in ComputeNumSignBits()
4721 // If setcc returns 0/-1, all bits are sign bits. in ComputeNumSignBits()
4722 // We know that we have an integer-based boolean since these operations in ComputeNumSignBits()
4724 if (TLI->getBooleanContents(VT.isVector(), false) == in ComputeNumSignBits()
4732 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; in ComputeNumSignBits()
4733 // If setcc returns 0/-1, all bits are sign bits. in ComputeNumSignBits()
4734 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == in ComputeNumSignBits()
4743 // If we're rotating a 0/-1 value, then it stays a 0/-1 value. in ComputeNumSignBits()
4749 unsigned RotAmt = C->getAPIntValue().urem(VTBits); in ComputeNumSignBits()
4751 // Handle rotate right by N like a rotate left by 32-N. in ComputeNumSignBits()
4753 RotAmt = (VTBits - RotAmt) % VTBits; in ComputeNumSignBits()
4755 // If we aren't rotating out all of the known-in sign bits, return the number that are left. in ComputeNumSignBits()
4757 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); in ComputeNumSignBits()
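The rotate-right-to-rotate-left conversion used above, as a plain C++ check for BW = 32 (illustrative):

    #include <cstdint>

    static uint32_t rotl32(uint32_t X, unsigned N) {
      N %= 32;
      return N == 0 ? X : (X << N) | (X >> (32 - N));
    }
    // rotr(X, N) == rotl(X, (32 - N) % 32)
    static uint32_t rotr32(uint32_t X, unsigned N) {
      return rotl32(X, (32 - (N % 32)) % 32);
    }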
4767 // Special case decrementing a value (ADD X, -1): in ComputeNumSignBits()
4770 if (CRHS->isAllOnes()) { in ComputeNumSignBits()
4774 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set. in ComputeNumSignBits()
4787 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBits()
4795 if (CLHS->isZero()) { in ComputeNumSignBits()
4798 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set. in ComputeNumSignBits()
4815 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBits()
4825 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); in ComputeNumSignBits()
4826 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; in ComputeNumSignBits()
4845 if (NumSrcSignBits > (NumSrcBits - VTBits)) in ComputeNumSignBits()
4846 return NumSrcSignBits - (NumSrcBits - VTBits); in ComputeNumSignBits()
4858 const int rIndex = Items - 1 - Op.getConstantOperandVal(1); in ComputeNumSignBits()
4862 return std::clamp(KnownSign - rIndex * BitWidth, 0, BitWidth); in ComputeNumSignBits()
4876 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { in ComputeNumSignBits()
4877 unsigned EltIdx = CEltNo->getZExtValue(); in ComputeNumSignBits()
4883 // TODO - handle implicit truncation of inserted elements. in ComputeNumSignBits()
4918 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) in ComputeNumSignBits()
4920 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); in ComputeNumSignBits()
4972 return 1; // early-out in ComputeNumSignBits()
4983 if (const MDNode *Ranges = LD->getRanges()) { in ComputeNumSignBits()
4989 switch (LD->getExtensionType()) { in ComputeNumSignBits()
5024 Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); in ComputeNumSignBits()
5028 return 1; // early-out in ComputeNumSignBits()
5029 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND) in ComputeNumSignBits()
5030 return VTBits - Tmp + 1; in ComputeNumSignBits()
5031 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) in ComputeNumSignBits()
5032 return VTBits - Tmp; in ComputeNumSignBits()
5033 if (Op->getOpcode() == ISD::ATOMIC_LOAD) { in ComputeNumSignBits()
5034 ISD::LoadExtType ETy = cast<AtomicSDNode>(Op)->getExtensionType(); in ComputeNumSignBits()
5036 return VTBits - Tmp + 1; in ComputeNumSignBits()
5038 return VTBits - Tmp; in ComputeNumSignBits()
5049 unsigned ExtType = LD->getExtensionType(); in ComputeNumSignBits()
5052 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. in ComputeNumSignBits()
5053 Tmp = LD->getMemoryVT().getScalarSizeInBits(); in ComputeNumSignBits()
5054 return VTBits - Tmp + 1; in ComputeNumSignBits()
5055 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. in ComputeNumSignBits()
5056 Tmp = LD->getMemoryVT().getScalarSizeInBits(); in ComputeNumSignBits()
5057 return VTBits - Tmp; in ComputeNumSignBits()
5059 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { in ComputeNumSignBits()
5060 // We only need to handle vectors - computeKnownBits should handle scalar cases. in ComputeNumSignBits()
5062 Type *CstTy = Cst->getType(); in ComputeNumSignBits()
5063 if (CstTy->isVectorTy() && !VT.isScalableVector() && in ComputeNumSignBits()
5064 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() && in ComputeNumSignBits()
5065 VTBits == CstTy->getScalarSizeInBits()) { in ComputeNumSignBits()
5070 if (Constant *Elt = Cst->getAggregateElement(i)) { in ComputeNumSignBits()
5072 const APInt &Value = CInt->getValue(); in ComputeNumSignBits()
5077 APInt Value = CFP->getValueAPF().bitcastToAPInt(); in ComputeNumSignBits()
5102 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); in ComputeNumSignBits()
5117 return Op.getScalarValueSizeInBits() - SignBits + 1; in ComputeMaxSignificantBits()
5124 return Op.getScalarValueSizeInBits() - SignBits + 1; in ComputeMaxSignificantBits()
5172 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements - in isGuaranteedNotToBeUndefOrPoison()
5186 if (!getShuffleDemandedElts(DemandedElts.getBitWidth(), SVN->getMask(), in isGuaranteedNotToBeUndefOrPoison()
5209 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode( in isGuaranteedNotToBeUndefOrPoison()
5221 all_of(Op->ops(), [&](SDValue V) { in isGuaranteedNotToBeUndefOrPoison()
5249 if (ConsiderFlags && Op->hasPoisonGeneratingFlags()) in canCreateUndefOrPoison()
5298 ISD::CondCode CCCode = cast<CondCodeSDNode>(Op.getOperand(CCOp))->get(); in canCreateUndefOrPoison()
5343 for (auto [Idx, Elt] : enumerate(SVN->getMask())) in canCreateUndefOrPoison()
5353 return TLI->canCreateUndefOrPoisonForTargetNode( in canCreateUndefOrPoison()
5365 return Op->getFlags().hasDisjoint() || in isADDLike()
5379 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) in isKnownNeverNaN()
5387 return !C->getValueAPF().isNaN() || in isKnownNeverNaN()
5388 (SNaN && !C->getValueAPF().isSignaling()); in isKnownNeverNaN()
5398 case ISD::FSIN: in isKnownNeverNaN()
5462 // Only one needs to be known not-nan, since it will be returned if the other ends up being one. in isKnownNeverNaN()
5480 // TODO: Does this quiet or return the original NaN as-is? in isKnownNeverNaN()
5488 for (const SDValue &Opnd : Op->ops()) in isKnownNeverNaN()
5498 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); in isKnownNeverNaN()
5511 Op, [](ConstantFPSDNode *C) { return !C->isZero(); }); in isKnownNeverZeroFloat()
5519 "Floating point types unsupported - use isKnownNeverZeroFloat"); in isKnownNeverZero()
5523 [](ConstantSDNode *C) { return !C->isZero(); })) in isKnownNeverZero()
5542 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap()) in isKnownNeverZero()
5548 // If max shift cnt of known ones is non-zero, result is non-zero. in isKnownNeverZero()
5606 if (Op->getFlags().hasExact()) in isKnownNeverZero()
5611 // If max shift cnt of known ones is non-zero, result is non-zero. in isKnownNeverZero()
5622 if (Op->getFlags().hasExact()) in isKnownNeverZero()
5627 if (Op->getFlags().hasNoUnsignedWrap()) in isKnownNeverZero()
5645 if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap()) in isKnownNeverZero()
5670 return !C1->isNegative(); in cannotBeOrderedNegativeFP()
5682 if (CA->isZero() && CB->isZero()) return true; in isEqualTo()
5694 // bits in the non-extended part. in getBitwiseNotOperand()
5700 MaskC->getAPIntValue().getActiveBits() && in getBitwiseNotOperand()
5715 if (NotOperand->getOpcode() == ISD::ZERO_EXTEND || in haveNoCommonBitsSetCommutative()
5716 NotOperand->getOpcode() == ISD::TRUNCATE) in haveNoCommonBitsSetCommutative()
5717 NotOperand = NotOperand->getOperand(0); in haveNoCommonBitsSetCommutative()
5721 if (Other->getOpcode() == ISD::AND) in haveNoCommonBitsSetCommutative()
5722 return NotOperand == Other->getOperand(0) || in haveNoCommonBitsSetCommutative()
5723 NotOperand == Other->getOperand(1); in haveNoCommonBitsSetCommutative()
5728 if (A->getOpcode() == ISD::ZERO_EXTEND || A->getOpcode() == ISD::TRUNCATE) in haveNoCommonBitsSetCommutative()
5729 A = A->getOperand(0); in haveNoCommonBitsSetCommutative()
5731 if (B->getOpcode() == ISD::ZERO_EXTEND || B->getOpcode() == ISD::TRUNCATE) in haveNoCommonBitsSetCommutative()
5732 B = B->getOperand(0); in haveNoCommonBitsSetCommutative()
5734 if (A->getOpcode() == ISD::AND) in haveNoCommonBitsSetCommutative()
5735 return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) || in haveNoCommonBitsSetCommutative()
5736 MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B); in haveNoCommonBitsSetCommutative()
5753 if (cast<ConstantSDNode>(Step)->isZero()) in FoldSTEP_VECTOR()
5854 Elts.append(Op->op_begin(), Op->op_end()); in foldCONCAT_VECTORS()
5902 Flags = Inserter->getFlags(); in getNode()
5950 unsigned OpOpcode = N1.getNode()->getOpcode(); in getNode()
6013 Flags.setNonNeg(N1->getFlags().hasNonNeg()); in getNode()
6031 if (OpOpcode == ISD::ZERO_EXTEND) { // (zext (zext x)) -> (zext x) in getNode()
6033 Flags.setNonNeg(N1->getFlags().hasNonNeg()); in getNode()
6041 // (zext (trunc x)) -> x iff the upper bits are known zero. in getNode()
6074 Flags.setNonNeg(N1->getFlags().hasNonNeg()); in getNode()
6075 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x) in getNode()
6081 // (ext (trunc x)) -> x in getNode()
6140 // bswap(bswap(X)) -> X. in getNode()
6153 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) in getNode()
6167 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. in getNode()
6179 if (OpOpcode == ISD::FNEG) // --X -> X in getNode()
6183 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) in getNode()
6233 E->intersectFlagsWith(Flags); in getNode()
6238 N->setFlags(Flags); in getNode()
6256 case ISD::SUB: return C1 - C2; in FoldValue()
6319 // Fold and(x, undef) -> 0 in FoldValueWithUndef()
6320 // Fold mul(x, undef) -> 0 in FoldValueWithUndef()
6330 if (GA->getOpcode() != ISD::GlobalAddress) in FoldSymbolOffset()
6332 if (!TLI->isOffsetFoldingLegal(GA)) in FoldSymbolOffset()
6337 int64_t Offset = C2->getSExtValue(); in FoldSymbolOffset()
6340 case ISD::SUB: Offset = -uint64_t(Offset); break; in FoldSymbolOffset()
6343 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, in FoldSymbolOffset()
6344 GA->getOffset() + uint64_t(Offset)); in FoldSymbolOffset()
6361 llvm::any_of(Divisor->op_values(), in isUndef()
6375 // If the opcode is a target-specific ISD node, there's nothing we can do here. in FoldConstantArithmetic()
6401 const APInt &Val = C->getAPIntValue(); in FoldConstantArithmetic()
6405 C->isTargetOpcode(), C->isOpaque()); in FoldConstantArithmetic()
6407 if (C->isOpaque()) in FoldConstantArithmetic()
6412 C->isTargetOpcode(), C->isOpaque()); in FoldConstantArithmetic()
6415 if (TLI->isSExtCheaperThanZExt(N1.getValueType(), VT)) in FoldConstantArithmetic()
6417 C->isTargetOpcode(), C->isOpaque()); in FoldConstantArithmetic()
6419 C->isTargetOpcode(), C->isOpaque()); in FoldConstantArithmetic()
6421 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6422 C->isOpaque()); in FoldConstantArithmetic()
6424 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6425 C->isOpaque()); in FoldConstantArithmetic()
6427 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6428 C->isOpaque()); in FoldConstantArithmetic()
6430 return getConstant(Val.popcount(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6431 C->isOpaque()); in FoldConstantArithmetic()
6434 return getConstant(Val.countl_zero(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6435 C->isOpaque()); in FoldConstantArithmetic()
6438 return getConstant(Val.countr_zero(), DL, VT, C->isTargetOpcode(), in FoldConstantArithmetic()
6439 C->isOpaque()); in FoldConstantArithmetic()
6466 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) in FoldConstantArithmetic()
6468 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) in FoldConstantArithmetic()
6470 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) in FoldConstantArithmetic()
6472 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) in FoldConstantArithmetic()
6480 APFloat V = C->getValueAPF(); // make copy in FoldConstantArithmetic()
6536 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) in FoldConstantArithmetic()
6539 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16) in FoldConstantArithmetic()
6542 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) in FoldConstantArithmetic()
6545 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) in FoldConstantArithmetic()
6551 // Early-out if we failed to constant fold a bitcast. in FoldConstantArithmetic()
6563 if (C1->isOpaque() || C2->isOpaque()) in FoldConstantArithmetic()
6567 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); in FoldConstantArithmetic()
6578 // fold (add Sym, c) -> Sym+c in FoldConstantArithmetic()
6581 if (TLI->isCommutativeBinOp(Opcode)) in FoldConstantArithmetic()
6607 if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) && in FoldConstantArithmetic()
6608 BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) { in FoldConstantArithmetic()
6623 BVEltVT = BV1->getOperand(0).getValueType(); in FoldConstantArithmetic()
6626 BVEltVT = BV2->getOperand(0).getValueType(); in FoldConstantArithmetic()
6647 // (shl step_vector(C0), C1) -> (step_vector(C0 << C1)) in FoldConstantArithmetic()
6683 ? TargetLowering::getExtendForContent(TLI->getBooleanContents(VT)) in FoldConstantArithmetic()
6690 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); in FoldConstantArithmetic()
6696 // only have one operand to check. For fixed-length vector types we may have a combination of BUILD_VECTOR and SPLAT_VECTOR. in FoldConstantArithmetic()
6720 // truncation - do this before constant folding. in FoldConstantArithmetic()
6722 // Don't create illegally-typed nodes unless they're constants or undef in FoldConstantArithmetic()
6723 // - if we fail to constant fold we can't guarantee the (dead) nodes we're creating will be cleaned up later. in FoldConstantArithmetic()
6728 TLI->getTypeAction(*getContext(), InSVT) != in FoldConstantArithmetic()
6764 // should. That will require dealing with a potentially non-default rounding mode. in foldConstantFPMath()
6772 APFloat C1 = N1CFP->getValueAPF(); // make copy in foldConstantFPMath()
6773 const APFloat &C2 = N2CFP->getValueAPF(); in foldConstantFPMath()
6805 APFloat C1 = N1CFP->getValueAPF(); // make copy in foldConstantFPMath()
6816 // -0.0 - undef --> undef (consistent with "fneg undef") in foldConstantFPMath()
6818 if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef()) in foldConstantFPMath()
6839 // There's no need to assert on a byte-aligned pointer. All pointers are at least byte aligned. in getAssertAlign()
6869 Flags = Inserter->getFlags(); in getNode()
6875 if (!TLI->isCommutativeBinOp(Opcode)) in canonicalizeCommutativeBinop()
6879 // binop(const, nonconst) -> binop(nonconst, const) in canonicalizeCommutativeBinop()
6888 // binop(splat(x), step_vector) -> binop(step_vector, splat(x)) in canonicalizeCommutativeBinop()
6905 // Don't allow undefs in vector splats - we might be returning N2 when folding to zero etc. in getNode()
6937 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's worth handling here. in getNode()
6939 if (N2CV && N2CV->isZero()) in getNode()
6941 if (N2CV && N2CV->isAllOnes()) // X & -1 -> X in getNode()
6951 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so it's worth handling here. in getNode()
6953 if (N2CV && N2CV->isZero()) in getNode()
6966 const APInt &MulImm = N1->getConstantOperandAPInt(0); in getNode()
6967 const APInt &N2CImm = N2C->getAPIntValue(); in getNode()
6985 // fold (add_sat x, y) -> (or x, y) for bool types. in getNode()
6988 // fold (sub_sat x, y) -> (and x, ~y) for bool types. in getNode()
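For i1 lanes these folds are 1-bit truth tables: addition saturates 1 + 1 back to 1 (OR), and subtraction saturates 0 - 1 back to 0 (AND with the complement). A quick plain C++ check:

    static_assert(((1 | 1) == 1) && ((1 | 0) == 1) && ((0 | 0) == 0), "add_sat is or");
    static_assert(((1 & ~1) == 0) && ((1 & ~0) == 1) && ((0 & ~1) == 0), "sub_sat is and-not");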
7053 const APInt &MulImm = N1->getConstantOperandAPInt(0); in getNode()
7054 const APInt &ShiftImm = N2C->getAPIntValue(); in getNode()
7084 if (N2CV && N2CV->isZero()) in getNode()
7091 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && in getNode()
7097 EVT EVT = cast<VTSDNode>(N2)->getVT(); in getNode()
7109 EVT EVT = cast<VTSDNode>(N2)->getVT(); in getNode()
7124 Val <<= Val.getBitWidth() - FromBits; in getNode()
7125 Val.ashrInPlace(Val.getBitWidth() - FromBits); in getNode()
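That shift pair is the standard sign-extend-in-register trick; the same computation on a bare APInt (illustrative):

    #include "llvm/ADT/APInt.h"

    llvm::APInt signExtendInReg(llvm::APInt Val, unsigned FromBits) {
      unsigned BW = Val.getBitWidth();
      Val <<= BW - FromBits;          // move the FromBits-wide field to the top
      Val.ashrInPlace(BW - FromBits); // arithmetic shift back replicates its sign bit
      return Val;                     // e.g. 0x80 in 8-of-32 bits -> 0xFFFFFF80
    }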
7130 const APInt &Val = N1C->getAPIntValue(); in getNode()
7144 APInt Val = C->getAPIntValue(); in getNode()
7160 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() && in getNode()
7168 assert(!cast<VTSDNode>(N2)->getVT().isVector() && in getNode()
7170 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) && in getNode()
7183 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length vectors. in getNode()
7187 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) in getNode()
7199 N1.getOperand(N2C->getZExtValue() / Factor), in getNode()
7200 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); in getNode()
7211 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; in getNode()
7233 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { in getNode()
7248 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). in getNode()
7252 // vectors - in theory we could support this, but we don't want to do this in getNode()
7262 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); in getNode()
7269 // 64-bit integers into 32-bit parts. Instead of building the extract of the BUILD_PAIR, just use the operand. in getNode()
7272 return N1.getOperand(N2C->getZExtValue()); in getNode()
7277 unsigned Shift = ElementSize * N2C->getZExtValue(); in getNode()
7278 const APInt &Val = N1C->getAPIntValue(); in getNode()
7295 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= in getNode()
7298 assert(N2C->getAPIntValue().getBitWidth() == in getNode()
7299 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && in getNode()
7315 return N1.getOperand(N2C->getZExtValue() / Factor); in getNode()
7333 if (TLI->isCommutativeBinOp(Opcode)) { in getNode()
7338 return getUNDEF(VT); // fold op(undef, arg2) -> undef in getNode()
7346 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0 in getNode()
7356 // Handle undef ^ undef -> 0 special case. This is a common idiom (misuse). in getNode()
7366 return getUNDEF(VT); // fold op(arg1, undef) -> undef in getNode()
7371 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0 in getNode()
7388 E->intersectFlagsWith(Flags); in getNode()
7393 N->setFlags(Flags); in getNode()
7411 Flags = Inserter->getFlags(); in getNode()
7433 APFloat V1 = N1CFP->getValueAPF(); in getNode()
7434 const APFloat &V2 = N2CFP->getValueAPF(); in getNode()
7435 const APFloat &V3 = N3CFP->getValueAPF(); in getNode()
7468 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) in getNode()
7486 if (cast<ConstantSDNode>(N3)->isZero()) in getNode()
7492 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except in getNode()
7494 // for scalable vectors, where code is generated to deal with out-of-bounds cases correctly. in getNode()
7496 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) in getNode()
7499 // Undefined index can be assumed out-of-bounds, so that's UNDEF too. in getNode()
7529 (N2VT.getVectorMinNumElements() + N3->getAsZExtVal()) <= in getNode()
7532 assert(N3->getAsAPIntVal().getBitWidth() == in getNode()
7533 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && in getNode()
7586 E->intersectFlagsWith(Flags); in getNode()
7591 N->setFlags(Flags); in getNode()
7618 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
7629 for (SDNode *U : getEntryNode().getNode()->uses()) in getStackArgumentTokenFactor()
7631 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) in getStackArgumentTokenFactor()
7632 if (FI->getIndex() < 0) in getStackArgumentTokenFactor()
7639 /// getMemsetValue - Vectorized representation of the memset value
7647 assert(C->getAPIntValue().getBitWidth() == 8); in getMemsetValue()
7648 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); in getMemsetValue()
7651 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); in getMemsetValue()
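// Standalone sketch of the splat above: APInt::getSplat replicates the
// 8-bit fill value across the store width, so memset can use wide stores.
// A loop over uint64_t shows the same effect (an assumed simplification).
#include <cassert>
#include <cstdint>
static uint64_t splatByte(uint8_t Fill, unsigned NumBits) {
  uint64_t Val = 0;
  for (unsigned i = 0; i != NumBits; i += 8)
    Val |= (uint64_t)Fill << i;
  return Val;
}
int main() { assert(splatByte(0xAB, 32) == 0xABABABABu); }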
7658 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); in getMemsetValue()
7680 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
7714 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; in getMemsetStringVal()
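// Sketch of the packing above: getMemsetStringVal places source bytes
// most-significant first, hence the (NumVTBytes-i-1)*8 shift per byte.
#include <cassert>
#include <cstdint>
int main() {
  const char Slice[] = {'a', 'b', 'c', 'd'};
  unsigned NumVTBytes = 4;
  uint64_t Val = 0;
  for (unsigned i = 0; i != NumVTBytes; ++i)
    Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes - i - 1) * 8;
  assert(Val == 0x61626364u); // "abcd" packed big-endian
}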
7764 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, in isMemSrcFromConstant()
7765 SrcDelta + G->getOffset()); in isMemSrcFromConstant()
7770 // On Darwin, -Os means optimize for size without hurting performance, so in shouldLowerMemFuncForSize()
7771 // only really optimize for size when -Oz (MinSize) is used. in shouldLowerMemFuncForSize()
7795 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), in chainLoadsAndStoresForMemcpy()
7796 ST->getBasePtr(), ST->getMemoryVT(), in chainLoadsAndStoresForMemcpy()
7797 ST->getMemOperand()); in chainLoadsAndStoresForMemcpy()
7827 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) in getMemcpyLoadsAndStores()
7856 if (!TRI->hasStackRealignment(MF)) in getMemcpyLoadsAndStores()
7862 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) in getMemcpyLoadsAndStores()
7863 MFI.setObjectAlignment(FI->getIndex(), NewAlign); in getMemcpyLoadsAndStores()
7875 AA->pointsToConstantMemory(MemoryLocation(SrcVal, Size, AAInfo)); in getMemcpyLoadsAndStores()
7892 assert(i == NumMemOps-1 && i != 0); in getMemcpyLoadsAndStores()
7893 SrcOff -= VTSize - Size; in getMemcpyLoadsAndStores()
7894 DstOff -= VTSize - Size; in getMemcpyLoadsAndStores()
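// Hedged sketch of the adjustment above: when the last chosen type is wider
// than the bytes that remain, the final load/store is slid back so it ends
// exactly at the end of the buffer, overlapping the previous operation
// instead of running past the allocation.
#include <cassert>
int main() {
  unsigned TotalSize = 7;            // total bytes to copy
  unsigned VTSize = 4, Size = 3;     // 4-byte ops, 3 bytes left
  unsigned Off = 4;                  // where the tail op would start
  Off -= VTSize - Size;              // back up by 1 byte
  assert(Off + VTSize == TotalSize); // tail now ends at the buffer end
}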
7909 // This is an out-of-bounds access and hence UB. Pretend we read zero. in getMemcpyLoadsAndStores()
7956 Size -= VTSize; in getMemcpyLoadsAndStores()
7985 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; in getMemcpyLoadsAndStores()
7986 unsigned IndexTo = NumLdStInMemcpy - GlueIter; in getMemcpyLoadsAndStores()
8028 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) in getMemmoveLoadsAndStores()
8051 if (!TRI->hasStackRealignment(MF)) in getMemmoveLoadsAndStores()
8057 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) in getMemmoveLoadsAndStores()
8058 MFI.setObjectAlignment(FI->getIndex(), NewAlign); in getMemmoveLoadsAndStores()
8149 if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) in getMemsetStores()
8169 if (!TRI->hasStackRealignment(MF)) in getMemsetStores()
8175 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) in getMemsetStores()
8176 MFI.setObjectAlignment(FI->getIndex(), NewAlign); in getMemsetStores()
8202 assert(i == NumMemOps-1 && i != 0); in getMemsetStores()
8203 DstOff -= VTSize - Size; in getMemsetStores()
8240 Size -= VTSize; in getMemsetStores()
8250 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) { in checkAddrSpaceIsValidForLibcall()
8262 // For cases within the target-specified limits, this is the best choice. in getMemcpy()
8266 if (ConstantSize->isZero()) in getMemcpy()
8270 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, in getMemcpy()
8276 // Then check to see if we should lower the memcpy with target-specific in getMemcpy()
8279 SDValue Result = TSI->EmitTargetCodeForMemcpy( in getMemcpy()
8291 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, in getMemcpy()
8320 TLI->getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy"); in getMemcpy()
8322 IsTailCall = CI && CI->isTailCall() && in getMemcpy()
8329 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), in getMemcpy()
8331 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), in getMemcpy()
8332 TLI->getPointerTy(getDataLayout())), in getMemcpy()
8337 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); in getMemcpy()
8369 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), in getAtomicMemcpy()
8371 getExternalSymbol(TLI->getLibcallName(LibraryCall), in getAtomicMemcpy()
8372 TLI->getPointerTy(getDataLayout())), in getAtomicMemcpy()
8377 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); in getAtomicMemcpy()
8389 // For cases within the target-specified limits, this is the best choice. in getMemmove()
8393 if (ConstantSize->isZero()) in getMemmove()
8397 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, in getMemmove()
8403 // Then check to see if we should lower the memmove with target-specific in getMemmove()
8407 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, in getMemmove()
8436 TLI->getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove"); in getMemmove()
8438 IsTailCall = CI && CI->isTailCall() && in getMemmove()
8445 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), in getMemmove()
8447 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), in getMemmove()
8448 TLI->getPointerTy(getDataLayout())), in getMemmove()
8453 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); in getMemmove()
8485 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), in getAtomicMemmove()
8487 getExternalSymbol(TLI->getLibcallName(LibraryCall), in getAtomicMemmove()
8488 TLI->getPointerTy(getDataLayout())), in getAtomicMemmove()
8493 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); in getAtomicMemmove()
8504 // For cases within the target-specified limits, this is the best choice. in getMemset()
8508 if (ConstantSize->isZero()) in getMemset()
8512 ConstantSize->getZExtValue(), Alignment, in getMemset()
8519 // Then check to see if we should lower the memset with target-specific in getMemset()
8522 SDValue Result = TSI->EmitTargetCodeForMemset( in getMemset()
8533 ConstantSize->getZExtValue(), Alignment, in getMemset()
8567 TLI->getLibcallCallingConv(RTLIB::BZERO), Type::getVoidTy(Ctx), in getMemset()
8568 getExternalSymbol(BzeroName, TLI->getPointerTy(DL)), std::move(Args)); in getMemset()
8574 CLI.setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), in getMemset()
8576 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), in getMemset()
8577 TLI->getPointerTy(DL)), in getMemset()
8581 TLI->getLibcallName(RTLIB::MEMSET) == StringRef("memset"); in getMemset()
8587 CI && CI->isTailCall() && in getMemset()
8591 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); in getMemset()
8623 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), in getAtomicMemset()
8625 getExternalSymbol(TLI->getLibcallName(LibraryCall), in getAtomicMemset()
8626 TLI->getPointerTy(getDataLayout())), in getAtomicMemset()
8631 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); in getAtomicMemset()
8641 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getAtomic()
8642 ID.AddInteger(MMO->getFlags()); in getAtomic()
8645 cast<AtomicSDNode>(E)->refineAlignment(MMO); in getAtomic()
8712 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
8748 "Opcode is not a memory-accessing opcode!"); in getMemIntrinsicNode()
8752 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { in getMemIntrinsicNode()
8757 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMemIntrinsicNode()
8758 ID.AddInteger(MMO->getFlags()); in getMemIntrinsicNode()
8762 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); in getMemIntrinsicNode()
8836 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
8846 FI->getIndex(), Offset); in InferPointerInfo()
8854 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); in InferPointerInfo()
8857 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); in InferPointerInfo()
8860 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
8869 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); in InferPointerInfo()
8906 assert(VT == MemVT && "Non-extending load from different memory type!"); in getLoad()
8912 "Cannot convert from FP to Int or Int -> FP!"); in getLoad()
8931 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getLoad()
8932 ID.AddInteger(MMO->getFlags()); in getLoad()
8935 cast<LoadSDNode>(E)->refineAlignment(MMO); in getLoad()
8989 assert(LD->getOffset().isUndef() && "Load is already an indexed load!"); in getIndexedLoad()
8992 LD->getMemOperand()->getFlags() & in getIndexedLoad()
8994 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, in getIndexedLoad()
8995 LD->getChain(), Base, Offset, LD->getPointerInfo(), in getIndexedLoad()
8996 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo()); in getIndexedLoad()
9032 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getStore()
9033 ID.AddInteger(MMO->getFlags()); in getStore()
9036 cast<StoreSDNode>(E)->refineAlignment(MMO); in getStore()
9084 "Can't do FP-INT conversion!"); in getTruncStore()
9099 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getTruncStore()
9100 ID.AddInteger(MMO->getFlags()); in getTruncStore()
9103 cast<StoreSDNode>(E)->refineAlignment(MMO); in getTruncStore()
9121 assert(ST->getOffset().isUndef() && "Store is already an indexed store!"); in getIndexedStore()
9123 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; in getIndexedStore()
9126 ID.AddInteger(ST->getMemoryVT().getRawBits()); in getIndexedStore()
9127 ID.AddInteger(ST->getRawSubclassData()); in getIndexedStore()
9128 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); in getIndexedStore()
9129 ID.AddInteger(ST->getMemOperand()->getFlags()); in getIndexedStore()
9135 ST->isTruncatingStore(), ST->getMemoryVT(), in getIndexedStore()
9136 ST->getMemOperand()); in getIndexedStore()
9186 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getLoadVP()
9187 ID.AddInteger(MMO->getFlags()); in getLoadVP()
9190 cast<VPLoadSDNode>(E)->refineAlignment(MMO); in getLoadVP()
9251 assert(LD->getOffset().isUndef() && "Load is already an indexed load!"); in getIndexedLoadVP()
9254 LD->getMemOperand()->getFlags() & in getIndexedLoadVP()
9256 return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, in getIndexedLoadVP()
9257 LD->getChain(), Base, Offset, LD->getMask(), in getIndexedLoadVP()
9258 LD->getVectorLength(), LD->getPointerInfo(), in getIndexedLoadVP()
9259 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(), in getIndexedLoadVP()
9260 nullptr, LD->isExpandingLoad()); in getIndexedLoadVP()
9279 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getStoreVP()
9280 ID.AddInteger(MMO->getFlags()); in getStoreVP()
9283 cast<VPStoreSDNode>(E)->refineAlignment(MMO); in getStoreVP()
9335 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!"); in getTruncStoreVP()
9350 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getTruncStoreVP()
9351 ID.AddInteger(MMO->getFlags()); in getTruncStoreVP()
9354 cast<VPStoreSDNode>(E)->refineAlignment(MMO); in getTruncStoreVP()
9373 assert(ST->getOffset().isUndef() && "Store is already an indexed store!"); in getIndexedStoreVP()
9375 SDValue Ops[] = {ST->getChain(), ST->getValue(), Base, in getIndexedStoreVP()
9376 Offset, ST->getMask(), ST->getVectorLength()}; in getIndexedStoreVP()
9379 ID.AddInteger(ST->getMemoryVT().getRawBits()); in getIndexedStoreVP()
9380 ID.AddInteger(ST->getRawSubclassData()); in getIndexedStoreVP()
9381 ID.AddInteger(ST->getPointerInfo().getAddrSpace()); in getIndexedStoreVP()
9382 ID.AddInteger(ST->getMemOperand()->getFlags()); in getIndexedStoreVP()
9388 dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(), in getIndexedStoreVP()
9389 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand()); in getIndexedStoreVP()
9414 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getStridedLoadVP()
9418 cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO); in getStridedLoadVP()
9470 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getStridedStoreVP()
9473 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO); in getStridedStoreVP()
9504 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!"); in getTruncStridedStoreVP()
9519 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getTruncStridedStoreVP()
9522 cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO); in getTruncStridedStoreVP()
9547 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getGatherVP()
9548 ID.AddInteger(MMO->getFlags()); in getGatherVP()
9551 cast<VPGatherSDNode>(E)->refineAlignment(MMO); in getGatherVP()
9559 assert(N->getMask().getValueType().getVectorElementCount() == in getGatherVP()
9560 N->getValueType(0).getVectorElementCount() && in getGatherVP()
9562 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() == in getGatherVP()
9563 N->getValueType(0).getVectorElementCount().isScalable() && in getGatherVP()
9566 N->getIndex().getValueType().getVectorElementCount(), in getGatherVP()
9567 N->getValueType(0).getVectorElementCount()) && in getGatherVP()
9569 assert(isa<ConstantSDNode>(N->getScale()) && in getGatherVP()
9570 N->getScale()->getAsAPIntVal().isPowerOf2() && in getGatherVP()
9591 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getScatterVP()
9592 ID.AddInteger(MMO->getFlags()); in getScatterVP()
9595 cast<VPScatterSDNode>(E)->refineAlignment(MMO); in getScatterVP()
9602 assert(N->getMask().getValueType().getVectorElementCount() == in getScatterVP()
9603 N->getValue().getValueType().getVectorElementCount() && in getScatterVP()
9606 N->getIndex().getValueType().getVectorElementCount().isScalable() == in getScatterVP()
9607 N->getValue().getValueType().getVectorElementCount().isScalable() && in getScatterVP()
9610 N->getIndex().getValueType().getVectorElementCount(), in getScatterVP()
9611 N->getValue().getValueType().getVectorElementCount()) && in getScatterVP()
9613 assert(isa<ConstantSDNode>(N->getScale()) && in getScatterVP()
9614 N->getScale()->getAsAPIntVal().isPowerOf2() && in getScatterVP()
9641 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMaskedLoad()
9642 ID.AddInteger(MMO->getFlags()); in getMaskedLoad()
9645 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); in getMaskedLoad()
9663 assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!"); in getIndexedMaskedLoad()
9664 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, in getIndexedMaskedLoad()
9665 Offset, LD->getMask(), LD->getPassThru(), in getIndexedMaskedLoad()
9666 LD->getMemoryVT(), LD->getMemOperand(), AM, in getIndexedMaskedLoad()
9667 LD->getExtensionType(), LD->isExpandingLoad()); in getIndexedMaskedLoad()
9689 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMaskedStore()
9690 ID.AddInteger(MMO->getFlags()); in getMaskedStore()
9693 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); in getMaskedStore()
9712 assert(ST->getOffset().isUndef() && in getIndexedMaskedStore()
9714 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, in getIndexedMaskedStore()
9715 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), in getIndexedMaskedStore()
9716 AM, ST->isTruncatingStore(), ST->isCompressingStore()); in getIndexedMaskedStore()
9731 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMaskedGather()
9732 ID.AddInteger(MMO->getFlags()); in getMaskedGather()
9735 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); in getMaskedGather()
9743 assert(N->getPassThru().getValueType() == N->getValueType(0) && in getMaskedGather()
9745 assert(N->getMask().getValueType().getVectorElementCount() == in getMaskedGather()
9746 N->getValueType(0).getVectorElementCount() && in getMaskedGather()
9748 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() == in getMaskedGather()
9749 N->getValueType(0).getVectorElementCount().isScalable() && in getMaskedGather()
9752 N->getIndex().getValueType().getVectorElementCount(), in getMaskedGather()
9753 N->getValueType(0).getVectorElementCount()) && in getMaskedGather()
9755 assert(isa<ConstantSDNode>(N->getScale()) && in getMaskedGather()
9756 N->getScale()->getAsAPIntVal().isPowerOf2() && in getMaskedGather()
9778 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMaskedScatter()
9779 ID.AddInteger(MMO->getFlags()); in getMaskedScatter()
9782 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); in getMaskedScatter()
9790 assert(N->getMask().getValueType().getVectorElementCount() == in getMaskedScatter()
9791 N->getValue().getValueType().getVectorElementCount() && in getMaskedScatter()
9794 N->getIndex().getValueType().getVectorElementCount().isScalable() == in getMaskedScatter()
9795 N->getValue().getValueType().getVectorElementCount().isScalable() && in getMaskedScatter()
9798 N->getIndex().getValueType().getVectorElementCount(), in getMaskedScatter()
9799 N->getValue().getValueType().getVectorElementCount()) && in getMaskedScatter()
9801 assert(isa<ConstantSDNode>(N->getScale()) && in getMaskedScatter()
9802 N->getScale()->getAsAPIntVal().isPowerOf2() && in getMaskedScatter()
9823 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getMaskedHistogram()
9824 ID.AddInteger(MMO->getFlags()); in getMaskedHistogram()
9827 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); in getMaskedHistogram()
9835 assert(N->getMask().getValueType().getVectorElementCount() == in getMaskedHistogram()
9836 N->getIndex().getValueType().getVectorElementCount() && in getMaskedHistogram()
9838 assert(isa<ConstantSDNode>(N->getScale()) && in getMaskedHistogram()
9839 N->getScale()->getAsAPIntVal().isPowerOf2() && in getMaskedHistogram()
9841 assert(N->getInc().getValueType().isInteger() && "Non-integer update value"); in getMaskedHistogram()
9860 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getGetFPEnv()
9861 ID.AddInteger(MMO->getFlags()); in getGetFPEnv()
9887 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); in getSetFPEnv()
9888 ID.AddInteger(MMO->getFlags()); in getSetFPEnv()
9905 // select undef, T, F --> T (if T is a constant), otherwise F in simplifySelect()
9906 // select ?, undef, F --> F in simplifySelect()
9907 // select ?, T, undef --> T in simplifySelect()
9915 // select true, T, F --> T in simplifySelect()
9916 // select false, T, F --> F in simplifySelect()
9918 return CondC->isZero() ? F : T; in simplifySelect()
9920 // TODO: This should simplify VSELECT with non-zero constant condition using in simplifySelect()
9924 if (CondC->isZero()) in simplifySelect()
9927 // select ?, T, T --> T in simplifySelect()
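// The select folds above in standalone form; bool/int stand in for the
// condition and value operands (an assumed simplification), and -1 is a
// hypothetical "no fold" sentinel.
#include <cassert>
static int simplifySelect(bool CondKnown, bool CondC, int T, int F) {
  if (T == F)
    return T;             // select ?, T, T --> T
  if (CondKnown)
    return CondC ? T : F; // select true/false, T, F
  return -1;              // no simplification
}
int main() {
  assert(simplifySelect(true, true, 1, 2) == 1);
  assert(simplifySelect(false, false, 7, 7) == 7);
}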
9935 // shift undef, Y --> 0 (can always assume that the undef value is 0) in simplifyShift()
9938 // shift X, undef --> undef (because it may shift by the bitwidth) in simplifyShift()
9942 // shift 0, Y --> 0 in simplifyShift()
9943 // shift X, 0 --> X in simplifyShift()
9947 // shift X, C >= bitwidth(X) --> undef in simplifyShift()
9950 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); in simplifyShift()
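// Sketch of the guard above: a shift amount at or beyond the bit width
// yields undef, mirroring the same rule in C++ itself (plain unsigned in
// place of APInt, an assumption).
#include <cassert>
static bool isUndefShiftAmount(unsigned Amt, unsigned BitWidth) {
  return Amt >= BitWidth; // shift X, C >= bitwidth(X) --> undef
}
int main() {
  assert(isUndefShiftAmount(32, 32));
  assert(!isUndefShiftAmount(31, 32));
}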
9955 // shift i1/vXi1 X, Y --> X (any non-zero shift amount is undefined). in simplifyShift()
9969 bool HasNan = (XC && XC->getValueAPF().isNaN()) || in simplifyFPBinop()
9970 (YC && YC->getValueAPF().isNaN()); in simplifyFPBinop()
9971 bool HasInf = (XC && XC->getValueAPF().isInfinity()) || in simplifyFPBinop()
9972 (YC && YC->getValueAPF().isInfinity()); in simplifyFPBinop()
9983 // X + -0.0 --> X in simplifyFPBinop()
9985 if (YC->getValueAPF().isNegZero()) in simplifyFPBinop()
9988 // X - +0.0 --> X in simplifyFPBinop()
9990 if (YC->getValueAPF().isPosZero()) in simplifyFPBinop()
9993 // X * 1.0 --> X in simplifyFPBinop()
9994 // X / 1.0 --> X in simplifyFPBinop()
9996 if (YC->getValueAPF().isExactlyValue(1.0)) in simplifyFPBinop()
9999 // X * 0.0 --> 0.0 in simplifyFPBinop()
10001 if (YC->getValueAPF().isZero()) in simplifyFPBinop()
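// Standalone check of the identities above, and why they are asymmetric:
// X + -0.0 and X - +0.0 preserve X even when X is -0.0, whereas X + +0.0
// would turn -0.0 into +0.0, so there is no fold for that form.
#include <cassert>
#include <cmath>
int main() {
  double X = -0.0;
  assert(std::signbit(X + -0.0)); // X + -0.0 keeps the signed zero
  assert(!std::signbit(X + 0.0)); // X + +0.0 does not: no fold there
  assert(std::signbit(X * 1.0));  // X * 1.0 --> X
}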
10033 Flags = Inserter->getFlags(); in getNode()
10142 N->setFlags(Flags); in getNode()
10158 Flags = Inserter->getFlags(); in getNode()
10187 // (X +- 0) -> X with zero-overflow. in getNode()
10190 if (N2CV && N2CV->isZero()) { in getNode()
10200 // {vXi1,vXi1} (u/s)addo(vXi1 x, vXi1 y) -> {xor(x,y),and(x,y)} in getNode()
10206 // {vXi1,vXi1} (u/s)subo(vXi1 x, vXi1 y) -> {xor(x,y),and(~x,y)} in getNode()
10242 APInt Val = LHS->getAPIntValue(); in getNode()
10243 APInt Mul = RHS->getAPIntValue(); in getNode()
10268 frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven); in getNode()
10305 (Ops[2]->getAsZExtVal() == 0 || Ops[2]->getAsZExtVal() == 1) && in getNode()
10316 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) in getNode()
10323 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) in getNode()
10332 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { in getNode()
10347 N->setFlags(Flags); in getNode()
10409 return Result->getSDVTList(); in getVTList()
10429 return Result->getSDVTList(); in getVTList()
10451 return Result->getSDVTList(); in getVTList()
10470 return Result->getSDVTList(); in getVTList()
10474 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
10481 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); in UpdateNodeOperands()
10484 if (Op == N->getOperand(0)) return N; in UpdateNodeOperands()
10497 N->OperandList[0].set(Op); in UpdateNodeOperands()
10506 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); in UpdateNodeOperands()
10509 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) in UpdateNodeOperands()
10523 if (N->OperandList[0] != Op1) in UpdateNodeOperands()
10524 N->OperandList[0].set(Op1); in UpdateNodeOperands()
10525 if (N->OperandList[1] != Op2) in UpdateNodeOperands()
10526 N->OperandList[1].set(Op2); in UpdateNodeOperands()
10557 assert(N->getNumOperands() == NumOps && in UpdateNodeOperands()
10561 if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) in UpdateNodeOperands()
10576 if (N->OperandList[i] != Ops[i]) in UpdateNodeOperands()
10577 N->OperandList[i].set(Ops[i]); in UpdateNodeOperands()
10585 /// DropOperands - Release the operands and set this node to have
10599 N->clearMemRefs(); in setNodeMemRefs()
10605 N->MemRefs = NewMemRefs[0]; in setNodeMemRefs()
10606 N->NumMemRefs = 1; in setNodeMemRefs()
10613 N->MemRefs = MemRefsBuffer; in setNodeMemRefs()
10614 N->NumMemRefs = static_cast<int>(NewMemRefs.size()); in setNodeMemRefs()
10617 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
10685 // Reset the NodeID to -1. in SelectNodeTo()
10686 New->setNodeId(-1); in SelectNodeTo()
10694 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
10697 /// This will make the debugger work better at -O0, where there is a higher in UpdateSDLocOnMergeSDNode()
10702 DebugLoc NLoc = N->getDebugLoc(); in UpdateSDLocOnMergeSDNode()
10704 N->setDebugLoc(DebugLoc()); in UpdateSDLocOnMergeSDNode()
10706 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); in UpdateSDLocOnMergeSDNode()
10707 N->setIROrder(Order); in UpdateSDLocOnMergeSDNode()
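// Hedged sketch of the merge policy above (ints stand in for DebugLoc and
// the IR order; names are illustrative): when CSE merges nodes carrying
// different source locations at -O0, keeping either location would mislead
// the debugger, so the location is dropped and the smaller order wins.
#include <algorithm>
#include <cassert>
#include <optional>
struct Loc { std::optional<int> Line; unsigned Order; };
static Loc mergeLocs(Loc A, Loc B) {
  return {A.Line == B.Line ? A.Line : std::nullopt, // conflict -> drop
          std::min(A.Order, B.Order)};
}
int main() {
  Loc R = mergeLocs({10, 5}, {20, 3});
  assert(!R.Line && R.Order == 3);
}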
10711 /// MorphNodeTo - This *mutates* the specified node to have the specified
10731 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { in MorphNodeTo()
10742 N->NodeType = Opc; in MorphNodeTo()
10743 N->ValueList = VTs.VTs; in MorphNodeTo()
10744 N->NumValues = VTs.NumVTs; in MorphNodeTo()
10749 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { in MorphNodeTo()
10753 if (Used->use_empty()) in MorphNodeTo()
10759 MN->clearMemRefs(); in MorphNodeTo()
10770 if (N->use_empty()) in MorphNodeTo()
10781 unsigned OrigOpc = Node->getOpcode(); in mutateStrictFPToFP()
10793 assert(Node->getNumValues() == 2 && "Unexpected number of results!"); in mutateStrictFPToFP()
10795 // We're taking this node out of the chain, so we need to re-link things. in mutateStrictFPToFP()
10796 SDValue InputChain = Node->getOperand(0); in mutateStrictFPToFP()
10801 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) in mutateStrictFPToFP()
10802 Ops.push_back(Node->getOperand(i)); in mutateStrictFPToFP()
10804 SDVTList VTs = getVTList(Node->getValueType(0)); in mutateStrictFPToFP()
10813 Res->setNodeId(-1); in mutateStrictFPToFP()
10822 /// getMachineNode - These are used for target selectors to create a new node
10919 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; in getMachineNode()
10944 /// getTargetExtractSubreg - A convenience function for creating
10954 /// getTargetInsertSubreg - A convenience function for creating
10964 /// getNodeIfExists - Get the specified node if it's already available, or
10970 Flags = Inserter->getFlags(); in getNodeIfExists()
10977 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { in getNodeIfExists()
10982 E->intersectFlagsWith(Flags); in getNodeIfExists()
10989 /// doesNodeExist - Check if a node exists without modifying its flags.
10992 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { in doesNodeExist()
11002 /// getDbgValue - Creates an SDDbgValue node.
11008 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getDbgValue()
11009 "Expected inlined-at fields to agree"); in getDbgValue()
11010 return new (DbgInfo->getAlloc()) in getDbgValue()
11011 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R), in getDbgValue()
11021 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getConstantDbgValue()
11022 "Expected inlined-at fields to agree"); in getConstantDbgValue()
11023 return new (DbgInfo->getAlloc()) in getConstantDbgValue()
11024 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {}, in getConstantDbgValue()
11035 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getFrameIndexDbgValue()
11036 "Expected inlined-at fields to agree"); in getFrameIndexDbgValue()
11047 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getFrameIndexDbgValue()
11048 "Expected inlined-at fields to agree"); in getFrameIndexDbgValue()
11049 return new (DbgInfo->getAlloc()) in getFrameIndexDbgValue()
11050 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI), in getFrameIndexDbgValue()
11059 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getVRegDbgValue()
11060 "Expected inlined-at fields to agree"); in getVRegDbgValue()
11061 return new (DbgInfo->getAlloc()) in getVRegDbgValue()
11062 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg), in getVRegDbgValue()
11072 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && in getDbgValueList()
11073 "Expected inlined-at fields to agree"); in getDbgValueList()
11074 return new (DbgInfo->getAlloc()) in getDbgValueList()
11075 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect, in getDbgValueList()
11092 if (!FromNode->getHasDebugValue()) in transferDbgValues()
11101 if (Dbg->isInvalidated()) in transferDbgValues()
11104 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); in transferDbgValues()
11109 auto NewLocOps = Dbg->copyLocationOps(); in transferDbgValues()
11122 DIVariable *Var = Dbg->getVariable(); in transferDbgValues()
11123 auto *Expr = Dbg->getExpression(); in transferDbgValues()
11126 // When splitting a larger (e.g., sign-extended) value whose in transferDbgValues()
11129 if (auto FI = Expr->getFragmentInfo()) in transferDbgValues()
11130 if (OffsetInBits + SizeInBits > FI->SizeInBits) in transferDbgValues()
11139 auto AdditionalDependencies = Dbg->getAdditionalDependencies(); in transferDbgValues()
11142 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(), in transferDbgValues()
11143 Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()), in transferDbgValues()
11144 Dbg->isVariadic()); in transferDbgValues()
11149 Dbg->setIsInvalidated(); in transferDbgValues()
11150 Dbg->setIsEmitted(); in transferDbgValues()
11155 assert(is_contained(Dbg->getSDNodes(), ToNode) && in transferDbgValues()
11167 if (DV->isInvalidated()) in salvageDebugInfo()
11182 if (!RHSConstant && DV->isIndirect()) in salvageDebugInfo()
11189 auto *DIExpr = DV->getExpression(); in salvageDebugInfo()
11190 auto NewLocOps = DV->copyLocationOps(); in salvageDebugInfo()
11226 DV->isVariadic() || OrigLocOpsSize != NewLocOps.size(); in salvageDebugInfo()
11228 auto AdditionalDependencies = DV->getAdditionalDependencies(); in salvageDebugInfo()
11230 DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies, in salvageDebugInfo()
11231 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic); in salvageDebugInfo()
11233 DV->setIsInvalidated(); in salvageDebugInfo()
11234 DV->setIsEmitted(); in salvageDebugInfo()
11236 N0.getNode()->dumprFull(this); in salvageDebugInfo()
11246 DIExpression *DbgExpression = DV->getExpression(); in salvageDebugInfo()
11248 auto NewLocOps = DV->copyLocationOps(); in salvageDebugInfo()
11263 getDbgValueList(DV->getVariable(), DbgExpression, NewLocOps, in salvageDebugInfo()
11264 DV->getAdditionalDependencies(), DV->isIndirect(), in salvageDebugInfo()
11265 DV->getDebugLoc(), DV->getOrder(), DV->isVariadic()); in salvageDebugInfo()
11268 DV->setIsInvalidated(); in salvageDebugInfo()
11269 DV->setIsEmitted(); in salvageDebugInfo()
11270 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this); in salvageDebugInfo()
11278 assert(!Dbg->getSDNodes().empty() && in salvageDebugInfo()
11287 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) && in getDbgLabel()
11288 "Expected inlined-at fields to agree"); in getDbgLabel()
11289 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); in getDbgLabel()
11294 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
11317 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11324 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && in ReplaceAllUsesWith()
11340 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); in ReplaceAllUsesWith()
11356 if (To->isDivergent() != From->isDivergent()) in ReplaceAllUsesWith()
11369 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11377 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) in ReplaceAllUsesWith()
11378 assert((!From->hasAnyUseOfValue(i) || in ReplaceAllUsesWith()
11379 From->getValueType(i) == To->getValueType(i)) && in ReplaceAllUsesWith()
11388 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) in ReplaceAllUsesWith()
11389 if (From->hasAnyUseOfValue(i)) { in ReplaceAllUsesWith()
11390 assert((i < To->getNumValues()) && "Invalid To location"); in ReplaceAllUsesWith()
11398 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); in ReplaceAllUsesWith()
11414 if (To->isDivergent() != From->isDivergent()) in ReplaceAllUsesWith()
11428 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
11434 if (From->getNumValues() == 1) // Handle the simple case efficiently. in ReplaceAllUsesWith()
11437 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) { in ReplaceAllUsesWith()
11446 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); in ReplaceAllUsesWith()
11464 To_IsDivergent |= ToOp->isDivergent(); in ReplaceAllUsesWith()
11467 if (To_IsDivergent != From->isDivergent()) in ReplaceAllUsesWith()
11480 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
11488 if (From.getNode()->getNumValues() == 1) { in ReplaceAllUsesOfValueWith()
11499 SDNode::use_iterator UI = From.getNode()->use_begin(), in ReplaceAllUsesOfValueWith()
11500 UE = From.getNode()->use_end(); in ReplaceAllUsesOfValueWith()
11528 if (To->isDivergent() != From->isDivergent()) in ReplaceAllUsesOfValueWith()
11548 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
11556 /// operator< - Sort Memos by User.
11561 /// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node
11581 if (TLI->isSDNodeAlwaysUniform(N)) { in calculateDivergence()
11582 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) && in calculateDivergence()
11586 if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA)) in calculateDivergence()
11588 for (const auto &Op : N->ops()) { in calculateDivergence()
11589 if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent()) in calculateDivergence()
11600 if (N->SDNodeBits.IsDivergent != IsDivergent) { in updateDivergence()
11601 N->SDNodeBits.IsDivergent = IsDivergent; in updateDivergence()
11602 llvm::append_range(Worklist, N->uses()); in updateDivergence()
11618 for (auto *U : N->uses()) { in CreateTopologicalOrder()
11620 if (0 == --UnsortedOps) in CreateTopologicalOrder()
11631 assert(calculateDivergence(N) == N->isDivergent() && in VerifyDAGDivergence()
11637 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
11649 copyExtraInfo(From->getNode(), To->getNode()); in ReplaceAllUsesOfValuesWith()
11658 for (SDNode::use_iterator UI = FromNode->use_begin(), in ReplaceAllUsesOfValuesWith()
11659 E = FromNode->use_end(); UI != E; ++UI) { in ReplaceAllUsesOfValuesWith()
11705 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
11747 for (SDNode *P : N->uses()) { in AssignTopologicalOrder()
11748 unsigned Degree = P->getNodeId(); in AssignTopologicalOrder()
11750 --Degree; in AssignTopologicalOrder()
11753 P->setNodeId(DAGSize++); in AssignTopologicalOrder()
11754 if (P->getIterator() != SortedPos) in AssignTopologicalOrder()
11760 P->setNodeId(Degree); in AssignTopologicalOrder()
11768 S->dumprFull(this); dbgs() << "\n"; in AssignTopologicalOrder()
11781 "First node in topological sort has non-zero id!"); in AssignTopologicalOrder()
11784 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 && in AssignTopologicalOrder()
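// Hedged sketch of the degree-counting topological sort the routine above
// implements (Kahn's algorithm): each node's id temporarily holds its count
// of unscheduled operands. A plain adjacency list stands in for the SDNode
// use lists (an assumption).
#include <cassert>
#include <queue>
#include <vector>
int main() {
  // Edges point from operand to user, as in the DAG.
  std::vector<std::vector<int>> Users = {{2}, {2}, {3}, {}};
  std::vector<int> Degree = {0, 0, 2, 1}; // unscheduled-operand counts
  std::queue<int> Ready;
  for (int N = 0; N != 4; ++N)
    if (Degree[N] == 0)
      Ready.push(N);
  std::vector<int> Order;
  while (!Ready.empty()) {
    int N = Ready.front();
    Ready.pop();
    Order.push_back(N);
    for (int U : Users[N])
      if (--Degree[U] == 0) // operand scheduled: user's degree drops
        Ready.push(U);
  }
  assert(Order.size() == 4 && Order.back() == 3);
}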
11792 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
11795 for (SDNode *SD : DB->getSDNodes()) { in AddDbgValue()
11798 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); in AddDbgValue()
11799 SD->setHasDebugValue(true); in AddDbgValue()
11801 DbgInfo->add(DB, isParameter); in AddDbgValue()
11804 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); } in AddDbgLabel()
11836 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); in getSymbolFunctionGlobalAddress()
11837 auto *Module = MF->getFunction().getParent(); in getSymbolFunctionGlobalAddress()
11838 auto *Function = Module->getFunction(Symbol); in getSymbolFunctionGlobalAddress()
11844 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace()); in getSymbolFunctionGlobalAddress()
11855 //===----------------------------------------------------------------------===//
11857 //===----------------------------------------------------------------------===//
11861 return Const != nullptr && Const->isZero(); in isNullConstant()
11870 return Const != nullptr && Const->isZero() && !Const->isNegative(); in isNullFPConstant()
11875 return Const != nullptr && Const->isAllOnes(); in isAllOnesConstant()
11880 return Const != nullptr && Const->isOne(); in isOneConstant()
11885 return Const != nullptr && Const->isMinSignedValue(); in isMinSignedConstant()
11891 // TODO: Target-specific opcodes could be added. in isNeutralConstant()
11894 APInt Const = ConstV->getAPIntValue().trunc(V.getScalarValueSizeInBits()); in isNeutralConstant()
11922 return ConstFP->isZero() && in isNeutralConstant()
11923 (Flags.hasNoSignedZeros() || ConstFP->isNegative()); in isNeutralConstant()
11925 return OperandNo == 1 && ConstFP->isZero() && in isNeutralConstant()
11926 (Flags.hasNoSignedZeros() || !ConstFP->isNegative()); in isNeutralConstant()
11928 return ConstFP->isExactlyValue(1.0); in isNeutralConstant()
11930 return OperandNo == 1 && ConstFP->isExactlyValue(1.0); in isNeutralConstant()
11944 return ConstFP->isExactlyValue(NeutralAF); in isNeutralConstant()
11982 return C && (C->getAPIntValue().countr_one() >= NumBits); in isBitwiseNot()
12002 if (N->getOpcode() == ISD::SPLAT_VECTOR) { in isConstOrConstSplat()
12003 EVT VecEltVT = N->getValueType(0).getVectorElementType(); in isConstOrConstSplat()
12004 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) { in isConstOrConstSplat()
12005 EVT CVT = CN->getValueType(0); in isConstOrConstSplat()
12014 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); in isConstOrConstSplat()
12018 // TODO: Look into whether we should allow UndefElements in non-DemandedElts in isConstOrConstSplat()
12020 EVT CVT = CN->getValueType(0); in isConstOrConstSplat()
12048 BV->getConstantFPSplatNode(DemandedElts, &UndefElements); in isConstOrConstSplatFP()
12049 // TODO: Look into whether we should allow UndefElements in non-DemandedElts in isConstOrConstSplatFP()
12065 return C && C->isZero(); in isNullOrNullSplat()
12071 return C && C->isOne(); in isOneOrOneSplat()
12078 return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth; in isAllOnesOrAllOnesSplat()
12088 MemSDNodeBits.IsVolatile = MMO->isVolatile(); in MemSDNode()
12089 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); in MemSDNode()
12090 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); in MemSDNode()
12091 MemSDNodeBits.IsInvariant = MMO->isInvariant(); in MemSDNode()
12097 (!MMO->getType().isValid() || in MemSDNode()
12098 TypeSize::isKnownLE(memvt.getStoreSize(), MMO->getSize().getValue())) && in MemSDNode()
12102 /// Profile - Gather unique data for the node.
12122 /// getValueTypeList - Return a pointer to the specified value type.
12137 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
12148 --NUses; in hasNUsesOfValue()
12156 /// hasAnyUseOfValue - Return true if there is any use of the indicated
12168 /// isOnlyUserOf - Return true if this node is the only user of N. in isOnlyUserOf()
12171 for (const SDNode *User : N->uses()) { in isOnlyUserOf()
12184 for (const SDNode *User : N->uses()) { in areOnlyUsersOf()
12194 /// isOperand - Return true if this node is an operand of N.
12196 return is_contained(N->op_values(), *this); in isOperandOf()
12200 return any_of(N->op_values(), in isOperandOf()
12204 /// reachesChainWithoutSideEffects - Return true if this operand (which must
12206 /// side-effecting instructions on any chain path. In practice, this looks
12207 /// through token factors and non-volatile loads. In order to remain efficient,
12211 /// side-effects; SelectionDAG requires that all side-effects are represented
12225 if (is_contained((*this)->ops(), Dest)) { in reachesChainWithoutSideEffects()
12227 // Essentially, we reach the chain without side-effects if we could in reachesChainWithoutSideEffects()
12232 // use of Dest might force a side-effect between Dest and the current in reachesChainWithoutSideEffects()
12239 return llvm::all_of((*this)->ops(), [=](SDValue Op) { in reachesChainWithoutSideEffects()
12240 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); in reachesChainWithoutSideEffects()
12246 if (Ld->isUnordered()) in reachesChainWithoutSideEffects()
12247 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); in reachesChainWithoutSideEffects()
12260 this->Flags.intersectWith(Flags); in intersectFlagsWith()
12268 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || in matchBinOpReduction()
12269 !isNullConstant(Extract->getOperand(1))) in matchBinOpReduction()
12273 SDValue Op = Extract->getOperand(0); in matchBinOpReduction()
12279 // Floating-point reductions may require relaxed constraints on the final step in matchBinOpReduction()
12283 SDNodeFlags Flags = Op->getFlags(); in matchBinOpReduction()
12294 // Matching failed - attempt to see if we did enough stages that a partial in matchBinOpReduction()
12302 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) in matchBinOpReduction()
12314 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid, in matchBinOpReduction()
12343 if (!Shuffle || Shuffle->getOperand(0) != Op) in matchBinOpReduction()
12348 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) in matchBinOpReduction()
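// Standalone sketch of the shuffle-pyramid reduction the matcher above
// recognizes: at each stage the upper half of the active lanes is shuffled
// down and combined, halving the active width until lane 0 holds the
// result (a plain array stands in for the vector, an assumption).
#include <cassert>
int main() {
  int V[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  for (int Stage = 4; Stage >= 1; Stage /= 2)
    for (int i = 0; i != Stage; ++i)
      V[i] += V[Stage + i]; // shuffle lanes [Stage, 2*Stage) down, add
  assert(V[0] == 36);       // 1 + 2 + ... + 8
}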
12381 EVT VT = N->getValueType(0); in UnrollVectorOp()
12393 if (N->getNumValues() == 2) { in UnrollVectorOp()
12395 SmallVector<SDValue, 4> Operands(N->getNumOperands()); in UnrollVectorOp()
12396 EVT VT1 = N->getValueType(1); in UnrollVectorOp()
12401 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { in UnrollVectorOp()
12402 SDValue Operand = N->getOperand(j); in UnrollVectorOp()
12411 SDValue EltOp = getNode(N->getOpcode(), dl, {EltVT, EltVT1}, Operands); in UnrollVectorOp()
12421 assert(N->getNumValues() == 1 && in UnrollVectorOp()
12425 SmallVector<SDValue, 4> Operands(N->getNumOperands()); in UnrollVectorOp()
12429 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { in UnrollVectorOp()
12430 SDValue Operand = N->getOperand(j); in UnrollVectorOp()
12443 switch (N->getOpcode()) { in UnrollVectorOp()
12445 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, in UnrollVectorOp()
12446 N->getFlags())); in UnrollVectorOp()
12457 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], in UnrollVectorOp()
12462 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); in UnrollVectorOp()
12463 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, in UnrollVectorOp()
12479 unsigned Opcode = N->getOpcode(); in UnrollVectorOverflowOp()
12485 EVT ResVT = N->getValueType(0); in UnrollVectorOverflowOp()
12486 EVT OvVT = N->getValueType(1); in UnrollVectorOverflowOp()
12500 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); in UnrollVectorOverflowOp()
12501 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); in UnrollVectorOverflowOp()
12503 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); in UnrollVectorOverflowOp()
12518 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); in UnrollVectorOverflowOp()
12519 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); in UnrollVectorOverflowOp()
12531 if (LD->isVolatile() || Base->isVolatile()) in areNonVolatileConsecutiveLoads()
12534 if (!LD->isSimple()) in areNonVolatileConsecutiveLoads()
12536 if (LD->isIndexed() || Base->isIndexed()) in areNonVolatileConsecutiveLoads()
12538 if (LD->getChain() != Base->getChain()) in areNonVolatileConsecutiveLoads()
12540 EVT VT = LD->getMemoryVT(); in areNonVolatileConsecutiveLoads()
12553 /// InferPtrAlign - Infer alignment of a load / store address. Return
12559 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { in InferPtrAlign()
12560 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); in InferPtrAlign()
12573 FrameIdx = FI->getIndex(); in InferPtrAlign()
12577 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); in InferPtrAlign()
12604 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
12610 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); in GetSplitDestVTs()
12617 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
12636 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts); in GetDependentSplitDestVTs()
12648 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
12666 // fixed-width result vectors, that runtime scaling factor is 1. in SplitVector()
12675 // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl - %halfnumelts). in SplitEVL()
12678 "Expecting the mask to be an evenly-sized vector"); in SplitEVL()
12714 // getAddressSpace - Return the address space this GlobalAddress belongs to.
12716 return getGlobal()->getType()->getAddressSpace(); in getAddressSpace()
12721 return Val.MachineCPVal->getType(); in getType()
12722 return Val.ConstVal->getType(); in getType()
12746 assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); in isConstantSplat()
12750 unsigned i = IsBigEndian ? NumOps - 1 - j : j; in isConstantSplat()
12757 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); in isConstantSplat()
12759 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); in isConstantSplat()
12795 // vectors were normally power-of-2 sized. in isConstantSplat()
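// Sketch of the bit assembly above: the element constants are concatenated
// into one wide integer in endian order (the IsBigEndian index flip), which
// is then scanned for a repeating splat pattern. uint64_t stands in for the
// APInt insertBits calls (an assumption).
#include <cassert>
#include <cstdint>
int main() {
  uint8_t Elts[2] = {0x34, 0x12}; // v2i8 build_vector operands
  bool IsBigEndian = false;
  unsigned NumOps = 2, EltWidth = 8;
  uint64_t Bits = 0;
  for (unsigned j = 0; j != NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    Bits |= (uint64_t)Elts[i] << (j * EltWidth);
  }
  assert(Bits == 0x1234);
}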
12805 UndefElements->clear(); in getSplatValue()
12806 UndefElements->resize(NumOps); in getSplatValue()
12847 UndefElements->clear(); in getRepeatedSequence()
12848 UndefElements->resize(NumOps); in getRepeatedSequence()
12883 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern"); in getRepeatedSequence()
12924 const APFloat &APF = CN->getValueAPF(); in getConstantFPSplatPow2ToLog2Int()
12928 return -1; in getConstantFPSplatPow2ToLog2Int()
12932 return -1; in getConstantFPSplatPow2ToLog2Int()
12938 // Early-out if this contains anything but Undef/Constant/ConstantFP. in getConstantRawBits()
12961 SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits) in getConstantRawBits()
12962 : CFP->getValueAPF().bitcastToAPInt(); in getConstantRawBits()
12996 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1)); in recastRawBits()
13018 unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1)); in recastRawBits()
13046 APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start; in isConstantSequence()
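// Sketch of the arithmetic-sequence check suggested above: derive the
// stride from the first two elements and verify every later element
// matches Start + i * Stride (int64_t in place of APInt, an assumption).
#include <cassert>
#include <cstdint>
#include <vector>
static bool isConstantSequence(const std::vector<int64_t> &Elts) {
  if (Elts.size() < 2)
    return true;
  int64_t Stride = Elts[1] - Elts[0];
  for (size_t i = 2; i != Elts.size(); ++i)
    if (Elts[i] != Elts[0] + (int64_t)i * Stride)
      return false;
  return true;
}
int main() {
  assert(isConstantSequence({0, 2, 4, 6}));
  assert(!isConstantSequence({0, 2, 5}));
}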
13064 // Find the first non-undef value in the shuffle mask. in isSplatMask()
13075 // non-undef value. in isSplatMask()
13092 if (GA->getOpcode() == ISD::GlobalAddress && in isConstantIntBuildVectorOrConstantInt()
13093 TLI->isOffsetFoldingLegal(GA)) in isConstantIntBuildVectorOrConstantInt()
13118 assert(!Node->OperandList && "Node already has operands"); in createOperands()
13129 IsDivergent |= Ops[I].getNode()->isDivergent(); in createOperands()
13131 Node->NumOperands = Vals.size(); in createOperands()
13132 Node->OperandList = Ops; in createOperands()
13133 if (!TLI->isSDNodeAlwaysUniform(Node)) { in createOperands()
13134 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA); in createOperands()
13135 Node->SDNodeBits.IsDivergent = IsDivergent; in createOperands()
13144 unsigned SliceIdx = Vals.size() - Limit; in getTokenFactor()
13173 return getConstantFP(-0.0, DL, VT); in getNeutralElement()
13207 /// used to get or set floating-point state. They have one argument of pointer
13209 /// floating-point state. The value returned by such a function is ignored in the
13226 SDValue Callee = getExternalSymbol(TLI->getLibcallName(LC), in makeStateFunctionCall()
13227 TLI->getPointerTy(getDataLayout())); in makeStateFunctionCall()
13230 TLI->getLibcallCallingConv(LC), Type::getVoidTy(*getContext()), Callee, in makeStateFunctionCall()
13232 return TLI->LowerCallTo(CLI).second; in makeStateFunctionCall()
13242 // the iterator, hence the need to make a copy to prevent a use-after-free. in copyExtraInfo()
13243 NodeExtraInfo NEI = I->second; in copyExtraInfo()
13262 // In the first step pre-populate the visited set with the nodes reachable in copyExtraInfo()
13276 for (const SDValue &Op : N->op_values()) in copyExtraInfo()
13277 Self(Self, Op.getNode(), MaxDepth - 1); in copyExtraInfo()
13289 for (const SDValue &Op : N->op_values()) { in copyExtraInfo()
13311 VisitFrom(VisitFrom, N, MaxDepth - PrevDepth); in copyExtraInfo()
13319 // This should not happen - but if it did, that means the subgraph reachable in copyExtraInfo()
13324 assert(false && "From subgraph too complex - increase max. MaxDepth?"); in copyExtraInfo()
13325 // Best-effort fallback if assertions disabled. in copyExtraInfo()
13338 // If a node has already been visited on this depth-first walk, reject it as in checkForCyclesHelper()
13343 N->dumprFull(DAG); dbgs() << "\n"; in checkForCyclesHelper()
13347 for (const SDValue &Op : N->op_values()) in checkForCyclesHelper()
13373 checkForCycles(DAG->getRoot().getNode(), DAG, force); in checkForCycles()