//===-- RISCVTargetTransformInfo.cpp - RISC-V specific TTI ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVTargetTransformInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include <cmath>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "riscvtti"

static cl::opt<unsigned> RVVRegisterWidthLMUL(
    "riscv-v-register-bit-width-lmul",
    cl::desc(
        "The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
        "by autovectorized code. Fractional LMULs are not supported."),
    cl::init(2), cl::Hidden);

static cl::opt<unsigned> SLPMaxVF(
    "riscv-v-slp-max-vf",
    cl::desc(
        "Overrides result used for getMaximumVF query which is used "
        "exclusively by SLP vectorizer."),
    cl::Hidden);

InstructionCost
RISCVTTIImpl::getRISCVInstructionCost(ArrayRef<unsigned> OpCodes, MVT VT,
                                      TTI::TargetCostKind CostKind) {
  // Check if the type is valid for all CostKinds.
  if (!VT.isVector())
    return InstructionCost::getInvalid();
  size_t NumInstr = OpCodes.size();
  if (CostKind == TTI::TCK_CodeSize)
    return NumInstr;
  InstructionCost LMULCost = TLI->getLMULCost(VT);
  if ((CostKind != TTI::TCK_RecipThroughput) && (CostKind != TTI::TCK_Latency))
    return LMULCost * NumInstr;
  InstructionCost Cost = 0;
  for (auto Op : OpCodes) {
    switch (Op) {
    case RISCV::VRGATHER_VI:
      Cost += TLI->getVRGatherVICost(VT);
      break;
    case RISCV::VRGATHER_VV:
      Cost += TLI->getVRGatherVVCost(VT);
      break;
    case RISCV::VSLIDEUP_VI:
    case RISCV::VSLIDEDOWN_VI:
      Cost += TLI->getVSlideVICost(VT);
      break;
    case RISCV::VSLIDEUP_VX:
    case RISCV::VSLIDEDOWN_VX:
      Cost += TLI->getVSlideVXCost(VT);
      break;
    case RISCV::VREDMAX_VS:
    case RISCV::VREDMIN_VS:
    case RISCV::VREDMAXU_VS:
    case RISCV::VREDMINU_VS:
    case RISCV::VREDSUM_VS:
    case RISCV::VREDAND_VS:
    case RISCV::VREDOR_VS:
    case RISCV::VREDXOR_VS:
    case RISCV::VFREDMAX_VS:
    case RISCV::VFREDMIN_VS:
    case RISCV::VFREDUSUM_VS: {
      unsigned VL = VT.getVectorMinNumElements();
      if (!VT.isFixedLengthVector())
        VL *= *getVScaleForTuning();
      Cost += Log2_32_Ceil(VL);
      break;
    }
    case RISCV::VFREDOSUM_VS: {
      unsigned VL = VT.getVectorMinNumElements();
      if (!VT.isFixedLengthVector())
        VL *= *getVScaleForTuning();
      Cost += VL;
      break;
    }
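    // Illustrative example (assuming a tuning vscale of 2): reducing
    // <vscale x 4 x i32> gives VL = 4 * 2 = 8, so the unordered reductions
    // above cost Log2_32_Ceil(8) = 3 for the reduction tree, while the
    // ordered VFREDOSUM_VS costs the full 8, as it must chain through every
    // element.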
    case RISCV::VMV_X_S:
    case RISCV::VMV_S_X:
    case RISCV::VFMV_F_S:
    case RISCV::VFMV_S_F:
    case RISCV::VMOR_MM:
    case RISCV::VMXOR_MM:
    case RISCV::VMAND_MM:
    case RISCV::VMANDN_MM:
    case RISCV::VMNAND_MM:
    case RISCV::VCPOP_M:
    case RISCV::VFIRST_M:
      Cost += 1;
      break;
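      // (These scalar moves and mask-register ops act on a single element or
      //  a single mask register, so their cost is independent of LMUL.)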
    default:
      Cost += LMULCost;
    }
  }
  return Cost;
}

static InstructionCost getIntImmCostImpl(const DataLayout &DL,
                                         const RISCVSubtarget *ST,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind,
                                         bool FreeZeroes) {
  assert(Ty->isIntegerTy() &&
         "getIntImmCost can only estimate cost of materialising integers");

  // We have a Zero register, so 0 is always free.
  if (Imm == 0)
    return TTI::TCC_Free;

  // Otherwise, we check how many instructions it will take to materialise.
  return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty), *ST,
                                    /*CompressionCost=*/false, FreeZeroes);
}

InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                            TTI::TargetCostKind CostKind) {
  return getIntImmCostImpl(getDataLayout(), getST(), Imm, Ty, CostKind, false);
}

// Look for patterns of shift followed by AND that can be turned into a pair of
// shifts. We won't need to materialize an immediate for the AND so these can
// be considered free.
static bool canUseShiftPair(Instruction *Inst, const APInt &Imm) {
  uint64_t Mask = Imm.getZExtValue();
  auto *BO = dyn_cast<BinaryOperator>(Inst->getOperand(0));
  if (!BO || !BO->hasOneUse())
    return false;

  if (BO->getOpcode() != Instruction::Shl)
    return false;

  if (!isa<ConstantInt>(BO->getOperand(1)))
    return false;

  unsigned ShAmt = cast<ConstantInt>(BO->getOperand(1))->getZExtValue();
  // (and (shl x, c2), c1) will be matched to (srli (slli x, c2+c3), c3) if c1
  // is a mask shifted by c2 bits with c3 leading zeros.
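  // For example (illustrative): c1 = 0x00fffff0 is 0xfffff shifted left by
  // c2 = 4 with c3 = 40 leading zeros, so (and (shl x, 4), 0x00fffff0) can
  // instead be (srli (slli x, 44), 40) on RV64.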
  if (isShiftedMask_64(Mask)) {
    unsigned Trailing = llvm::countr_zero(Mask);
    if (ShAmt == Trailing)
      return true;
  }

  return false;
}

InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind,
                                                Instruction *Inst) {
  assert(Ty->isIntegerTy() &&
         "getIntImmCost can only estimate cost of materialising integers");

  // We have a Zero register, so 0 is always free.
  if (Imm == 0)
    return TTI::TCC_Free;

  // Some instructions in RISC-V can take a 12-bit immediate. Some of these are
  // commutative, in others the immediate comes from a specific argument index.
  bool Takes12BitImm = false;
  unsigned ImmArgIdx = ~0U;

  switch (Opcode) {
  case Instruction::GetElementPtr:
    // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will
    // split up large offsets in GEP into better parts than ConstantHoisting
    // can.
    return TTI::TCC_Free;
  case Instruction::Store: {
    // Use the materialization cost regardless of whether it's the address or
    // the value that is constant, except when the store is misaligned and
    // misaligned accesses are not legal (experience shows constant hoisting
    // can sometimes be harmful in such cases).
    if (Idx == 1 || !Inst)
      return getIntImmCostImpl(getDataLayout(), getST(), Imm, Ty, CostKind,
                               /*FreeZeroes=*/true);

    StoreInst *ST = cast<StoreInst>(Inst);
    if (!getTLI()->allowsMemoryAccessForAlignment(
            Ty->getContext(), DL, getTLI()->getValueType(DL, Ty),
            ST->getPointerAddressSpace(), ST->getAlign()))
      return TTI::TCC_Free;

    return getIntImmCostImpl(getDataLayout(), getST(), Imm, Ty, CostKind,
                             /*FreeZeroes=*/true);
  }
  case Instruction::Load:
    // If the address is a constant, use the materialization cost.
    return getIntImmCost(Imm, Ty, CostKind);
  case Instruction::And:
    // zext.h
    if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb())
      return TTI::TCC_Free;
    // zext.w
    if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
      return TTI::TCC_Free;
    // bclri
    if (ST->hasStdExtZbs() && (~Imm).isPowerOf2())
      return TTI::TCC_Free;
    if (Inst && Idx == 1 && Imm.getBitWidth() <= ST->getXLen() &&
        canUseShiftPair(Inst, Imm))
      return TTI::TCC_Free;
    Takes12BitImm = true;
    break;
  case Instruction::Add:
    Takes12BitImm = true;
    break;
  case Instruction::Or:
  case Instruction::Xor:
    // bseti/binvi
    if (ST->hasStdExtZbs() && Imm.isPowerOf2())
      return TTI::TCC_Free;
    Takes12BitImm = true;
    break;
  case Instruction::Mul:
    // Power of 2 is a shift. Negated power of 2 is a shift and a negate.
    if (Imm.isPowerOf2() || Imm.isNegatedPowerOf2())
      return TTI::TCC_Free;
    // One more or less than a power of 2 can use SLLI+ADD/SUB.
    if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2())
      return TTI::TCC_Free;
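    // For example (illustrative): x * 8 lowers to a single slli, and x * 7
    // can lower to slli+sub, so neither needs the immediate materialized.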
    // FIXME: There is no MULI instruction.
    Takes12BitImm = true;
    break;
  case Instruction::Sub:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Takes12BitImm = true;
    ImmArgIdx = 1;
    break;
  default:
    break;
  }

  if (Takes12BitImm) {
    // Check immediate is the correct argument...
    if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) {
      // ... and fits into the 12-bit immediate.
      if (Imm.getSignificantBits() <= 64 &&
          getTLI()->isLegalAddImmediate(Imm.getSExtValue())) {
        return TTI::TCC_Free;
      }
    }

    // Otherwise, use the full materialisation cost.
    return getIntImmCost(Imm, Ty, CostKind);
  }

  // By default, prevent hoisting.
  return TTI::TCC_Free;
}

InstructionCost
RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  // Prevent hoisting in unknown cases.
  return TTI::TCC_Free;
}

bool RISCVTTIImpl::hasActiveVectorLength(unsigned, Type *DataTy, Align) const {
  return ST->hasVInstructions();
}

TargetTransformInfo::PopcntSupportKind
RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  return ST->hasStdExtZbb() || ST->hasVendorXCVbitmanip()
             ? TTI::PSK_FastHardware
             : TTI::PSK_Software;
}

bool RISCVTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
  // Currently, the ExpandReductions pass can't expand scalable-vector
  // reductions, but we still request expansion as RVV doesn't support certain
  // reductions and the SelectionDAG can't legalize them either.
  switch (II->getIntrinsicID()) {
  default:
    return false;
  // These reductions have no equivalent in RVV
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_fmul:
    return true;
  }
}

std::optional<unsigned> RISCVTTIImpl::getMaxVScale() const {
  if (ST->hasVInstructions())
    return ST->getRealMaxVLen() / RISCV::RVVBitsPerBlock;
  return BaseT::getMaxVScale();
}

std::optional<unsigned> RISCVTTIImpl::getVScaleForTuning() const {
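  // Illustrative example: with a guaranteed minimum VLEN of 256 bits, this
  // returns 256 / RVVBitsPerBlock (64) = 4.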
  if (ST->hasVInstructions())
    if (unsigned MinVLen = ST->getRealMinVLen();
        MinVLen >= RISCV::RVVBitsPerBlock)
      return MinVLen / RISCV::RVVBitsPerBlock;
  return BaseT::getVScaleForTuning();
}

TypeSize
RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned LMUL =
      llvm::bit_floor(std::clamp<unsigned>(RVVRegisterWidthLMUL, 1, 8));
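  // Illustrative example: with the default riscv-v-register-bit-width-lmul=2
  // on a Zvl128b target, fixed-width vector queries report 2 * 128 = 256
  // bits and scalable queries report 2 * 64 = 128 bits (per vscale).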
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->getXLen());
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(
        ST->useRVVForFixedLengthVectors() ? LMUL * ST->getRealMinVLen() : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(
        (ST->hasVInstructions() &&
         ST->getRealMinVLen() >= RISCV::RVVBitsPerBlock)
            ? LMUL * RISCV::RVVBitsPerBlock
            : 0);
  }

  llvm_unreachable("Unsupported register kind");
}

InstructionCost
RISCVTTIImpl::getConstantPoolLoadCost(Type *Ty, TTI::TargetCostKind CostKind) {
  // Add a cost of address generation + the cost of the load. The address
  // is expected to be a PC relative offset to a constant pool entry
  // using auipc/addi.
  return 2 + getMemoryOpCost(Instruction::Load, Ty, DL.getABITypeAlign(Ty),
                             /*AddressSpace=*/0, CostKind);
}

static VectorType *getVRGatherIndexType(MVT DataVT, const RISCVSubtarget &ST,
                                        LLVMContext &C) {
  assert((DataVT.getScalarSizeInBits() != 8 ||
          DataVT.getVectorNumElements() <= 256) && "unhandled case in lowering");
  MVT IndexVT = DataVT.changeTypeToInteger();
  if (IndexVT.getScalarType().bitsGT(ST.getXLenVT()))
    IndexVT = IndexVT.changeVectorElementType(MVT::i16);
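  // (Illustrative note: i16 indices can address far more lanes than any
  //  vector type handled here, so the narrowing above is safe.)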
  return cast<VectorType>(EVT(IndexVT).getTypeForEVT(C));
}

InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                             VectorType *Tp, ArrayRef<int> Mask,
                                             TTI::TargetCostKind CostKind,
                                             int Index, VectorType *SubTp,
                                             ArrayRef<const Value *> Args,
                                             const Instruction *CxtI) {
  Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);

  // First, handle cases where having a fixed length vector enables us to
  // give a more accurate cost than falling back to generic scalable codegen.
  // TODO: Each of these cases hints at a modeling gap around scalable vectors.
  if (isa<FixedVectorType>(Tp)) {
    switch (Kind) {
    default:
      break;
    case TTI::SK_PermuteSingleSrc: {
      if (Mask.size() >= 2 && LT.second.isFixedLengthVector()) {
        MVT EltTp = LT.second.getVectorElementType();
        // If the element size is smaller than ELEN, interleave and
        // deinterleave shuffles of 2 vectors can be lowered into the
        // following sequences.
        if (EltTp.getScalarSizeInBits() < ST->getELen()) {
          // Example sequence:
          //   vsetivli     zero, 4, e8, mf4, ta, ma (ignored)
          //   vwaddu.vv    v10, v8, v9
          //   li       a0, -1                   (ignored)
          //   vwmaccu.vx   v10, a0, v9
          if (ShuffleVectorInst::isInterleaveMask(Mask, 2, Mask.size()))
            return 2 * LT.first * TLI->getLMULCost(LT.second);

          if (Mask[0] == 0 || Mask[0] == 1) {
            auto DeinterleaveMask = createStrideMask(Mask[0], 2, Mask.size());
            // Example sequence:
            //   vnsrl.wi   v10, v8, 0
            if (equal(DeinterleaveMask, Mask))
              return LT.first * getRISCVInstructionCost(RISCV::VNSRL_WI,
                                                        LT.second, CostKind);
          }
        }
      }
      // vrgather + cost of generating the mask constant.
      // We model this for an unknown mask with a single vrgather.
      if (LT.second.isFixedLengthVector() && LT.first == 1 &&
          (LT.second.getScalarSizeInBits() != 8 ||
           LT.second.getVectorNumElements() <= 256)) {
        VectorType *IdxTy = getVRGatherIndexType(LT.second, *ST, Tp->getContext());
        InstructionCost IndexCost = getConstantPoolLoadCost(IdxTy, CostKind);
        return IndexCost +
               getRISCVInstructionCost(RISCV::VRGATHER_VV, LT.second, CostKind);
      }
      [[fallthrough]];
    }
    case TTI::SK_Transpose:
    case TTI::SK_PermuteTwoSrc: {
      // 2 x (vrgather + cost of generating the mask constant) + cost of mask
      // register for the second vrgather. We model this for an unknown
      // (shuffle) mask.
      if (LT.second.isFixedLengthVector() && LT.first == 1 &&
          (LT.second.getScalarSizeInBits() != 8 ||
           LT.second.getVectorNumElements() <= 256)) {
        auto &C = Tp->getContext();
        auto EC = Tp->getElementCount();
        VectorType *IdxTy = getVRGatherIndexType(LT.second, *ST, C);
        VectorType *MaskTy = VectorType::get(IntegerType::getInt1Ty(C), EC);
        InstructionCost IndexCost = getConstantPoolLoadCost(IdxTy, CostKind);
        InstructionCost MaskCost = getConstantPoolLoadCost(MaskTy, CostKind);
        return 2 * IndexCost +
               getRISCVInstructionCost({RISCV::VRGATHER_VV, RISCV::VRGATHER_VV},
                                       LT.second, CostKind) +
               MaskCost;
      }
      [[fallthrough]];
    }
    case TTI::SK_Select: {
      // We are going to permute multiple sources and the result will be in
      // multiple destinations. We provide an accurate cost only for splits
      // where the element type remains the same.
      if (!Mask.empty() && LT.first.isValid() && LT.first != 1 &&
          LT.second.isFixedLengthVector() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              Tp->getElementType()->getPrimitiveSizeInBits() &&
          LT.second.getVectorNumElements() <
              cast<FixedVectorType>(Tp)->getNumElements() &&
          divideCeil(Mask.size(),
                     cast<FixedVectorType>(Tp)->getNumElements()) ==
              static_cast<unsigned>(*LT.first.getValue())) {
        unsigned NumRegs = *LT.first.getValue();
        unsigned VF = cast<FixedVectorType>(Tp)->getNumElements();
        unsigned SubVF = PowerOf2Ceil(VF / NumRegs);
        auto *SubVecTy = FixedVectorType::get(Tp->getElementType(), SubVF);

        InstructionCost Cost = 0;
        for (unsigned I = 0; I < NumRegs; ++I) {
          bool IsSingleVector = true;
          SmallVector<int> SubMask(SubVF, PoisonMaskElem);
          transform(Mask.slice(I * SubVF,
                               I == NumRegs - 1 ? Mask.size() % SubVF : SubVF),
                    SubMask.begin(), [&](int I) {
                      bool SingleSubVector = I / VF == 0;
                      IsSingleVector &= SingleSubVector;
                      return (SingleSubVector ? 0 : 1) * SubVF + I % VF;
                    });
          Cost += getShuffleCost(IsSingleVector ? TTI::SK_PermuteSingleSrc
                                                : TTI::SK_PermuteTwoSrc,
                                 SubVecTy, SubMask, CostKind, 0, nullptr);
        }
        return Cost;
      }
      break;
    }
    }
  };

  // Handle scalable vectors (and fixed vectors legalized to scalable vectors).
  switch (Kind) {
  default:
    // Fallthrough to generic handling.
    // TODO: Most of these cases will return getInvalid in generic code, and
    // must be implemented here.
    break;
  case TTI::SK_ExtractSubvector:
    // Extract at zero is always a subregister extract
    if (Index == 0)
      return TTI::TCC_Free;

    // If we're extracting a subvector of at most m1 size at a sub-register
    // boundary - which unfortunately we need exact vlen to identify - this is
    // a subregister extract at worst and thus won't require a vslidedown.
    // TODO: Extend for aligned m2, m4 subvector extracts
    // TODO: Extend for misaligned (but contained) extracts
    // TODO: Extend for scalable subvector types
    if (std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
        SubLT.second.isValid() && SubLT.second.isFixedLengthVector()) {
      const unsigned MinVLen = ST->getRealMinVLen();
      const unsigned MaxVLen = ST->getRealMaxVLen();
      if (MinVLen == MaxVLen &&
          SubLT.second.getScalarSizeInBits() * Index % MinVLen == 0 &&
          SubLT.second.getSizeInBits() <= MinVLen)
        return TTI::TCC_Free;
    }

    // Example sequence:
    // vsetivli     zero, 4, e8, mf2, tu, ma (ignored)
    // vslidedown.vi  v8, v9, 2
    return LT.first *
           getRISCVInstructionCost(RISCV::VSLIDEDOWN_VI, LT.second, CostKind);
  case TTI::SK_InsertSubvector:
    // Example sequence:
    // vsetivli     zero, 4, e8, mf2, tu, ma (ignored)
    // vslideup.vi  v8, v9, 2
    return LT.first *
           getRISCVInstructionCost(RISCV::VSLIDEUP_VI, LT.second, CostKind);
  case TTI::SK_Select: {
    // Example sequence:
    // li           a0, 90
    // vsetivli     zero, 8, e8, mf2, ta, ma (ignored)
    // vmv.s.x      v0, a0
    // vmerge.vvm   v8, v9, v8, v0
    // We use 2 for the cost of the mask materialization as this is the true
    // cost for small masks and most shuffles are small.  At worst, this cost
    // should be a very small constant for the constant pool load.  As such,
    // we may bias towards large selects slightly more than truly warranted.
    return LT.first *
           (1 + getRISCVInstructionCost({RISCV::VMV_S_X, RISCV::VMERGE_VVM},
                                        LT.second, CostKind));
  }
  case TTI::SK_Broadcast: {
    bool HasScalar = (Args.size() > 0) && (Operator::getOpcode(Args[0]) ==
                                           Instruction::InsertElement);
    if (LT.second.getScalarSizeInBits() == 1) {
      if (HasScalar) {
        // Example sequence:
        //   andi a0, a0, 1
        //   vsetivli zero, 2, e8, mf8, ta, ma (ignored)
        //   vmv.v.x v8, a0
        //   vmsne.vi v0, v8, 0
        return LT.first *
               (1 + getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
                                            LT.second, CostKind));
      }
      // Example sequence:
      //   vsetivli  zero, 2, e8, mf8, ta, mu (ignored)
      //   vmv.v.i v8, 0
      //   vmerge.vim      v8, v8, 1, v0
      //   vmv.x.s a0, v8
      //   andi    a0, a0, 1
      //   vmv.v.x v8, a0
      //   vmsne.vi  v0, v8, 0

      return LT.first *
             (1 + getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM,
                                           RISCV::VMV_X_S, RISCV::VMV_V_X,
                                           RISCV::VMSNE_VI},
                                          LT.second, CostKind));
    }

    if (HasScalar) {
      // Example sequence:
      //   vmv.v.x v8, a0
      return LT.first *
             getRISCVInstructionCost(RISCV::VMV_V_X, LT.second, CostKind);
    }

    // Example sequence:
    //   vrgather.vi     v9, v8, 0
    return LT.first *
           getRISCVInstructionCost(RISCV::VRGATHER_VI, LT.second, CostKind);
  }
  case TTI::SK_Splice: {
    // vslidedown+vslideup.
    // TODO: Multiplying by LT.first implies this legalizes into multiple copies
    // of similar code, but I think we expand through memory.
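    // Example sequence for a splice with a small positive Index
    // (illustrative):
    //   vslidedown.vi v8, v8, Index
    //   vslideup.vx   v8, v9, a0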
    unsigned Opcodes[2] = {RISCV::VSLIDEDOWN_VX, RISCV::VSLIDEUP_VX};
    if (Index >= 0 && Index < 32)
      Opcodes[0] = RISCV::VSLIDEDOWN_VI;
    else if (Index < 0 && Index > -32)
      Opcodes[1] = RISCV::VSLIDEUP_VI;
    return LT.first * getRISCVInstructionCost(Opcodes, LT.second, CostKind);
  }
  case TTI::SK_Reverse: {
    // TODO: Cases to improve here:
    // * Illegal vector types
    // * i64 on RV32
    // * i1 vector
    // At low LMUL, most of the cost is producing the vrgather index register.
    // At high LMUL, the cost of the vrgather itself will dominate.
    // Example sequence:
    //   csrr a0, vlenb
    //   srli a0, a0, 3
    //   addi a0, a0, -1
    //   vsetvli a1, zero, e8, mf8, ta, mu (ignored)
    //   vid.v v9
    //   vrsub.vx v10, v9, a0
    //   vrgather.vv v9, v8, v10
    InstructionCost LenCost = 3;
    if (LT.second.isFixedLengthVector())
      // vrsub.vi has a 5 bit immediate field, otherwise an li suffices
      LenCost = isInt<5>(LT.second.getVectorNumElements() - 1) ? 0 : 1;
    unsigned Opcodes[] = {RISCV::VID_V, RISCV::VRSUB_VX, RISCV::VRGATHER_VV};
    if (LT.second.isFixedLengthVector() &&
        isInt<5>(LT.second.getVectorNumElements() - 1))
      Opcodes[1] = RISCV::VRSUB_VI;
    InstructionCost GatherCost =
        getRISCVInstructionCost(Opcodes, LT.second, CostKind);
    // Reversing a mask (i1) vector additionally requires an extend and truncate
    InstructionCost ExtendCost = Tp->getElementType()->isIntegerTy(1) ? 3 : 0;
    return LT.first * (LenCost + GatherCost + ExtendCost);
  }
  }
  return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
}

InstructionCost
RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind) {
  if (!isLegalMaskedLoadStore(Src, Alignment) ||
      CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);

  return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
}

InstructionCost RISCVTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (isa<ScalableVectorType>(VecTy) && Factor != 2)
    return InstructionCost::getInvalid();

  // The interleaved memory access pass will lower interleaved memory ops (i.e.
  // a load or store followed by a specific shuffle) to vlseg/vsseg
  // intrinsics. In those cases we can treat it as if it's just one (legal)
  // memory op.
  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    auto *VTy = cast<VectorType>(VecTy);
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VTy);
    // Need to make sure the type hasn't been scalarized
    if (LT.second.isVector()) {
      auto *SubVecTy =
          VectorType::get(VTy->getElementType(),
                          VTy->getElementCount().divideCoefficientBy(Factor));

      if (VTy->getElementCount().isKnownMultipleOf(Factor) &&
          TLI->isLegalInterleavedAccessType(SubVecTy, Factor, Alignment,
                                            AddressSpace, DL)) {
        // FIXME: We use the memory op cost of the *legalized* type here,
        // because getMemoryOpCost returns a really expensive cost for types
        // like <6 x i8>, which show up when doing interleaves of Factor=3
        // etc. Should the memory op cost of these be cheaper?
        auto *LegalVTy = VectorType::get(VTy->getElementType(),
                                         LT.second.getVectorElementCount());
        InstructionCost LegalMemCost = getMemoryOpCost(
            Opcode, LegalVTy, Alignment, AddressSpace, CostKind);
        return LT.first + LegalMemCost;
      }
    }
  }

  // TODO: Return the cost of interleaved accesses for scalable vector when
  // unable to convert to segment accesses instructions.
  if (isa<ScalableVectorType>(VecTy))
    return InstructionCost::getInvalid();

  auto *FVTy = cast<FixedVectorType>(VecTy);
  InstructionCost MemCost =
      getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace, CostKind);
  unsigned VF = FVTy->getNumElements() / Factor;

  // An interleaved load will look like this for Factor=3:
  // %wide.vec = load <12 x i32>, ptr %3, align 4
  // %strided.vec = shufflevector %wide.vec, poison, <4 x i32> <stride mask>
  // %strided.vec1 = shufflevector %wide.vec, poison, <4 x i32> <stride mask>
  // %strided.vec2 = shufflevector %wide.vec, poison, <4 x i32> <stride mask>
  if (Opcode == Instruction::Load) {
    InstructionCost Cost = MemCost;
    for (unsigned Index : Indices) {
      FixedVectorType *SubVecTy =
          FixedVectorType::get(FVTy->getElementType(), VF * Factor);
      auto Mask = createStrideMask(Index, Factor, VF);
      InstructionCost ShuffleCost =
          getShuffleCost(TTI::ShuffleKind::SK_PermuteSingleSrc, SubVecTy, Mask,
                         CostKind, 0, nullptr, {});
      Cost += ShuffleCost;
    }
    return Cost;
  }

  // TODO: Model for NF > 2
  // We'll need to enhance getShuffleCost to model shuffles that are just
  // inserts and extracts into subvectors, since they won't have the full cost
  // of a vrgather.
  // An interleaved store for 3 vectors of 4 lanes will look like
  // %11 = shufflevector <4 x i32> %4, <4 x i32> %6, <8 x i32> <0...7>
  // %12 = shufflevector <4 x i32> %9, <4 x i32> poison, <8 x i32> <0...3>
  // %13 = shufflevector <8 x i32> %11, <8 x i32> %12, <12 x i32> <0...11>
  // %interleaved.vec = shufflevector %13, poison, <12 x i32> <interleave mask>
  // store <12 x i32> %interleaved.vec, ptr %10, align 4
  if (Factor != 2)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(Opcode == Instruction::Store && "Opcode must be a store");
  // For an interleaving store of 2 vectors, we perform one large interleaving
  // shuffle that goes into the wide store
  auto Mask = createInterleaveMask(VF, Factor);
  InstructionCost ShuffleCost =
      getShuffleCost(TTI::ShuffleKind::SK_PermuteSingleSrc, FVTy, Mask,
                     CostKind, 0, nullptr, {});
  return MemCost + ShuffleCost;
}

InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(DataTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(DataTy, Align(Alignment))))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  // Cost is proportional to the number of memory operations implied.  For
  // scalable vectors, we use an estimate on that number since we don't
  // know exactly what VL will be.
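  // Illustrative example (assuming a tuning vscale of 2): a gather of
  // <vscale x 4 x i32> is modeled as 8 independent scalar memory operations.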
  auto &VTy = *cast<VectorType>(DataTy);
  InstructionCost MemOpCost =
      getMemoryOpCost(Opcode, VTy.getElementType(), Alignment, 0, CostKind,
                      {TTI::OK_AnyValue, TTI::OP_None}, I);
  unsigned NumLoads = getEstimatedVLFor(&VTy);
  return NumLoads * MemOpCost;
}

InstructionCost RISCVTTIImpl::getStridedMemoryOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  if (((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
       !isLegalStridedLoadStore(DataTy, Alignment)) ||
      (Opcode != Instruction::Load && Opcode != Instruction::Store))
    return BaseT::getStridedMemoryOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  if (CostKind == TTI::TCK_CodeSize)
    return TTI::TCC_Basic;

  // Cost is proportional to the number of memory operations implied.  For
  // scalable vectors, we use an estimate on that number since we don't
  // know exactly what VL will be.
  auto &VTy = *cast<VectorType>(DataTy);
  InstructionCost MemOpCost =
      getMemoryOpCost(Opcode, VTy.getElementType(), Alignment, 0, CostKind,
                      {TTI::OK_AnyValue, TTI::OP_None}, I);
  unsigned NumLoads = getEstimatedVLFor(&VTy);
  return NumLoads * MemOpCost;
}

// Currently, these represent both throughput and codesize costs
// for the respective intrinsics.  The costs in this table are simply
// instruction counts with the following adjustments made:
// * One vsetvli is considered free.
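// For example (illustrative), {Intrinsic::floor, MVT::f32, 9} reflects the
// roughly nine-instruction convert-and-round expansion used to lower a
// vector floor, with the leading vsetvli not counted.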
765bdd1243dSDimitry Andric static const CostTblEntry VectorIntrinsicCostTable[]{
7667a6dacacSDimitry Andric     {Intrinsic::floor, MVT::f32, 9},
7677a6dacacSDimitry Andric     {Intrinsic::floor, MVT::f64, 9},
7687a6dacacSDimitry Andric     {Intrinsic::ceil, MVT::f32, 9},
7697a6dacacSDimitry Andric     {Intrinsic::ceil, MVT::f64, 9},
7707a6dacacSDimitry Andric     {Intrinsic::trunc, MVT::f32, 7},
7717a6dacacSDimitry Andric     {Intrinsic::trunc, MVT::f64, 7},
7727a6dacacSDimitry Andric     {Intrinsic::round, MVT::f32, 9},
7737a6dacacSDimitry Andric     {Intrinsic::round, MVT::f64, 9},
7747a6dacacSDimitry Andric     {Intrinsic::roundeven, MVT::f32, 9},
7757a6dacacSDimitry Andric     {Intrinsic::roundeven, MVT::f64, 9},
7767a6dacacSDimitry Andric     {Intrinsic::rint, MVT::f32, 7},
7777a6dacacSDimitry Andric     {Intrinsic::rint, MVT::f64, 7},
7787a6dacacSDimitry Andric     {Intrinsic::lrint, MVT::i32, 1},
7797a6dacacSDimitry Andric     {Intrinsic::lrint, MVT::i64, 1},
7807a6dacacSDimitry Andric     {Intrinsic::llrint, MVT::i64, 1},
7817a6dacacSDimitry Andric     {Intrinsic::nearbyint, MVT::f32, 9},
7827a6dacacSDimitry Andric     {Intrinsic::nearbyint, MVT::f64, 9},
7837a6dacacSDimitry Andric     {Intrinsic::bswap, MVT::i16, 3},
7847a6dacacSDimitry Andric     {Intrinsic::bswap, MVT::i32, 12},
7857a6dacacSDimitry Andric     {Intrinsic::bswap, MVT::i64, 31},
7867a6dacacSDimitry Andric     {Intrinsic::vp_bswap, MVT::i16, 3},
7877a6dacacSDimitry Andric     {Intrinsic::vp_bswap, MVT::i32, 12},
7887a6dacacSDimitry Andric     {Intrinsic::vp_bswap, MVT::i64, 31},
7897a6dacacSDimitry Andric     {Intrinsic::vp_fshl, MVT::i8, 7},
7907a6dacacSDimitry Andric     {Intrinsic::vp_fshl, MVT::i16, 7},
7917a6dacacSDimitry Andric     {Intrinsic::vp_fshl, MVT::i32, 7},
7927a6dacacSDimitry Andric     {Intrinsic::vp_fshl, MVT::i64, 7},
7937a6dacacSDimitry Andric     {Intrinsic::vp_fshr, MVT::i8, 7},
7947a6dacacSDimitry Andric     {Intrinsic::vp_fshr, MVT::i16, 7},
7957a6dacacSDimitry Andric     {Intrinsic::vp_fshr, MVT::i32, 7},
7967a6dacacSDimitry Andric     {Intrinsic::vp_fshr, MVT::i64, 7},
7977a6dacacSDimitry Andric     {Intrinsic::bitreverse, MVT::i8, 17},
7987a6dacacSDimitry Andric     {Intrinsic::bitreverse, MVT::i16, 24},
7997a6dacacSDimitry Andric     {Intrinsic::bitreverse, MVT::i32, 33},
8007a6dacacSDimitry Andric     {Intrinsic::bitreverse, MVT::i64, 52},
8017a6dacacSDimitry Andric     {Intrinsic::vp_bitreverse, MVT::i8, 17},
8027a6dacacSDimitry Andric     {Intrinsic::vp_bitreverse, MVT::i16, 24},
8037a6dacacSDimitry Andric     {Intrinsic::vp_bitreverse, MVT::i32, 33},
8047a6dacacSDimitry Andric     {Intrinsic::vp_bitreverse, MVT::i64, 52},
8057a6dacacSDimitry Andric     {Intrinsic::ctpop, MVT::i8, 12},
8067a6dacacSDimitry Andric     {Intrinsic::ctpop, MVT::i16, 19},
8077a6dacacSDimitry Andric     {Intrinsic::ctpop, MVT::i32, 20},
8087a6dacacSDimitry Andric     {Intrinsic::ctpop, MVT::i64, 21},
8097a6dacacSDimitry Andric     {Intrinsic::vp_ctpop, MVT::i8, 12},
8107a6dacacSDimitry Andric     {Intrinsic::vp_ctpop, MVT::i16, 19},
8117a6dacacSDimitry Andric     {Intrinsic::vp_ctpop, MVT::i32, 20},
8127a6dacacSDimitry Andric     {Intrinsic::vp_ctpop, MVT::i64, 21},
8137a6dacacSDimitry Andric     {Intrinsic::vp_ctlz, MVT::i8, 19},
8147a6dacacSDimitry Andric     {Intrinsic::vp_ctlz, MVT::i16, 28},
8157a6dacacSDimitry Andric     {Intrinsic::vp_ctlz, MVT::i32, 31},
8167a6dacacSDimitry Andric     {Intrinsic::vp_ctlz, MVT::i64, 35},
8177a6dacacSDimitry Andric     {Intrinsic::vp_cttz, MVT::i8, 16},
8187a6dacacSDimitry Andric     {Intrinsic::vp_cttz, MVT::i16, 23},
8197a6dacacSDimitry Andric     {Intrinsic::vp_cttz, MVT::i32, 24},
8207a6dacacSDimitry Andric     {Intrinsic::vp_cttz, MVT::i64, 25},
821bdd1243dSDimitry Andric };
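// As an illustration of how the table is consumed: bswap on i32 elements is
// modeled as 12 vector instructions per legalized register group, and the
// CostTableLookup in getIntrinsicInstrCost below multiplies each entry by
// LT.first when legalization splits the type.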

static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) {
  switch (ID) {
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
  case Intrinsic::VPID:                                                        \
    return ISD::VPSD;
#include "llvm/IR/VPIntrinsics.def"
#undef HELPER_MAP_VPID_TO_VPSD
  }
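  // Each VP intrinsic registered with a VP SDNode maps directly above, e.g.
  // Intrinsic::vp_fadd -> ISD::VP_FADD; IDs without one fall through here.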
  return ISD::DELETED_NODE;
}

InstructionCost
RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                    TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::round:
  case Intrinsic::roundeven: {
    // These all use the same code.
    auto LT = getTypeLegalizationCost(RetTy);
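    // The 8 below is a coarse per-piece estimate for the custom-lowered
    // scalar sequence (roughly: a convert to integer and back, plus the
    // compares and copysign fixups), not an exact instruction count.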
    if (!LT.second.isVector() && TLI->isOperationCustom(ISD::FCEIL, LT.second))
      return LT.first * 8;
    break;
  }
  case Intrinsic::umin:
  case Intrinsic::umax:
  case Intrinsic::smin:
  case Intrinsic::smax: {
    auto LT = getTypeLegalizationCost(RetTy);
    if (LT.second.isScalarInteger() && ST->hasStdExtZbb())
      return LT.first;

    if (ST->hasVInstructions() && LT.second.isVector()) {
      unsigned Op;
      switch (ICA.getID()) {
      case Intrinsic::umin:
        Op = RISCV::VMINU_VV;
        break;
      case Intrinsic::umax:
        Op = RISCV::VMAXU_VV;
        break;
      case Intrinsic::smin:
        Op = RISCV::VMIN_VV;
        break;
      case Intrinsic::smax:
        Op = RISCV::VMAX_VV;
        break;
      }
      return LT.first * getRISCVInstructionCost(Op, LT.second, CostKind);
    }
    break;
  }
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::fabs:
  case Intrinsic::sqrt: {
    auto LT = getTypeLegalizationCost(RetTy);
    if (ST->hasVInstructions() && LT.second.isVector())
      return LT.first;
    break;
  }
  case Intrinsic::ctpop: {
    auto LT = getTypeLegalizationCost(RetTy);
    if (ST->hasVInstructions() && ST->hasStdExtZvbb() && LT.second.isVector())
      return LT.first;
    break;
  }
  case Intrinsic::abs: {
    auto LT = getTypeLegalizationCost(RetTy);
    if (ST->hasVInstructions() && LT.second.isVector()) {
      // vrsub.vi v10, v8, 0
      // vmax.vv v8, v8, v10
      return LT.first * 2;
    }
    break;
  }
  case Intrinsic::get_active_lane_mask: {
    if (ST->hasVInstructions()) {
      Type *ExpRetTy = VectorType::get(
          ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
      auto LT = getTypeLegalizationCost(ExpRetTy);

      // vid.v   v8  // considered hoisted
      // vsaddu.vx   v8, v8, a0
      // vmsltu.vx   v0, v8, a1
      return LT.first *
             getRISCVInstructionCost({RISCV::VSADDU_VX, RISCV::VMSLTU_VX},
                                     LT.second, CostKind);
    }
    break;
  }
  // TODO: add more intrinsics
  case Intrinsic::experimental_stepvector: {
    auto LT = getTypeLegalizationCost(RetTy);
    // Legalisation of illegal types involves an `index' instruction plus
    // (LT.first - 1) vector adds.
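    // e.g. with LT.first == 2, that is one vid.v plus one vadd.vx to offset
    // the second register group.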
    if (ST->hasVInstructions())
      return getRISCVInstructionCost(RISCV::VID_V, LT.second, CostKind) +
             (LT.first - 1) *
                 getRISCVInstructionCost(RISCV::VADD_VX, LT.second, CostKind);
    return 1 + (LT.first - 1);
  }
  case Intrinsic::experimental_cttz_elts: {
    Type *ArgTy = ICA.getArgTypes()[0];
    EVT ArgType = TLI->getValueType(DL, ArgTy, true);
    if (getTLI()->shouldExpandCttzElements(ArgType))
      break;
    InstructionCost Cost = getRISCVInstructionCost(
        RISCV::VFIRST_M, getTypeLegalizationCost(ArgTy).second, CostKind);

    // If zero_is_poison is false, then we will generate additional
    // cmp + select instructions to convert -1 to EVL.
    Type *BoolTy = Type::getInt1Ty(RetTy->getContext());
    if (ICA.getArgs().size() > 1 &&
        cast<ConstantInt>(ICA.getArgs()[1])->isZero())
      Cost += getCmpSelInstrCost(Instruction::ICmp, BoolTy, RetTy,
                                 CmpInst::ICMP_SLT, CostKind) +
              getCmpSelInstrCost(Instruction::Select, RetTy, BoolTy,
                                 CmpInst::BAD_ICMP_PREDICATE, CostKind);

    return Cost;
  }
  case Intrinsic::vp_rint: {
    // The RISC-V target uses at least 5 instructions to lower rounding
    // intrinsics.
    unsigned Cost = 5;
    auto LT = getTypeLegalizationCost(RetTy);
    if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
      return Cost * LT.first;
    break;
  }
  case Intrinsic::vp_nearbyint: {
    // One more read and one more write of fflags than vp_rint.
    unsigned Cost = 7;
    auto LT = getTypeLegalizationCost(RetTy);
    if (TLI->isOperationCustom(ISD::VP_FRINT, LT.second))
      return Cost * LT.first;
    break;
  }
  case Intrinsic::vp_ceil:
  case Intrinsic::vp_floor:
  case Intrinsic::vp_round:
  case Intrinsic::vp_roundeven:
  case Intrinsic::vp_roundtozero: {
    // Rounding with a static rounding mode needs two more instructions to
    // swap/write FRM than vp_rint.
    unsigned Cost = 7;
    auto LT = getTypeLegalizationCost(RetTy);
    unsigned VPISD = getISDForVPIntrinsicID(ICA.getID());
    if (TLI->isOperationCustom(VPISD, LT.second))
      return Cost * LT.first;
    break;
  }
  // vp integer arithmetic ops.
  case Intrinsic::vp_add:
  case Intrinsic::vp_and:
  case Intrinsic::vp_ashr:
  case Intrinsic::vp_lshr:
  case Intrinsic::vp_mul:
  case Intrinsic::vp_or:
  case Intrinsic::vp_sdiv:
  case Intrinsic::vp_shl:
  case Intrinsic::vp_srem:
  case Intrinsic::vp_sub:
  case Intrinsic::vp_udiv:
  case Intrinsic::vp_urem:
  case Intrinsic::vp_xor:
  // vp float arithmetic ops.
  case Intrinsic::vp_fadd:
  case Intrinsic::vp_fsub:
  case Intrinsic::vp_fmul:
  case Intrinsic::vp_fdiv:
  case Intrinsic::vp_frem: {
    std::optional<unsigned> FOp =
        VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
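    // When a functional IR opcode exists (e.g. vp_add -> Instruction::Add),
    // cost the VP intrinsic exactly like its non-VP counterpart.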
    if (FOp)
      return getArithmeticInstrCost(*FOp, ICA.getReturnType(), CostKind);
    break;
  }
  }

  if (ST->hasVInstructions() && RetTy->isVectorTy()) {
    if (auto LT = getTypeLegalizationCost(RetTy);
        LT.second.isVector()) {
      MVT EltTy = LT.second.getVectorElementType();
      if (const auto *Entry = CostTableLookup(VectorIntrinsicCostTable,
                                              ICA.getID(), EltTy))
        return LT.first * Entry->Cost;
    }
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                               Type *Src,
                                               TTI::CastContextHint CCH,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  bool IsVectorType = isa<VectorType>(Dst) && isa<VectorType>(Src);
  if (!IsVectorType)
    return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);

  bool IsTypeLegal = isTypeLegal(Src) && isTypeLegal(Dst) &&
                     (Src->getScalarSizeInBits() <= ST->getELen()) &&
                     (Dst->getScalarSizeInBits() <= ST->getELen());

  // FIXME: Need to compute legalizing cost for illegal types.
  if (!IsTypeLegal)
    return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);

  std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
  std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int PowDiff = (int)Log2_32(Dst->getScalarSizeInBits()) -
                (int)Log2_32(Src->getScalarSizeInBits());
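  // PowDiff is the log2 ratio of the element widths; e.g. i8 -> i32 gives
  // PowDiff == 2, which the extend cases below map to vsext.vf4/vzext.vf4.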
  switch (ISD) {
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    const unsigned SrcEltSize = Src->getScalarSizeInBits();
    if (SrcEltSize == 1) {
      // We do not use vsext/vzext to extend from a mask vector; instead we
      // use the following instructions:
      // vmv.v.i v8, 0
      // vmerge.vim v8, v8, -1, v0
      return getRISCVInstructionCost({RISCV::VMV_V_I, RISCV::VMERGE_VIM},
                                     DstLT.second, CostKind);
    }
    if ((PowDiff < 1) || (PowDiff > 3))
      return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
    unsigned SExtOp[] = {RISCV::VSEXT_VF2, RISCV::VSEXT_VF4, RISCV::VSEXT_VF8};
    unsigned ZExtOp[] = {RISCV::VZEXT_VF2, RISCV::VZEXT_VF4, RISCV::VZEXT_VF8};
    unsigned Op =
        (ISD == ISD::SIGN_EXTEND) ? SExtOp[PowDiff - 1] : ZExtOp[PowDiff - 1];
    return getRISCVInstructionCost(Op, DstLT.second, CostKind);
  }
  case ISD::TRUNCATE:
    if (Dst->getScalarSizeInBits() == 1) {
      // We do not use a series of vncvt instructions to truncate to a mask
      // vector, so we cannot use PowDiff to calculate the cost.
      // Instead we use the following instructions to truncate to a mask
      // vector:
      // vand.vi v8, v8, 1
      // vmsne.vi v0, v8, 0
      return getRISCVInstructionCost({RISCV::VAND_VI, RISCV::VMSNE_VI},
                                     SrcLT.second, CostKind);
    }
    [[fallthrough]];
  case ISD::FP_EXTEND:
  case ISD::FP_ROUND: {
    // Counts of narrow/widen instructions.
    unsigned SrcEltSize = Src->getScalarSizeInBits();
    unsigned DstEltSize = Dst->getScalarSizeInBits();

    unsigned Op = (ISD == ISD::TRUNCATE)    ? RISCV::VNSRL_WI
                  : (ISD == ISD::FP_EXTEND) ? RISCV::VFWCVT_F_F_V
                                            : RISCV::VFNCVT_F_F_W;
    InstructionCost Cost = 0;
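    // The loop below emits one single-step conversion per power-of-two of
    // width change; e.g. an fptrunc from f64 to f16 elements is costed as
    // two vfncvt.f.f.w steps (f64 -> f32 -> f16).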
    for (; SrcEltSize != DstEltSize;) {
      MVT ElementMVT = (ISD == ISD::TRUNCATE)
                           ? MVT::getIntegerVT(DstEltSize)
                           : MVT::getFloatingPointVT(DstEltSize);
      MVT DstMVT = DstLT.second.changeVectorElementType(ElementMVT);
      DstEltSize =
          (DstEltSize > SrcEltSize) ? DstEltSize >> 1 : DstEltSize << 1;
      Cost += getRISCVInstructionCost(Op, DstMVT, CostKind);
    }
    return Cost;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    if (Src->getScalarSizeInBits() == 1 || Dst->getScalarSizeInBits() == 1) {
      // The cost of converting from or to a mask vector differs from the
      // other cases, so we cannot use PowDiff to calculate it.
      // For mask vector to fp, we should use the following instructions:
      // vmv.v.i v8, 0
      // vmerge.vim v8, v8, -1, v0
      // vfcvt.f.x.v v8, v8

      // And for fp vector to mask, we use:
      // vfncvt.rtz.x.f.w v9, v8
      // vand.vi v8, v9, 1
      // vmsne.vi v0, v8, 0
      return 3;
    }
    if (std::abs(PowDiff) <= 1)
      return 1;
    // The backend can lower (v[sz]ext i8 to double) to vfcvt(v[sz]ext.vf8 i8),
    // so it only needs two conversions.
    if (Src->isIntOrIntVectorTy())
      return 2;
    // Counts of narrow/widen instructions.
    return std::abs(PowDiff);
  }
  return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
}

unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
  if (isa<ScalableVectorType>(Ty)) {
    const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
    const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
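    // e.g. assuming getVScaleForTuning() == 2 (tuned for VLEN == 128), a
    // <vscale x 4 x i32> yields an estimated VLMAX of 8 elements.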
    return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
  }
  return cast<FixedVectorType>(Ty)->getNumElements();
}

InstructionCost
RISCVTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                     FastMathFlags FMF,
                                     TTI::TargetCostKind CostKind) {
  if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
    return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);

  // Skip if scalar size of Ty is bigger than ELEN.
  if (Ty->getScalarSizeInBits() > ST->getELen())
    return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  if (Ty->getElementType()->isIntegerTy(1)) {
    // SelectionDAGBuilder does the following transforms:
    //   vector_reduce_{smin,umax}(<n x i1>) --> vector_reduce_or(<n x i1>)
    //   vector_reduce_{smax,umin}(<n x i1>) --> vector_reduce_and(<n x i1>)
    if (IID == Intrinsic::umax || IID == Intrinsic::smin)
      return getArithmeticReductionCost(Instruction::Or, Ty, FMF, CostKind);
    else
      return getArithmeticReductionCost(Instruction::And, Ty, FMF, CostKind);
  }

  if (IID == Intrinsic::maximum || IID == Intrinsic::minimum) {
    SmallVector<unsigned, 3> Opcodes;
    InstructionCost ExtraCost = 0;
    switch (IID) {
    case Intrinsic::maximum:
      if (FMF.noNaNs()) {
        Opcodes = {RISCV::VFREDMAX_VS, RISCV::VFMV_F_S};
      } else {
        Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMAX_VS,
                   RISCV::VFMV_F_S};
        // Cost of materializing the canonical NaN + branch:
        // lui a0, 523264
        // fmv.w.x fa0, a0
        Type *DstTy = Ty->getScalarType();
        const unsigned EltTyBits = DstTy->getScalarSizeInBits();
        Type *SrcTy = IntegerType::getIntNTy(DstTy->getContext(), EltTyBits);
        ExtraCost = 1 +
                    getCastInstrCost(Instruction::UIToFP, DstTy, SrcTy,
                                     TTI::CastContextHint::None, CostKind) +
                    getCFInstrCost(Instruction::Br, CostKind);
      }
      break;

    case Intrinsic::minimum:
      if (FMF.noNaNs()) {
        Opcodes = {RISCV::VFREDMIN_VS, RISCV::VFMV_F_S};
      } else {
        Opcodes = {RISCV::VMFNE_VV, RISCV::VCPOP_M, RISCV::VFREDMIN_VS,
                   RISCV::VFMV_F_S};
        // Cost of materializing the canonical NaN + branch:
        // lui a0, 523264
        // fmv.w.x fa0, a0
        Type *DstTy = Ty->getScalarType();
        const unsigned EltTyBits = DL.getTypeSizeInBits(DstTy);
        Type *SrcTy = IntegerType::getIntNTy(DstTy->getContext(), EltTyBits);
        ExtraCost = 1 +
                    getCastInstrCost(Instruction::UIToFP, DstTy, SrcTy,
                                     TTI::CastContextHint::None, CostKind) +
                    getCFInstrCost(Instruction::Br, CostKind);
      }
      break;
    }
    return ExtraCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
  }

  // An IR reduction is composed of two vmv instructions and one rvv
  // reduction instruction.
  unsigned SplitOp;
  SmallVector<unsigned, 3> Opcodes;
  switch (IID) {
  default:
    llvm_unreachable("Unsupported intrinsic");
  case Intrinsic::smax:
    SplitOp = RISCV::VMAX_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMAX_VS, RISCV::VMV_X_S};
    break;
  case Intrinsic::smin:
    SplitOp = RISCV::VMIN_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMIN_VS, RISCV::VMV_X_S};
    break;
  case Intrinsic::umax:
    SplitOp = RISCV::VMAXU_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMAXU_VS, RISCV::VMV_X_S};
    break;
  case Intrinsic::umin:
    SplitOp = RISCV::VMINU_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDMINU_VS, RISCV::VMV_X_S};
    break;
  case Intrinsic::maxnum:
    SplitOp = RISCV::VFMAX_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDMAX_VS, RISCV::VFMV_F_S};
    break;
  case Intrinsic::minnum:
    SplitOp = RISCV::VFMIN_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDMIN_VS, RISCV::VFMV_F_S};
    break;
  }
  // Add a cost for data larger than LMUL8
  InstructionCost SplitCost =
      (LT.first > 1) ? (LT.first - 1) *
                           getRISCVInstructionCost(SplitOp, LT.second, CostKind)
                     : 0;
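  // e.g. a umax reduction over a type split into two register groups
  // (LT.first == 2) is one vmaxu.vv to combine the halves, then
  // vmv.s.x + vredmaxu.vs + vmv.x.s on the remaining group.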
  return SplitCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
}

InstructionCost
RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                         std::optional<FastMathFlags> FMF,
                                         TTI::TargetCostKind CostKind) {
  if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  // Skip if scalar size of Ty is bigger than ELEN.
  if (Ty->getScalarSizeInBits() > ST->getELen())
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD != ISD::ADD && ISD != ISD::OR && ISD != ISD::XOR && ISD != ISD::AND &&
      ISD != ISD::FADD)
    return BaseT::getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
  SmallVector<unsigned, 3> Opcodes;
  Type *ElementTy = Ty->getElementType();
  if (ElementTy->isIntegerTy(1)) {
    if (ISD == ISD::AND) {
      // Example sequences:
      //   vsetvli a0, zero, e8, mf8, ta, ma
      //   vmnot.m v8, v0
      //   vcpop.m a0, v8
      //   seqz a0, a0
      Opcodes = {RISCV::VMNAND_MM, RISCV::VCPOP_M};
      return (LT.first - 1) +
             getRISCVInstructionCost(Opcodes, LT.second, CostKind) +
             getCmpSelInstrCost(Instruction::ICmp, ElementTy, ElementTy,
                                CmpInst::ICMP_EQ, CostKind);
    } else {
      // Example sequences:
      //   vsetvli a0, zero, e8, mf8, ta, ma
      //   vcpop.m a0, v0
      //   snez a0, a0
      Opcodes = {RISCV::VCPOP_M};
      return (LT.first - 1) +
             getRISCVInstructionCost(Opcodes, LT.second, CostKind) +
             getCmpSelInstrCost(Instruction::ICmp, ElementTy, ElementTy,
                                CmpInst::ICMP_NE, CostKind);
    }
  }

  // An IR reduction is composed of two vmv instructions and one rvv
  // reduction instruction.
  if (TTI::requiresOrderedReduction(FMF)) {
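    // An ordered FP reduction cannot be split into independent partial sums,
    // so each of the LT.first register groups chains one vfredosum.vs, e.g.
    // LT.first == 2 gives vfmv.s.f + 2 x vfredosum.vs + vfmv.f.s.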
    Opcodes.push_back(RISCV::VFMV_S_F);
    for (unsigned i = 0; i < LT.first.getValue(); i++)
      Opcodes.push_back(RISCV::VFREDOSUM_VS);
    Opcodes.push_back(RISCV::VFMV_F_S);
    return getRISCVInstructionCost(Opcodes, LT.second, CostKind);
  }
  unsigned SplitOp;
  switch (ISD) {
  case ISD::ADD:
    SplitOp = RISCV::VADD_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDSUM_VS, RISCV::VMV_X_S};
    break;
  case ISD::OR:
    SplitOp = RISCV::VOR_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDOR_VS, RISCV::VMV_X_S};
    break;
  case ISD::XOR:
    SplitOp = RISCV::VXOR_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDXOR_VS, RISCV::VMV_X_S};
    break;
  case ISD::AND:
    SplitOp = RISCV::VAND_VV;
    Opcodes = {RISCV::VMV_S_X, RISCV::VREDAND_VS, RISCV::VMV_X_S};
    break;
  case ISD::FADD:
    SplitOp = RISCV::VFADD_VV;
    Opcodes = {RISCV::VFMV_S_F, RISCV::VFREDUSUM_VS, RISCV::VFMV_F_S};
    break;
  }
  // Add a cost for data larger than LMUL8
  InstructionCost SplitCost =
      (LT.first > 1) ? (LT.first - 1) *
                           getRISCVInstructionCost(SplitOp, LT.second, CostKind)
                     : 0;
  return SplitCost + getRISCVInstructionCost(Opcodes, LT.second, CostKind);
}

InstructionCost RISCVTTIImpl::getExtendedReductionCost(
    unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
    FastMathFlags FMF, TTI::TargetCostKind CostKind) {
  if (isa<FixedVectorType>(ValTy) && !ST->useRVVForFixedLengthVectors())
    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
                                           FMF, CostKind);

  // Skip if scalar size of ResTy is bigger than ELEN.
  if (ResTy->getScalarSizeInBits() > ST->getELen())
    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
                                           FMF, CostKind);

  if (Opcode != Instruction::Add && Opcode != Instruction::FAdd)
    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
                                           FMF, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);

  if (ResTy->getScalarSizeInBits() != 2 * LT.second.getScalarSizeInBits())
    return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy,
                                           FMF, CostKind);

  return (LT.first - 1) +
         getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
}

InstructionCost RISCVTTIImpl::getStoreImmCost(Type *Ty,
                                              TTI::OperandValueInfo OpInfo,
                                              TTI::TargetCostKind CostKind) {
  assert(OpInfo.isConstant() && "non constant operand?");
  if (!isa<VectorType>(Ty))
    // FIXME: We need to account for immediate materialization here, but doing
    // a decent job requires more knowledge about the immediate than we
    // currently have here.
    return 0;

  if (OpInfo.isUniform())
    // vmv.v.i, vmv.v.x, or vfmv.v.f
    // We ignore the cost of the scalar constant materialization to be
    // consistent with how we treat scalar constants themselves just above.
    return 1;

  return getConstantPoolLoadCost(Ty, CostKind);
}

InstructionCost RISCVTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                              MaybeAlign Alignment,
                                              unsigned AddressSpace,
                                              TTI::TargetCostKind CostKind,
                                              TTI::OperandValueInfo OpInfo,
                                              const Instruction *I) {
  EVT VT = TLI->getValueType(DL, Src, true);
  // Type legalization can't handle structs
  if (VT == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  InstructionCost Cost = 0;
  if (Opcode == Instruction::Store && OpInfo.isConstant())
    Cost += getStoreImmCost(Src, OpInfo, CostKind);
  InstructionCost BaseCost =
      BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                             CostKind, OpInfo, I);
  // Assume memory op costs scale with the number of vector registers
  // possibly accessed by the instruction.  Note that BasicTTI already
  // handles the LT.first term for us.
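  // e.g. a load of <vscale x 8 x i32> legalizes to LMUL == 4, so its base
  // cost is scaled by roughly 4x relative to a single-register (m1) access.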
  if (std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
      LT.second.isVector() && CostKind != TTI::TCK_CodeSize)
    BaseCost *= TLI->getLMULCost(LT.second);
  return Cost + BaseCost;
}

InstructionCost RISCVTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                 Type *CondTy,
                                                 CmpInst::Predicate VecPred,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  if (isa<FixedVectorType>(ValTy) && !ST->useRVVForFixedLengthVectors())
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Skip if scalar size of ValTy is bigger than ELEN.
  if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() > ST->getELen())
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  if (Opcode == Instruction::Select && ValTy->isVectorTy()) {
    if (CondTy->isVectorTy()) {
      if (ValTy->getScalarSizeInBits() == 1) {
        // vmandn.mm v8, v8, v9
        // vmand.mm v9, v0, v9
        // vmor.mm v0, v9, v8
        return LT.first *
               getRISCVInstructionCost(
                   {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM},
                   LT.second, CostKind);
      }
      // vselect and max/min are supported natively.
      return LT.first *
             getRISCVInstructionCost(RISCV::VMERGE_VVM, LT.second, CostKind);
    }

    if (ValTy->getScalarSizeInBits() == 1) {
      //  vmv.v.x v9, a0
      //  vmsne.vi v9, v9, 0
      //  vmandn.mm v8, v8, v9
      //  vmand.mm v9, v0, v9
      //  vmor.mm v0, v9, v8
      MVT InterimVT = LT.second.changeVectorElementType(MVT::i8);
      return LT.first *
                 getRISCVInstructionCost({RISCV::VMV_V_X, RISCV::VMSNE_VI},
                                         InterimVT, CostKind) +
             LT.first * getRISCVInstructionCost(
                            {RISCV::VMANDN_MM, RISCV::VMAND_MM, RISCV::VMOR_MM},
                            LT.second, CostKind);
    }

    // vmv.v.x v10, a0
    // vmsne.vi v0, v10, 0
    // vmerge.vvm v8, v9, v8, v0
    return LT.first * getRISCVInstructionCost(
                          {RISCV::VMV_V_X, RISCV::VMSNE_VI, RISCV::VMERGE_VVM},
                          LT.second, CostKind);
  }

  if ((Opcode == Instruction::ICmp) && ValTy->isVectorTy() &&
      CmpInst::isIntPredicate(VecPred)) {
    // Use VMSLT_VV to represent VMSEQ, VMSNE, VMSLTU, VMSLEU, VMSLT, VMSLE,
    // assuming they incur the same cost across all implementations.
    return LT.first *
           getRISCVInstructionCost(RISCV::VMSLT_VV, LT.second, CostKind);
  }

  if ((Opcode == Instruction::FCmp) && ValTy->isVectorTy() &&
      CmpInst::isFPPredicate(VecPred)) {

    // Use VMXOR_MM and VMXNOR_MM to generate all true/false masks.
    if ((VecPred == CmpInst::FCMP_FALSE) || (VecPred == CmpInst::FCMP_TRUE))
      return getRISCVInstructionCost(RISCV::VMXOR_MM, LT.second, CostKind);

    // If we do not support the input floating point vector type, fall back
    // to the base implementation, which computes:
    // ScalarizeCost + Num * Cost for a fixed vector,
    // InvalidCost for a scalable vector.
    if ((ValTy->getScalarSizeInBits() == 16 && !ST->hasVInstructionsF16()) ||
        (ValTy->getScalarSizeInBits() == 32 && !ST->hasVInstructionsF32()) ||
        (ValTy->getScalarSizeInBits() == 64 && !ST->hasVInstructionsF64()))
      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                       I);

    // Assuming vector fp compare and mask instructions are all the same cost
    // until a need arises to differentiate them.
    switch (VecPred) {
    case CmpInst::FCMP_ONE: // vmflt.vv + vmflt.vv + vmor.mm
    case CmpInst::FCMP_ORD: // vmfeq.vv + vmfeq.vv + vmand.mm
    case CmpInst::FCMP_UNO: // vmfne.vv + vmfne.vv + vmor.mm
    case CmpInst::FCMP_UEQ: // vmflt.vv + vmflt.vv + vmnor.mm
      return LT.first * getRISCVInstructionCost(
                            {RISCV::VMFLT_VV, RISCV::VMFLT_VV, RISCV::VMOR_MM},
                            LT.second, CostKind);

    case CmpInst::FCMP_UGT: // vmfle.vv + vmnot.m
    case CmpInst::FCMP_UGE: // vmflt.vv + vmnot.m
    case CmpInst::FCMP_ULT: // vmfle.vv + vmnot.m
    case CmpInst::FCMP_ULE: // vmflt.vv + vmnot.m
      return LT.first *
             getRISCVInstructionCost({RISCV::VMFLT_VV, RISCV::VMNAND_MM},
                                     LT.second, CostKind);

    case CmpInst::FCMP_OEQ: // vmfeq.vv
    case CmpInst::FCMP_OGT: // vmflt.vv
    case CmpInst::FCMP_OGE: // vmfle.vv
    case CmpInst::FCMP_OLT: // vmflt.vv
    case CmpInst::FCMP_OLE: // vmfle.vv
    case CmpInst::FCMP_UNE: // vmfne.vv
      return LT.first *
             getRISCVInstructionCost(RISCV::VMFLT_VV, LT.second, CostKind);
    default:
      break;
    }
  }

  // With ShortForwardBranchOpt or ConditionalMoveFusion, scalar icmp + select
  // pairs lower to SELECT_CC and then to PseudoCCMOVGPR, which expands to a
  // conditional branch + mv. The icmp folds into the branch, so the pair is
  // costed as (0 + select instruction cost).
  if (ST->hasConditionalMoveFusion() && I && isa<ICmpInst>(I) &&
      ValTy->isIntegerTy() && !I->user_empty()) {
    if (all_of(I->users(), [&](const User *U) {
          return match(U, m_Select(m_Specific(I), m_Value(), m_Value())) &&
                 U->getType()->isIntegerTy() &&
                 !isa<ConstantData>(U->getOperand(1)) &&
                 !isa<ConstantData>(U->getOperand(2));
        }))
      return 0;
  }

  // TODO: Add cost for scalar type.

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

InstructionCost RISCVTTIImpl::getCFInstrCost(unsigned Opcode,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                 TTI::TargetCostKind CostKind,
                                                 unsigned Index, Value *Op0,
                                                 Value *Op1) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Opcode != Instruction::ExtractElement &&
      Opcode != Instruction::InsertElement)
    return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);

  // This type is legalized to a scalar type.
  if (!LT.second.isVector()) {
    auto *FixedVecTy = cast<FixedVectorType>(Val);
    // If Index is a known constant, cost is zero.
    if (Index != -1U)
      return 0;
    // Extract/InsertElement with non-constant index is very costly when
    // scalarized; estimate cost of loads/stores sequence via the stack:
    // ExtractElement cost: store vector to stack, load scalar;
    // InsertElement cost: store vector to stack, store scalar, load vector.
    Type *ElemTy = FixedVecTy->getElementType();
    auto NumElems = FixedVecTy->getNumElements();
    auto Align = DL.getPrefTypeAlign(ElemTy);
    InstructionCost LoadCost =
        getMemoryOpCost(Instruction::Load, ElemTy, Align, 0, CostKind);
    InstructionCost StoreCost =
        getMemoryOpCost(Instruction::Store, ElemTy, Align, 0, CostKind);
    return Opcode == Instruction::ExtractElement
               ? StoreCost * NumElems + LoadCost
               : (StoreCost + LoadCost) * NumElems + StoreCost;
  }

  // For unsupported scalable vector.
  if (LT.second.isScalableVector() && !LT.first.isValid())
    return LT.first;

  if (!isTypeLegal(Val))
    return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);

  // Mask vector extract/insert is expanded via e8.
  if (Val->getScalarSizeInBits() == 1) {
    VectorType *WideTy =
        VectorType::get(IntegerType::get(Val->getContext(), 8),
                        cast<VectorType>(Val)->getElementCount());
    if (Opcode == Instruction::ExtractElement) {
      InstructionCost ExtendCost =
          getCastInstrCost(Instruction::ZExt, WideTy, Val,
                           TTI::CastContextHint::None, CostKind);
      InstructionCost ExtractCost =
          getVectorInstrCost(Opcode, WideTy, CostKind, Index, nullptr, nullptr);
      return ExtendCost + ExtractCost;
    }
    InstructionCost ExtendCost =
        getCastInstrCost(Instruction::ZExt, WideTy, Val,
                         TTI::CastContextHint::None, CostKind);
    InstructionCost InsertCost =
        getVectorInstrCost(Opcode, WideTy, CostKind, Index, nullptr, nullptr);
    InstructionCost TruncCost =
        getCastInstrCost(Instruction::Trunc, Val, WideTy,
                         TTI::CastContextHint::None, CostKind);
    return ExtendCost + InsertCost + TruncCost;
  }

  // In RVV, we can use vslidedown + vmv.x.s to extract an element from a
  // vector, and vslideup + vmv.s.x to insert an element into a vector.
  unsigned BaseCost = 1;
  // For insertelement we also need an addi to compute index + 1 as an input
  // of vslideup.
  unsigned SlideCost = Opcode == Instruction::InsertElement ? 2 : 1;

  if (Index != -1U) {
    // The type may be split. For fixed-width vectors we can normalize the
    // index to the new type.
    if (LT.second.isFixedLengthVector()) {
      unsigned Width = LT.second.getVectorNumElements();
      Index = Index % Width;
    }

    // We can extract/insert the first element without vslidedown/vslideup.
    if (Index == 0)
      SlideCost = 0;
    else if (Opcode == Instruction::InsertElement)
      SlideCost = 1; // With a constant index, we do not need to use addi.
  }
1637bdd1243dSDimitry Andric 
1638bdd1243dSDimitry Andric   // Extract i64 in the target that has XLEN=32 need more instruction.
1639bdd1243dSDimitry Andric   if (Val->getScalarType()->isIntegerTy() &&
1640bdd1243dSDimitry Andric       ST->getXLen() < Val->getScalarSizeInBits()) {
1641bdd1243dSDimitry Andric     // For extractelement, we need the following instructions:
1642bdd1243dSDimitry Andric     // vsetivli zero, 1, e64, m1, ta, mu (not counted)
1643bdd1243dSDimitry Andric     // vslidedown.vx v8, v8, a0
1644bdd1243dSDimitry Andric     // vmv.x.s a0, v8
1645bdd1243dSDimitry Andric     // li a1, 32
1646bdd1243dSDimitry Andric     // vsrl.vx v8, v8, a1
1647bdd1243dSDimitry Andric     // vmv.x.s a1, v8
1648bdd1243dSDimitry Andric 
1649bdd1243dSDimitry Andric     // For insertelement, we need the following instructions:
1650bdd1243dSDimitry Andric     // vsetivli zero, 2, e32, m4, ta, mu (not counted)
1651bdd1243dSDimitry Andric     // vmv.v.i v12, 0
1652bdd1243dSDimitry Andric     // vslide1up.vx v16, v12, a1
1653bdd1243dSDimitry Andric     // vslide1up.vx v12, v16, a0
1654bdd1243dSDimitry Andric     // addi a0, a2, 1
1655bdd1243dSDimitry Andric     // vsetvli zero, a0, e64, m4, tu, mu (not counted)
1656bdd1243dSDimitry Andric     // vslideup.vx v8, v12, a2
1657bdd1243dSDimitry Andric 
1658bdd1243dSDimitry Andric     // TODO: should we count these special vsetvlis?
1659bdd1243dSDimitry Andric     BaseCost = Opcode == Instruction::InsertElement ? 3 : 4;
1660bdd1243dSDimitry Andric   }
1661bdd1243dSDimitry Andric   return BaseCost + SlideCost;
1662bdd1243dSDimitry Andric }
1663bdd1243dSDimitry Andric 
1664bdd1243dSDimitry Andric InstructionCost RISCVTTIImpl::getArithmeticInstrCost(
1665bdd1243dSDimitry Andric     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1666bdd1243dSDimitry Andric     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
1667bdd1243dSDimitry Andric     ArrayRef<const Value *> Args, const Instruction *CxtI) {
1668bdd1243dSDimitry Andric 
1669bdd1243dSDimitry Andric   // TODO: Handle more cost kinds.
1670bdd1243dSDimitry Andric   if (CostKind != TTI::TCK_RecipThroughput)
1671bdd1243dSDimitry Andric     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1672bdd1243dSDimitry Andric                                          Args, CxtI);
1673bdd1243dSDimitry Andric 
1674bdd1243dSDimitry Andric   if (isa<FixedVectorType>(Ty) && !ST->useRVVForFixedLengthVectors())
1675bdd1243dSDimitry Andric     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1676bdd1243dSDimitry Andric                                          Args, CxtI);
1677bdd1243dSDimitry Andric 
1678bdd1243dSDimitry Andric   // Skip if scalar size of Ty is bigger than ELEN.
16795f757f3fSDimitry Andric   if (isa<VectorType>(Ty) && Ty->getScalarSizeInBits() > ST->getELen())
1680bdd1243dSDimitry Andric     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1681bdd1243dSDimitry Andric                                          Args, CxtI);
1682bdd1243dSDimitry Andric 
1683bdd1243dSDimitry Andric   // Legalize the type.
1684bdd1243dSDimitry Andric   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1685bdd1243dSDimitry Andric 
1686bdd1243dSDimitry Andric   // TODO: Handle scalar type.
1687bdd1243dSDimitry Andric   if (!LT.second.isVector())
1688bdd1243dSDimitry Andric     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1689bdd1243dSDimitry Andric                                          Args, CxtI);
1690bdd1243dSDimitry Andric 
1691bdd1243dSDimitry Andric   auto getConstantMatCost =
1692bdd1243dSDimitry Andric     [&](unsigned Operand, TTI::OperandValueInfo OpInfo) -> InstructionCost {
1693bdd1243dSDimitry Andric     if (OpInfo.isUniform() && TLI->canSplatOperand(Opcode, Operand))
1694bdd1243dSDimitry Andric       // Two sub-cases:
1695bdd1243dSDimitry Andric       // * Has a 5-bit immediate operand which can be splatted.
1696bdd1243dSDimitry Andric       // * Has a larger immediate which must be materialized in a scalar register.
1697bdd1243dSDimitry Andric       // We return 0 for both as we currently ignore the cost of materializing
1698bdd1243dSDimitry Andric       // scalar constants in GPRs.
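      // For example (illustrative only): "vadd.vi v8, v8, 5" splats a 5-bit
      // immediate directly, while "li a0, 1234" followed by
      // "vadd.vx v8, v8, a0" needs the li, which this model treats as free.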
1699bdd1243dSDimitry Andric       return 0;
1700bdd1243dSDimitry Andric 
170106c3fb27SDimitry Andric     return getConstantPoolLoadCost(Ty, CostKind);
1702bdd1243dSDimitry Andric   };
1703bdd1243dSDimitry Andric 
1704bdd1243dSDimitry Andric   // Add the cost of materializing any constant vectors required.
1705bdd1243dSDimitry Andric   InstructionCost ConstantMatCost = 0;
1706bdd1243dSDimitry Andric   if (Op1Info.isConstant())
1707bdd1243dSDimitry Andric     ConstantMatCost += getConstantMatCost(0, Op1Info);
1708bdd1243dSDimitry Andric   if (Op2Info.isConstant())
1709bdd1243dSDimitry Andric     ConstantMatCost += getConstantMatCost(1, Op2Info);
1710bdd1243dSDimitry Andric 
1711*0fca6ea1SDimitry Andric   unsigned Op;
1712bdd1243dSDimitry Andric   switch (TLI->InstructionOpcodeToISD(Opcode)) {
1713bdd1243dSDimitry Andric   case ISD::ADD:
1714bdd1243dSDimitry Andric   case ISD::SUB:
1715*0fca6ea1SDimitry Andric     Op = RISCV::VADD_VV;
1716*0fca6ea1SDimitry Andric     break;
1717bdd1243dSDimitry Andric   case ISD::SHL:
1718bdd1243dSDimitry Andric   case ISD::SRL:
1719bdd1243dSDimitry Andric   case ISD::SRA:
1720*0fca6ea1SDimitry Andric     Op = RISCV::VSLL_VV;
1721*0fca6ea1SDimitry Andric     break;
1722*0fca6ea1SDimitry Andric   case ISD::AND:
1723*0fca6ea1SDimitry Andric   case ISD::OR:
1724*0fca6ea1SDimitry Andric   case ISD::XOR:
1725*0fca6ea1SDimitry Andric     Op = (Ty->getScalarSizeInBits() == 1) ? RISCV::VMAND_MM : RISCV::VAND_VV;
1726*0fca6ea1SDimitry Andric     break;
1727bdd1243dSDimitry Andric   case ISD::MUL:
1728bdd1243dSDimitry Andric   case ISD::MULHS:
1729bdd1243dSDimitry Andric   case ISD::MULHU:
1730*0fca6ea1SDimitry Andric     Op = RISCV::VMUL_VV;
1731*0fca6ea1SDimitry Andric     break;
1732*0fca6ea1SDimitry Andric   case ISD::SDIV:
1733*0fca6ea1SDimitry Andric   case ISD::UDIV:
1734*0fca6ea1SDimitry Andric     Op = RISCV::VDIV_VV;
1735*0fca6ea1SDimitry Andric     break;
1736*0fca6ea1SDimitry Andric   case ISD::SREM:
1737*0fca6ea1SDimitry Andric   case ISD::UREM:
1738*0fca6ea1SDimitry Andric     Op = RISCV::VREM_VV;
1739*0fca6ea1SDimitry Andric     break;
1740bdd1243dSDimitry Andric   case ISD::FADD:
1741bdd1243dSDimitry Andric   case ISD::FSUB:
1742*0fca6ea1SDimitry Andric     // TODO: Address FP16 with VFHMIN
1743*0fca6ea1SDimitry Andric     Op = RISCV::VFADD_VV;
1744*0fca6ea1SDimitry Andric     break;
1745bdd1243dSDimitry Andric   case ISD::FMUL:
1746*0fca6ea1SDimitry Andric     // TODO: Address FP16 with VFHMIN
1747*0fca6ea1SDimitry Andric     Op = RISCV::VFMUL_VV;
1748*0fca6ea1SDimitry Andric     break;
1749*0fca6ea1SDimitry Andric   case ISD::FDIV:
1750*0fca6ea1SDimitry Andric     Op = RISCV::VFDIV_VV;
1751*0fca6ea1SDimitry Andric     break;
1752*0fca6ea1SDimitry Andric   case ISD::FNEG:
1753*0fca6ea1SDimitry Andric     Op = RISCV::VFSGNJN_VV;
1754*0fca6ea1SDimitry Andric     break;
1755bdd1243dSDimitry Andric   default:
1756*0fca6ea1SDimitry Andric     // Assuming all other instructions have the same cost until a need arises to
1757*0fca6ea1SDimitry Andric     // differentiate them.
1758*0fca6ea1SDimitry Andric     return ConstantMatCost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
1759*0fca6ea1SDimitry Andric                                                            Op1Info, Op2Info,
1760bdd1243dSDimitry Andric                                                            Args, CxtI);
1761bdd1243dSDimitry Andric   }
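  // Worked example, assuming RVVBitsPerBlock is 64: an ISD::ADD on
  // <vscale x 4 x i32> legalizes to an m2 type, so it is costed as a single
  // VADD_VV scaled by that LMUL inside getRISCVInstructionCost.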
1762*0fca6ea1SDimitry Andric 
1763*0fca6ea1SDimitry Andric   InstructionCost InstrCost = getRISCVInstructionCost(Op, LT.second, CostKind);
1764*0fca6ea1SDimitry Andric   // We use BasicTTIImpl to calculate scalar costs, which assumes floating point
1765*0fca6ea1SDimitry Andric   // ops are twice as expensive as integer ops. Do the same for vectors so
1766*0fca6ea1SDimitry Andric   // scalar floating point ops aren't cheaper than their vector equivalents.
1767*0fca6ea1SDimitry Andric   if (Ty->isFPOrFPVectorTy())
1768*0fca6ea1SDimitry Andric     InstrCost *= 2;
1769*0fca6ea1SDimitry Andric   return ConstantMatCost + LT.first * InstrCost;
1770bdd1243dSDimitry Andric }
1771bdd1243dSDimitry Andric 
177206c3fb27SDimitry Andric // TODO: Deduplicate from TargetTransformInfoImplCRTPBase.
177306c3fb27SDimitry Andric InstructionCost RISCVTTIImpl::getPointersChainCost(
177406c3fb27SDimitry Andric     ArrayRef<const Value *> Ptrs, const Value *Base,
177506c3fb27SDimitry Andric     const TTI::PointersChainInfo &Info, Type *AccessTy,
177606c3fb27SDimitry Andric     TTI::TargetCostKind CostKind) {
177706c3fb27SDimitry Andric   InstructionCost Cost = TTI::TCC_Free;
177806c3fb27SDimitry Andric   // In the basic model we only take GEP instructions into account
177906c3fb27SDimitry Andric   // (although the pointers here may also be alloca instructions, plain
178006c3fb27SDimitry Andric   // values, constants and/or constant expressions, PHIs, bitcasts ...
178106c3fb27SDimitry Andric   // anything that is allowed to be used as a pointer). Typically, if Base
178206c3fb27SDimitry Andric   // is not a GEP instruction and all the pointers are relative to the
178306c3fb27SDimitry Andric   // same base address, the rest are either GEP instructions, PHIs,
178406c3fb27SDimitry Andric   // bitcasts or constants. When the pointers share a base, we cost each
178506c3fb27SDimitry Andric   // non-Base GEP as an ADD operation if any of its indices is
178606c3fb27SDimitry Andric   // non-constant. If there are no known dependencies between the
178706c3fb27SDimitry Andric   // pointers, the cost is calculated as the sum of the GEP costs.
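  // For example, given a shared-base chain (a sketch, not from a real test):
  //   %p0 = getelementptr i32, ptr %base, i64 0
  //   %p1 = getelementptr i32, ptr %base, i64 1
  // the unit-stride offsets can typically be folded into the memory
  // accesses, so the non-Base GEPs end up free below.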
178806c3fb27SDimitry Andric   for (auto [I, V] : enumerate(Ptrs)) {
178906c3fb27SDimitry Andric     const auto *GEP = dyn_cast<GetElementPtrInst>(V);
179006c3fb27SDimitry Andric     if (!GEP)
179106c3fb27SDimitry Andric       continue;
179206c3fb27SDimitry Andric     if (Info.isSameBase() && V != Base) {
179306c3fb27SDimitry Andric       if (GEP->hasAllConstantIndices())
179406c3fb27SDimitry Andric         continue;
179506c3fb27SDimitry Andric       // If the chain is unit-stride and BaseReg + stride*i is a legal
179606c3fb27SDimitry Andric       // addressing mode, then presume the base GEP is sitting around in a
179706c3fb27SDimitry Andric       // register somewhere and check if we can fold the offset relative to
179806c3fb27SDimitry Andric       // it.
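      // For example, with i32 accesses the I-th pointer becomes an immediate
      // offset of 4 * I, as in "lw t0, 12(a0)" for I == 3 (illustrative).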
179906c3fb27SDimitry Andric       unsigned Stride = DL.getTypeStoreSize(AccessTy);
180006c3fb27SDimitry Andric       if (Info.isUnitStride() &&
180106c3fb27SDimitry Andric           isLegalAddressingMode(AccessTy,
180206c3fb27SDimitry Andric                                 /* BaseGV */ nullptr,
180306c3fb27SDimitry Andric                                 /* BaseOffset */ Stride * I,
180406c3fb27SDimitry Andric                                 /* HasBaseReg */ true,
180506c3fb27SDimitry Andric                                 /* Scale */ 0,
180606c3fb27SDimitry Andric                                 GEP->getType()->getPointerAddressSpace()))
180706c3fb27SDimitry Andric         continue;
180806c3fb27SDimitry Andric       Cost += getArithmeticInstrCost(Instruction::Add, GEP->getType(), CostKind,
180906c3fb27SDimitry Andric                                      {TTI::OK_AnyValue, TTI::OP_None},
181006c3fb27SDimitry Andric                                      {TTI::OK_AnyValue, TTI::OP_None},
181106c3fb27SDimitry Andric                                      std::nullopt);
181206c3fb27SDimitry Andric     } else {
181306c3fb27SDimitry Andric       SmallVector<const Value *> Indices(GEP->indices());
181406c3fb27SDimitry Andric       Cost += getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
181506c3fb27SDimitry Andric                          Indices, AccessTy, CostKind);
181606c3fb27SDimitry Andric     }
181706c3fb27SDimitry Andric   }
181806c3fb27SDimitry Andric   return Cost;
181906c3fb27SDimitry Andric }
182006c3fb27SDimitry Andric 
18210eae32dcSDimitry Andric void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
18220eae32dcSDimitry Andric                                            TTI::UnrollingPreferences &UP,
18230eae32dcSDimitry Andric                                            OptimizationRemarkEmitter *ORE) {
18240eae32dcSDimitry Andric   // TODO: All of the settings below would benefit from more tuning on
18250eae32dcSDimitry Andric   //       benchmarks and metrics, with changes applied as needed.
18260eae32dcSDimitry Andric 
182881ad6265SDimitry Andric   if (ST->enableDefaultUnroll())
18290eae32dcSDimitry Andric     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
18300eae32dcSDimitry Andric 
18310eae32dcSDimitry Andric   // Enable upper-bound unrolling universally, not dependent upon the
18320eae32dcSDimitry Andric   // conditions below.
18330eae32dcSDimitry Andric   UP.UpperBound = true;
18340eae32dcSDimitry Andric 
18350eae32dcSDimitry Andric   // Disable loop unrolling for Oz and Os.
18360eae32dcSDimitry Andric   UP.OptSizeThreshold = 0;
18370eae32dcSDimitry Andric   UP.PartialOptSizeThreshold = 0;
18380eae32dcSDimitry Andric   if (L->getHeader()->getParent()->hasOptSize())
18390eae32dcSDimitry Andric     return;
18400eae32dcSDimitry Andric 
18410eae32dcSDimitry Andric   SmallVector<BasicBlock *, 4> ExitingBlocks;
18420eae32dcSDimitry Andric   L->getExitingBlocks(ExitingBlocks);
18430eae32dcSDimitry Andric   LLVM_DEBUG(dbgs() << "Loop has:\n"
18440eae32dcSDimitry Andric                     << "Blocks: " << L->getNumBlocks() << "\n"
18450eae32dcSDimitry Andric                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
18460eae32dcSDimitry Andric 
18470eae32dcSDimitry Andric   // Allow at most one exit other than the latch. This acts as an early exit
18480eae32dcSDimitry Andric   // as it mirrors the profitability calculation of the runtime unroller.
18490eae32dcSDimitry Andric   if (ExitingBlocks.size() > 2)
18500eae32dcSDimitry Andric     return;
18510eae32dcSDimitry Andric 
18520eae32dcSDimitry Andric   // Limit the CFG of the loop body for targets with a branch predictor.
18530eae32dcSDimitry Andric   // Allowing 4 blocks permits if-then-else diamonds in the body.
18540eae32dcSDimitry Andric   if (L->getNumBlocks() > 4)
18550eae32dcSDimitry Andric     return;
18560eae32dcSDimitry Andric 
18570eae32dcSDimitry Andric   // Don't unroll vectorized loops, including the remainder loop.
18580eae32dcSDimitry Andric   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
18590eae32dcSDimitry Andric     return;
18600eae32dcSDimitry Andric 
18610eae32dcSDimitry Andric   // Scan the loop: don't unroll loops with calls as this could prevent
18620eae32dcSDimitry Andric   // inlining.
18630eae32dcSDimitry Andric   InstructionCost Cost = 0;
18640eae32dcSDimitry Andric   for (auto *BB : L->getBlocks()) {
18650eae32dcSDimitry Andric     for (auto &I : *BB) {
18660eae32dcSDimitry Andric       // Initial setting - Don't unroll loops containing vectorized
18670eae32dcSDimitry Andric       // instructions.
18680eae32dcSDimitry Andric       if (I.getType()->isVectorTy())
18690eae32dcSDimitry Andric         return;
18700eae32dcSDimitry Andric 
18710eae32dcSDimitry Andric       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
18720eae32dcSDimitry Andric         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
18730eae32dcSDimitry Andric           if (!isLoweredToCall(F))
18740eae32dcSDimitry Andric             continue;
18750eae32dcSDimitry Andric         }
18760eae32dcSDimitry Andric         return;
18770eae32dcSDimitry Andric       }
18780eae32dcSDimitry Andric 
18790eae32dcSDimitry Andric       SmallVector<const Value *> Operands(I.operand_values());
1880bdd1243dSDimitry Andric       Cost += getInstructionCost(&I, Operands,
1881bdd1243dSDimitry Andric                                  TargetTransformInfo::TCK_SizeAndLatency);
18820eae32dcSDimitry Andric     }
18830eae32dcSDimitry Andric   }
18840eae32dcSDimitry Andric 
18850eae32dcSDimitry Andric   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
18860eae32dcSDimitry Andric 
18870eae32dcSDimitry Andric   UP.Partial = true;
18880eae32dcSDimitry Andric   UP.Runtime = true;
18890eae32dcSDimitry Andric   UP.UnrollRemainder = true;
18900eae32dcSDimitry Andric   UP.UnrollAndJam = true;
18910eae32dcSDimitry Andric   UP.UnrollAndJamInnerLoopThreshold = 60;
18920eae32dcSDimitry Andric 
18930eae32dcSDimitry Andric   // Force-unrolling small loops can be very useful because of the
18940eae32dcSDimitry Andric   // branch-taken cost of the backedge.
18950eae32dcSDimitry Andric   if (Cost < 12)
18960eae32dcSDimitry Andric     UP.Force = true;
18970eae32dcSDimitry Andric }
18980eae32dcSDimitry Andric 
18990eae32dcSDimitry Andric void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
19000eae32dcSDimitry Andric                                          TTI::PeelingPreferences &PP) {
19010eae32dcSDimitry Andric   BaseT::getPeelingPreferences(L, SE, PP);
19020eae32dcSDimitry Andric }
190304eeddc0SDimitry Andric 
190481ad6265SDimitry Andric unsigned RISCVTTIImpl::getRegUsageForType(Type *Ty) {
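  // Worked example, assuming RVVBitsPerBlock is 64: <vscale x 4 x i32> has a
  // known-minimum size of 128 bits and therefore occupies
  // divideCeil(128, 64) = 2 vector registers.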
1905bdd1243dSDimitry Andric   TypeSize Size = DL.getTypeSizeInBits(Ty);
190604eeddc0SDimitry Andric   if (Ty->isVectorTy()) {
190704eeddc0SDimitry Andric     if (Size.isScalable() && ST->hasVInstructions())
190804eeddc0SDimitry Andric       return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
190904eeddc0SDimitry Andric 
191004eeddc0SDimitry Andric     if (ST->useRVVForFixedLengthVectors())
191181ad6265SDimitry Andric       return divideCeil(Size, ST->getRealMinVLen());
191204eeddc0SDimitry Andric   }
191304eeddc0SDimitry Andric 
191404eeddc0SDimitry Andric   return BaseT::getRegUsageForType(Ty);
191504eeddc0SDimitry Andric }
1916bdd1243dSDimitry Andric 
1917bdd1243dSDimitry Andric unsigned RISCVTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
191806c3fb27SDimitry Andric   if (SLPMaxVF.getNumOccurrences())
1919bdd1243dSDimitry Andric     return SLPMaxVF;
192006c3fb27SDimitry Andric 
192106c3fb27SDimitry Andric   // Return how many elements fit in getRegisterBitWidth.  This is the
192206c3fb27SDimitry Andric   // same routine as used in the LoopVectorizer.  We should probably be
192306c3fb27SDimitry Andric   // accounting for whether we actually have instructions with the right
192406c3fb27SDimitry Andric   // lane type, but we don't have enough information to do that without
192506c3fb27SDimitry Andric   // some additional plumbing which hasn't been justified yet.
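  // Worked example, assuming the default riscv-v-register-bit-width-lmul of 2
  // and a real minimum VLEN of 128: RegWidth is 256, so ElemWidth == 32
  // yields a maximum VF of 8.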
192606c3fb27SDimitry Andric   TypeSize RegWidth =
192706c3fb27SDimitry Andric     getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
192806c3fb27SDimitry Andric   // If no vector registers, or absurd element widths, disable
192906c3fb27SDimitry Andric   // vectorization by returning 1.
193006c3fb27SDimitry Andric   return std::max<unsigned>(1U, RegWidth.getFixedValue() / ElemWidth);
1931bdd1243dSDimitry Andric }
1932bdd1243dSDimitry Andric 
1933bdd1243dSDimitry Andric bool RISCVTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
1934bdd1243dSDimitry Andric                                  const TargetTransformInfo::LSRCost &C2) {
193506c3fb27SDimitry Andric   // The RISC-V-specific part here is that instruction count gets first priority.
1936*0fca6ea1SDimitry Andric   // If we need to emit adds inside the loop to add up base registers, then
1937*0fca6ea1SDimitry Andric   // we need at least one extra temporary register.
1938*0fca6ea1SDimitry Andric   unsigned C1NumRegs = C1.NumRegs + (C1.NumBaseAdds != 0);
1939*0fca6ea1SDimitry Andric   unsigned C2NumRegs = C2.NumRegs + (C2.NumBaseAdds != 0);
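  // Worked example with hypothetical values: C1 = {Insns: 2, NumRegs: 3,
  // NumBaseAdds: 1} and C2 = {Insns: 2, NumRegs: 4, NumBaseAdds: 0} both
  // compare with an effective register count of 4, so later fields break
  // the tie.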
1940*0fca6ea1SDimitry Andric   return std::tie(C1.Insns, C1NumRegs, C1.AddRecCost,
1941bdd1243dSDimitry Andric                   C1.NumIVMuls, C1.NumBaseAdds,
1942bdd1243dSDimitry Andric                   C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
1943*0fca6ea1SDimitry Andric          std::tie(C2.Insns, C2NumRegs, C2.AddRecCost,
1944bdd1243dSDimitry Andric                   C2.NumIVMuls, C2.NumBaseAdds,
1945bdd1243dSDimitry Andric                   C2.ScaleCost, C2.ImmCost, C2.SetupCost);
1946bdd1243dSDimitry Andric }
1947*0fca6ea1SDimitry Andric 
1948*0fca6ea1SDimitry Andric bool RISCVTTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) {
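  // One possible lowering being legality-checked here, for a fixed
  // <8 x i32> compress store (a sketch; the backend may emit something
  // different):
  //   vcpop.m      a1, v0           ; number of active elements
  //   vcompress.vm v9, v8, v0       ; pack active elements to the front
  //   vsetvli      zero, a1, e32, m2, ta, ma
  //   vse32.v      v9, (a0)         ; unit-stride store of the packed data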
1949*0fca6ea1SDimitry Andric   auto *VTy = dyn_cast<VectorType>(DataTy);
1950*0fca6ea1SDimitry Andric   if (!VTy || VTy->isScalableTy())
1951*0fca6ea1SDimitry Andric     return false;
1952*0fca6ea1SDimitry Andric 
1953*0fca6ea1SDimitry Andric   if (!isLegalMaskedLoadStore(DataTy, Alignment))
1954*0fca6ea1SDimitry Andric     return false;
1955*0fca6ea1SDimitry Andric   return true;
1956*0fca6ea1SDimitry Andric }
1957*0fca6ea1SDimitry Andric 
1958*0fca6ea1SDimitry Andric bool RISCVTTIImpl::areInlineCompatible(const Function *Caller,
1959*0fca6ea1SDimitry Andric                                        const Function *Callee) const {
1960*0fca6ea1SDimitry Andric   const TargetMachine &TM = getTLI()->getTargetMachine();
1961*0fca6ea1SDimitry Andric 
1962*0fca6ea1SDimitry Andric   const FeatureBitset &CallerBits =
1963*0fca6ea1SDimitry Andric       TM.getSubtargetImpl(*Caller)->getFeatureBits();
1964*0fca6ea1SDimitry Andric   const FeatureBitset &CalleeBits =
1965*0fca6ea1SDimitry Andric       TM.getSubtargetImpl(*Callee)->getFeatureBits();
1966*0fca6ea1SDimitry Andric 
1967*0fca6ea1SDimitry Andric   // Inline a callee if its target-features are a subset of the caller's
1968*0fca6ea1SDimitry Andric   // target-features.
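  // For example, a callee compiled with "+m,+a" can be inlined into a caller
  // compiled with "+m,+a,+v", but not vice versa (illustrative features).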
1969*0fca6ea1SDimitry Andric   return (CallerBits & CalleeBits) == CalleeBits;
1970*0fca6ea1SDimitry Andric }
1971