//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
140b57cec5SDimitry Andric
150b57cec5SDimitry Andric #include "HexagonTargetTransformInfo.h"
160b57cec5SDimitry Andric #include "HexagonSubtarget.h"
170b57cec5SDimitry Andric #include "llvm/Analysis/TargetTransformInfo.h"
180b57cec5SDimitry Andric #include "llvm/CodeGen/ValueTypes.h"
190b57cec5SDimitry Andric #include "llvm/IR/InstrTypes.h"
200b57cec5SDimitry Andric #include "llvm/IR/Instructions.h"
210b57cec5SDimitry Andric #include "llvm/IR/User.h"
220b57cec5SDimitry Andric #include "llvm/Support/Casting.h"
230b57cec5SDimitry Andric #include "llvm/Support/CommandLine.h"
24e8d8bef9SDimitry Andric #include "llvm/Transforms/Utils/LoopPeel.h"
250b57cec5SDimitry Andric #include "llvm/Transforms/Utils/UnrollLoop.h"
260b57cec5SDimitry Andric
270b57cec5SDimitry Andric using namespace llvm;
280b57cec5SDimitry Andric
290b57cec5SDimitry Andric #define DEBUG_TYPE "hexagontti"
300b57cec5SDimitry Andric
310b57cec5SDimitry Andric static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
320b57cec5SDimitry Andric cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));
330b57cec5SDimitry Andric
34bdd1243dSDimitry Andric static cl::opt<bool> EnableV68FloatAutoHVX(
35bdd1243dSDimitry Andric "force-hvx-float", cl::Hidden,
36bdd1243dSDimitry Andric cl::desc("Enable auto-vectorization of floatint point types on v68."));
37bdd1243dSDimitry Andric
380b57cec5SDimitry Andric static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
390b57cec5SDimitry Andric cl::init(true), cl::Hidden,
400b57cec5SDimitry Andric cl::desc("Control lookup table emission on Hexagon target"));
410b57cec5SDimitry Andric
42e8d8bef9SDimitry Andric static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
43e8d8bef9SDimitry Andric cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));
44e8d8bef9SDimitry Andric
450b57cec5SDimitry Andric // Constant "cost factor" to make floating point operations more expensive
460b57cec5SDimitry Andric // in terms of vectorization cost. This isn't the best way, but it should
470b57cec5SDimitry Andric // do. Ultimately, the cost should use cycles.
480b57cec5SDimitry Andric static const unsigned FloatFactor = 4;
490b57cec5SDimitry Andric
useHVX() const500b57cec5SDimitry Andric bool HexagonTTIImpl::useHVX() const {
510b57cec5SDimitry Andric return ST.useHVXOps() && HexagonAutoHVX;
520b57cec5SDimitry Andric }
530b57cec5SDimitry Andric
isHVXVectorType(Type * Ty) const54bdd1243dSDimitry Andric bool HexagonTTIImpl::isHVXVectorType(Type *Ty) const {
55bdd1243dSDimitry Andric auto *VecTy = dyn_cast<VectorType>(Ty);
56bdd1243dSDimitry Andric if (!VecTy)
57bdd1243dSDimitry Andric return false;
58bdd1243dSDimitry Andric if (!ST.isTypeForHVX(VecTy))
59bdd1243dSDimitry Andric return false;
60bdd1243dSDimitry Andric if (ST.useHVXV69Ops() || !VecTy->getElementType()->isFloatingPointTy())
61bdd1243dSDimitry Andric return true;
62bdd1243dSDimitry Andric return ST.useHVXV68Ops() && EnableV68FloatAutoHVX;
63bdd1243dSDimitry Andric }
64bdd1243dSDimitry Andric
getTypeNumElements(Type * Ty) const650b57cec5SDimitry Andric unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
665ffd83dbSDimitry Andric if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
675ffd83dbSDimitry Andric return VTy->getNumElements();
680b57cec5SDimitry Andric assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
690b57cec5SDimitry Andric "Expecting scalar type");
700b57cec5SDimitry Andric return 1;
710b57cec5SDimitry Andric }
720b57cec5SDimitry Andric
730b57cec5SDimitry Andric TargetTransformInfo::PopcntSupportKind
getPopcntSupport(unsigned IntTyWidthInBit) const740b57cec5SDimitry Andric HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
750b57cec5SDimitry Andric // Return fast hardware support as every input < 64 bits will be promoted
760b57cec5SDimitry Andric // to 64 bits.
770b57cec5SDimitry Andric return TargetTransformInfo::PSK_FastHardware;
780b57cec5SDimitry Andric }
790b57cec5SDimitry Andric
800b57cec5SDimitry Andric // The Hexagon target can unroll loops with run-time trip counts.
getUnrollingPreferences(Loop * L,ScalarEvolution & SE,TTI::UnrollingPreferences & UP,OptimizationRemarkEmitter * ORE)810b57cec5SDimitry Andric void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
82349cc55cSDimitry Andric TTI::UnrollingPreferences &UP,
83349cc55cSDimitry Andric OptimizationRemarkEmitter *ORE) {
840b57cec5SDimitry Andric UP.Runtime = UP.Partial = true;
855ffd83dbSDimitry Andric }
865ffd83dbSDimitry Andric
getPeelingPreferences(Loop * L,ScalarEvolution & SE,TTI::PeelingPreferences & PP)875ffd83dbSDimitry Andric void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
885ffd83dbSDimitry Andric TTI::PeelingPreferences &PP) {
895ffd83dbSDimitry Andric BaseT::getPeelingPreferences(L, SE, PP);
900b57cec5SDimitry Andric // Only try to peel innermost loops with small runtime trip counts.
91e8d8bef9SDimitry Andric if (L && L->isInnermost() && canPeel(L) &&
920b57cec5SDimitry Andric SE.getSmallConstantTripCount(L) == 0 &&
930b57cec5SDimitry Andric SE.getSmallConstantMaxTripCount(L) > 0 &&
940b57cec5SDimitry Andric SE.getSmallConstantMaxTripCount(L) <= 5) {
955ffd83dbSDimitry Andric PP.PeelCount = 2;
960b57cec5SDimitry Andric }
970b57cec5SDimitry Andric }
980b57cec5SDimitry Andric
99fe6060f1SDimitry Andric TTI::AddressingModeKind
getPreferredAddressingMode(const Loop * L,ScalarEvolution * SE) const100fe6060f1SDimitry Andric HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
101fe6060f1SDimitry Andric ScalarEvolution *SE) const {
102fe6060f1SDimitry Andric return TTI::AMK_PostIndexed;
1030b57cec5SDimitry Andric }
1040b57cec5SDimitry Andric
1050b57cec5SDimitry Andric /// --- Vector TTI begin ---
1060b57cec5SDimitry Andric
getNumberOfRegisters(bool Vector) const1070b57cec5SDimitry Andric unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
1080b57cec5SDimitry Andric if (Vector)
1090b57cec5SDimitry Andric return useHVX() ? 32 : 0;
1100b57cec5SDimitry Andric return 32;
1110b57cec5SDimitry Andric }
1120b57cec5SDimitry Andric
getMaxInterleaveFactor(ElementCount VF)11306c3fb27SDimitry Andric unsigned HexagonTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
114e8d8bef9SDimitry Andric return useHVX() ? 2 : 1;
1150b57cec5SDimitry Andric }
1160b57cec5SDimitry Andric
117fe6060f1SDimitry Andric TypeSize
getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const118fe6060f1SDimitry Andric HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
119fe6060f1SDimitry Andric switch (K) {
120fe6060f1SDimitry Andric case TargetTransformInfo::RGK_Scalar:
121fe6060f1SDimitry Andric return TypeSize::getFixed(32);
122fe6060f1SDimitry Andric case TargetTransformInfo::RGK_FixedWidthVector:
123fe6060f1SDimitry Andric return TypeSize::getFixed(getMinVectorRegisterBitWidth());
124fe6060f1SDimitry Andric case TargetTransformInfo::RGK_ScalableVector:
125fe6060f1SDimitry Andric return TypeSize::getScalable(0);
126fe6060f1SDimitry Andric }
127fe6060f1SDimitry Andric
128fe6060f1SDimitry Andric llvm_unreachable("Unsupported register kind");
1290b57cec5SDimitry Andric }
1300b57cec5SDimitry Andric
getMinVectorRegisterBitWidth() const1310b57cec5SDimitry Andric unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
132e8d8bef9SDimitry Andric return useHVX() ? ST.getVectorLength()*8 : 32;
1330b57cec5SDimitry Andric }
1340b57cec5SDimitry Andric
getMinimumVF(unsigned ElemWidth,bool IsScalable) const135fe6060f1SDimitry Andric ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
136fe6060f1SDimitry Andric bool IsScalable) const {
137fe6060f1SDimitry Andric assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
138fe6060f1SDimitry Andric return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
1390b57cec5SDimitry Andric }
1400b57cec5SDimitry Andric
getCallInstrCost(Function * F,Type * RetTy,ArrayRef<Type * > Tys,TTI::TargetCostKind CostKind)141fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
142fe6060f1SDimitry Andric ArrayRef<Type *> Tys,
143fe6060f1SDimitry Andric TTI::TargetCostKind CostKind) {
1445ffd83dbSDimitry Andric return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
1450b57cec5SDimitry Andric }
1460b57cec5SDimitry Andric
147fe6060f1SDimitry Andric InstructionCost
getIntrinsicInstrCost(const IntrinsicCostAttributes & ICA,TTI::TargetCostKind CostKind)1485ffd83dbSDimitry Andric HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1495ffd83dbSDimitry Andric TTI::TargetCostKind CostKind) {
1505ffd83dbSDimitry Andric if (ICA.getID() == Intrinsic::bswap) {
151fe6060f1SDimitry Andric std::pair<InstructionCost, MVT> LT =
152bdd1243dSDimitry Andric getTypeLegalizationCost(ICA.getReturnType());
1530b57cec5SDimitry Andric return LT.first + 2;
1540b57cec5SDimitry Andric }
1555ffd83dbSDimitry Andric return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1560b57cec5SDimitry Andric }
1570b57cec5SDimitry Andric
getAddressComputationCost(Type * Tp,ScalarEvolution * SE,const SCEV * S)158fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
159fe6060f1SDimitry Andric ScalarEvolution *SE,
160fe6060f1SDimitry Andric const SCEV *S) {
1610b57cec5SDimitry Andric return 0;
1620b57cec5SDimitry Andric }
1630b57cec5SDimitry Andric
/// Cost of a load or store of type \p Src.
/// Stores and non-throughput queries defer to the base implementation.
/// HVX-sized vector loads are costed per vector register; other vector
/// loads are costed by the number of aligned pieces needed to assemble
/// them, with an extra factor for floating-point element types.
InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, OpInfo, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue();
    if (isHVXVectorType(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedValue();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads: one unit per full vector register.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      // Clamp the alignment to at most one register's width.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      // Heuristic: three units per constituent load (load + insertion work).
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    // Bound the effective alignment by the 8-byte scalar access width.
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
}
2200b57cec5SDimitry Andric
221fe6060f1SDimitry Andric InstructionCost
getMaskedMemoryOpCost(unsigned Opcode,Type * Src,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind)222fe6060f1SDimitry Andric HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
223fe6060f1SDimitry Andric Align Alignment, unsigned AddressSpace,
2245ffd83dbSDimitry Andric TTI::TargetCostKind CostKind) {
2255ffd83dbSDimitry Andric return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2265ffd83dbSDimitry Andric CostKind);
2270b57cec5SDimitry Andric }
2280b57cec5SDimitry Andric
getShuffleCost(TTI::ShuffleKind Kind,Type * Tp,ArrayRef<int> Mask,TTI::TargetCostKind CostKind,int Index,Type * SubTp,ArrayRef<const Value * > Args,const Instruction * CxtI)229fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
230bdd1243dSDimitry Andric ArrayRef<int> Mask,
231bdd1243dSDimitry Andric TTI::TargetCostKind CostKind,
232bdd1243dSDimitry Andric int Index, Type *SubTp,
233*0fca6ea1SDimitry Andric ArrayRef<const Value *> Args,
234*0fca6ea1SDimitry Andric const Instruction *CxtI) {
2350b57cec5SDimitry Andric return 1;
2360b57cec5SDimitry Andric }
2370b57cec5SDimitry Andric
getGatherScatterOpCost(unsigned Opcode,Type * DataTy,const Value * Ptr,bool VariableMask,Align Alignment,TTI::TargetCostKind CostKind,const Instruction * I)238fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
2395ffd83dbSDimitry Andric unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
2405ffd83dbSDimitry Andric Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
2410b57cec5SDimitry Andric return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
2425ffd83dbSDimitry Andric Alignment, CostKind, I);
2430b57cec5SDimitry Andric }
2440b57cec5SDimitry Andric
getInterleavedMemoryOpCost(unsigned Opcode,Type * VecTy,unsigned Factor,ArrayRef<unsigned> Indices,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind,bool UseMaskForCond,bool UseMaskForGaps)245fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
2465ffd83dbSDimitry Andric unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
2475ffd83dbSDimitry Andric Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
2485ffd83dbSDimitry Andric bool UseMaskForCond, bool UseMaskForGaps) {
2490b57cec5SDimitry Andric if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
2500b57cec5SDimitry Andric return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2510b57cec5SDimitry Andric Alignment, AddressSpace,
2525ffd83dbSDimitry Andric CostKind,
2530b57cec5SDimitry Andric UseMaskForCond, UseMaskForGaps);
254480093f4SDimitry Andric return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
2555ffd83dbSDimitry Andric CostKind);
2560b57cec5SDimitry Andric }
2570b57cec5SDimitry Andric
getCmpSelInstrCost(unsigned Opcode,Type * ValTy,Type * CondTy,CmpInst::Predicate VecPred,TTI::TargetCostKind CostKind,const Instruction * I)258fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
259e8d8bef9SDimitry Andric Type *CondTy,
260e8d8bef9SDimitry Andric CmpInst::Predicate VecPred,
261e8d8bef9SDimitry Andric TTI::TargetCostKind CostKind,
262e8d8bef9SDimitry Andric const Instruction *I) {
2635ffd83dbSDimitry Andric if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
264bdd1243dSDimitry Andric if (!isHVXVectorType(ValTy) && ValTy->isFPOrFPVectorTy())
265bdd1243dSDimitry Andric return InstructionCost::getMax();
266bdd1243dSDimitry Andric std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
2670b57cec5SDimitry Andric if (Opcode == Instruction::FCmp)
2680b57cec5SDimitry Andric return LT.first + FloatFactor * getTypeNumElements(ValTy);
2690b57cec5SDimitry Andric }
270e8d8bef9SDimitry Andric return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2710b57cec5SDimitry Andric }
2720b57cec5SDimitry Andric
getArithmeticInstrCost(unsigned Opcode,Type * Ty,TTI::TargetCostKind CostKind,TTI::OperandValueInfo Op1Info,TTI::OperandValueInfo Op2Info,ArrayRef<const Value * > Args,const Instruction * CxtI)273fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
2745ffd83dbSDimitry Andric unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
275bdd1243dSDimitry Andric TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
276bdd1243dSDimitry Andric ArrayRef<const Value *> Args,
277480093f4SDimitry Andric const Instruction *CxtI) {
2785ffd83dbSDimitry Andric // TODO: Handle more cost kinds.
2795ffd83dbSDimitry Andric if (CostKind != TTI::TCK_RecipThroughput)
280bdd1243dSDimitry Andric return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
281bdd1243dSDimitry Andric Op2Info, Args, CxtI);
2825ffd83dbSDimitry Andric
2830b57cec5SDimitry Andric if (Ty->isVectorTy()) {
284bdd1243dSDimitry Andric if (!isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy())
285bdd1243dSDimitry Andric return InstructionCost::getMax();
286bdd1243dSDimitry Andric std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
2870b57cec5SDimitry Andric if (LT.second.isFloatingPoint())
2880b57cec5SDimitry Andric return LT.first + FloatFactor * getTypeNumElements(Ty);
2890b57cec5SDimitry Andric }
290bdd1243dSDimitry Andric return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
291bdd1243dSDimitry Andric Args, CxtI);
2920b57cec5SDimitry Andric }
2930b57cec5SDimitry Andric
getCastInstrCost(unsigned Opcode,Type * DstTy,Type * SrcTy,TTI::CastContextHint CCH,TTI::TargetCostKind CostKind,const Instruction * I)294fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
295fe6060f1SDimitry Andric Type *SrcTy,
296fe6060f1SDimitry Andric TTI::CastContextHint CCH,
297e8d8bef9SDimitry Andric TTI::TargetCostKind CostKind,
298e8d8bef9SDimitry Andric const Instruction *I) {
299bdd1243dSDimitry Andric auto isNonHVXFP = [this] (Type *Ty) {
300bdd1243dSDimitry Andric return Ty->isVectorTy() && !isHVXVectorType(Ty) && Ty->isFPOrFPVectorTy();
301bdd1243dSDimitry Andric };
302bdd1243dSDimitry Andric if (isNonHVXFP(SrcTy) || isNonHVXFP(DstTy))
303bdd1243dSDimitry Andric return InstructionCost::getMax();
304bdd1243dSDimitry Andric
3050b57cec5SDimitry Andric if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
3060b57cec5SDimitry Andric unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
3070b57cec5SDimitry Andric unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;
3080b57cec5SDimitry Andric
309bdd1243dSDimitry Andric std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(SrcTy);
310bdd1243dSDimitry Andric std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(DstTy);
311fe6060f1SDimitry Andric InstructionCost Cost =
312fe6060f1SDimitry Andric std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
3135ffd83dbSDimitry Andric // TODO: Allow non-throughput costs that aren't binary.
3145ffd83dbSDimitry Andric if (CostKind != TTI::TCK_RecipThroughput)
3155ffd83dbSDimitry Andric return Cost == 0 ? 0 : 1;
3165ffd83dbSDimitry Andric return Cost;
3170b57cec5SDimitry Andric }
3180b57cec5SDimitry Andric return 1;
3190b57cec5SDimitry Andric }
3200b57cec5SDimitry Andric
getVectorInstrCost(unsigned Opcode,Type * Val,TTI::TargetCostKind CostKind,unsigned Index,Value * Op0,Value * Op1)321fe6060f1SDimitry Andric InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
322bdd1243dSDimitry Andric TTI::TargetCostKind CostKind,
323bdd1243dSDimitry Andric unsigned Index, Value *Op0,
324bdd1243dSDimitry Andric Value *Op1) {
3250b57cec5SDimitry Andric Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
3260b57cec5SDimitry Andric : Val;
3270b57cec5SDimitry Andric if (Opcode == Instruction::InsertElement) {
3280b57cec5SDimitry Andric // Need two rotations for non-zero index.
3290b57cec5SDimitry Andric unsigned Cost = (Index != 0) ? 2 : 0;
3300b57cec5SDimitry Andric if (ElemTy->isIntegerTy(32))
3310b57cec5SDimitry Andric return Cost;
3320b57cec5SDimitry Andric // If it's not a 32-bit value, there will need to be an extract.
333bdd1243dSDimitry Andric return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, CostKind,
334bdd1243dSDimitry Andric Index, Op0, Op1);
3350b57cec5SDimitry Andric }
3360b57cec5SDimitry Andric
3370b57cec5SDimitry Andric if (Opcode == Instruction::ExtractElement)
3380b57cec5SDimitry Andric return 2;
3390b57cec5SDimitry Andric
3400b57cec5SDimitry Andric return 1;
3410b57cec5SDimitry Andric }
3420b57cec5SDimitry Andric
isLegalMaskedStore(Type * DataType,Align)343e8d8bef9SDimitry Andric bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
344bdd1243dSDimitry Andric // This function is called from scalarize-masked-mem-intrin, which runs
345bdd1243dSDimitry Andric // in pre-isel. Use ST directly instead of calling isHVXVectorType.
346e8d8bef9SDimitry Andric return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
347e8d8bef9SDimitry Andric }
348e8d8bef9SDimitry Andric
isLegalMaskedLoad(Type * DataType,Align)349e8d8bef9SDimitry Andric bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
350bdd1243dSDimitry Andric // This function is called from scalarize-masked-mem-intrin, which runs
351bdd1243dSDimitry Andric // in pre-isel. Use ST directly instead of calling isHVXVectorType.
352e8d8bef9SDimitry Andric return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
353e8d8bef9SDimitry Andric }
354e8d8bef9SDimitry Andric
3550b57cec5SDimitry Andric /// --- Vector TTI end ---
3560b57cec5SDimitry Andric
getPrefetchDistance() const3570b57cec5SDimitry Andric unsigned HexagonTTIImpl::getPrefetchDistance() const {
3580b57cec5SDimitry Andric return ST.getL1PrefetchDistance();
3590b57cec5SDimitry Andric }
3600b57cec5SDimitry Andric
getCacheLineSize() const3610b57cec5SDimitry Andric unsigned HexagonTTIImpl::getCacheLineSize() const {
3620b57cec5SDimitry Andric return ST.getL1CacheLineSize();
3630b57cec5SDimitry Andric }
3640b57cec5SDimitry Andric
365bdd1243dSDimitry Andric InstructionCost
getInstructionCost(const User * U,ArrayRef<const Value * > Operands,TTI::TargetCostKind CostKind)366bdd1243dSDimitry Andric HexagonTTIImpl::getInstructionCost(const User *U,
3675ffd83dbSDimitry Andric ArrayRef<const Value *> Operands,
3685ffd83dbSDimitry Andric TTI::TargetCostKind CostKind) {
3690b57cec5SDimitry Andric auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
3700b57cec5SDimitry Andric if (!CI->isIntegerCast())
3710b57cec5SDimitry Andric return false;
3720b57cec5SDimitry Andric // Only extensions from an integer type shorter than 32-bit to i32
3730b57cec5SDimitry Andric // can be folded into the load.
3740b57cec5SDimitry Andric const DataLayout &DL = getDataLayout();
3750b57cec5SDimitry Andric unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
3760b57cec5SDimitry Andric unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
3770b57cec5SDimitry Andric if (DBW != 32 || SBW >= DBW)
3780b57cec5SDimitry Andric return false;
3790b57cec5SDimitry Andric
3800b57cec5SDimitry Andric const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
3810b57cec5SDimitry Andric // Technically, this code could allow multiple uses of the load, and
3820b57cec5SDimitry Andric // check if all the uses are the same extension operation, but this
3830b57cec5SDimitry Andric // should be sufficient for most cases.
3840b57cec5SDimitry Andric return LI && LI->hasOneUse();
3850b57cec5SDimitry Andric };
3860b57cec5SDimitry Andric
3870b57cec5SDimitry Andric if (const CastInst *CI = dyn_cast<const CastInst>(U))
3880b57cec5SDimitry Andric if (isCastFoldedIntoLoad(CI))
3890b57cec5SDimitry Andric return TargetTransformInfo::TCC_Free;
390bdd1243dSDimitry Andric return BaseT::getInstructionCost(U, Operands, CostKind);
3910b57cec5SDimitry Andric }
3920b57cec5SDimitry Andric
shouldBuildLookupTables() const3930b57cec5SDimitry Andric bool HexagonTTIImpl::shouldBuildLookupTables() const {
3940b57cec5SDimitry Andric return EmitLookupTables;
3950b57cec5SDimitry Andric }
396