//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <optional>

namespace llvm {

class APInt;
class ARMTargetLowering;
class Instruction;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;

namespace TailPredication {
enum Mode {
  Disabled = 0,
  EnabledNoReductions,
  Enabled,
  ForceEnabledNoReductions,
  ForceEnabled
};
} // namespace TailPredication

// For controlling conversion of memcpy into a tail-predicated loop.
namespace TPLoop {
enum MemTransfer { ForceDisabled = 0, ForceEnabled, Allow };
} // namespace TPLoop

class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
  using BaseT = BasicTTIImplBase<ARMTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  // Currently the following features are excluded from InlineFeaturesAllowed:
  // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32.
  // Depending on whether they are set or unset, different
  // instructions/registers are available. For example, inlining a callee with
  // -thumb-mode into a caller with +thumb-mode may cause the assembler to fail
  // if the callee uses ARM-only instructions, e.g. in inline asm.
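  //
  // Informally (see areInlineCompatible in ARMTargetTransformInfo.cpp):
  // features *not* listed below must match exactly between caller and callee
  // for inlining to be allowed, while for the listed features it is enough
  // that the callee's set is a subset of the caller's.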
  const FeatureBitset InlineFeaturesAllowed = {
      ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
      ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
      ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
      ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
      ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
      ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
      ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
      ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
      ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
      ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
      ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
      ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
      ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
      ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
      ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
      ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
      ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
      ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
      ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
      ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
      ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
      ARM::FeatureAClass, ARM::FeatureNaClTrap, ARM::FeatureStrictAlign,
      ARM::FeatureLongCalls, ARM::FeatureExecuteOnly, ARM::FeatureReserveR9,
      ARM::FeatureNoMovt, ARM::FeatureNoNegativeImmediates
  };

  const ARMSubtarget *getST() const { return ST; }
  const ARMTargetLowering *getTLI() const { return TLI; }

public:
  explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  bool enableInterleavedAccessVectorization() { return true; }

  TTI::AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const;

  /// Floating-point computation using ARMv8 AArch32 Advanced
  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
  /// and Arm MVE are IEEE-754 compliant.
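  /// For example (illustrative only): a non-Darwin subtarget with NEON but no
  /// MVE floating point, such as Cortex-A9, is reported as potentially unsafe
  /// because Advanced SIMD flushes denormals to zero, whereas a +mve.fp
  /// subtarget (e.g. Cortex-M55) is not.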
  bool isFPVectorizationPotentiallyUnsafe() {
    return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
  }

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  /// \name Scalar TTI Implementations
  /// @{

  InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                        const APInt &Imm, Type *Ty);

  using BaseT::getIntImmCost;
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      if (ST->hasMVEIntegerOps())
        return 8;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    switch (K) {
    case TargetTransformInfo::RGK_Scalar:
      return TypeSize::getFixed(32);
    case TargetTransformInfo::RGK_FixedWidthVector:
      if (ST->hasNEON())
        return TypeSize::getFixed(128);
      if (ST->hasMVEIntegerOps())
        return TypeSize::getFixed(128);
      return TypeSize::getFixed(0);
    case TargetTransformInfo::RGK_ScalableVector:
      return TypeSize::getScalable(0);
    }
    llvm_unreachable("Unsupported register kind");
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) {
    return ST->getMaxInterleaveFactor();
  }

  bool isProfitableLSRChainElement(Instruction *I);

  bool isLegalMaskedLoad(Type *DataTy, Align Alignment);

  bool isLegalMaskedStore(Type *DataTy, Align Alignment) {
    return isLegalMaskedLoad(DataTy, Alignment);
  }

  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
    // For MVE, a custom lowering pass will already have legalised any gathers
    // that we can lower to MVE intrinsics; we want to expand all the rest.
    // That pass runs before the masked intrinsic lowering pass.
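    // (Concretely, the MVE pass in question is MVEGatherScatterLowering;
    // returning true here makes the generic masked-intrinsic scalarization
    // expand whatever that pass left behind.)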
    return true;
  }

  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }

  bool isLegalMaskedGather(Type *Ty, Align Alignment);

  bool isLegalMaskedScatter(Type *Ty, Align Alignment) {
    return isLegalMaskedGather(Ty, Alignment);
  }

  InstructionCost getMemcpyCost(const Instruction *I);

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return ST->getMaxInlineSizeThreshold();
  }

  int getNumMemOps(const IntrinsicInst *I) const;

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt);

  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const;

  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const;

  bool shouldExpandReduction(const IntrinsicInst *II) const { return false; }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr);

  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);

  InstructionCost getAddressComputationCost(Type *Val, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr);

  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);
  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *ValTy,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind);
  InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                         VectorType *ValTy,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  /// getScalingFactorCost - Return the cost of the scaling factor used in the
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value must be negative.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) const;

  bool maybeLoweredToCall(Instruction &I);
  bool isLoweredToCall(const Function *F);
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo);
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI);
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE);

  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP);
  bool shouldBuildLookupTablesForConstant(Constant *C) const {
    // In the ROPI and RWPI relocation models we can't have pointers to global
    // variables or functions in constant data, so don't convert switches to
    // lookup tables if any of the values would need relocation.
    if (ST->isROPI() || ST->isRWPI())
      return !C->needsDynamicRelocation();

    return true;
  }

  bool hasArmWideBranch(bool Thumb) const;

  /// @}
};

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
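/// For example (illustrative): for a v8i8 shuffle, the mask
/// <3, 2, 1, 0, 7, 6, 5, 4> reverses each 32-bit block, so
/// isVREVMask(M, MVT::v8i8, 32) returns true while
/// isVREVMask(M, MVT::v8i8, 64) does not.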
inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz != 8 && EltSz != 16 && EltSz != 32)
    return false;

  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0, e = M.size(); i < e; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H