//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the ARM target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/SubtargetFeature.h"

namespace llvm {

class APInt;
class ARMTargetLowering;
class Instruction;
class Loop;
class SCEV;
class ScalarEvolution;
class Type;
class Value;

class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
  using BaseT = BasicTTIImplBase<ARMTTIImpl>;
  using TTI = TargetTransformInfo;

  friend BaseT;

  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  // Currently the following features are excluded from InlineFeatureWhitelist:
  // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32.
  // Depending on whether they are set or unset, different
  // instructions/registers are available. For example, inlining a callee with
  // -thumb-mode into a caller with +thumb-mode may cause the assembler to
  // fail if the callee uses ARM-only instructions, e.g. in inline asm.
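  //
  // A sketch of how this plays out in IR (hypothetical "target-features"
  // attributes, for illustration only):
  //   define void @caller() #0 { ... }  ; "target-features"="+neon"
  //   define void @callee() #1 { ... }  ; "target-features"="+neon,+fp16"
  // The callee's extra +fp16 bit is whitelisted below, so the two functions
  // are still considered inline-compatible; a +thumb-mode/-thumb-mode
  // mismatch is not, since ModeThumb is deliberately left off the whitelist.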
  const FeatureBitset InlineFeatureWhitelist = {
      ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
      ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
      ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
      ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
      ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
      ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
      ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
      ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
      ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
      ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
      ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
      ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
      ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
      ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
      ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
      ARM::FeatureHasSlowFPVMLx, ARM::FeatureVMLxForwarding,
      ARM::FeaturePref32BitThumb, ARM::FeatureAvoidPartialCPSR,
      ARM::FeatureCheapPredicableCPSR, ARM::FeatureAvoidMOVsShOp,
      ARM::FeatureHasRetAddrStack, ARM::FeatureHasNoBranchPredictor,
      ARM::FeatureDSP, ARM::FeatureMP, ARM::FeatureVirtualization,
      ARM::FeatureMClass, ARM::FeatureRClass, ARM::FeatureAClass,
      ARM::FeatureNaClTrap, ARM::FeatureStrictAlign, ARM::FeatureLongCalls,
      ARM::FeatureExecuteOnly, ARM::FeatureReserveR9, ARM::FeatureNoMovt,
      ARM::FeatureNoNegativeImmediates
  };

  const ARMSubtarget *getST() const { return ST; }
  const ARMTargetLowering *getTLI() const { return TLI; }

public:
  explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
      : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  bool enableInterleavedAccessVectorization() { return true; }

  bool shouldFavorBackedgeIndex(const Loop *L) const {
    if (L->getHeader()->getParent()->hasOptSize())
      return false;
    return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
  }
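  // A sketch of the loop shape the hook above favors (hypothetical C source,
  // for illustration only): a single-block loop on a Thumb2 M-class core,
  // where indexing off the backedge lets the pointer bumps fold into
  // post-indexed loads/stores:
  //   do { *Dst++ = *Src++; } while (--N);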
  /// Floating-point computation using ARMv8 AArch32 Advanced
  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
  /// and Arm MVE are IEEE-754 compliant.
  bool isFPVectorizationPotentiallyUnsafe() {
    return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
  }

  /// \name Scalar TTI Implementations
  /// @{

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty);

  using BaseT::getIntImmCost;
  int getIntImmCost(const APInt &Imm, Type *Ty);

  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const {
    bool Vector = (ClassID == 1);
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      if (ST->hasMVEIntegerOps())
        return 8;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 13;
  }

  unsigned getRegisterBitWidth(bool Vector) const {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      if (ST->hasMVEIntegerOps())
        return 128;
      return 0;
    }

    return 32;
  }

  unsigned getMaxInterleaveFactor(unsigned VF) {
    return ST->getMaxInterleaveFactor();
  }

  bool isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment);

  bool isLegalMaskedStore(Type *DataTy, MaybeAlign Alignment) {
    return isLegalMaskedLoad(DataTy, Alignment);
  }

  int getMemcpyCost(const Instruction *I);

  int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);

  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const;

  bool shouldExpandReduction(const IntrinsicInst *II) const {
    return false;
  }

  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);

  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I = nullptr);

  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);

  int getAddressComputationCost(Type *Val, ScalarEvolution *SE,
                                const SCEV *Ptr);

  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Op1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Op2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>());

  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I = nullptr);

  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace,
                                 bool UseMaskForCond = false,
                                 bool UseMaskForGaps = false);

  bool isLoweredToCall(const Function *F);
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo);

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP);

  bool shouldBuildLookupTablesForConstant(Constant *C) const {
    // In the ROPI and RWPI relocation models we can't have pointers to global
    // variables or functions in constant data, so don't convert switches to
    // lookup tables if any of the values would need relocation.
    if (ST->isROPI() || ST->isRWPI())
      return !C->needsRelocation();

    return true;
  }
  /// @}
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H