//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than a concrete CPU model. Usually the
/// numbers come from the CPU where the feature first appeared. For example,
/// if we do Subtarget.hasSSE42() in the lookups below, the cost is based on
/// Nehalem, as that was the first CPU to support that feature level and thus
/// most likely has the worst-case cost. Some examples of other
/// technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss    sqrtss    rsqrtss
///   AMD K7          11-16    19        3
///   Piledriver      9-24     13-15     5
///   Jaguar          14       16        2
///   Pentium II,III  18       30        2
///   Nehalem         7-14     7-18      3
///   Haswell         10-13    11        5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as
/// throughput, code size, latency and uop count.
//===----------------------------------------------------------------------===//
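// (Illustrative example of the scheme above: a query such as
// getArithmeticInstrCost(FDiv, <4 x float>) on a subtarget whose best
// feature level is SSE4.2 resolves to the Nehalem-derived entry in the
// SSE42CostTable below and returns a throughput cost of 14.)
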
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // vXi8 multiplications are always promoted to vXi16.
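  // (Illustrative example: this means a mul <16 x i8> is costed as a
  // zext <16 x i8> to <16 x i16>, plus a <16 x i16> mul, plus a
  // trunc <16 x i16> back to <16 x i8>.)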
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
    { ISD::FMUL,  MVT::f64,   2  }, // mulsd
    { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64, 2  }, // addpd
    { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64, 4  },
    { ISD::SUB,   MVT::v2i64, 4  },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
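      // (For example, for each 32-bit lane, X sdiv 8 becomes roughly:
      //   T = ashr X, 31; T = lshr T, 29; X = add X, T; X = ashr X, 3.)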
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
      InstructionCost Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    else // UREM
      return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
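
  // (Worked example for the tables above: on an AVX2 target, an
  // ashr <4 x i64> by a uniform constant hits the SRA/v4i64 entry and
  // costs LT.first * 4, i.e. 2 x psrad + shuffle per legalized vector.)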

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
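
  // (Worked example for the block above: sdiv <4 x i32> %x, <i32 7, i32 9,
  // i32 11, i32 13> on an SSE4.1-only target takes the pmuldq special case
  // and returns LT.first * 15.)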

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,  MVT::v16i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,  MVT::v16i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,  MVT::v16i8,   4 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,  MVT::v32i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,  MVT::v32i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,  MVT::v32i8,   6 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,  MVT::v64i8,   6 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,  MVT::v64i8,   7 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,  MVT::v64i8,  15 }, // extend/vpsravw/pack sequence.

    { ISD::SHL,  MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,  MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,  MVT::v8i16,   1 }, // vpsravw
    { ISD::SHL,  MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v16i16,  1 }, // vpsravw
    { ISD::SHL,  MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16,  1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL,  MVT::v8i32,  1 }, // pslld
    { ISD::SRL,  MVT::v8i32,  1 }, // psrld
    { ISD::SRA,  MVT::v8i32,  1 }, // psrad
    { ISD::SHL,  MVT::v4i64,  1 }, // psllq
    { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f64,     4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,   4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,   8 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f64,  16 }, // Skylake from http://www.agner.org/

    { ISD::FNEG, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f32,     3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,   5 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 are legal on AVX2, even though we mark them as
    // custom in order to detect cases where the shift amount is a scalar.
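    // (Illustrative note: when the amount is a uniform splat, the cheaper
    // entries in the uniform tables above are returned before these
    // variable-shift costs are ever consulted.)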
    { ISD::SHL,  MVT::v4i32,  2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,  MVT::v4i32,  2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,  MVT::v4i32,  2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,  MVT::v8i32,  2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,  MVT::v8i32,  2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,  MVT::v8i32,  2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,  MVT::v2i64,  1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,  MVT::v2i64,  1 }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL,  MVT::v4i64,  1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,  MVT::v4i64,  1 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,    1 },
    { ISD::SRL,  MVT::v16i8,    2 },
    { ISD::SRA,  MVT::v16i8,    2 },
    { ISD::SHL,  MVT::v8i16,    1 },
    { ISD::SRL,  MVT::v8i16,    2 },
    { ISD::SRA,  MVT::v8i16,    2 },
    { ISD::SHL,  MVT::v4i32,    1 },
    { ISD::SRL,  MVT::v4i32,    2 },
    { ISD::SRA,  MVT::v4i32,    2 },
    { ISD::SHL,  MVT::v2i64,    1 },
    { ISD::SRL,  MVT::v2i64,    2 },
    { ISD::SRA,  MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2+2 },
    { ISD::SRL,  MVT::v32i8,  4+2 },
    { ISD::SRA,  MVT::v32i8,  4+2 },
    { ISD::SHL,  MVT::v16i16, 2+2 },
    { ISD::SRL,  MVT::v16i16, 4+2 },
    { ISD::SRA,  MVT::v16i16, 4+2 },
    { ISD::SHL,  MVT::v8i32,  2+2 },
    { ISD::SRL,  MVT::v8i32,  4+2 },
    { ISD::SRA,  MVT::v8i32,  4+2 },
    { ISD::SHL,  MVT::v4i64,  2+2 },
    { ISD::SRL,  MVT::v4i64,  4+2 },
    { ISD::SRA,  MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
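    // (Illustrative example: XOP shifts take per-lane amounts where negative
    // values shift right, so a constant right shift by 3 becomes a shift by
    // the constant -3 and is costed via the SHL entries below.)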
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
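
  // (Illustrative example: shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  // is costed as mul %x, <i32 2, i32 4, i32 8, i32 16> after the remap
  // above.)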

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  34 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,   5 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16,  7 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   2 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,     1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,  MVT::v4i64,  12 },

    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    { ISD::SHL,  MVT::v32i8,  22 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,   6 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 13 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,   3 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   9 }, // pslld/paddd/cvttps2dq/pmulld + split
    { ISD::SHL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SHL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRL,  MVT::v32i8,  23 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v32i8,  44 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRA,  MVT::v2i64,   5 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v4i64,  12 }, // Shift each lane + blend + split.

    { ISD::FNEG, MVT::v4f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,  MVT::v2i64,   6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,  10 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,  11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,   4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8,  11 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,  13 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,  16 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,  21 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,  13 }, // pblendvb sequence.

    { ISD::MUL,  MVT::v4i32,   2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,  13 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,  25 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32,  16 }, // pslld/paddd/cvttps2dq/pmuludq.
    { ISD::SHL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,  14 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,  27 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,   8 }, // srl/xor/sub splat+shuffle sequence.

    { ISD::MUL,  MVT::v8i16,   1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,   6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,    23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,   1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,    17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD,  MVT::i8,   1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,  1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,  1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,   1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,  1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,  1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyways so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
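  // (Illustrative example: an sdiv <4 x i32> on a plain SSE2 target falls
  // through to here and returns 20 * LT.first * 4 * the scalar i32 sdiv
  // cost, with LT.first == 1 for this legal-width vector.)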
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *BaseTp,
                                           ArrayRef<int> Mask, int Index,
                                           VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  Kind = improveShuffleKindFromMask(Kind, Mask);
  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        InstructionCost ExtractCost = getShuffleCost(
            TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);

        // If the original size is 32-bits or more, we can use pshufd. Otherwise
        // if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }

  // Subvector insertions are cheap if the subvectors are aligned.
  // Note that in general, the insertion starting at the beginning of a vector
  // isn't free, because we need to preserve the rest of the wide vector.
  if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }
  }

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
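  // (Illustrative example: on plain SSE2, a broadcast of an illegal v2i16 is
  // a single pshuflw per the table below.)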
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      InstructionCost NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
                                            None, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
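    // (Worked example: a two-input shuffle of v32i8 on plain SSE2 legalizes
    // to LT.first == 2, so the computation below yields 2 dests * (2*2-1)
    // = 6 legalized shuffles, each costed from the SSE2 table.)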
    InstructionCost NumOfDests = LT.first;
    InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    {TTI::SK_Reverse,          MVT::v64i8, 1}, // vpermb
    {TTI::SK_Reverse,          MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v32i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v16i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v64i8,  2}, // pshufb + vshufi64x2

    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16

    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19}, // 6 * v32i8 + 1

    {TTI::SK_Select,           MVT::v32i16, 1}, // vblendmw
    {TTI::SK_Select,           MVT::v64i8,  1}, // vblendmb
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v8f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast,        MVT::v16f32, 1}, // vbroadcastps
    {TTI::SK_Broadcast,        MVT::v8i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast,        MVT::v16i32, 1}, // vpbroadcastd
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v8f64,  1}, // vpermpd
    {TTI::SK_Reverse,          MVT::v16f32, 1}, // vpermps
    {TTI::SK_Reverse,          MVT::v8i64,  1}, // vpermq
    {TTI::SK_Reverse,          MVT::v16i32, 1}, // vpermd
    {TTI::SK_Reverse,          MVT::v32i16, 7}, // per mca
    {TTI::SK_Reverse,          MVT::v64i8,  7}, // per mca

    {TTI::SK_PermuteSingleSrc, MVT::v8f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v2f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v2i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v4i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i8,  1}, // pshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1}, // vpermt2d

    // FIXME: This just applies the type legalization cost rules above
    // assuming these completely split.
    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},

    {TTI::SK_Select,           MVT::v32i16, 1}, // vpternlogq
    {TTI::SK_Select,           MVT::v64i8,  1}, // vpternlogq
    {TTI::SK_Select,           MVT::v8f64,  1}, // vblendmpd
    {TTI::SK_Select,           MVT::v16f32, 1}, // vblendmps
    {TTI::SK_Select,           MVT::v8i64,  1}, // vblendmq
    {TTI::SK_Select,           MVT::v16i32, 1}, // vblendmd
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    {TTI::SK_Broadcast, MVT::v4f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast, MVT::v8f32,  1}, // vbroadcastps
    {TTI::SK_Broadcast, MVT::v4i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast, MVT::v8i32,  1}, // vpbroadcastd
    {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast, MVT::v32i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_Reverse, MVT::v8f32,  1}, // vpermps
    {TTI::SK_Reverse, MVT::v4i64,  1}, // vpermq
    {TTI::SK_Reverse, MVT::v8i32,  1}, // vpermd
    {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
    {TTI::SK_Reverse, MVT::v32i8,  2}, // vperm2i128 + pshufb

    {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
    {TTI::SK_Select, MVT::v32i8,  1}, // vpblendvb

    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
                                                // + vpblendvb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vperm2i128 + 2*vpshufb
                                                // + vpblendvb

    {TTI::SK_PermuteTwoSrc, MVT::v4f64,  3}, // 2*vpermpd + vblendpd
    {TTI::SK_PermuteTwoSrc, MVT::v8f32,  3}, // 2*vpermps + vblendps
    {TTI::SK_PermuteTwoSrc, MVT::v4i64,  3}, // 2*vpermq + vpblendd
    {TTI::SK_PermuteTwoSrc, MVT::v8i32,  3}, // 2*vpermd + vpblendd
    {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
                                             // + vpblendvb
    {TTI::SK_PermuteTwoSrc, MVT::v32i8,  7}, // 2*vperm2i128 + 4*vpshufb
                                             // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
// + vinsertf128 1351 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm 1352 // + vinsertf128 1353 1354 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm 1355 // + vinsertf128 1356 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm 1357 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm 1358 // + vinsertf128 1359 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm 1360 }; 1361 1362 if (ST->hasXOP()) 1363 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 1364 return LT.first * Entry->Cost; 1365 1366 static const CostTblEntry AVX1ShuffleTbl[] = { 1367 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1368 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1369 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1370 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1371 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 1372 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 1373 1374 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1375 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1376 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1377 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1378 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb 1379 // + vinsertf128 1380 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb 1381 // + vinsertf128 1382 1383 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd 1384 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd 1385 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps 1386 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps 1387 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor 1388 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor 1389 1390 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd 1391 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd 1392 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1393 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1394 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb 1395 // + 2*por + vinsertf128 1396 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb 1397 // + 2*por + vinsertf128 1398 1399 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd 1400 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd 1401 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1402 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1403 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb 1404 // + 4*por + vinsertf128 1405 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb 1406 // + 4*por + vinsertf128 1407 }; 1408 1409 if (ST->hasAVX()) 1410 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1411 return LT.first * Entry->Cost; 1412 1413 static const CostTblEntry SSE41ShuffleTbl[] = { 1414 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw 1415 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1416 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw 1417 {TTI::SK_Select, MVT::v4f32, 1}, // blendps 1418 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw 1419 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb 1420 }; 1421 1422 if (ST->hasSSE41()) 1423 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1424 return LT.first * Entry->Cost; 1425 1426 static const CostTblEntry SSSE3ShuffleTbl[] = { 1427 
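  // Worked example of how the lookups above compose (illustrative only, not
  // an additional table entry): an SK_Reverse shuffle of v32i16 on an
  // SSSE3-only target legalizes by splitting into four v8i16 halves
  // (LT.first == 4); each half is reversed with a single pshufb
  // (Entry->Cost == 1 in the table below), so the reported cost is 4 * 1 = 4.
  // As with the FIXME above, any cross-half rearrangement is not counted.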
  static const CostTblEntry SSSE3ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por

      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
      {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd

      {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
      {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

      {TTI::SK_Select, MVT::v2i64, 1}, // movsd
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
      {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
      {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por

      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5},  // 2*pshuflw + 2*pshufhw
                                                  // + pshufd/unpck
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck
                                                  // + 2*packus

      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 8},  // blend+permute
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f32, 1},        // shufps
      {TTI::SK_Reverse, MVT::v4f32, 1},          // shufps
      {TTI::SK_Select, MVT::v4f32, 2},           // 2*shufps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // shufps
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 2},    // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
}
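// For illustration only (hypothetical IR, not taken from any test): the
// shuffle costs above are what callers such as the vectorizers see for IR
// like
//   %r = shufflevector <16 x i8> %a, <16 x i8> undef,
//                      <16 x i32> <i32 15, i32 14, ..., i32 0>
// which classifies as TTI::SK_Reverse on MVT::v16i8 and, on an SSSE3 target,
// is costed as a single pshufb.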
InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  // The cost tables include both specific, custom (non-legal) src/dst type
  // conversions and generic, legalized types. We test for custom conversions
  // first, before falling back to legalization.
  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },

    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },    // widen to zmm
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },   // widen to zmm
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 },   // vpmovwb
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },    // widen to zmm
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },   // widen to zmm
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },   // vpmovwb
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },    // widen to zmm
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },   // widen to zmm
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },   // vpmovwb
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },  // widen to zmm
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },  // widen to zmm
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
    { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
  };

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };
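  // A hedged example of how these entries are consumed (assuming an AVX512DQ
  // target): "sitofp <8 x i64> %x to <8 x double>" maps to ISD::SINT_TO_FP
  // with dst v8f64 and src v8i64, hitting the entry above for a cost of 1
  // (a single vcvtqq2pd).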
  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
    { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },

    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 },     // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 },     // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 },     // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 },   // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 },    // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 },    // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 },    // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 },  // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 },    // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },    // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },    // zmm vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 },  // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },    // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },    // zmm vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 },    // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 },    // vpmovdb
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 },    // vpmovdb
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 },  // vpmovdb
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw
    { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 },    // vpmovqb
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 },   // vpshufb
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 },    // vpmovqb
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 },   // vpmovqw
    { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },   // vpmovqd
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },   // zmm vpmovqd
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },  // 2*vpmovqd+concat+vpmovdb

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
    { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },

    // Sign extend is zmm vpternlogd+vptruncdb.
    // Zero extend is zmm broadcast load+vptruncdw.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },

    // Sign extend is zmm vpternlogd+vptruncdw.
    // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 },  // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 },  // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },  // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },  // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },  // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },  // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },  // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },  // zmm vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },  // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },  // zmm vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 },   // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 },   // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },

    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right

    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },

    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64, 15 },
    { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32, 11 },
    { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64, 31 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
    { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
    { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64, 15 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
    { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },

    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
  };

  static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },    // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },   // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },    // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },   // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },    // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },   // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },  // vpsllw+vptestmb
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },  // vpsllw+vptestmb
  };

  static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 },    // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 },    // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 },    // sext+vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 },  // split+2*v8i8
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 },   // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 },   // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 },   // sext+vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 },   // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },   // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },   // vpslld+vptestmd
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },   // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },   // vpsllq+vptestmq
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },  // vpmovqd
    { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },   // vpmovqb
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },  // vpmovqw
    { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },   // vpmovdb

    // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
    { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },

    // sign extend is vpcmpeq+maskedmove+vpmovdw
    // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },

    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },

    { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },

    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },

    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },

    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },

    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
    { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },

    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 },  // and+extract+2*packusdw
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },

    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },

    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },

    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },

    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    // These truncates end up widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 },  // PMOVZXBQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 },  // PMOVZXBD

    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },

    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },

    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by comparing the
    // output of llvm-mca for our various supported scheduler models
    // and basing it off the worst case scenario.
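    // (For reference, the kind of check meant above, with a hypothetical
    // input file: `llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=haswell
    // cvts.s` reports per-instruction reciprocal throughputs, which these
    // entries approximate from the worst of the supported models.)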
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },

    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },

    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },

    // These truncates are really widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 },  // PSHUFD
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },  // PUNPCKLWD+DQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 },   // PUNPCKLBW+WD+PSHUFD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 },  // PUNPCKLWD
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },   // PUNPCKLBW+WD
    { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 },   // PUNPCKLBW

    { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 },  // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 },  // PAND+2*PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 },  // PAND+3*PACKUSWB
    { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 },  // PSHUFD+PSHUFLW
    { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 },  // PSHUFD
  };

  // Attempt to map directly to (simple) MVT types to let us match custom
  // entries.
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (SrcTy.isSimple() && DstTy.isSimple()) {
    MVT SimpleSrcTy = SrcTy.getSimpleVT();
    MVT SimpleDstTy = DstTy.getSimpleVT();

    if (ST->useAVX512Regs()) {
      if (ST->hasBWI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasDQI())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);

      if (ST->hasAVX512())
        if (const auto *Entry = ConvertCostTableLookup(
                AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
          return AdjustCost(Entry->Cost);
    }

    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX2()) {
      if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasAVX()) {
      if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE41()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }

    if (ST->hasSSE2()) {
      if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
    }
  }
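  // An illustrative walk-through of the simple-type stage above (example
  // values only): for "fptoui <4 x float> %x to <4 x i32>" on an AVX2 target,
  // both types are simple, so the AVX2ConversionTbl entry
  // { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 } matches directly and
  // AdjustCost(3) is returned without any legalization step.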
  // Fall back to legalized types.
  std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<InstructionCost, MVT> LTDest =
      TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  // Fallback: for i8/i16 sitofp/uitofp cases we need to extend the source to
  // i32 and use sitofp from there.
  if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
      1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
    Type *ExtSrc = Src->getWithNewBitWidth(32);
    unsigned ExtOpc =
        (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;

    // For scalar loads the extend would be free.
    InstructionCost ExtCost = 0;
    if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
      ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);

    return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
                                      TTI::CastContextHint::None, CostKind);
  }
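  // Example of the widening fallback above (illustrative numbers only): for
  // "sitofp i16 %x to float" with no table match, the cost is computed as
  // SExt(i16 -> i32) plus SIToFP(i32 -> float); when the i16 source is a
  // direct load the extend folds into the load and ExtCost stays 0.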
  // Fallback: for fptosi/fptoui i8/i16 cases we compute the conversion as an
  // i32 fptosi and truncate the result.
  if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
      1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
    Type *TruncDst = Dst->getWithNewBitWidth(32);
    return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
           getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
                            TTI::CastContextHint::None, CostKind);
  }

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
    // Some vector comparison predicates cost extra instructions.
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      switch (cast<CmpInst>(I)->getPredicate()) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      default:
        break;
      }
    }
  }
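  // Worked example for ExtraCost (assuming a plain SSE2 target): an ICMP_UGE
  // compare of <16 x i8> has no native predicate, so it lowers roughly as
  // cmpeq(pminu(x,y),x); that is ExtraCost == 1 on top of the SSE2 SETCC
  // v16i8 entry below, for a total of LT.first * (1 + 1) == 2.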
  static const CostTblEntry SLMCostTbl[] = {
    // slm pcmpeq/pcmpgt throughput is 2
    { ISD::SETCC, MVT::v2i64, 2 },
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, 1 },
    { ISD::SETCC, MVT::v64i8, 1 },

    { ISD::SELECT, MVT::v32i16, 1 },
    { ISD::SELECT, MVT::v64i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },

    { ISD::SELECT, MVT::v8i64, 1 },
    { ISD::SELECT, MVT::v16i32, 1 },
    { ISD::SELECT, MVT::v8f64, 1 },
    { ISD::SELECT, MVT::v16f32, 1 },

    { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
    { ISD::SETCC, MVT::v64i8, 2 },  // FIXME: should probably be 4

    { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
    { ISD::SELECT, MVT::v64i8, 2 },  // FIXME: should be 3
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },

    { ISD::SELECT, MVT::v4i64, 1 },  // pblendvb
    { ISD::SELECT, MVT::v8i32, 1 },  // pblendvb
    { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v32i8, 1 },  // pblendvb
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },

    { ISD::SELECT, MVT::v4f64, 1 },  // vblendvpd
    { ISD::SELECT, MVT::v8f32, 1 },  // vblendvps
    { ISD::SELECT, MVT::v4i64, 1 },  // vblendvpd
    { ISD::SELECT, MVT::v8i32, 1 },  // vblendvps
    { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
    { ISD::SELECT, MVT::v32i8, 3 },  // vandps + vandnps + vorps
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
    { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
    { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
  };

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 2 },
    { ISD::SETCC, MVT::f64, 1 },
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },

    { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
    { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
  };

  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::SETCC, MVT::v4f32, 2 },
    { ISD::SETCC, MVT::f32, 1 },

    { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
  };

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

InstructionCost
X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {

  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll

  // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
  // specialized in these tables yet.
  static const CostTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ, MVT::v8i64, 1 },
    { ISD::CTLZ, MVT::v16i32, 1 },
    { ISD::CTLZ, MVT::v32i16, 8 },
    { ISD::CTLZ, MVT::v64i8, 20 },
    { ISD::CTLZ, MVT::v4i64, 1 },
    { ISD::CTLZ, MVT::v8i32, 1 },
    { ISD::CTLZ, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v32i8, 10 },
    { ISD::CTLZ, MVT::v2i64, 1 },
    { ISD::CTLZ, MVT::v4i32, 1 },
    { ISD::CTLZ, MVT::v8i16, 4 },
    { ISD::CTLZ, MVT::v16i8, 4 },
  };
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::ABS, MVT::v32i16, 1 },
    { ISD::ABS, MVT::v64i8, 1 },
    { ISD::BITREVERSE, MVT::v8i64, 5 },
    { ISD::BITREVERSE, MVT::v16i32, 5 },
    { ISD::BITREVERSE, MVT::v32i16, 5 },
    { ISD::BITREVERSE, MVT::v64i8, 5 },
    { ISD::BSWAP, MVT::v8i64, 1 },
    { ISD::BSWAP, MVT::v16i32, 1 },
    { ISD::BSWAP, MVT::v32i16, 1 },
    { ISD::CTLZ, MVT::v8i64, 23 },
    { ISD::CTLZ, MVT::v16i32, 22 },
    { ISD::CTLZ, MVT::v32i16, 18 },
    { ISD::CTLZ, MVT::v64i8, 17 },
    { ISD::CTPOP, MVT::v8i64, 7 },
    { ISD::CTPOP, MVT::v16i32, 11 },
    { ISD::CTPOP, MVT::v32i16, 9 },
    { ISD::CTPOP, MVT::v64i8, 6 },
    { ISD::CTTZ, MVT::v8i64, 10 },
    { ISD::CTTZ, MVT::v16i32, 14 },
    { ISD::CTTZ, MVT::v32i16, 12 },
    { ISD::CTTZ, MVT::v64i8, 9 },
    { ISD::SADDSAT, MVT::v32i16, 1 },
    { ISD::SADDSAT, MVT::v64i8, 1 },
    { ISD::SMAX, MVT::v32i16, 1 },
    { ISD::SMAX, MVT::v64i8, 1 },
    { ISD::SMIN, MVT::v32i16, 1 },
    { ISD::SMIN, MVT::v64i8, 1 },
    { ISD::SSUBSAT, MVT::v32i16, 1 },
    { ISD::SSUBSAT, MVT::v64i8, 1 },
    { ISD::UADDSAT, MVT::v32i16, 1 },
    { ISD::UADDSAT, MVT::v64i8, 1 },
    { ISD::UMAX, MVT::v32i16, 1 },
    { ISD::UMAX, MVT::v64i8, 1 },
    { ISD::UMIN, MVT::v32i16, 1 },
    { ISD::UMIN, MVT::v64i8, 1 },
    { ISD::USUBSAT, MVT::v32i16, 1 },
    { ISD::USUBSAT, MVT::v64i8, 1 },
  };
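  // Illustrative use of the table above (hypothetical IR): with AVX512BW but
  // no AVX512VPOPCNTDQ, "call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %x)"
  // matches { ISD::CTPOP, MVT::v8i64, 7 }, roughly the vpshufb nibble-LUT
  // popcount expansion exercised by the CodeGen tests listed above.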
ISD::CTPOP, MVT::v64i8, 12 }, 2634 { ISD::CTTZ, MVT::v8i64, 20 }, 2635 { ISD::CTTZ, MVT::v16i32, 28 }, 2636 { ISD::CTTZ, MVT::v32i16, 24 }, 2637 { ISD::CTTZ, MVT::v64i8, 18 }, 2638 { ISD::SMAX, MVT::v8i64, 1 }, 2639 { ISD::SMAX, MVT::v16i32, 1 }, 2640 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split 2641 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split 2642 { ISD::SMAX, MVT::v4i64, 1 }, 2643 { ISD::SMAX, MVT::v2i64, 1 }, 2644 { ISD::SMIN, MVT::v8i64, 1 }, 2645 { ISD::SMIN, MVT::v16i32, 1 }, 2646 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split 2647 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split 2648 { ISD::SMIN, MVT::v4i64, 1 }, 2649 { ISD::SMIN, MVT::v2i64, 1 }, 2650 { ISD::UMAX, MVT::v8i64, 1 }, 2651 { ISD::UMAX, MVT::v16i32, 1 }, 2652 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split 2653 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split 2654 { ISD::UMAX, MVT::v4i64, 1 }, 2655 { ISD::UMAX, MVT::v2i64, 1 }, 2656 { ISD::UMIN, MVT::v8i64, 1 }, 2657 { ISD::UMIN, MVT::v16i32, 1 }, 2658 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split 2659 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split 2660 { ISD::UMIN, MVT::v4i64, 1 }, 2661 { ISD::UMIN, MVT::v2i64, 1 }, 2662 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd 2663 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq 2664 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq 2665 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq 2666 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd 2667 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq 2668 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq 2669 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq 2670 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split 2671 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split 2672 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split 2673 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split 2674 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split 2675 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split 2676 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split 2677 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split 2678 { ISD::FMAXNUM, MVT::f32, 2 }, 2679 { ISD::FMAXNUM, MVT::v4f32, 2 }, 2680 { ISD::FMAXNUM, MVT::v8f32, 2 }, 2681 { ISD::FMAXNUM, MVT::v16f32, 2 }, 2682 { ISD::FMAXNUM, MVT::f64, 2 }, 2683 { ISD::FMAXNUM, MVT::v2f64, 2 }, 2684 { ISD::FMAXNUM, MVT::v4f64, 2 }, 2685 { ISD::FMAXNUM, MVT::v8f64, 2 }, 2686 }; 2687 static const CostTblEntry XOPCostTbl[] = { 2688 { ISD::BITREVERSE, MVT::v4i64, 4 }, 2689 { ISD::BITREVERSE, MVT::v8i32, 4 }, 2690 { ISD::BITREVERSE, MVT::v16i16, 4 }, 2691 { ISD::BITREVERSE, MVT::v32i8, 4 }, 2692 { ISD::BITREVERSE, MVT::v2i64, 1 }, 2693 { ISD::BITREVERSE, MVT::v4i32, 1 }, 2694 { ISD::BITREVERSE, MVT::v8i16, 1 }, 2695 { ISD::BITREVERSE, MVT::v16i8, 1 }, 2696 { ISD::BITREVERSE, MVT::i64, 3 }, 2697 { ISD::BITREVERSE, MVT::i32, 3 }, 2698 { ISD::BITREVERSE, MVT::i16, 3 }, 2699 { ISD::BITREVERSE, MVT::i8, 3 } 2700 }; 2701 static const CostTblEntry AVX2CostTbl[] = { 2702 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2703 { ISD::ABS, MVT::v8i32, 1 }, 2704 { ISD::ABS, MVT::v16i16, 1 }, 2705 { ISD::ABS, MVT::v32i8, 1 }, 2706 { ISD::BITREVERSE, MVT::v4i64, 5 }, 2707 { ISD::BITREVERSE, MVT::v8i32, 5 }, 2708 { ISD::BITREVERSE, MVT::v16i16, 5 }, 2709 { ISD::BITREVERSE, MVT::v32i8, 5 }, 2710 { ISD::BSWAP, MVT::v4i64, 1 }, 2711 { ISD::BSWAP, MVT::v8i32, 1 }, 2712 { ISD::BSWAP, MVT::v16i16, 1 }, 2713 { ISD::CTLZ, 
MVT::v4i64, 23 }, 2714 { ISD::CTLZ, MVT::v8i32, 18 }, 2715 { ISD::CTLZ, MVT::v16i16, 14 }, 2716 { ISD::CTLZ, MVT::v32i8, 9 }, 2717 { ISD::CTPOP, MVT::v4i64, 7 }, 2718 { ISD::CTPOP, MVT::v8i32, 11 }, 2719 { ISD::CTPOP, MVT::v16i16, 9 }, 2720 { ISD::CTPOP, MVT::v32i8, 6 }, 2721 { ISD::CTTZ, MVT::v4i64, 10 }, 2722 { ISD::CTTZ, MVT::v8i32, 14 }, 2723 { ISD::CTTZ, MVT::v16i16, 12 }, 2724 { ISD::CTTZ, MVT::v32i8, 9 }, 2725 { ISD::SADDSAT, MVT::v16i16, 1 }, 2726 { ISD::SADDSAT, MVT::v32i8, 1 }, 2727 { ISD::SMAX, MVT::v8i32, 1 }, 2728 { ISD::SMAX, MVT::v16i16, 1 }, 2729 { ISD::SMAX, MVT::v32i8, 1 }, 2730 { ISD::SMIN, MVT::v8i32, 1 }, 2731 { ISD::SMIN, MVT::v16i16, 1 }, 2732 { ISD::SMIN, MVT::v32i8, 1 }, 2733 { ISD::SSUBSAT, MVT::v16i16, 1 }, 2734 { ISD::SSUBSAT, MVT::v32i8, 1 }, 2735 { ISD::UADDSAT, MVT::v16i16, 1 }, 2736 { ISD::UADDSAT, MVT::v32i8, 1 }, 2737 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd 2738 { ISD::UMAX, MVT::v8i32, 1 }, 2739 { ISD::UMAX, MVT::v16i16, 1 }, 2740 { ISD::UMAX, MVT::v32i8, 1 }, 2741 { ISD::UMIN, MVT::v8i32, 1 }, 2742 { ISD::UMIN, MVT::v16i16, 1 }, 2743 { ISD::UMIN, MVT::v32i8, 1 }, 2744 { ISD::USUBSAT, MVT::v16i16, 1 }, 2745 { ISD::USUBSAT, MVT::v32i8, 1 }, 2746 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd 2747 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2748 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2749 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 2750 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 2751 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 2752 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 2753 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 2754 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 2755 }; 2756 static const CostTblEntry AVX1CostTbl[] = { 2757 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2758 { ISD::ABS, MVT::v8i32, 3 }, 2759 { ISD::ABS, MVT::v16i16, 3 }, 2760 { ISD::ABS, MVT::v32i8, 3 }, 2761 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 2762 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 2763 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 2764 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 2765 { ISD::BSWAP, MVT::v4i64, 4 }, 2766 { ISD::BSWAP, MVT::v8i32, 4 }, 2767 { ISD::BSWAP, MVT::v16i16, 4 }, 2768 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 2769 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 2770 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 2771 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2772 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 2773 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 2774 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 2775 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 2776 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 2777 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 2778 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 2779 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2780 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2781 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2782 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + 
extract/insert 2783 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2784 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2785 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2786 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2787 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2788 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2789 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2790 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2791 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2792 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert 2793 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2794 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2795 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2796 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2797 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2798 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2799 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2800 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2801 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert 2802 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS 2803 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2804 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? 2805 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD 2806 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2807 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? 
2808 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 2809 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 2810 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 2811 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 2812 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 2813 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 2814 }; 2815 static const CostTblEntry GLMCostTbl[] = { 2816 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 2817 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 2818 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 2819 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 2820 }; 2821 static const CostTblEntry SLMCostTbl[] = { 2822 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 2823 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 2824 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 2825 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 2826 }; 2827 static const CostTblEntry SSE42CostTbl[] = { 2828 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd 2829 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd 2830 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 2831 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 2832 }; 2833 static const CostTblEntry SSE41CostTbl[] = { 2834 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) 2835 { ISD::SMAX, MVT::v4i32, 1 }, 2836 { ISD::SMAX, MVT::v16i8, 1 }, 2837 { ISD::SMIN, MVT::v4i32, 1 }, 2838 { ISD::SMIN, MVT::v16i8, 1 }, 2839 { ISD::UMAX, MVT::v4i32, 1 }, 2840 { ISD::UMAX, MVT::v8i16, 1 }, 2841 { ISD::UMIN, MVT::v4i32, 1 }, 2842 { ISD::UMIN, MVT::v8i16, 1 }, 2843 }; 2844 static const CostTblEntry SSSE3CostTbl[] = { 2845 { ISD::ABS, MVT::v4i32, 1 }, 2846 { ISD::ABS, MVT::v8i16, 1 }, 2847 { ISD::ABS, MVT::v16i8, 1 }, 2848 { ISD::BITREVERSE, MVT::v2i64, 5 }, 2849 { ISD::BITREVERSE, MVT::v4i32, 5 }, 2850 { ISD::BITREVERSE, MVT::v8i16, 5 }, 2851 { ISD::BITREVERSE, MVT::v16i8, 5 }, 2852 { ISD::BSWAP, MVT::v2i64, 1 }, 2853 { ISD::BSWAP, MVT::v4i32, 1 }, 2854 { ISD::BSWAP, MVT::v8i16, 1 }, 2855 { ISD::CTLZ, MVT::v2i64, 23 }, 2856 { ISD::CTLZ, MVT::v4i32, 18 }, 2857 { ISD::CTLZ, MVT::v8i16, 14 }, 2858 { ISD::CTLZ, MVT::v16i8, 9 }, 2859 { ISD::CTPOP, MVT::v2i64, 7 }, 2860 { ISD::CTPOP, MVT::v4i32, 11 }, 2861 { ISD::CTPOP, MVT::v8i16, 9 }, 2862 { ISD::CTPOP, MVT::v16i8, 6 }, 2863 { ISD::CTTZ, MVT::v2i64, 10 }, 2864 { ISD::CTTZ, MVT::v4i32, 14 }, 2865 { ISD::CTTZ, MVT::v8i16, 12 }, 2866 { ISD::CTTZ, MVT::v16i8, 9 } 2867 }; 2868 static const CostTblEntry SSE2CostTbl[] = { 2869 { ISD::ABS, MVT::v2i64, 4 }, 2870 { ISD::ABS, MVT::v4i32, 3 }, 2871 { ISD::ABS, MVT::v8i16, 2 }, 2872 { ISD::ABS, MVT::v16i8, 2 }, 2873 { ISD::BITREVERSE, MVT::v2i64, 29 }, 2874 { ISD::BITREVERSE, MVT::v4i32, 27 }, 2875 { ISD::BITREVERSE, MVT::v8i16, 27 }, 2876 { ISD::BITREVERSE, MVT::v16i8, 20 }, 2877 { ISD::BSWAP, MVT::v2i64, 7 }, 2878 { ISD::BSWAP, MVT::v4i32, 7 }, 2879 { ISD::BSWAP, MVT::v8i16, 7 }, 2880 { ISD::CTLZ, MVT::v2i64, 25 }, 2881 { ISD::CTLZ, MVT::v4i32, 26 }, 2882 { ISD::CTLZ, MVT::v8i16, 20 }, 2883 { ISD::CTLZ, MVT::v16i8, 17 }, 2884 { ISD::CTPOP, MVT::v2i64, 12 }, 2885 { ISD::CTPOP, MVT::v4i32, 15 }, 2886 { ISD::CTPOP, MVT::v8i16, 13 }, 2887 { ISD::CTPOP, MVT::v16i8, 10 }, 2888 { ISD::CTTZ, MVT::v2i64, 14 }, 2889 { ISD::CTTZ, MVT::v4i32, 18 }, 2890 { ISD::CTTZ, MVT::v8i16, 16 }, 2891 { ISD::CTTZ, MVT::v16i8, 13 }, 2892 { ISD::SADDSAT, MVT::v8i16, 1 }, 2893 { ISD::SADDSAT, MVT::v16i8, 1 }, 2894 { ISD::SMAX, MVT::v8i16, 1 }, 2895 { ISD::SMIN, MVT::v8i16, 1 }, 2896 { 
ISD::SSUBSAT, MVT::v8i16, 1 }, 2897 { ISD::SSUBSAT, MVT::v16i8, 1 }, 2898 { ISD::UADDSAT, MVT::v8i16, 1 }, 2899 { ISD::UADDSAT, MVT::v16i8, 1 }, 2900 { ISD::UMAX, MVT::v8i16, 2 }, 2901 { ISD::UMAX, MVT::v16i8, 1 }, 2902 { ISD::UMIN, MVT::v8i16, 2 }, 2903 { ISD::UMIN, MVT::v16i8, 1 }, 2904 { ISD::USUBSAT, MVT::v8i16, 1 }, 2905 { ISD::USUBSAT, MVT::v16i8, 1 }, 2906 { ISD::FMAXNUM, MVT::f64, 4 }, 2907 { ISD::FMAXNUM, MVT::v2f64, 4 }, 2908 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 2909 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 2910 }; 2911 static const CostTblEntry SSE1CostTbl[] = { 2912 { ISD::FMAXNUM, MVT::f32, 4 }, 2913 { ISD::FMAXNUM, MVT::v4f32, 4 }, 2914 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 2915 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 2916 }; 2917 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets 2918 { ISD::CTTZ, MVT::i64, 1 }, 2919 }; 2920 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets 2921 { ISD::CTTZ, MVT::i32, 1 }, 2922 { ISD::CTTZ, MVT::i16, 1 }, 2923 { ISD::CTTZ, MVT::i8, 1 }, 2924 }; 2925 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets 2926 { ISD::CTLZ, MVT::i64, 1 }, 2927 }; 2928 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets 2929 { ISD::CTLZ, MVT::i32, 1 }, 2930 { ISD::CTLZ, MVT::i16, 1 }, 2931 { ISD::CTLZ, MVT::i8, 1 }, 2932 }; 2933 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets 2934 { ISD::CTPOP, MVT::i64, 1 }, 2935 }; 2936 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets 2937 { ISD::CTPOP, MVT::i32, 1 }, 2938 { ISD::CTPOP, MVT::i16, 1 }, 2939 { ISD::CTPOP, MVT::i8, 1 }, 2940 }; 2941 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 2942 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV 2943 { ISD::BITREVERSE, MVT::i64, 14 }, 2944 { ISD::BSWAP, MVT::i64, 1 }, 2945 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV 2946 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH 2947 { ISD::CTPOP, MVT::i64, 10 }, 2948 { ISD::SADDO, MVT::i64, 1 }, 2949 { ISD::UADDO, MVT::i64, 1 }, 2950 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto 2951 }; 2952 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 2953 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV 2954 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV 2955 { ISD::BITREVERSE, MVT::i32, 14 }, 2956 { ISD::BITREVERSE, MVT::i16, 14 }, 2957 { ISD::BITREVERSE, MVT::i8, 11 }, 2958 { ISD::BSWAP, MVT::i32, 1 }, 2959 { ISD::BSWAP, MVT::i16, 1 }, // ROL 2960 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV 2961 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV 2962 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV 2963 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH 2964 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH 2965 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH 2966 { ISD::CTPOP, MVT::i32, 8 }, 2967 { ISD::CTPOP, MVT::i16, 9 }, 2968 { ISD::CTPOP, MVT::i8, 7 }, 2969 { ISD::SADDO, MVT::i32, 1 }, 2970 { ISD::SADDO, MVT::i16, 1 }, 2971 { ISD::SADDO, MVT::i8, 1 }, 2972 { ISD::UADDO, MVT::i32, 1 }, 2973 { ISD::UADDO, MVT::i16, 1 }, 2974 { ISD::UADDO, MVT::i8, 1 }, 2975 { ISD::UMULO, MVT::i32, 2 }, // mul + seto 2976 { ISD::UMULO, MVT::i16, 2 }, 2977 { ISD::UMULO, MVT::i8, 2 }, 2978 }; 2979 2980 Type *RetTy = ICA.getReturnType(); 2981 Type *OpTy = RetTy; 2982 Intrinsic::ID IID = ICA.getID(); 2983 unsigned ISD = ISD::DELETED_NODE; 2984 switch (IID) { 2985 default: 2986 break; 2987 case 
Intrinsic::abs: 2988 ISD = ISD::ABS; 2989 break; 2990 case Intrinsic::bitreverse: 2991 ISD = ISD::BITREVERSE; 2992 break; 2993 case Intrinsic::bswap: 2994 ISD = ISD::BSWAP; 2995 break; 2996 case Intrinsic::ctlz: 2997 ISD = ISD::CTLZ; 2998 break; 2999 case Intrinsic::ctpop: 3000 ISD = ISD::CTPOP; 3001 break; 3002 case Intrinsic::cttz: 3003 ISD = ISD::CTTZ; 3004 break; 3005 case Intrinsic::maxnum: 3006 case Intrinsic::minnum: 3007 // FMINNUM has same costs so don't duplicate. 3008 ISD = ISD::FMAXNUM; 3009 break; 3010 case Intrinsic::sadd_sat: 3011 ISD = ISD::SADDSAT; 3012 break; 3013 case Intrinsic::smax: 3014 ISD = ISD::SMAX; 3015 break; 3016 case Intrinsic::smin: 3017 ISD = ISD::SMIN; 3018 break; 3019 case Intrinsic::ssub_sat: 3020 ISD = ISD::SSUBSAT; 3021 break; 3022 case Intrinsic::uadd_sat: 3023 ISD = ISD::UADDSAT; 3024 break; 3025 case Intrinsic::umax: 3026 ISD = ISD::UMAX; 3027 break; 3028 case Intrinsic::umin: 3029 ISD = ISD::UMIN; 3030 break; 3031 case Intrinsic::usub_sat: 3032 ISD = ISD::USUBSAT; 3033 break; 3034 case Intrinsic::sqrt: 3035 ISD = ISD::FSQRT; 3036 break; 3037 case Intrinsic::sadd_with_overflow: 3038 case Intrinsic::ssub_with_overflow: 3039 // SSUBO has same costs so don't duplicate. 3040 ISD = ISD::SADDO; 3041 OpTy = RetTy->getContainedType(0); 3042 break; 3043 case Intrinsic::uadd_with_overflow: 3044 case Intrinsic::usub_with_overflow: 3045 // USUBO has same costs so don't duplicate. 3046 ISD = ISD::UADDO; 3047 OpTy = RetTy->getContainedType(0); 3048 break; 3049 case Intrinsic::umul_with_overflow: 3050 case Intrinsic::smul_with_overflow: 3051 // SMULO has same costs so don't duplicate. 3052 ISD = ISD::UMULO; 3053 OpTy = RetTy->getContainedType(0); 3054 break; 3055 } 3056 3057 if (ISD != ISD::DELETED_NODE) { 3058 // Legalize the type. 3059 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy); 3060 MVT MTy = LT.second; 3061 3062 // Attempt to lookup cost. 3063 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() && 3064 MTy.isVector()) { 3065 // With PSHUFB the code is very similar for all types. If we have integer 3066 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types 3067 // we also need a PSHUFB. 3068 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2; 3069 3070 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB 3071 // instructions. We also need an extract and an insert. 3072 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) || 3073 (ST->hasBWI() && MTy.is512BitVector()))) 3074 Cost = Cost * 2 + 2; 3075 3076 return LT.first * Cost; 3077 } 3078 3079 auto adjustTableCost = [](const CostTblEntry &Entry, 3080 InstructionCost LegalizationCost, 3081 FastMathFlags FMF) { 3082 // If there are no NANs to deal with, then these are reduced to a 3083 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we 3084 // assume is used in the non-fast case. 
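    // Illustrative example (values taken from the cost tables above, not an
    // exhaustive statement): an llvm.maxnum.v4f32 call carrying the nnan flag
    // is costed as LegalizationCost * 1 (for a legal v4f32, a single MAXPS),
    // whereas the default lowering is modelled by the AVX1 table entry as
    // MAXPS + CMPUNORDPS + BLENDVPS (cost 3).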
3085 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) { 3086 if (FMF.noNaNs()) 3087 return LegalizationCost * 1; 3088 } 3089 return LegalizationCost * (int)Entry.Cost; 3090 }; 3091 3092 if (ST->useGLMDivSqrtCosts()) 3093 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 3094 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3095 3096 if (ST->isSLM()) 3097 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 3098 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3099 3100 if (ST->hasCDI()) 3101 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 3102 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3103 3104 if (ST->hasBWI()) 3105 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 3106 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3107 3108 if (ST->hasAVX512()) 3109 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3110 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3111 3112 if (ST->hasXOP()) 3113 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3114 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3115 3116 if (ST->hasAVX2()) 3117 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 3118 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3119 3120 if (ST->hasAVX()) 3121 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 3122 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3123 3124 if (ST->hasSSE42()) 3125 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 3126 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3127 3128 if (ST->hasSSE41()) 3129 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 3130 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3131 3132 if (ST->hasSSSE3()) 3133 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 3134 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3135 3136 if (ST->hasSSE2()) 3137 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 3138 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3139 3140 if (ST->hasSSE1()) 3141 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 3142 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3143 3144 if (ST->hasBMI()) { 3145 if (ST->is64Bit()) 3146 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) 3147 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3148 3149 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) 3150 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3151 } 3152 3153 if (ST->hasLZCNT()) { 3154 if (ST->is64Bit()) 3155 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) 3156 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3157 3158 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) 3159 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3160 } 3161 3162 if (ST->hasPOPCNT()) { 3163 if (ST->is64Bit()) 3164 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) 3165 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3166 3167 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy)) 3168 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3169 } 3170 3171 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) { 3172 if (const Instruction *II = ICA.getInst()) { 3173 if (II->hasOneUse() && isa<StoreInst>(II->user_back())) 3174 return TTI::TCC_Free; 3175 if (auto *LI = 
dyn_cast<LoadInst>(II->getOperand(0))) { 3176 if (LI->hasOneUse()) 3177 return TTI::TCC_Free; 3178 } 3179 } 3180 } 3181 3182 // TODO - add BMI (TZCNT) scalar handling 3183 3184 if (ST->is64Bit()) 3185 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3186 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3187 3188 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3189 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3190 } 3191 3192 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3193 } 3194 3195 InstructionCost 3196 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 3197 TTI::TargetCostKind CostKind) { 3198 if (ICA.isTypeBasedOnly()) 3199 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 3200 3201 static const CostTblEntry AVX512CostTbl[] = { 3202 { ISD::ROTL, MVT::v8i64, 1 }, 3203 { ISD::ROTL, MVT::v4i64, 1 }, 3204 { ISD::ROTL, MVT::v2i64, 1 }, 3205 { ISD::ROTL, MVT::v16i32, 1 }, 3206 { ISD::ROTL, MVT::v8i32, 1 }, 3207 { ISD::ROTL, MVT::v4i32, 1 }, 3208 { ISD::ROTR, MVT::v8i64, 1 }, 3209 { ISD::ROTR, MVT::v4i64, 1 }, 3210 { ISD::ROTR, MVT::v2i64, 1 }, 3211 { ISD::ROTR, MVT::v16i32, 1 }, 3212 { ISD::ROTR, MVT::v8i32, 1 }, 3213 { ISD::ROTR, MVT::v4i32, 1 } 3214 }; 3215 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) 3216 static const CostTblEntry XOPCostTbl[] = { 3217 { ISD::ROTL, MVT::v4i64, 4 }, 3218 { ISD::ROTL, MVT::v8i32, 4 }, 3219 { ISD::ROTL, MVT::v16i16, 4 }, 3220 { ISD::ROTL, MVT::v32i8, 4 }, 3221 { ISD::ROTL, MVT::v2i64, 1 }, 3222 { ISD::ROTL, MVT::v4i32, 1 }, 3223 { ISD::ROTL, MVT::v8i16, 1 }, 3224 { ISD::ROTL, MVT::v16i8, 1 }, 3225 { ISD::ROTR, MVT::v4i64, 6 }, 3226 { ISD::ROTR, MVT::v8i32, 6 }, 3227 { ISD::ROTR, MVT::v16i16, 6 }, 3228 { ISD::ROTR, MVT::v32i8, 6 }, 3229 { ISD::ROTR, MVT::v2i64, 2 }, 3230 { ISD::ROTR, MVT::v4i32, 2 }, 3231 { ISD::ROTR, MVT::v8i16, 2 }, 3232 { ISD::ROTR, MVT::v16i8, 2 } 3233 }; 3234 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 3235 { ISD::ROTL, MVT::i64, 1 }, 3236 { ISD::ROTR, MVT::i64, 1 }, 3237 { ISD::FSHL, MVT::i64, 4 } 3238 }; 3239 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3240 { ISD::ROTL, MVT::i32, 1 }, 3241 { ISD::ROTL, MVT::i16, 1 }, 3242 { ISD::ROTL, MVT::i8, 1 }, 3243 { ISD::ROTR, MVT::i32, 1 }, 3244 { ISD::ROTR, MVT::i16, 1 }, 3245 { ISD::ROTR, MVT::i8, 1 }, 3246 { ISD::FSHL, MVT::i32, 4 }, 3247 { ISD::FSHL, MVT::i16, 4 }, 3248 { ISD::FSHL, MVT::i8, 4 } 3249 }; 3250 3251 Intrinsic::ID IID = ICA.getID(); 3252 Type *RetTy = ICA.getReturnType(); 3253 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 3254 unsigned ISD = ISD::DELETED_NODE; 3255 switch (IID) { 3256 default: 3257 break; 3258 case Intrinsic::fshl: 3259 ISD = ISD::FSHL; 3260 if (Args[0] == Args[1]) 3261 ISD = ISD::ROTL; 3262 break; 3263 case Intrinsic::fshr: 3264 // FSHR has same costs so don't duplicate. 3265 ISD = ISD::FSHL; 3266 if (Args[0] == Args[1]) 3267 ISD = ISD::ROTR; 3268 break; 3269 } 3270 3271 if (ISD != ISD::DELETED_NODE) { 3272 // Legalize the type. 3273 std::pair<InstructionCost, MVT> LT = 3274 TLI->getTypeLegalizationCost(DL, RetTy); 3275 MVT MTy = LT.second; 3276 3277 // Attempt to lookup cost. 
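    // Worked example (illustrative, using the tables above): on a 64-bit
    // target, call i32 @llvm.fshl.i32(i32 %a, i32 %a, i32 %b) is classified
    // as ROTL and costs 1 (a single rol), while a funnel shift with distinct
    // operands falls through to the FSHL entry and costs 4.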
3278 if (ST->hasAVX512()) 3279 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3280 return LT.first * Entry->Cost; 3281 3282 if (ST->hasXOP()) 3283 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3284 return LT.first * Entry->Cost; 3285 3286 if (ST->is64Bit()) 3287 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3288 return LT.first * Entry->Cost; 3289 3290 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3291 return LT.first * Entry->Cost; 3292 } 3293 3294 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3295 } 3296 3297 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 3298 unsigned Index) { 3299 static const CostTblEntry SLMCostTbl[] = { 3300 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 }, 3301 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 }, 3302 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 }, 3303 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 } 3304 }; 3305 3306 assert(Val->isVectorTy() && "This must be a vector type"); 3307 Type *ScalarType = Val->getScalarType(); 3308 int RegisterFileMoveCost = 0; 3309 3310 // Non-immediate extraction/insertion can be handled as a sequence of 3311 // aliased loads+stores via the stack. 3312 if (Index == -1U && (Opcode == Instruction::ExtractElement || 3313 Opcode == Instruction::InsertElement)) { 3314 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns: 3315 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0. 3316 3317 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling. 3318 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected"); 3319 Align VecAlign = DL.getPrefTypeAlign(Val); 3320 Align SclAlign = DL.getPrefTypeAlign(ScalarType); 3321 3322 // Extract - store vector to stack, load scalar. 3323 if (Opcode == Instruction::ExtractElement) { 3324 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, 3325 TTI::TargetCostKind::TCK_RecipThroughput) + 3326 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0, 3327 TTI::TargetCostKind::TCK_RecipThroughput); 3328 } 3329 // Insert - store vector to stack, store scalar, load vector. 3330 if (Opcode == Instruction::InsertElement) { 3331 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, 3332 TTI::TargetCostKind::TCK_RecipThroughput) + 3333 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0, 3334 TTI::TargetCostKind::TCK_RecipThroughput) + 3335 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0, 3336 TTI::TargetCostKind::TCK_RecipThroughput); 3337 } 3338 } 3339 3340 if (Index != -1U && (Opcode == Instruction::ExtractElement || 3341 Opcode == Instruction::InsertElement)) { 3342 // Legalize the type. 3343 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 3344 3345 // This type is legalized to a scalar type. 3346 if (!LT.second.isVector()) 3347 return 0; 3348 3349 // The type may be split. Normalize the index to the new type. 3350 unsigned NumElts = LT.second.getVectorNumElements(); 3351 unsigned SubNumElts = NumElts; 3352 Index = Index % NumElts; 3353 3354 // For >128-bit vectors, we need to extract higher 128-bit subvectors. 3355 // For inserts, we also need to insert the subvector back. 3356 if (LT.second.getSizeInBits() > 128) { 3357 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector"); 3358 unsigned NumSubVecs = LT.second.getSizeInBits() / 128; 3359 SubNumElts = NumElts / NumSubVecs; 3360 if (SubNumElts <= Index) { 3361 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 
2 : 1); 3362 Index %= SubNumElts; 3363 } 3364 } 3365 3366 if (Index == 0) { 3367 // Floating point scalars are already located in index #0. 3368 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume 3369 // true for all. 3370 if (ScalarType->isFloatingPointTy()) 3371 return RegisterFileMoveCost; 3372 3373 // Assume movd/movq XMM -> GPR is relatively cheap on all targets. 3374 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement) 3375 return 1 + RegisterFileMoveCost; 3376 } 3377 3378 int ISD = TLI->InstructionOpcodeToISD(Opcode); 3379 assert(ISD && "Unexpected vector opcode"); 3380 MVT MScalarTy = LT.second.getScalarType(); 3381 if (ST->isSLM()) 3382 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy)) 3383 return Entry->Cost + RegisterFileMoveCost; 3384 3385 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets. 3386 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || 3387 (MScalarTy.isInteger() && ST->hasSSE41())) 3388 return 1 + RegisterFileMoveCost; 3389 3390 // Assume insertps is relatively cheap on all targets. 3391 if (MScalarTy == MVT::f32 && ST->hasSSE41() && 3392 Opcode == Instruction::InsertElement) 3393 return 1 + RegisterFileMoveCost; 3394 3395 // For extractions we just need to shuffle the element to index 0, which 3396 // should be very cheap (assume cost = 1). For insertions we need to shuffle 3397 // the elements to their destination. In both cases we must handle the 3398 // subvector move(s). 3399 // If the vector type is already less than 128 bits then don't reduce it. 3400 // TODO: Under what circumstances should we shuffle using the full width? 3401 InstructionCost ShuffleCost = 1; 3402 if (Opcode == Instruction::InsertElement) { 3403 auto *SubTy = cast<VectorType>(Val); 3404 EVT VT = TLI->getValueType(DL, Val); 3405 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128) 3406 SubTy = FixedVectorType::get(ScalarType, SubNumElts); 3407 ShuffleCost = 3408 getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy); 3409 } 3410 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1; 3411 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost; 3412 } 3413 3414 // Add to the base cost if we know that the extracted element of a vector is 3415 // destined to be moved to and used in the integer register file. 3416 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 3417 RegisterFileMoveCost += 1; 3418 3419 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 3420 } 3421 3422 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty, 3423 const APInt &DemandedElts, 3424 bool Insert, 3425 bool Extract) { 3426 InstructionCost Cost = 0; 3427 3428 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much 3429 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT. 3430 if (Insert) { 3431 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 3432 MVT MScalarTy = LT.second.getScalarType(); 3433 3434 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || 3435 (MScalarTy.isInteger() && ST->hasSSE41()) || 3436 (MScalarTy == MVT::f32 && ST->hasSSE41())) { 3437 // For types we can insert directly, insertion into 128-bit subvectors is 3438 // cheap, followed by a cheap chain of concatenations.
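      // For instance (illustrative): building a fully-demanded v4i32 on
      // SSE4.1 stays in the 128-bit path below and is costed by the base
      // implementation as a short chain of pinsrd-style inserts.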
3439 if (LT.second.getSizeInBits() <= 128) { 3440 Cost += 3441 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false); 3442 } else { 3443 // In each 128-lane, if at least one index is demanded but not all 3444 // indices are demanded and this 128-lane is not the first 128-lane of 3445 // the legalized vector, then this 128-lane needs an extracti128; if in 3446 // each 128-lane, there is at least one demanded index, this 128-lane 3447 // needs an inserti128. 3448 3449 // The following cases will help you build a better understanding: 3450 // Assume we insert several elements into a v8i32 vector with AVX2, 3451 // Case#1: inserting into the 1st index needs vpinsrd + inserti128. 3452 // Case#2: inserting into the 5th index needs extracti128 + vpinsrd + 3453 // inserti128. 3454 // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128. 3455 const int CostValue = *LT.first.getValue(); 3456 assert(CostValue >= 0 && "Negative cost!"); 3457 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue; 3458 unsigned NumElts = LT.second.getVectorNumElements() * CostValue; 3459 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts); 3460 unsigned Scale = NumElts / Num128Lanes; 3461 // We iterate over each 128-lane, and check if we need an 3462 // extracti128/inserti128 for this 128-lane. 3463 for (unsigned I = 0; I < NumElts; I += Scale) { 3464 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale); 3465 APInt MaskedDE = Mask & WidenedDemandedElts; 3466 unsigned Population = MaskedDE.countPopulation(); 3467 Cost += (Population > 0 && Population != Scale && 3468 I % LT.second.getVectorNumElements() != 0); 3469 Cost += Population > 0; 3470 } 3471 Cost += DemandedElts.countPopulation(); 3472 3473 // For vXf32 cases, insertion into the 0'th index in each v4f32 3474 // 128-bit vector is free. 3475 // NOTE: This assumes legalization widens vXf32 vectors. 3476 if (MScalarTy == MVT::f32) 3477 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements(); 3478 i < e; i += 4) 3479 if (DemandedElts[i]) 3480 Cost--; 3481 } 3482 } else if (LT.second.isVector()) { 3483 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded 3484 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a 3485 // series of UNPCK followed by CONCAT_VECTORS - all of these can be 3486 // considered cheap. 3487 if (Ty->isIntOrIntVectorTy()) 3488 Cost += DemandedElts.countPopulation(); 3489 3490 // Get the smaller of the legalized or original pow2-extended number of 3491 // vector elements, which represents the number of unpacks we'll end up 3492 // performing. 3493 unsigned NumElts = LT.second.getVectorNumElements(); 3494 unsigned Pow2Elts = 3495 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); 3496 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; 3497 } 3498 } 3499 3500 // TODO: Use default extraction for now, but we should investigate extending this 3501 // to handle repeated subvector extraction. 3502 if (Extract) 3503 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); 3504 3505 return Cost; 3506 } 3507 3508 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, 3509 MaybeAlign Alignment, 3510 unsigned AddressSpace, 3511 TTI::TargetCostKind CostKind, 3512 const Instruction *I) { 3513 // TODO: Handle other cost kinds. 3514 if (CostKind != TTI::TCK_RecipThroughput) { 3515 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3516 // Store instruction with index and scale costs 2 Uops.
3517 // Check the preceding GEP to identify non-const indices. 3518 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) { 3519 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) 3520 return TTI::TCC_Basic * 2; 3521 } 3522 } 3523 return TTI::TCC_Basic; 3524 } 3525 3526 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 3527 "Invalid Opcode"); 3528 // Type legalization can't handle structs 3529 if (TLI->getValueType(DL, Src, true) == MVT::Other) 3530 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 3531 CostKind); 3532 3533 // Legalize the type. 3534 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 3535 3536 auto *VTy = dyn_cast<FixedVectorType>(Src); 3537 3538 // Handle the simple case of non-vectors. 3539 // NOTE: this assumes that legalization never creates vector from scalars! 3540 if (!VTy || !LT.second.isVector()) 3541 // Each load/store unit costs 1. 3542 return LT.first * 1; 3543 3544 bool IsLoad = Opcode == Instruction::Load; 3545 3546 Type *EltTy = VTy->getElementType(); 3547 3548 const int EltTyBits = DL.getTypeSizeInBits(EltTy); 3549 3550 InstructionCost Cost = 0; 3551 3552 // Source of truth: how many elements were there in the original IR vector? 3553 const unsigned SrcNumElt = VTy->getNumElements(); 3554 3555 // How far have we gotten? 3556 int NumEltRemaining = SrcNumElt; 3557 // Note that we intentionally capture by-reference, NumEltRemaining changes. 3558 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; }; 3559 3560 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8); 3561 3562 // Note that even if we can store 64 bits of an XMM, we still operate on XMM. 3563 const unsigned XMMBits = 128; 3564 if (XMMBits % EltTyBits != 0) 3565 // Vector size must be a multiple of the element size. I.e. no padding. 3566 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 3567 CostKind); 3568 const int NumEltPerXMM = XMMBits / EltTyBits; 3569 3570 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM); 3571 3572 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0; 3573 NumEltRemaining > 0; CurrOpSizeBytes /= 2) { 3574 // How many elements would a single op deal with at once? 3575 if ((8 * CurrOpSizeBytes) % EltTyBits != 0) 3576 // Vector size must be a multiple of the element size. I.e. no padding. 3577 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 3578 CostKind); 3579 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits; 3580 3581 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?"); 3582 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) || 3583 (CurrOpSizeBytes == MaxLegalOpSizeBytes)) && 3584 "Unless we haven't halved the op size yet, " 3585 "we have less than two op's sized units of work left."); 3586 3587 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM 3588 ? FixedVectorType::get(EltTy, CurrNumEltPerOp) 3589 : XMMVecTy; 3590 3591 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 && 3592 "After halving sizes, the vector elt count is no longer a multiple " 3593 "of number of elements per operation?"); 3594 auto *CoalescedVecTy = 3595 CurrNumEltPerOp == 1 3596 ? 
CurrVecTy 3597 : FixedVectorType::get( 3598 IntegerType::get(Src->getContext(), 3599 EltTyBits * CurrNumEltPerOp), 3600 CurrVecTy->getNumElements() / CurrNumEltPerOp); 3601 assert(DL.getTypeSizeInBits(CoalescedVecTy) == 3602 DL.getTypeSizeInBits(CurrVecTy) && 3603 "coalescing elements doesn't change vector width."); 3604 3605 while (NumEltRemaining > 0) { 3606 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?"); 3607 3608 // Can we use this vector size, as per the remaining element count? 3609 // Iff the vector is naturally aligned, we can do a wide load regardless. 3610 if (NumEltRemaining < CurrNumEltPerOp && 3611 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) && 3612 CurrOpSizeBytes != 1) 3613 break; // Try smaller vector size. 3614 3615 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0; 3616 3617 // If we have fully processed the previous reg, we need to replenish it. 3618 if (SubVecEltsLeft == 0) { 3619 SubVecEltsLeft += CurrVecTy->getNumElements(); 3620 // And that's free only for the 0'th subvector of a legalized vector. 3621 if (!Is0thSubVec) 3622 Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector 3623 : TTI::ShuffleKind::SK_ExtractSubvector, 3624 VTy, None, NumEltDone(), CurrVecTy); 3625 } 3626 3627 // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM, 3628 // for smaller widths (32/16/8) we have to insert/extract them separately. 3629 // Again, it's free for the 0'th subreg (if op is 32/64 bit wide, 3630 // but let's pretend that it is also true for 16/8 bit wide ops...) 3631 if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) { 3632 int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM; 3633 assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "Consumed element count should be a multiple of the op width"); 3634 int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp; 3635 APInt DemandedElts = 3636 APInt::getBitsSet(CoalescedVecTy->getNumElements(), 3637 CoalescedVecEltIdx, CoalescedVecEltIdx + 1); 3638 assert(DemandedElts.countPopulation() == 1 && "Inserting single value"); 3639 Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad, 3640 !IsLoad); 3641 } 3642 3643 // This isn't exactly right. We're using slow unaligned 32-byte accesses 3644 // as a proxy for a double-pumped AVX memory interface such as on 3645 // Sandybridge.
3646 if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow()) 3647 Cost += 2; 3648 else 3649 Cost += 1; 3650 3651 SubVecEltsLeft -= CurrNumEltPerOp; 3652 NumEltRemaining -= CurrNumEltPerOp; 3653 Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes); 3654 } 3655 } 3656 3657 assert(NumEltRemaining <= 0 && "Should have processed all the elements."); 3658 3659 return Cost; 3660 } 3661 3662 InstructionCost 3663 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment, 3664 unsigned AddressSpace, 3665 TTI::TargetCostKind CostKind) { 3666 bool IsLoad = (Instruction::Load == Opcode); 3667 bool IsStore = (Instruction::Store == Opcode); 3668 3669 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy); 3670 if (!SrcVTy) 3671 // For a scalar, take the regular memory op cost, without the mask. 3672 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind); 3673 3674 unsigned NumElem = SrcVTy->getNumElements(); 3675 auto *MaskTy = 3676 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); 3677 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) || 3678 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) { 3679 // Scalarization 3680 APInt DemandedElts = APInt::getAllOnesValue(NumElem); 3681 InstructionCost MaskSplitCost = 3682 getScalarizationOverhead(MaskTy, DemandedElts, false, true); 3683 InstructionCost ScalarCompareCost = getCmpSelInstrCost( 3684 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr, 3685 CmpInst::BAD_ICMP_PREDICATE, CostKind); 3686 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); 3687 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); 3688 InstructionCost ValueSplitCost = 3689 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore); 3690 InstructionCost MemopCost = 3691 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 3692 Alignment, AddressSpace, CostKind); 3693 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; 3694 } 3695 3696 // Legalize the type. 3697 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); 3698 auto VT = TLI->getValueType(DL, SrcVTy); 3699 InstructionCost Cost = 0; 3700 if (VT.isSimple() && LT.second != VT.getSimpleVT() && 3701 LT.second.getVectorNumElements() == NumElem) 3702 // Promotion requires extend/truncate for data and a shuffle for mask. 3703 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) + 3704 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr); 3705 3706 else if (LT.first * LT.second.getVectorNumElements() > NumElem) { 3707 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(), 3708 LT.second.getVectorNumElements()); 3709 // Expanding requires filling the mask with zeroes. 3710 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy); 3711 } 3712 3713 // Pre-AVX512 - each maskmov load costs 2 + store costs ~8. 3714 if (!ST->hasAVX512()) 3715 return Cost + LT.first * (IsLoad ? 2 : 8); 3716 3717 // AVX-512 masked load/store is cheaper. 3718 return Cost + LT.first; 3719 } 3720 3721 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty, 3722 ScalarEvolution *SE, 3723 const SCEV *Ptr) { 3724 // Address computations in vectorized code with non-consecutive addresses will 3725 // likely result in more instructions compared to scalar code where the 3726 // computation can more often be merged into the index mode. The resulting 3727 // extra micro-ops can significantly decrease throughput.
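  // Summary of the cases below (illustrative): a vector access that is not
  // strided at all is charged NumVectorInstToHideOverhead; a strided access
  // whose (loop-invariant) stride is unknown at compile time is charged one
  // extra ADD; a constant stride folds into the addressing mode and falls
  // through to the base implementation.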
3728 const unsigned NumVectorInstToHideOverhead = 10; 3729 3730 // Cost modeling of Strided Access Computation is hidden by the indexing 3731 // modes of X86 regardless of the stride value. We don't believe that there 3732 // is a difference between a constant strided access in general and a constant 3733 // stride value which is less than or equal to 64. 3734 // Even in the case of (loop invariant) stride whose value is not known at 3735 // compile time, the address computation will not incur more than one extra 3736 // ADD instruction. 3737 if (Ty->isVectorTy() && SE) { 3738 if (!BaseT::isStridedAccess(Ptr)) 3739 return NumVectorInstToHideOverhead; 3740 if (!BaseT::getConstantStrideStep(SE, Ptr)) 3741 return 1; 3742 } 3743 3744 return BaseT::getAddressComputationCost(Ty, SE, Ptr); 3745 } 3746 3747 InstructionCost 3748 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 3749 Optional<FastMathFlags> FMF, 3750 TTI::TargetCostKind CostKind) { 3751 if (TTI::requiresOrderedReduction(FMF)) 3752 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind); 3753 3754 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput 3755 // and use it as the cost. 3756 3757 static const CostTblEntry SLMCostTblNoPairWise[] = { 3758 { ISD::FADD, MVT::v2f64, 3 }, 3759 { ISD::ADD, MVT::v2i64, 5 }, 3760 }; 3761 3762 static const CostTblEntry SSE2CostTblNoPairWise[] = { 3763 { ISD::FADD, MVT::v2f64, 2 }, 3764 { ISD::FADD, MVT::v2f32, 2 }, 3765 { ISD::FADD, MVT::v4f32, 4 }, 3766 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6". 3767 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32 3768 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3". 3769 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3". 3770 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3". 3771 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3". 3772 { ISD::ADD, MVT::v2i8, 2 }, 3773 { ISD::ADD, MVT::v4i8, 2 }, 3774 { ISD::ADD, MVT::v8i8, 2 }, 3775 { ISD::ADD, MVT::v16i8, 3 }, 3776 }; 3777 3778 static const CostTblEntry AVX1CostTblNoPairWise[] = { 3779 { ISD::FADD, MVT::v4f64, 3 }, 3780 { ISD::FADD, MVT::v4f32, 3 }, 3781 { ISD::FADD, MVT::v8f32, 4 }, 3782 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 3783 { ISD::ADD, MVT::v4i64, 3 }, 3784 { ISD::ADD, MVT::v8i32, 5 }, 3785 { ISD::ADD, MVT::v16i16, 5 }, 3786 { ISD::ADD, MVT::v32i8, 4 }, 3787 }; 3788 3789 int ISD = TLI->InstructionOpcodeToISD(Opcode); 3790 assert(ISD && "Invalid opcode"); 3791 3792 // Before legalizing the type, give a chance to look up illegal narrow types 3793 // in the table. 3794 // FIXME: Is there a better way to do this? 3795 EVT VT = TLI->getValueType(DL, ValTy); 3796 if (VT.isSimple()) { 3797 MVT MTy = VT.getSimpleVT(); 3798 if (ST->isSLM()) 3799 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 3800 return Entry->Cost; 3801 3802 if (ST->hasAVX()) 3803 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3804 return Entry->Cost; 3805 3806 if (ST->hasSSE2()) 3807 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3808 return Entry->Cost; 3809 } 3810 3811 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 3812 3813 MVT MTy = LT.second; 3814 3815 auto *ValVTy = cast<FixedVectorType>(ValTy); 3816 3817 // Special case: vXi8 mul reductions are performed as vXi16.
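  // e.g. (illustrative): a mul reduction over v16i8 is costed below as a
  // zext of v16i8 to v16i16 plus the cost of a v16i16 mul reduction.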
3818 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) { 3819 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16); 3820 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements()); 3821 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy, 3822 TargetTransformInfo::CastContextHint::None, 3823 CostKind) + 3824 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind); 3825 } 3826 3827 InstructionCost ArithmeticCost = 0; 3828 if (LT.first != 1 && MTy.isVector() && 3829 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3830 // Type needs to be split. We need LT.first - 1 arithmetic ops. 3831 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 3832 MTy.getVectorNumElements()); 3833 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 3834 ArithmeticCost *= LT.first - 1; 3835 } 3836 3837 if (ST->isSLM()) 3838 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 3839 return ArithmeticCost + Entry->Cost; 3840 3841 if (ST->hasAVX()) 3842 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3843 return ArithmeticCost + Entry->Cost; 3844 3845 if (ST->hasSSE2()) 3846 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3847 return ArithmeticCost + Entry->Cost; 3848 3849 // FIXME: These assume a naive kshift+binop lowering, which is probably 3850 // conservative in most cases. 3851 static const CostTblEntry AVX512BoolReduction[] = { 3852 { ISD::AND, MVT::v2i1, 3 }, 3853 { ISD::AND, MVT::v4i1, 5 }, 3854 { ISD::AND, MVT::v8i1, 7 }, 3855 { ISD::AND, MVT::v16i1, 9 }, 3856 { ISD::AND, MVT::v32i1, 11 }, 3857 { ISD::AND, MVT::v64i1, 13 }, 3858 { ISD::OR, MVT::v2i1, 3 }, 3859 { ISD::OR, MVT::v4i1, 5 }, 3860 { ISD::OR, MVT::v8i1, 7 }, 3861 { ISD::OR, MVT::v16i1, 9 }, 3862 { ISD::OR, MVT::v32i1, 11 }, 3863 { ISD::OR, MVT::v64i1, 13 }, 3864 }; 3865 3866 static const CostTblEntry AVX2BoolReduction[] = { 3867 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp 3868 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp 3869 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp 3870 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp 3871 }; 3872 3873 static const CostTblEntry AVX1BoolReduction[] = { 3874 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp 3875 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp 3876 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 3877 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 3878 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp 3879 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp 3880 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 3881 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 3882 }; 3883 3884 static const CostTblEntry SSE2BoolReduction[] = { 3885 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp 3886 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp 3887 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp 3888 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp 3889 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp 3890 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp 3891 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp 3892 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp 3893 }; 3894 3895 // Handle bool allof/anyof patterns. 3896 if (ValVTy->getElementType()->isIntegerTy(1)) { 3897 InstructionCost ArithmeticCost = 0; 3898 if (LT.first != 1 && MTy.isVector() && 3899 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3900 // Type needs to be split. We need LT.first - 1 arithmetic ops. 
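    // e.g. (illustrative, assuming v32i1 legalizes to two v16i8 halves on
    // SSE2): we charge one AND to combine the halves, then the v16i8 table
    // entry above (pmovmskb + cmp) for the final reduction.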
3901 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 3902 MTy.getVectorNumElements()); 3903 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 3904 ArithmeticCost *= LT.first - 1; 3905 } 3906 3907 if (ST->hasAVX512()) 3908 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy)) 3909 return ArithmeticCost + Entry->Cost; 3910 if (ST->hasAVX2()) 3911 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy)) 3912 return ArithmeticCost + Entry->Cost; 3913 if (ST->hasAVX()) 3914 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy)) 3915 return ArithmeticCost + Entry->Cost; 3916 if (ST->hasSSE2()) 3917 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy)) 3918 return ArithmeticCost + Entry->Cost; 3919 3920 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind); 3921 } 3922 3923 unsigned NumVecElts = ValVTy->getNumElements(); 3924 unsigned ScalarSize = ValVTy->getScalarSizeInBits(); 3925 3926 // Special case power of 2 reductions where the scalar type isn't changed 3927 // by type legalization. 3928 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits()) 3929 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind); 3930 3931 InstructionCost ReductionCost = 0; 3932 3933 auto *Ty = ValVTy; 3934 if (LT.first != 1 && MTy.isVector() && 3935 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3936 // Type needs to be split. We need LT.first - 1 arithmetic ops. 3937 Ty = FixedVectorType::get(ValVTy->getElementType(), 3938 MTy.getVectorNumElements()); 3939 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 3940 ReductionCost *= LT.first - 1; 3941 NumVecElts = MTy.getVectorNumElements(); 3942 } 3943 3944 // Now handle reduction with the legal type, taking into account size changes 3945 // at each level. 3946 while (NumVecElts > 1) { 3947 // Determine the size of the remaining vector we need to reduce. 3948 unsigned Size = NumVecElts * ScalarSize; 3949 NumVecElts /= 2; 3950 // If we're reducing from 256/512 bits, use an extract_subvector. 3951 if (Size > 128) { 3952 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); 3953 ReductionCost += 3954 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); 3955 Ty = SubTy; 3956 } else if (Size == 128) { 3957 // Reducing from 128 bits is a permute of v2f64/v2i64. 3958 FixedVectorType *ShufTy; 3959 if (ValVTy->isFloatingPointTy()) 3960 ShufTy = 3961 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2); 3962 else 3963 ShufTy = 3964 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2); 3965 ReductionCost += 3966 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3967 } else if (Size == 64) { 3968 // Reducing from 64 bits is a shuffle of v4f32/v4i32. 3969 FixedVectorType *ShufTy; 3970 if (ValVTy->isFloatingPointTy()) 3971 ShufTy = 3972 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4); 3973 else 3974 ShufTy = 3975 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4); 3976 ReductionCost += 3977 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3978 } else { 3979 // Reducing from smaller size is a shift by immediate. 
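      // e.g. (illustrative): when only two i16 lanes of a v8i16 reduction
      // remain (Size == 32), the combine is modelled as a v4i32 logical
      // shift right by a uniform immediate (a psrld-style shift) followed
      // by the arithmetic op below.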
3980 auto *ShiftTy = FixedVectorType::get( 3981 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size); 3982 ReductionCost += getArithmeticInstrCost( 3983 Instruction::LShr, ShiftTy, CostKind, 3984 TargetTransformInfo::OK_AnyValue, 3985 TargetTransformInfo::OK_UniformConstantValue, 3986 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); 3987 } 3988 3989 // Add the arithmetic op for this level. 3990 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind); 3991 } 3992 3993 // Add the final extract element to the cost. 3994 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 3995 } 3996 3997 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, 3998 bool IsUnsigned) { 3999 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 4000 4001 MVT MTy = LT.second; 4002 4003 int ISD; 4004 if (Ty->isIntOrIntVectorTy()) { 4005 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 4006 } else { 4007 assert(Ty->isFPOrFPVectorTy() && 4008 "Expected floating point or integer vector type."); 4009 ISD = ISD::FMINNUM; 4010 } 4011 4012 static const CostTblEntry SSE1CostTbl[] = { 4013 {ISD::FMINNUM, MVT::v4f32, 1}, 4014 }; 4015 4016 static const CostTblEntry SSE2CostTbl[] = { 4017 {ISD::FMINNUM, MVT::v2f64, 1}, 4018 {ISD::SMIN, MVT::v8i16, 1}, 4019 {ISD::UMIN, MVT::v16i8, 1}, 4020 }; 4021 4022 static const CostTblEntry SSE41CostTbl[] = { 4023 {ISD::SMIN, MVT::v4i32, 1}, 4024 {ISD::UMIN, MVT::v4i32, 1}, 4025 {ISD::UMIN, MVT::v8i16, 1}, 4026 {ISD::SMIN, MVT::v16i8, 1}, 4027 }; 4028 4029 static const CostTblEntry SSE42CostTbl[] = { 4030 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd 4031 }; 4032 4033 static const CostTblEntry AVX1CostTbl[] = { 4034 {ISD::FMINNUM, MVT::v8f32, 1}, 4035 {ISD::FMINNUM, MVT::v4f64, 1}, 4036 {ISD::SMIN, MVT::v8i32, 3}, 4037 {ISD::UMIN, MVT::v8i32, 3}, 4038 {ISD::SMIN, MVT::v16i16, 3}, 4039 {ISD::UMIN, MVT::v16i16, 3}, 4040 {ISD::SMIN, MVT::v32i8, 3}, 4041 {ISD::UMIN, MVT::v32i8, 3}, 4042 }; 4043 4044 static const CostTblEntry AVX2CostTbl[] = { 4045 {ISD::SMIN, MVT::v8i32, 1}, 4046 {ISD::UMIN, MVT::v8i32, 1}, 4047 {ISD::SMIN, MVT::v16i16, 1}, 4048 {ISD::UMIN, MVT::v16i16, 1}, 4049 {ISD::SMIN, MVT::v32i8, 1}, 4050 {ISD::UMIN, MVT::v32i8, 1}, 4051 }; 4052 4053 static const CostTblEntry AVX512CostTbl[] = { 4054 {ISD::FMINNUM, MVT::v16f32, 1}, 4055 {ISD::FMINNUM, MVT::v8f64, 1}, 4056 {ISD::SMIN, MVT::v2i64, 1}, 4057 {ISD::UMIN, MVT::v2i64, 1}, 4058 {ISD::SMIN, MVT::v4i64, 1}, 4059 {ISD::UMIN, MVT::v4i64, 1}, 4060 {ISD::SMIN, MVT::v8i64, 1}, 4061 {ISD::UMIN, MVT::v8i64, 1}, 4062 {ISD::SMIN, MVT::v16i32, 1}, 4063 {ISD::UMIN, MVT::v16i32, 1}, 4064 }; 4065 4066 static const CostTblEntry AVX512BWCostTbl[] = { 4067 {ISD::SMIN, MVT::v32i16, 1}, 4068 {ISD::UMIN, MVT::v32i16, 1}, 4069 {ISD::SMIN, MVT::v64i8, 1}, 4070 {ISD::UMIN, MVT::v64i8, 1}, 4071 }; 4072 4073 // If we have a native MIN/MAX instruction for this type, use it.
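  // e.g. (illustrative): smin on v4i32 hits the SSE41 entry above (pminsd,
  // cost 1); plain SSE2 has no v4i32 entry and falls through to the
  // cmp+select expansion at the end of this function.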
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  unsigned CmpOpcode;
  if (Ty->isFPOrFPVectorTy()) {
    CmpOpcode = Instruction::FCmp;
  } else {
    assert(Ty->isIntOrIntVectorTy() &&
           "expecting floating point or integer type for min/max reduction");
    CmpOpcode = Instruction::ICmp;
  }

  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  // Otherwise fall back to cmp+select.
  InstructionCost Result =
      getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
                         CostKind) +
      getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
  return Result;
}

InstructionCost
X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
                                   bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
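  // E.g. plain SSE2 has no unsigned pminuw, so the v8i16 UMIN entries below
  // include the cost of the sign-bit flips (pxor) needed around pminsw.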

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
      {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
      {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
      {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
      {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v2i8, 3},  // pminsb
      {ISD::SMIN, MVT::v4i8, 5},  // pminsb
      {ISD::SMIN, MVT::v8i8, 7},  // pminsb
      {ISD::SMIN, MVT::v16i8, 6},
      {ISD::UMIN, MVT::v2i8, 3},  // same as sse2
      {ISD::UMIN, MVT::v4i8, 5},  // same as sse2
      {ISD::UMIN, MVT::v8i8, 7},  // same as sse2
      {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v16i16, 6},
      {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v32i8, 8},
      {ISD::UMIN, MVT::v32i8, 8},
  };

  static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v32i16, 8},
      {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v64i8, 10},
      {ISD::UMIN, MVT::v64i8, 10},
  };

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  auto *ValVTy = cast<FixedVectorType>(ValTy);
  unsigned NumVecElts = ValVTy->getNumElements();

  auto *Ty = ValVTy;
  InstructionCost MinMaxCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 operations.
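    // E.g. a v32i32 UMIN reduction on AVX2 legalizes to 4 x v8i32, so three
    // v8i32 min ops combine the split pieces before the v8i32 reduction below.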
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
                                           MTy.getVectorNumElements());
    MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
    MinMaxCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  unsigned ScalarSize = ValTy->getScalarSizeInBits();

  // Only handle power-of-2 reductions where the scalar type isn't changed
  // by type legalization; otherwise fall back to the base implementation.
  if (!isPowerOf2_32(ValVTy->getNumElements()) ||
      ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      MinMaxCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValTy->getElementType()->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
      else
        ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValTy->getElementType()->isFloatingPointTy())
        ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
      else
        ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
      MinMaxCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the min/max op for this level.
    auto *SubCondTy =
        FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
    MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  }

  // Add the final extract element to the cost.
  return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

/// Calculate the cost of materializing a 64-bit value.
/// This helper method might only calculate a fraction of a larger immediate.
/// Therefore it is valid to return a cost of ZERO.
InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
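    // E.g. 'and i64 %x, 0xFFFFFFF0' can be encoded as a 32-bit 'and' whose
    // result implicitly zero-extends to 64 bits, so no extra instruction is
    // needed to materialize the immediate.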
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
    if (Idx == 1 && Imm.getBitWidth() == 64 &&
        Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID,
                                                unsigned Idx, const APInt &Imm,
                                                Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

int X86TTIImpl::getGatherOverhead() const {
  // Some CPUs have more overhead for gather.
  // The specified overhead is relative to the Load operation. "2" is the
  // number provided by Intel architects. This parameter is used for cost
  // estimation of Gather Op and comparison with other alternatives.
  // TODO: Remove the explicit hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
    return 2;

  return 1024;
}

int X86TTIImpl::getScatterOverhead() const {
  if (ST->hasAVX512())
    return 2;

  return 1024;
}

// Return an average cost of Gather / Scatter instruction, maybe improved later.
// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
                                            const Value *Ptr, Align Alignment,
                                            unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();

  // Try to reduce index size from 64 bits (the default for GEP) to 32. This is
  // essential for VF 16: if the index can't be reduced to 32 bits, the
  // operation will use 16 x 64 indices, which do not fit in a zmm register and
  // need to be split. Also check that the base pointer is the same for all
  // lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    const Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
        IndxTy = IndexVTy->getElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for 16-element vectors.
  // By default the IndexSize equals the pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  auto *IndexVTy = FixedVectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<InstructionCost, MVT> IdxsLT =
      TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<InstructionCost, MVT> SrcLT =
      TLI->getTypeLegalizationCost(DL, SrcVTy);
  InstructionCost::CostType SplitFactor =
      *std::max(IdxsLT.first, SrcLT.first).getValue();
  if (SplitFactor > 1) {
    // Handle splitting of vectors of pointers.
    auto *SplitSrcTy =
        FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load)
                             ? getGatherOverhead()
                             : getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           MaybeAlign(Alignment), AddressSpace,
                                           TTI::TCK_RecipThroughput);
}

/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - The address space of the pointer(s).
///
/// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                            bool VariableMask, Align Alignment,
                                            unsigned AddressSpace) {
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VF);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  InstructionCost MaskUnpackCost = 0;
  if (VariableMask) {
    auto *MaskTy =
        FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  InstructionCost MemoryOpCost =
      VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                           MaybeAlign(Alignment), AddressSpace, CostKind);

  InstructionCost InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of Gather / Scatter operation
InstructionCost X86TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I = nullptr) {
  if (CostKind != TTI::TCK_RecipThroughput) {
    if ((Opcode == Instruction::Load &&
         isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
        (Opcode == Instruction::Store &&
         isLegalMaskedScatter(SrcVTy, Align(Alignment))))
      return 1;
    return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  }

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86-specific here: the instruction count gets first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}

bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}

bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A exception above, only aligned stores are available
  // nontemporally on any other subtarget, and only for power-of-2 sizes of
  // 4 to 32 bytes.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  else if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}

bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
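  // (The type legalizer can't scalarize e.g. a <1 x float> expandload.)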
  if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
    unsigned NumElts = DataVTy->getNumElements();
    if (NumElts == 1)
      return false;
    // Gather/scatter of 2-element vectors is not profitable on KNL / SKX,
    // and a 4-element gather/scatter instruction does not exist on KNL.
    // We could extend it to 8 elements, but zeroing upper bits of
    // the mask vector will add more instructions. Right now we give the scalar
    // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
    // instruction is better in the VariableMask case.
    if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType, Alignment);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
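  // The callee may only be inlined if its feature set is a subset of the
  // caller's; otherwise the inlined body could use instructions the
  // surrounding function is never compiled to support.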
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}

bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are vectors.
  // FIXME: The API of this function seems intended to allow arguments
  // to be removed from the set, but the caller doesn't check if the set
  // becomes empty so that may not work in practice.
  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    return EltTy->isVectorTy() || EltTy->isAggregateType();
  });
}

X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three-way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512())
      Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX())
      Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2())
      Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
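// E.g. a stride-3 load of <12 x i32> (Factor = 3, VF = 4) with
// Indices = {0, 2} uses only members 0 and 2 of each 3-element group.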
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  Type *ScalarTy = VecTy->getElementType();
  // To deduplicate entries, model floats/pointers as appropriately-sized
  // integers.
  if (!ScalarTy->isIntegerTy())
    ScalarTy =
        Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));

  // Get the cost of all the memory operations.
  InstructionCost MemOpCosts = getMemoryOpCost(
      Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, element bit width and VF results in a
  // different sequence; the cost tables are therefore accessed with
  // Factor (stride) and VectorType=VFxiN.
  // The cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
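  // E.g. a stride-3 load of 8 x float is looked up as {3, MVT::v8i32}, since
  // floats were remodeled as same-width integers above.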
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v4i64, 6}, // (load 8i64 and) deinterleave into 2 x 4i64

      {3, MVT::v2i8, 10},  // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 4},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 9},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 13}, // (load 96i8 and) deinterleave into 3 x 32i8

      {3, MVT::v8i32, 17}, // (load 24i32 and) deinterleave into 3 x 8i32

      {4, MVT::v2i8, 12},  // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 20},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 39}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 80}, // (load 128i8 and) deinterleave into 4 x 32i8

      {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v4i64, 6}, // interleave 2 x 4i64 into 8i64 (and store)

      {3, MVT::v2i8, 7},   // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 8},   // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 11},  // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {4, MVT::v2i8, 12},  // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 9},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 10}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}  // interleave 4 x 32i8 into 128i8 (and store)
  };

  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return MemOpCosts + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind);
}

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
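  // E.g. a <48 x i8> group on an AVX512BW target legalizes to v64i8, so
  // VecTySize = 48, LegalVTSize = 64 and a single memory op is charged.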
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  InstructionCost MemOpCost = getMemoryOpCost(
      Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The costs of the loads and stores are computed separately from the
    // table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    InstructionCost ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
                                          VecTy->getNumElements() / Factor);
    InstructionCost NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get a number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    InstructionCost NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
                           NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  InstructionCost ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  InstructionCost Cost =
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  return Cost;
}

InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = cast<VectorType>(VecTy)->getElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(
        Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(
        Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}