//===----------- VectorUtils.cpp - Vectorizer utility functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of
/// the intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::bswap: // Begin integer bit-manipulation.
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
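/// For example, the second operand of ctlz/cttz (the 'is_zero_undef' flag)
/// and of powi (the integer exponent) remains scalar in the vector form, as
/// does the third (scale) operand of the fixed-point multiply intrinsics.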
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for the call.
/// For the given call instruction, this finds the corresponding intrinsic and
/// returns its ID; if no such mapping exists, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
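/// For example, for an access 'A[i * Stride]' inside \p Lp, the pointer's
/// SCEV is an add recurrence whose step contains the loop-invariant 'Stride';
/// this routine strips the GEP, any casts, and the access-size multiplication
/// and returns the IR value of 'Stride' (so callers can, e.g., version the
/// loop on 'Stride == 1').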
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer; otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it
  // later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width) // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val;
  Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
const llvm::Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V, m_ShuffleVector(m_InsertElement(m_Value(), m_Value(Splat),
                                               m_ZeroInt()),
                               m_Value(), m_ZeroInt())))
    return Splat;

  return nullptr;
}

// This setting is based on its counterpart in value tracking, but it could be
// adjusted if needed.
const unsigned MaxDepth = 6;

bool llvm::isSplatValue(const Value *V, unsigned Depth) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: Constant splat analysis does not allow undef elements.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  // FIXME: Constant splat analysis does not allow undef elements.
  Constant *Mask;
  if (match(V, m_ShuffleVector(m_Value(), m_Value(), m_Constant(Mask))))
    return Mask->getSplatValue() != nullptr;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Depth) && isSplatValue(Y, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Depth) && isSplatValue(Y, Depth) &&
           isSplatValue(Z, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
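  //
  // For example (a hypothetical chain):
  //   %a = add i32 %x, %y
  //   %t = trunc i32 %a to i8
  // Here the trunc roots a chain that pulls in %a (and, transitively, %x and
  // %y); if only the low 8 bits of every member are demanded, the whole
  // equivalence class can be computed in i8.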
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) &&
          MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME;
         ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use a set for a scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

// e.g. ReplicationFactor = 3 and VF = 4 yield the replicating mask
// <0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3>.
Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
                                     unsigned ReplicationFactor, unsigned VF) {
  SmallVector<Constant *, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(Builder.getInt32(i));

  return ConstantVector::get(MaskVec);
}

// e.g. VF = 4 and NumVecs = 2 yield the interleaving mask
// <0, 4, 1, 5, 2, 6, 3, 7>.
Constant *llvm::createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                                     unsigned NumVecs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

// e.g. Start = 0, Stride = 2 and VF = 4 yield the strided mask <0, 2, 4, 6>.
Constant *llvm::createStrideMask(IRBuilder<> &Builder, unsigned Start,
                                 unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

// e.g. Start = 0, NumInts = 4 and NumUndefs = 4 yield the sequential mask
// <0, 1, 2, 3, undef, undef, undef, undef>.
Constant *llvm::createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                     unsigned NumInts, unsigned NumUndefs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        createSequentialMask(Builder, 0, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = createSequentialMask(Builder, 0, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

Value *llvm::concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
       ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      MaybeAlign Alignment = MaybeAlign(getLoadStoreAlignment(&I));
      if (!Alignment)
        Alignment = Align(DL.getABITypeAlignment(PtrTy->getElementType()));

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, *Alignment);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false, except when we
      // were asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (auto *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // For now we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumption checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true, we'll
  // only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Avoid releasing a Group twice.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
  for (auto &I : InterleaveGroupMap) {
    InterleaveGroup<Instruction> *Group = I.second;
    if (Group->requiresScalarEpilogue())
      DelSet.insert(Group);
  }
  for (auto *Ptr : DelSet) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
\n"); 1142 releaseGroup(Ptr); 1143 } 1144 1145 RequiresScalarEpilogue = false; 1146 } 1147 1148 template <typename InstT> 1149 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const { 1150 llvm_unreachable("addMetadata can only be used for Instruction"); 1151 } 1152 1153 namespace llvm { 1154 template <> 1155 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const { 1156 SmallVector<Value *, 4> VL; 1157 std::transform(Members.begin(), Members.end(), std::back_inserter(VL), 1158 [](std::pair<int, Instruction *> p) { return p.second; }); 1159 propagateMetadata(NewInst, VL); 1160 } 1161 } 1162