//===- Scalarizer.cpp - Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass converts vector operations into scalar operations, in order
// to expose optimization opportunities on the individual scalar operations.
// It is mainly intended for targets that do not have vector units, but it
// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "scalarizer"

static cl::opt<bool> ScalarizeVariableInsertExtract(
    "scalarize-variable-insert-extract", cl::init(true), cl::Hidden,
    cl::desc("Allow the scalarizer pass to scalarize "
             "insertelement/extractelement with variable index"));

// This is disabled by default because having separate loads and stores
// makes it more likely that the -combiner-alias-analysis limits will be
// reached.
static cl::opt<bool>
    ScalarizeLoadStore("scalarize-load-store", cl::init(false), cl::Hidden,
                       cl::desc("Allow the scalarizer pass to scalarize "
                                "loads and stores"));

namespace {

BasicBlock::iterator skipPastPhiNodesAndDbg(BasicBlock::iterator Itr) {
  BasicBlock *BB = Itr->getParent();
  if (isa<PHINode>(Itr))
    Itr = BB->getFirstInsertionPt();
  if (Itr != BB->end())
    Itr = skipDebugIntrinsics(Itr);
  return Itr;
}

// Used to store the scattered form of a vector.
using ValueVector = SmallVector<Value *, 8>;

// Used to map a vector Value to its scattered form. We use std::map
// because we want iterators to persist across insertion and because the
// values are relatively large.
using ScatterMap = std::map<Value *, ValueVector>;

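// Illustrative example (added for exposition; the ".i<N>" names follow the
// naming convention used throughout this file): scattering
//   %res = add <2 x i32> %a, %b
// produces the per-element values
//   %a.i0 = extractelement <2 x i32> %a, i32 0
//   %b.i0 = extractelement <2 x i32> %b, i32 0
//   %res.i0 = add i32 %a.i0, %b.i0
// and likewise for element 1. The original vector is only rebuilt with
// insertelements if some use of %res still needs it (see finish()).
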
// Lists Instructions that have been replaced with scalar implementations,
// along with a pointer to their scattered forms.
using GatherList = SmallVector<std::pair<Instruction *, ValueVector *>, 16>;

// Provides a very limited vector-like interface for lazily accessing one
// component of a scattered vector or vector pointer.
class Scatterer {
public:
  Scatterer() = default;

  // Scatter V into Size components. If new instructions are needed,
  // insert them before BBI in BB. If Cache is nonnull, use it to cache
  // the results.
  Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
            ValueVector *cachePtr = nullptr);

  // Return component I, creating a new Value for it if necessary.
  Value *operator[](unsigned I);

  // Return the number of components.
  unsigned size() const { return Size; }

private:
  BasicBlock *BB;
  BasicBlock::iterator BBI;
  Value *V;
  ValueVector *CachePtr;
  PointerType *PtrTy;
  ValueVector Tmp;
  unsigned Size;
};

// FCmpSplitter(FCI)(Builder, X, Y, Name) uses Builder to create an FCmp
// called Name that compares X and Y in the same way as FCI.
struct FCmpSplitter {
  FCmpSplitter(FCmpInst &fci) : FCI(fci) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name);
  }

  FCmpInst &FCI;
};

// ICmpSplitter(ICI)(Builder, X, Y, Name) uses Builder to create an ICmp
// called Name that compares X and Y in the same way as ICI.
struct ICmpSplitter {
  ICmpSplitter(ICmpInst &ici) : ICI(ici) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name);
  }

  ICmpInst &ICI;
};

// UnarySplitter(UO)(Builder, X, Name) uses Builder to create
// a unary operator like UO called Name with operand X.
struct UnarySplitter {
  UnarySplitter(UnaryOperator &uo) : UO(uo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op, const Twine &Name) const {
    return Builder.CreateUnOp(UO.getOpcode(), Op, Name);
  }

  UnaryOperator &UO;
};

// BinarySplitter(BO)(Builder, X, Y, Name) uses Builder to create
// a binary operator like BO called Name with operands X and Y.
struct BinarySplitter {
  BinarySplitter(BinaryOperator &bo) : BO(bo) {}

  Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1,
                    const Twine &Name) const {
    return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name);
  }

  BinaryOperator &BO;
};

// Information about a load or store that we're scalarizing.
struct VectorLayout {
  VectorLayout() = default;

  // Return the alignment of element I.
  Align getElemAlign(unsigned I) {
    return commonAlignment(VecAlign, I * ElemSize);
  }

  // The type of the vector.
  VectorType *VecTy = nullptr;

  // The type of each element.
  Type *ElemTy = nullptr;

  // The alignment of the vector.
  Align VecAlign;

  // The size of each element.
  uint64_t ElemSize = 0;
};

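// Worked example for getElemAlign (added for exposition): for a vector of
// four 4-byte elements whose VecAlign is 16, the element offsets are 0, 4,
// 8 and 12, so commonAlignment yields per-element alignments of 16, 4, 8
// and 4 respectively.
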
class ScalarizerVisitor : public InstVisitor<ScalarizerVisitor, bool> {
public:
  ScalarizerVisitor(unsigned ParallelLoopAccessMDKind, DominatorTree *DT)
      : ParallelLoopAccessMDKind(ParallelLoopAccessMDKind), DT(DT) {
  }

  bool visit(Function &F);

  // InstVisitor methods. They return true if the instruction was scalarized,
  // false if nothing changed.
  bool visitInstruction(Instruction &I) { return false; }
  bool visitSelectInst(SelectInst &SI);
  bool visitICmpInst(ICmpInst &ICI);
  bool visitFCmpInst(FCmpInst &FCI);
  bool visitUnaryOperator(UnaryOperator &UO);
  bool visitBinaryOperator(BinaryOperator &BO);
  bool visitGetElementPtrInst(GetElementPtrInst &GEPI);
  bool visitCastInst(CastInst &CI);
  bool visitBitCastInst(BitCastInst &BCI);
  bool visitInsertElementInst(InsertElementInst &IEI);
  bool visitExtractElementInst(ExtractElementInst &EEI);
  bool visitShuffleVectorInst(ShuffleVectorInst &SVI);
  bool visitPHINode(PHINode &PHI);
  bool visitLoadInst(LoadInst &LI);
  bool visitStoreInst(StoreInst &SI);
  bool visitCallInst(CallInst &ICI);

private:
  Scatterer scatter(Instruction *Point, Value *V);
  void gather(Instruction *Op, const ValueVector &CV);
  bool canTransferMetadata(unsigned Kind);
  void transferMetadataAndIRFlags(Instruction *Op, const ValueVector &CV);
  Optional<VectorLayout> getVectorLayout(Type *Ty, Align Alignment,
                                         const DataLayout &DL);
  bool finish();

  template<typename T> bool splitUnary(Instruction &, const T &);
  template<typename T> bool splitBinary(Instruction &, const T &);

  bool splitCall(CallInst &CI);

  ScatterMap Scattered;
  GatherList Gathered;

  SmallVector<WeakTrackingVH, 32> PotentiallyDeadInstrs;

  unsigned ParallelLoopAccessMDKind;

  DominatorTree *DT;
};

class ScalarizerLegacyPass : public FunctionPass {
public:
  static char ID;

  ScalarizerLegacyPass() : FunctionPass(ID) {
    initializeScalarizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage& AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};

} // end anonymous namespace

char ScalarizerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarizerLegacyPass, "scalarizer",
                      "Scalarize vector operations", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarizerLegacyPass, "scalarizer",
                    "Scalarize vector operations", false, false)

Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
                     ValueVector *cachePtr)
    : BB(bb), BBI(bbi), V(v), CachePtr(cachePtr) {
  Type *Ty = V->getType();
  PtrTy = dyn_cast<PointerType>(Ty);
  if (PtrTy)
    Ty = PtrTy->getPointerElementType();
  Size = cast<FixedVectorType>(Ty)->getNumElements();
  if (!CachePtr)
    Tmp.resize(Size, nullptr);
  else if (CachePtr->empty())
    CachePtr->resize(Size, nullptr);
  else
    assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}

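// Illustrative example (added for exposition): if V has type <4 x i32>*,
// operator[] below materializes component 0 as a bitcast of V to i32* and
// component I as a constant GEP off that base:
//   %v.i0 = bitcast <4 x i32>* %v to i32*
//   %v.i1 = getelementptr i32, i32* %v.i0, i32 1
// For a plain vector value, components are extractelements instead, reusing
// the operands of any insertelement chain feeding V where possible.
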
// Return component I, creating a new Value for it if necessary.
Value *Scatterer::operator[](unsigned I) {
  ValueVector &CV = (CachePtr ? *CachePtr : Tmp);
  // Try to reuse a previous value.
  if (CV[I])
    return CV[I];
  IRBuilder<> Builder(BB, BBI);
  if (PtrTy) {
    Type *ElTy =
        cast<VectorType>(PtrTy->getPointerElementType())->getElementType();
    if (!CV[0]) {
      Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
      CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
    }
    if (I != 0)
      CV[I] = Builder.CreateConstGEP1_32(ElTy, CV[0], I,
                                         V->getName() + ".i" + Twine(I));
  } else {
    // Search through a chain of InsertElementInsts looking for element I.
    // Record other elements in the cache. The new V is still suitable
    // for all uncached indices.
    while (true) {
      InsertElementInst *Insert = dyn_cast<InsertElementInst>(V);
      if (!Insert)
        break;
      ConstantInt *Idx = dyn_cast<ConstantInt>(Insert->getOperand(2));
      if (!Idx)
        break;
      unsigned J = Idx->getZExtValue();
      V = Insert->getOperand(0);
      if (I == J) {
        CV[J] = Insert->getOperand(1);
        return CV[J];
      } else if (!CV[J]) {
        // Only cache the first entry we find for each index we're not actively
        // searching for. This prevents us from going too far up the chain and
        // caching incorrect entries.
        CV[J] = Insert->getOperand(1);
      }
    }
    CV[I] = Builder.CreateExtractElement(V, Builder.getInt32(I),
                                         V->getName() + ".i" + Twine(I));
  }
  return CV[I];
}

bool ScalarizerLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  return Impl.visit(F);
}

FunctionPass *llvm::createScalarizerPass() {
  return new ScalarizerLegacyPass();
}

bool ScalarizerVisitor::visit(Function &F) {
  assert(Gathered.empty() && Scattered.empty());

  // To ensure we replace gathered components correctly, we need to do an
  // ordered traversal of the basic blocks in the function.
  ReversePostOrderTraversal<BasicBlock *> RPOT(&F.getEntryBlock());
  for (BasicBlock *BB : RPOT) {
    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
      Instruction *I = &*II;
      bool Done = InstVisitor::visit(I);
      ++II;
      if (Done && I->getType()->isVoidTy())
        I->eraseFromParent();
    }
  }
  return finish();
}

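// Illustrative note (added for exposition): visiting blocks in reverse
// post-order, and instructions within each block in order, means a
// vector-producing instruction is normally scalarized before its users, so
// a user's call to scatter() usually finds the cached components in
// Scattered rather than emitting fresh extractelements.
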
// Return a scattered form of V that can be accessed by Point. V must be a
// vector or a pointer to a vector.
Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V) {
  if (Argument *VArg = dyn_cast<Argument>(V)) {
    // Put the scattered form of arguments in the entry block,
    // so that it can be used everywhere.
    Function *F = VArg->getParent();
    BasicBlock *BB = &F->getEntryBlock();
    return Scatterer(BB, BB->begin(), V, &Scattered[V]);
  }
  if (Instruction *VOp = dyn_cast<Instruction>(V)) {
    // When scalarizing PHI nodes we might try to examine/rewrite InsertElement
    // nodes in predecessors. If those predecessors are unreachable from entry,
    // then the IR in those blocks could have unexpected properties resulting
    // in infinite loops in Scatterer::operator[]. By simply treating values
    // originating from instructions in unreachable blocks as undef we do not
    // need to analyse them further.
    if (!DT->isReachableFromEntry(VOp->getParent()))
      return Scatterer(Point->getParent(), Point->getIterator(),
                       UndefValue::get(V->getType()));
    // Put the scattered form of an instruction directly after the
    // instruction, skipping over PHI nodes and debug intrinsics.
    BasicBlock *BB = VOp->getParent();
    return Scatterer(
        BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V,
        &Scattered[V]);
  }
  // In the fallback case, just put the scattered form before Point and
  // keep the result local to Point.
  return Scatterer(Point->getParent(), Point->getIterator(), V);
}

// Replace Op with the gathered form of the components in CV. Defer the
// deletion of Op and creation of the gathered form to the end of the pass,
// so that we can avoid creating the gathered form if all uses of Op are
// replaced with uses of CV.
void ScalarizerVisitor::gather(Instruction *Op, const ValueVector &CV) {
  transferMetadataAndIRFlags(Op, CV);

  // If we already have a scattered form of Op (created from ExtractElements
  // of Op itself), replace them with the new form.
  ValueVector &SV = Scattered[Op];
  if (!SV.empty()) {
    for (unsigned I = 0, E = SV.size(); I != E; ++I) {
      Value *V = SV[I];
      if (V == nullptr || SV[I] == CV[I])
        continue;

      Instruction *Old = cast<Instruction>(V);
      if (isa<Instruction>(CV[I]))
        CV[I]->takeName(Old);
      Old->replaceAllUsesWith(CV[I]);
      PotentiallyDeadInstrs.emplace_back(Old);
    }
  }
  SV = CV;
  Gathered.push_back(GatherList::value_type(Op, &SV));
}

// Return true if it is safe to transfer the given metadata tag from
// vector to scalar instructions.
bool ScalarizerVisitor::canTransferMetadata(unsigned Tag) {
  return (Tag == LLVMContext::MD_tbaa
          || Tag == LLVMContext::MD_fpmath
          || Tag == LLVMContext::MD_tbaa_struct
          || Tag == LLVMContext::MD_invariant_load
          || Tag == LLVMContext::MD_alias_scope
          || Tag == LLVMContext::MD_noalias
          || Tag == ParallelLoopAccessMDKind
          || Tag == LLVMContext::MD_access_group);
}

// Transfer metadata from Op to the instructions in CV if it is known
// to be safe to do so.
void ScalarizerVisitor::transferMetadataAndIRFlags(Instruction *Op,
                                                   const ValueVector &CV) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  Op->getAllMetadataOtherThanDebugLoc(MDs);
  for (unsigned I = 0, E = CV.size(); I != E; ++I) {
    if (Instruction *New = dyn_cast<Instruction>(CV[I])) {
      for (const auto &MD : MDs)
        if (canTransferMetadata(MD.first))
          New->setMetadata(MD.first, MD.second);
      New->copyIRFlags(Op);
      if (Op->getDebugLoc() && !New->getDebugLoc())
        New->setDebugLoc(Op->getDebugLoc());
    }
  }
}

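// Illustrative note (added for exposition): a !tbaa or !alias.scope tag on
// a vector load is copied onto each scalar load created from it, whereas
// any tag not listed in canTransferMetadata above is conservatively
// dropped.
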
// Try to fill in Layout from Ty, returning the layout on success and None
// on failure. Alignment is the alignment of the vector access.
Optional<VectorLayout>
ScalarizerVisitor::getVectorLayout(Type *Ty, Align Alignment,
                                   const DataLayout &DL) {
  VectorLayout Layout;
  // Make sure we're dealing with a vector.
  Layout.VecTy = dyn_cast<VectorType>(Ty);
  if (!Layout.VecTy)
    return None;
  // Check that we're dealing with full-byte elements.
  Layout.ElemTy = Layout.VecTy->getElementType();
  if (!DL.typeSizeEqualsStoreSize(Layout.ElemTy))
    return None;
  Layout.VecAlign = Alignment;
  Layout.ElemSize = DL.getTypeStoreSize(Layout.ElemTy);
  return Layout;
}

// Scalarize one-operand instruction I, using Split(Builder, X, Name)
// to create an instruction like I with operand X and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitUnary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer Op = scatter(&I, I.getOperand(0));
  assert(Op.size() == NumElems && "Mismatched unary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem)
    Res[Elem] = Split(Builder, Op[Elem], I.getName() + ".i" + Twine(Elem));
  gather(&I, Res);
  return true;
}

// Scalarize two-operand instruction I, using Split(Builder, X, Y, Name)
// to create an instruction like I with operands X and Y and name Name.
template<typename Splitter>
bool ScalarizerVisitor::splitBinary(Instruction &I, const Splitter &Split) {
  VectorType *VT = dyn_cast<VectorType>(I.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&I);
  Scatterer VOp0 = scatter(&I, I.getOperand(0));
  Scatterer VOp1 = scatter(&I, I.getOperand(1));
  assert(VOp0.size() == NumElems && "Mismatched binary operation");
  assert(VOp1.size() == NumElems && "Mismatched binary operation");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    Value *Op0 = VOp0[Elem];
    Value *Op1 = VOp1[Elem];
    Res[Elem] = Split(Builder, Op0, Op1, I.getName() + ".i" + Twine(Elem));
  }
  gather(&I, Res);
  return true;
}

static bool isTriviallyScalariable(Intrinsic::ID ID) {
  return isTriviallyVectorizable(ID);
}

// All of the current scalarizable intrinsics only have one mangled type.
static Function *getScalarIntrinsicDeclaration(Module *M,
                                               Intrinsic::ID ID,
                                               ArrayRef<Type*> Tys) {
  return Intrinsic::getDeclaration(M, ID, Tys);
}

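// Illustrative example (added for exposition): splitCall below turns
//   %r = call <2 x float> @llvm.fabs.v2f32(<2 x float> %x)
// into two scalar calls
//   %r.i0 = call float @llvm.fabs.f32(float %x.i0)
//   %r.i1 = call float @llvm.fabs.f32(float %x.i1)
// because fabs is trivially vectorizable and therefore trivially
// scalarizable.
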
/// If CI is a call to a vector-typed intrinsic function, split it into one
/// scalar call per element, if the intrinsic allows it.
bool ScalarizerVisitor::splitCall(CallInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getType());
  if (!VT)
    return false;

  Function *F = CI.getCalledFunction();
  if (!F)
    return false;

  Intrinsic::ID ID = F->getIntrinsicID();
  if (ID == Intrinsic::not_intrinsic || !isTriviallyScalariable(ID))
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  unsigned NumArgs = CI.arg_size();

  ValueVector ScalarOperands(NumArgs);
  SmallVector<Scatterer, 8> Scattered(NumArgs);

  Scattered.resize(NumArgs);

  SmallVector<llvm::Type *, 3> Tys;
  Tys.push_back(VT->getScalarType());

  // This assumes that any vector-typed operand has the same number of
  // elements as the return vector type, which is true for all current
  // intrinsics.
  for (unsigned I = 0; I != NumArgs; ++I) {
    Value *OpI = CI.getOperand(I);
    if (OpI->getType()->isVectorTy()) {
      Scattered[I] = scatter(&CI, OpI);
      assert(Scattered[I].size() == NumElems && "mismatched call operands");
    } else {
      ScalarOperands[I] = OpI;
      if (hasVectorInstrinsicOverloadedScalarOpd(ID, I))
        Tys.push_back(OpI->getType());
    }
  }

  ValueVector Res(NumElems);
  ValueVector ScalarCallOps(NumArgs);

  Function *NewIntrin = getScalarIntrinsicDeclaration(F->getParent(), ID, Tys);
  IRBuilder<> Builder(&CI);

  // Perform actual scalarization, taking care to preserve any scalar operands.
  for (unsigned Elem = 0; Elem < NumElems; ++Elem) {
    ScalarCallOps.clear();

    for (unsigned J = 0; J != NumArgs; ++J) {
      if (hasVectorInstrinsicScalarOpd(ID, J))
        ScalarCallOps.push_back(ScalarOperands[J]);
      else
        ScalarCallOps.push_back(Scattered[J][Elem]);
    }

    Res[Elem] = Builder.CreateCall(NewIntrin, ScalarCallOps,
                                   CI.getName() + ".i" + Twine(Elem));
  }

  gather(&CI, Res);
  return true;
}

bool ScalarizerVisitor::visitSelectInst(SelectInst &SI) {
  VectorType *VT = dyn_cast<VectorType>(SI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VOp1 = scatter(&SI, SI.getOperand(1));
  Scatterer VOp2 = scatter(&SI, SI.getOperand(2));
  assert(VOp1.size() == NumElems && "Mismatched select");
  assert(VOp2.size() == NumElems && "Mismatched select");
  ValueVector Res;
  Res.resize(NumElems);

  if (SI.getOperand(0)->getType()->isVectorTy()) {
    Scatterer VOp0 = scatter(&SI, SI.getOperand(0));
    assert(VOp0.size() == NumElems && "Mismatched select");
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op0 = VOp0[I];
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  } else {
    Value *Op0 = SI.getOperand(0);
    for (unsigned I = 0; I < NumElems; ++I) {
      Value *Op1 = VOp1[I];
      Value *Op2 = VOp2[I];
      Res[I] = Builder.CreateSelect(Op0, Op1, Op2,
                                    SI.getName() + ".i" + Twine(I));
    }
  }
  gather(&SI, Res);
  return true;
}

bool ScalarizerVisitor::visitICmpInst(ICmpInst &ICI) {
  return splitBinary(ICI, ICmpSplitter(ICI));
}

bool ScalarizerVisitor::visitFCmpInst(FCmpInst &FCI) {
  return splitBinary(FCI, FCmpSplitter(FCI));
}

bool ScalarizerVisitor::visitUnaryOperator(UnaryOperator &UO) {
  return splitUnary(UO, UnarySplitter(UO));
}

bool ScalarizerVisitor::visitBinaryOperator(BinaryOperator &BO) {
  return splitBinary(BO, BinarySplitter(BO));
}

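// Illustrative example (added for exposition): visitGetElementPtrInst below
// turns
//   %p = getelementptr i32, <2 x i32*> %base, <2 x i64> %idx
// into two scalar GEPs
//   %p.i0 = getelementptr i32, i32* %base.i0, i64 %idx.i0
//   %p.i1 = getelementptr i32, i32* %base.i1, i64 %idx.i1
// splatting %base or %idx first if either happens to be scalar.
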
bool ScalarizerVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
  VectorType *VT = dyn_cast<VectorType>(GEPI.getType());
  if (!VT)
    return false;

  IRBuilder<> Builder(&GEPI);
  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  unsigned NumIndices = GEPI.getNumIndices();

  // The base pointer might be scalar even if it's a vector GEP. In those
  // cases, splat the pointer into a vector value, and scatter that vector.
  Value *Op0 = GEPI.getOperand(0);
  if (!Op0->getType()->isVectorTy())
    Op0 = Builder.CreateVectorSplat(NumElems, Op0);
  Scatterer Base = scatter(&GEPI, Op0);

  SmallVector<Scatterer, 8> Ops;
  Ops.resize(NumIndices);
  for (unsigned I = 0; I < NumIndices; ++I) {
    Value *Op = GEPI.getOperand(I + 1);

    // The indices might be scalars even if it's a vector GEP. In those cases,
    // splat the scalar into a vector value, and scatter that vector.
    if (!Op->getType()->isVectorTy())
      Op = Builder.CreateVectorSplat(NumElems, Op);

    Ops[I] = scatter(&GEPI, Op);
  }

  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    SmallVector<Value *, 8> Indices;
    Indices.resize(NumIndices);
    for (unsigned J = 0; J < NumIndices; ++J)
      Indices[J] = Ops[J][I];
    Res[I] = Builder.CreateGEP(GEPI.getSourceElementType(), Base[I], Indices,
                               GEPI.getName() + ".i" + Twine(I));
    if (GEPI.isInBounds())
      if (GetElementPtrInst *NewGEPI = dyn_cast<GetElementPtrInst>(Res[I]))
        NewGEPI->setIsInBounds();
  }
  gather(&GEPI, Res);
  return true;
}

bool ScalarizerVisitor::visitCastInst(CastInst &CI) {
  VectorType *VT = dyn_cast<VectorType>(CI.getDestTy());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&CI);
  Scatterer Op0 = scatter(&CI, CI.getOperand(0));
  assert(Op0.size() == NumElems && "Mismatched cast");
  ValueVector Res;
  Res.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateCast(CI.getOpcode(), Op0[I], VT->getElementType(),
                                CI.getName() + ".i" + Twine(I));
  gather(&CI, Res);
  return true;
}

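// Illustrative example (added for exposition): for a bitcast that changes
// the element count, such as <2 x i64> -> <4 x i32>, each source element is
// bitcast to a <2 x i32> "mid" vector whose two halves become consecutive
// result components; the opposite (fan-in) direction packs groups of source
// components into a mid vector before bitcasting it to a single result
// element.
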
bool ScalarizerVisitor::visitBitCastInst(BitCastInst &BCI) {
  VectorType *DstVT = dyn_cast<VectorType>(BCI.getDestTy());
  VectorType *SrcVT = dyn_cast<VectorType>(BCI.getSrcTy());
  if (!DstVT || !SrcVT)
    return false;

  unsigned DstNumElems = cast<FixedVectorType>(DstVT)->getNumElements();
  unsigned SrcNumElems = cast<FixedVectorType>(SrcVT)->getNumElements();
  IRBuilder<> Builder(&BCI);
  Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
  ValueVector Res;
  Res.resize(DstNumElems);

  if (DstNumElems == SrcNumElems) {
    for (unsigned I = 0; I < DstNumElems; ++I)
      Res[I] = Builder.CreateBitCast(Op0[I], DstVT->getElementType(),
                                     BCI.getName() + ".i" + Twine(I));
  } else if (DstNumElems > SrcNumElems) {
    // <M x t1> -> <N*M x t2>. Convert each t1 to <N x t2> and copy the
    // individual elements to the destination.
    unsigned FanOut = DstNumElems / SrcNumElems;
    auto *MidTy = FixedVectorType::get(DstVT->getElementType(), FanOut);
    unsigned ResI = 0;
    for (unsigned Op0I = 0; Op0I < SrcNumElems; ++Op0I) {
      Value *V = Op0[Op0I];
      Instruction *VI;
      // Look through any existing bitcasts before converting to <N x t2>.
      // In the best case, the resulting conversion might be a no-op.
      while ((VI = dyn_cast<Instruction>(V)) &&
             VI->getOpcode() == Instruction::BitCast)
        V = VI->getOperand(0);
      V = Builder.CreateBitCast(V, MidTy, V->getName() + ".cast");
      Scatterer Mid = scatter(&BCI, V);
      for (unsigned MidI = 0; MidI < FanOut; ++MidI)
        Res[ResI++] = Mid[MidI];
    }
  } else {
    // <N*M x t1> -> <M x t2>. Convert each group of <N x t1> into a t2.
    unsigned FanIn = SrcNumElems / DstNumElems;
    auto *MidTy = FixedVectorType::get(SrcVT->getElementType(), FanIn);
    unsigned Op0I = 0;
    for (unsigned ResI = 0; ResI < DstNumElems; ++ResI) {
      Value *V = PoisonValue::get(MidTy);
      for (unsigned MidI = 0; MidI < FanIn; ++MidI)
        V = Builder.CreateInsertElement(V, Op0[Op0I++], Builder.getInt32(MidI),
                                        BCI.getName() + ".i" + Twine(ResI)
                                        + ".upto" + Twine(MidI));
      Res[ResI] = Builder.CreateBitCast(V, DstVT->getElementType(),
                                        BCI.getName() + ".i" + Twine(ResI));
    }
  }
  gather(&BCI, Res);
  return true;
}

bool ScalarizerVisitor::visitInsertElementInst(InsertElementInst &IEI) {
  VectorType *VT = dyn_cast<VectorType>(IEI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&IEI);
  Scatterer Op0 = scatter(&IEI, IEI.getOperand(0));
  Value *NewElt = IEI.getOperand(1);
  Value *InsIdx = IEI.getOperand(2);

  ValueVector Res;
  Res.resize(NumElems);

  if (auto *CI = dyn_cast<ConstantInt>(InsIdx)) {
    for (unsigned I = 0; I < NumElems; ++I)
      Res[I] = CI->getValue().getZExtValue() == I ? NewElt : Op0[I];
  } else {
    if (!ScalarizeVariableInsertExtract)
      return false;

    for (unsigned I = 0; I < NumElems; ++I) {
      Value *ShouldReplace =
          Builder.CreateICmpEQ(InsIdx, ConstantInt::get(InsIdx->getType(), I),
                               InsIdx->getName() + ".is." + Twine(I));
      Value *OldElt = Op0[I];
      Res[I] = Builder.CreateSelect(ShouldReplace, NewElt, OldElt,
                                    IEI.getName() + ".i" + Twine(I));
    }
  }

  gather(&IEI, Res);
  return true;
}

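// Illustrative example (added for exposition): mirroring the variable-index
// insertelement lowering above, a variable-index extractelement from
// <2 x i32> %v becomes a compare-and-select scan starting from undef:
//   %idx.is.0 = icmp eq i32 %idx, 0
//   %r.upto0 = select i1 %idx.is.0, i32 %v.i0, i32 undef
//   %idx.is.1 = icmp eq i32 %idx, 1
//   %r.upto1 = select i1 %idx.is.1, i32 %v.i1, i32 %r.upto0
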
bool ScalarizerVisitor::visitExtractElementInst(ExtractElementInst &EEI) {
  VectorType *VT = dyn_cast<VectorType>(EEI.getOperand(0)->getType());
  if (!VT)
    return false;

  unsigned NumSrcElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&EEI);
  Scatterer Op0 = scatter(&EEI, EEI.getOperand(0));
  Value *ExtIdx = EEI.getOperand(1);

  if (auto *CI = dyn_cast<ConstantInt>(ExtIdx)) {
    Value *Res = Op0[CI->getValue().getZExtValue()];
    gather(&EEI, {Res});
    return true;
  }

  if (!ScalarizeVariableInsertExtract)
    return false;

  Value *Res = UndefValue::get(VT->getElementType());
  for (unsigned I = 0; I < NumSrcElems; ++I) {
    Value *ShouldExtract =
        Builder.CreateICmpEQ(ExtIdx, ConstantInt::get(ExtIdx->getType(), I),
                             ExtIdx->getName() + ".is." + Twine(I));
    Value *Elt = Op0[I];
    Res = Builder.CreateSelect(ShouldExtract, Elt, Res,
                               EEI.getName() + ".upto" + Twine(I));
  }
  gather(&EEI, {Res});
  return true;
}

bool ScalarizerVisitor::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  VectorType *VT = dyn_cast<VectorType>(SVI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  Scatterer Op0 = scatter(&SVI, SVI.getOperand(0));
  Scatterer Op1 = scatter(&SVI, SVI.getOperand(1));
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I) {
    int Selector = SVI.getMaskValue(I);
    if (Selector < 0)
      Res[I] = UndefValue::get(VT->getElementType());
    else if (unsigned(Selector) < Op0.size())
      Res[I] = Op0[Selector];
    else
      Res[I] = Op1[Selector - Op0.size()];
  }
  gather(&SVI, Res);
  return true;
}

bool ScalarizerVisitor::visitPHINode(PHINode &PHI) {
  VectorType *VT = dyn_cast<VectorType>(PHI.getType());
  if (!VT)
    return false;

  unsigned NumElems = cast<FixedVectorType>(VT)->getNumElements();
  IRBuilder<> Builder(&PHI);
  ValueVector Res;
  Res.resize(NumElems);

  unsigned NumOps = PHI.getNumOperands();
  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreatePHI(VT->getElementType(), NumOps,
                               PHI.getName() + ".i" + Twine(I));

  for (unsigned I = 0; I < NumOps; ++I) {
    Scatterer Op = scatter(&PHI, PHI.getIncomingValue(I));
    BasicBlock *IncomingBlock = PHI.getIncomingBlock(I);
    for (unsigned J = 0; J < NumElems; ++J)
      cast<PHINode>(Res[J])->addIncoming(Op[J], IncomingBlock);
  }
  gather(&PHI, Res);
  return true;
}

bool ScalarizerVisitor::visitLoadInst(LoadInst &LI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!LI.isSimple())
    return false;

  Optional<VectorLayout> Layout = getVectorLayout(
      LI.getType(), LI.getAlign(), LI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = cast<FixedVectorType>(Layout->VecTy)->getNumElements();
  IRBuilder<> Builder(&LI);
  Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
  ValueVector Res;
  Res.resize(NumElems);

  for (unsigned I = 0; I < NumElems; ++I)
    Res[I] = Builder.CreateAlignedLoad(Layout->VecTy->getElementType(), Ptr[I],
                                       Align(Layout->getElemAlign(I)),
                                       LI.getName() + ".i" + Twine(I));
  gather(&LI, Res);
  return true;
}

bool ScalarizerVisitor::visitStoreInst(StoreInst &SI) {
  if (!ScalarizeLoadStore)
    return false;
  if (!SI.isSimple())
    return false;

  Value *FullValue = SI.getValueOperand();
  Optional<VectorLayout> Layout = getVectorLayout(
      FullValue->getType(), SI.getAlign(), SI.getModule()->getDataLayout());
  if (!Layout)
    return false;

  unsigned NumElems = cast<FixedVectorType>(Layout->VecTy)->getNumElements();
  IRBuilder<> Builder(&SI);
  Scatterer VPtr = scatter(&SI, SI.getPointerOperand());
  Scatterer VVal = scatter(&SI, FullValue);

  ValueVector Stores;
  Stores.resize(NumElems);
  for (unsigned I = 0; I < NumElems; ++I) {
    Value *Val = VVal[I];
    Value *Ptr = VPtr[I];
    Stores[I] = Builder.CreateAlignedStore(Val, Ptr, Layout->getElemAlign(I));
  }
  transferMetadataAndIRFlags(&SI, Stores);
  return true;
}

bool ScalarizerVisitor::visitCallInst(CallInst &CI) {
  return splitCall(CI);
}

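// Illustrative note (added for exposition): if the original vector value is
// still used after scalarization, finish() rebuilds it with an
// insertelement chain, e.g.
//   %res.upto0 = insertelement <2 x i32> poison, i32 %res.i0, i32 0
//   %res = insertelement <2 x i32> %res.upto0, i32 %res.i1, i32 1
// (the final insert takes over the original name via takeName).
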
// Delete the instructions that we scalarized. If a full vector result
// is still needed, recreate it using InsertElements.
bool ScalarizerVisitor::finish() {
  // The presence of data in Gathered or Scattered indicates changes
  // made to the Function.
  if (Gathered.empty() && Scattered.empty())
    return false;
  for (const auto &GMI : Gathered) {
    Instruction *Op = GMI.first;
    ValueVector &CV = *GMI.second;
    if (!Op->use_empty()) {
      // The value is still needed, so recreate it using a series of
      // InsertElements.
      Value *Res = PoisonValue::get(Op->getType());
      if (auto *Ty = dyn_cast<VectorType>(Op->getType())) {
        BasicBlock *BB = Op->getParent();
        unsigned Count = cast<FixedVectorType>(Ty)->getNumElements();
        IRBuilder<> Builder(Op);
        if (isa<PHINode>(Op))
          Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
        for (unsigned I = 0; I < Count; ++I)
          Res = Builder.CreateInsertElement(Res, CV[I], Builder.getInt32(I),
                                            Op->getName() + ".upto" + Twine(I));
        Res->takeName(Op);
      } else {
        assert(CV.size() == 1 && Op->getType() == CV[0]->getType());
        Res = CV[0];
        if (Op == Res)
          continue;
      }
      Op->replaceAllUsesWith(Res);
    }
    PotentiallyDeadInstrs.emplace_back(Op);
  }
  Gathered.clear();
  Scattered.clear();

  RecursivelyDeleteTriviallyDeadInstructionsPermissive(PotentiallyDeadInstrs);

  return true;
}

PreservedAnalyses ScalarizerPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  Module &M = *F.getParent();
  unsigned ParallelLoopAccessMDKind =
      M.getContext().getMDKindID("llvm.mem.parallel_loop_access");
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarizerVisitor Impl(ParallelLoopAccessMDKind, DT);
  bool Changed = Impl.visit(F);
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return Changed ? PA : PreservedAnalyses::all();
}