//===-- HexagonVectorCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// HexagonVectorCombine is a utility class implementing a variety of functions
// that assist in vector-based optimizations.
//
// AlignVectors: replace unaligned vector loads and stores with aligned ones.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Metadata.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"

#include <algorithm>
#include <deque>
#include <map>
#include <set>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-vc"

using namespace llvm;

namespace {
class HexagonVectorCombine {
public:
  HexagonVectorCombine(Function &F_, AliasAnalysis &AA_, AssumptionCache &AC_,
                       DominatorTree &DT_, TargetLibraryInfo &TLI_,
                       const TargetMachine &TM_)
      : F(F_), DL(F.getParent()->getDataLayout()), AA(AA_), AC(AC_), DT(DT_),
        TLI(TLI_),
        HST(static_cast<const HexagonSubtarget &>(*TM_.getSubtargetImpl(F))) {}

  bool run();

  // Common integer type.
  IntegerType *getIntTy() const;
  // Byte type: either scalar (when ElemCount = 0), or vector with the given
  // element count.
  Type *getByteTy(int ElemCount = 0) const;
  // Boolean type: either scalar (when ElemCount = 0), or vector with the
  // given element count.
  Type *getBoolTy(int ElemCount = 0) const;
  // Create a ConstantInt of type returned by getIntTy with the value Val.
  ConstantInt *getConstInt(int Val) const;
  // Get the integer value of Val, if it exists.
  Optional<APInt> getIntValue(const Value *Val) const;
  // Is Val a constant 0, or a vector of 0s?
  bool isZero(const Value *Val) const;
  // Is Val an undef value?
  bool isUndef(const Value *Val) const;

  int getSizeOf(const Value *Val) const;
  int getSizeOf(const Type *Ty) const;
  int getAllocSizeOf(const Type *Ty) const;
  int getTypeAlignment(Type *Ty) const;

  Constant *getNullValue(Type *Ty) const;
  Constant *getFullValue(Type *Ty) const;

  Value *insertb(IRBuilder<> &Builder, Value *Dest, Value *Src, int Start,
                 int Length, int Where) const;
  Value *vlalignb(IRBuilder<> &Builder, Value *Lo, Value *Hi, Value *Amt) const;
  Value *vralignb(IRBuilder<> &Builder, Value *Lo, Value *Hi, Value *Amt) const;
  Value *concat(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) const;
  Value *vresize(IRBuilder<> &Builder, Value *Val, int NewSize,
                 Value *Pad) const;
  Value *rescale(IRBuilder<> &Builder, Value *Mask, Type *FromTy,
                 Type *ToTy) const;
  Value *vlsb(IRBuilder<> &Builder, Value *Val) const;
  Value *vbytes(IRBuilder<> &Builder, Value *Val) const;

  Value *createHvxIntrinsic(IRBuilder<> &Builder, Intrinsic::ID IntID,
                            Type *RetTy, ArrayRef<Value *> Args) const;

  Optional<int> calculatePointerDifference(Value *Ptr0, Value *Ptr1) const;

  template <typename T = std::vector<Instruction *>>
  bool isSafeToMoveBeforeInBB(const Instruction &In,
                              BasicBlock::const_iterator To,
                              const T &Ignore = {}) const;

  Function &F;
  const DataLayout &DL;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  TargetLibraryInfo &TLI;
  const HexagonSubtarget &HST;

private:
#ifndef NDEBUG
  // These two functions are only used for assertions at the moment.
  bool isByteVecTy(Type *Ty) const;
  bool isSectorTy(Type *Ty) const;
#endif
  Value *getElementRange(IRBuilder<> &Builder, Value *Lo, Value *Hi, int Start,
                         int Length) const;
};

class AlignVectors {
public:
  AlignVectors(HexagonVectorCombine &HVC_) : HVC(HVC_) {}

  bool run();

private:
  using InstList = std::vector<Instruction *>;

  struct Segment {
    void *Data;
    int Start;
    int Size;
  };

  struct AddrInfo {
    AddrInfo(const AddrInfo &) = default;
    AddrInfo(const HexagonVectorCombine &HVC, Instruction *I, Value *A, Type *T,
             Align H)
        : Inst(I), Addr(A), ValTy(T), HaveAlign(H),
          NeedAlign(HVC.getTypeAlignment(ValTy)) {}
    AddrInfo &operator=(const AddrInfo &) = default;

    // XXX: add Size member?
    Instruction *Inst;
    Value *Addr;
    Type *ValTy;
    Align HaveAlign;
    Align NeedAlign;
    int Offset = 0; // Offset (in bytes) from the first member of the
                    // containing AddrList.
  };
  using AddrList = std::vector<AddrInfo>;

  struct InstrLess {
    bool operator()(const Instruction *A, const Instruction *B) const {
      return A->comesBefore(B);
    }
  };
  using DepList = std::set<Instruction *, InstrLess>;

  struct MoveGroup {
    MoveGroup(const AddrInfo &AI, Instruction *B, bool Hvx, bool Load)
        : Base(B), Main{AI.Inst}, IsHvx(Hvx), IsLoad(Load) {}
    Instruction *Base; // Base instruction of the parent address group.
    InstList Main;     // Main group of instructions.
    InstList Deps;     // List of dependencies.
    bool IsHvx;        // Is this a group of HVX instructions?
    bool IsLoad;       // Is this a load group?
  };
  using MoveList = std::vector<MoveGroup>;
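  // An illustrative note on the structure below (example values invented):
  // a ByteSpan is a list of Blocks, and a Block with
  //   Seg = {Val = V, Start = 0, Size = 64} and Pos = 16
  // states that bytes [0..64) of V occupy bytes [16..80) of the span.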
  struct ByteSpan {
    struct Segment {
      // Segment of a Value: 'Len' bytes starting at byte 'Begin'.
      Segment(Value *Val, int Begin, int Len)
          : Val(Val), Start(Begin), Size(Len) {}
      Segment(const Segment &Seg) = default;
      Segment &operator=(const Segment &Seg) = default;
      Value *Val; // Value representable as a sequence of bytes.
      int Start;  // First byte of the value that belongs to the segment.
      int Size;   // Number of bytes in the segment.
    };

    struct Block {
      Block(Value *Val, int Len, int Pos) : Seg(Val, 0, Len), Pos(Pos) {}
      Block(Value *Val, int Off, int Len, int Pos)
          : Seg(Val, Off, Len), Pos(Pos) {}
      Block(const Block &Blk) = default;
      Block &operator=(const Block &Blk) = default;
      Segment Seg; // Value segment.
      int Pos;     // Position (offset) of the segment in the Block.
    };

    int extent() const;
    ByteSpan section(int Start, int Length) const;
    ByteSpan &shift(int Offset);
    SmallVector<Value *, 8> values() const;

    int size() const { return Blocks.size(); }
    Block &operator[](int i) { return Blocks[i]; }

    std::vector<Block> Blocks;

    using iterator = decltype(Blocks)::iterator;
    iterator begin() { return Blocks.begin(); }
    iterator end() { return Blocks.end(); }
    using const_iterator = decltype(Blocks)::const_iterator;
    const_iterator begin() const { return Blocks.begin(); }
    const_iterator end() const { return Blocks.end(); }
  };

  Align getAlignFromValue(const Value *V) const;
  Optional<MemoryLocation> getLocation(const Instruction &In) const;
  Optional<AddrInfo> getAddrInfo(Instruction &In) const;
  bool isHvx(const AddrInfo &AI) const;

  Value *getPayload(Value *Val) const;
  Value *getMask(Value *Val) const;
  Value *getPassThrough(Value *Val) const;

  Value *createAdjustedPointer(IRBuilder<> &Builder, Value *Ptr, Type *ValTy,
                               int Adjust) const;
  Value *createAlignedPointer(IRBuilder<> &Builder, Value *Ptr, Type *ValTy,
                              int Alignment) const;
  Value *createAlignedLoad(IRBuilder<> &Builder, Type *ValTy, Value *Ptr,
                           int Alignment, Value *Mask, Value *PassThru) const;
  Value *createAlignedStore(IRBuilder<> &Builder, Value *Val, Value *Ptr,
                            int Alignment, Value *Mask) const;

  bool createAddressGroups();
  MoveList createLoadGroups(const AddrList &Group) const;
  MoveList createStoreGroups(const AddrList &Group) const;
  bool move(const MoveGroup &Move) const;
  bool realignGroup(const MoveGroup &Move) const;

  friend raw_ostream &operator<<(raw_ostream &OS, const AddrInfo &AI);
  friend raw_ostream &operator<<(raw_ostream &OS, const MoveGroup &MG);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan &BS);

  std::map<Instruction *, AddrList> AddrGroups;
  HexagonVectorCombine &HVC;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::AddrInfo &AI) {
  OS << "Inst: " << AI.Inst << " " << *AI.Inst << '\n';
  OS << "Addr: " << *AI.Addr << '\n';
  OS << "Type: " << *AI.ValTy << '\n';
  OS << "HaveAlign: " << AI.HaveAlign.value() << '\n';
  OS << "NeedAlign: " << AI.NeedAlign.value() << '\n';
  OS << "Offset: " << AI.Offset;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::MoveGroup &MG) {
  OS << "Main\n";
  for (Instruction *I : MG.Main)
    OS << "  " << *I << '\n';
  OS << "Deps\n";
  for (Instruction *I : MG.Deps)
    OS << "  " << *I << '\n';
  return OS;
}
LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::ByteSpan &BS) {
  OS << "ByteSpan[size=" << BS.size() << ", extent=" << BS.extent() << '\n';
  for (const AlignVectors::ByteSpan::Block &B : BS) {
    OS << "  @" << B.Pos << " [" << B.Seg.Start << ',' << B.Seg.Size << "] "
       << *B.Seg.Val << '\n';
  }
  OS << ']';
  return OS;
}

} // namespace

namespace {

template <typename T> T *getIfUnordered(T *MaybeT) {
  return MaybeT && MaybeT->isUnordered() ? MaybeT : nullptr;
}
template <typename T> T *isCandidate(Instruction *In) {
  return dyn_cast<T>(In);
}
template <> LoadInst *isCandidate<LoadInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<LoadInst>(In));
}
template <> StoreInst *isCandidate<StoreInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<StoreInst>(In));
}

#if !defined(_MSC_VER) || _MSC_VER >= 1926
// VS2017 and some versions of VS2019 have trouble compiling this:
//   error C2976: 'std::map': too few template arguments
// VS 2019 16.x is known to work, except for 16.4/16.5 (MSC_VER 1924/1925)
template <typename Pred, typename... Ts>
void erase_if(std::map<Ts...> &map, Pred p)
#else
template <typename Pred, typename T, typename U>
void erase_if(std::map<T, U> &map, Pred p)
#endif
{
  for (auto i = map.begin(), e = map.end(); i != e;) {
    if (p(*i))
      i = map.erase(i);
    else
      i = std::next(i);
  }
}

// Forward other erase_ifs to the LLVM implementations.
template <typename Pred, typename T> void erase_if(T &&container, Pred p) {
  llvm::erase_if(std::forward<T>(container), p);
}

} // namespace

// --- Begin AlignVectors

auto AlignVectors::ByteSpan::extent() const -> int {
  if (size() == 0)
    return 0;
  int Min = Blocks[0].Pos;
  int Max = Blocks[0].Pos + Blocks[0].Seg.Size;
  for (int i = 1, e = size(); i != e; ++i) {
    Min = std::min(Min, Blocks[i].Pos);
    Max = std::max(Max, Blocks[i].Pos + Blocks[i].Seg.Size);
  }
  return Max - Min;
}
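// An illustrative example of section() (values invented): given blocks
// {Val=A, Start=0, Size=8, Pos=0} and {Val=B, Start=0, Size=8, Pos=8},
// section(4, 8) covers bytes [4..12) of the span and yields
// {Val=A, Start=4, Size=4, Pos=4} and {Val=B, Start=0, Size=4, Pos=8}.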
auto AlignVectors::ByteSpan::section(int Start, int Length) const -> ByteSpan {
  ByteSpan Section;
  for (const ByteSpan::Block &B : Blocks) {
    int L = std::max(B.Pos, Start);                        // Left end.
    int R = std::min(B.Pos + B.Seg.Size, Start + Length);  // Right end+1.
    if (L < R) {
      // How much to chop off the beginning of the segment:
      int Off = L > B.Pos ? L - B.Pos : 0;
      Section.Blocks.emplace_back(B.Seg.Val, B.Seg.Start + Off, R - L, L);
    }
  }
  return Section;
}

auto AlignVectors::ByteSpan::shift(int Offset) -> ByteSpan & {
  for (Block &B : Blocks)
    B.Pos += Offset;
  return *this;
}

auto AlignVectors::ByteSpan::values() const -> SmallVector<Value *, 8> {
  SmallVector<Value *, 8> Values(Blocks.size());
  for (int i = 0, e = Blocks.size(); i != e; ++i)
    Values[i] = Blocks[i].Seg.Val;
  return Values;
}

auto AlignVectors::getAlignFromValue(const Value *V) const -> Align {
  const auto *C = dyn_cast<ConstantInt>(V);
  assert(C && "Alignment must be a compile-time constant integer");
  return C->getAlignValue();
}

auto AlignVectors::getAddrInfo(Instruction &In) const -> Optional<AddrInfo> {
  if (auto *L = isCandidate<LoadInst>(&In))
    return AddrInfo(HVC, L, L->getPointerOperand(), L->getType(),
                    L->getAlign());
  if (auto *S = isCandidate<StoreInst>(&In))
    return AddrInfo(HVC, S, S->getPointerOperand(),
                    S->getValueOperand()->getType(), S->getAlign());
  if (auto *II = isCandidate<IntrinsicInst>(&In)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    switch (ID) {
    case Intrinsic::masked_load:
      return AddrInfo(HVC, II, II->getArgOperand(0), II->getType(),
                      getAlignFromValue(II->getArgOperand(1)));
    case Intrinsic::masked_store:
      return AddrInfo(HVC, II, II->getArgOperand(1),
                      II->getArgOperand(0)->getType(),
                      getAlignFromValue(II->getArgOperand(2)));
    }
  }
  return Optional<AddrInfo>();
}

auto AlignVectors::isHvx(const AddrInfo &AI) const -> bool {
  return HVC.HST.isTypeForHVX(AI.ValTy);
}

auto AlignVectors::getPayload(Value *Val) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(Val)) {
    Intrinsic::ID ID = 0;
    if (auto *II = dyn_cast<IntrinsicInst>(In))
      ID = II->getIntrinsicID();
    if (isa<StoreInst>(In) || ID == Intrinsic::masked_store)
      return In->getOperand(0);
  }
  return Val;
}

auto AlignVectors::getMask(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II->getArgOperand(2);
    case Intrinsic::masked_store:
      return II->getArgOperand(3);
    }
  }

  Type *ValTy = getPayload(Val)->getType();
  if (auto *VecTy = dyn_cast<VectorType>(ValTy)) {
    int ElemCount = VecTy->getElementCount().getFixedValue();
    return HVC.getFullValue(HVC.getBoolTy(ElemCount));
  }
  return HVC.getFullValue(HVC.getBoolTy());
}

auto AlignVectors::getPassThrough(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load)
      return II->getArgOperand(3);
  }
  return UndefValue::get(getPayload(Val)->getType());
}

auto AlignVectors::createAdjustedPointer(IRBuilder<> &Builder, Value *Ptr,
                                         Type *ValTy, int Adjust) const
    -> Value * {
  // The adjustment is in bytes, but if it's a multiple of the type size,
  // we don't need to do pointer casts.
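  // For example (illustrative): with an i32 element type, an adjustment of
  // +8 bytes becomes a "gep i32, ..., i32 2" followed by a pointer cast,
  // instead of a byte-wise gep on an i8 pointer.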
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  if (!PtrTy->isOpaque()) {
    Type *ElemTy = PtrTy->getNonOpaquePointerElementType();
    int ElemSize = HVC.getAllocSizeOf(ElemTy);
    if (Adjust % ElemSize == 0 && Adjust != 0) {
      Value *Tmp0 =
          Builder.CreateGEP(ElemTy, Ptr, HVC.getConstInt(Adjust / ElemSize));
      return Builder.CreatePointerCast(Tmp0, ValTy->getPointerTo());
    }
  }

  PointerType *CharPtrTy = Type::getInt8PtrTy(HVC.F.getContext());
  Value *Tmp0 = Builder.CreatePointerCast(Ptr, CharPtrTy);
  Value *Tmp1 = Builder.CreateGEP(Type::getInt8Ty(HVC.F.getContext()), Tmp0,
                                  HVC.getConstInt(Adjust));
  return Builder.CreatePointerCast(Tmp1, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedPointer(IRBuilder<> &Builder, Value *Ptr,
                                        Type *ValTy, int Alignment) const
    -> Value * {
  Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy());
  Value *Mask = HVC.getConstInt(-Alignment);
  Value *And = Builder.CreateAnd(AsInt, Mask);
  return Builder.CreateIntToPtr(And, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedLoad(IRBuilder<> &Builder, Type *ValTy,
                                     Value *Ptr, int Alignment, Value *Mask,
                                     Value *PassThru) const -> Value * {
  assert(!HVC.isUndef(Mask)); // Should this be allowed?
  if (HVC.isZero(Mask))
    return PassThru;
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment));
  return Builder.CreateMaskedLoad(ValTy, Ptr, Align(Alignment), Mask, PassThru);
}

auto AlignVectors::createAlignedStore(IRBuilder<> &Builder, Value *Val,
                                      Value *Ptr, int Alignment,
                                      Value *Mask) const -> Value * {
  if (HVC.isZero(Mask) || HVC.isUndef(Val) || HVC.isUndef(Mask))
    return UndefValue::get(Val->getType());
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedStore(Val, Ptr, Align(Alignment));
  return Builder.CreateMaskedStore(Val, Ptr, Align(Alignment), Mask);
}

auto AlignVectors::createAddressGroups() -> bool {
  // An address group created here may contain instructions spanning
  // multiple basic blocks.
  AddrList WorkStack;

  auto findBaseAndOffset = [&](AddrInfo &AI) -> std::pair<Instruction *, int> {
    for (AddrInfo &W : WorkStack) {
      if (auto D = HVC.calculatePointerDifference(AI.Addr, W.Addr))
        return std::make_pair(W.Inst, *D);
    }
    return std::make_pair(nullptr, 0);
  };

  auto traverseBlock = [&](DomTreeNode *DomN, auto Visit) -> void {
    BasicBlock &Block = *DomN->getBlock();
    for (Instruction &I : Block) {
      auto AI = this->getAddrInfo(I); // Use this-> for gcc6.
      if (!AI)
        continue;
      auto F = findBaseAndOffset(*AI);
      Instruction *GroupInst;
      if (Instruction *BI = F.first) {
        AI->Offset = F.second;
        GroupInst = BI;
      } else {
        WorkStack.push_back(*AI);
        GroupInst = AI->Inst;
      }
      AddrGroups[GroupInst].push_back(*AI);
    }

    for (DomTreeNode *C : DomN->children())
      Visit(C, Visit);

    while (!WorkStack.empty() && WorkStack.back().Inst->getParent() == &Block)
      WorkStack.pop_back();
  };

  traverseBlock(HVC.DT.getRootNode(), traverseBlock);
  assert(WorkStack.empty());

  // AddrGroups are formed.
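  // An illustrative example (not from the source): loads from p, p+64 and
  // p+128 all land in AddrGroups[<load p>] with Offsets 0, 64 and 128,
  // while an access whose pointer difference from every open base cannot
  // be computed starts a group of its own.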
  // Remove groups of size 1.
  erase_if(AddrGroups, [](auto &G) { return G.second.size() == 1; });
  // Remove groups that don't use HVX types.
  erase_if(AddrGroups, [&](auto &G) {
    return llvm::none_of(
        G.second, [&](auto &I) { return HVC.HST.isTypeForHVX(I.ValTy); });
  });

  return !AddrGroups.empty();
}

auto AlignVectors::createLoadGroups(const AddrList &Group) const -> MoveList {
  // Form load groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto getUpwardDeps = [](Instruction *In, Instruction *Base) {
    BasicBlock *Parent = Base->getParent();
    assert(In->getParent() == Parent &&
           "Base and In should be in the same block");
    assert(Base->comesBefore(In) && "Base should come before In");

    DepList Deps;
    std::deque<Instruction *> WorkQ = {In};
    while (!WorkQ.empty()) {
      Instruction *D = WorkQ.front();
      WorkQ.pop_front();
      Deps.insert(D);
      for (Value *Op : D->operands()) {
        if (auto *I = dyn_cast<Instruction>(Op)) {
          if (I->getParent() == Parent && Base->comesBefore(I))
            WorkQ.push_back(I);
        }
      }
    }
    return Deps;
  };

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // Leading instruction in the load group.
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;

    auto isSafeToMoveToBase = [&](const Instruction *I) {
      return HVC.isSafeToMoveBeforeInBB(*I, Base->getIterator());
    };
    DepList Deps = getUpwardDeps(Info.Inst, Base);
    if (!llvm::all_of(Deps, isSafeToMoveToBase))
      return false;

    // The dependencies will be moved together with the load, so make sure
    // that none of them could be moved independently in another group.
    Deps.erase(Info.Inst);
    auto inAddrMap = [&](Instruction *I) { return AddrGroups.count(I) > 0; };
    if (llvm::any_of(Deps, inAddrMap))
      return false;
    Move.Main.push_back(Info.Inst);
    llvm::append_range(Move.Deps, Deps);
    return true;
  };

  MoveList LoadGroups;

  for (const AddrInfo &Info : Group) {
    if (!Info.Inst->mayReadFromMemory())
      continue;
    if (LoadGroups.empty() || !tryAddTo(Info, LoadGroups.back()))
      LoadGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), true);
  }

  // Erase singleton groups.
  erase_if(LoadGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return LoadGroups;
}
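// An illustrative example (not from the source): three unordered v64i8 loads
// from p, p+64 and p+128 in one basic block become a single load MoveGroup,
// together with their movable upward dependencies; a group that keeps only
// one member is erased as a singleton above.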
auto AlignVectors::createStoreGroups(const AddrList &Group) const -> MoveList {
  // Form store groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // For stores with return values we'd have to collect downward
    // dependencies. There are no such stores that we handle at the moment,
    // so omit that.
    assert(Info.Inst->getType()->isVoidTy() &&
           "Not handling stores with return values");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // For stores we need to be careful whether it's safe to move them.
    // Stores that are otherwise safe to move together may not appear safe
    // to move over one another (i.e. isSafeToMoveBefore may return false).
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator(), Move.Main))
      return false;
    Move.Main.push_back(Info.Inst);
    return true;
  };

  MoveList StoreGroups;

  for (auto I = Group.rbegin(), E = Group.rend(); I != E; ++I) {
    const AddrInfo &Info = *I;
    if (!Info.Inst->mayWriteToMemory())
      continue;
    if (StoreGroups.empty() || !tryAddTo(Info, StoreGroups.back()))
      StoreGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), false);
  }

  // Erase singleton groups.
  erase_if(StoreGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return StoreGroups;
}

auto AlignVectors::move(const MoveGroup &Move) const -> bool {
  assert(!Move.Main.empty() && "Move group should have non-empty Main");
  Instruction *Where = Move.Main.front();

  if (Move.IsLoad) {
    // Move all deps to before Where, keeping order.
    for (Instruction *D : Move.Deps)
      D->moveBefore(Where);
    // Move all main instructions to after Where, keeping order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveAfter(Where);
      Where = M;
    }
  } else {
    // NOTE: Deps are empty for "store" groups. If they need to be
    // non-empty, decide on the order.
    assert(Move.Deps.empty());
    // Move all main instructions to before Where, inverting order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveBefore(Where);
      Where = M;
    }
  }

  return Move.Main.size() + Move.Deps.size() > 1;
}

auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
  // TODO: Needs support for masked loads/stores of "scalar" vectors.
  if (!Move.IsHvx)
    return false;

  // Return the element with the maximum alignment from Range,
  // where GetValue obtains the value to compare from an element.
  auto getMaxOf = [](auto Range, auto GetValue) {
    return *std::max_element(
        Range.begin(), Range.end(),
        [&GetValue](auto &A, auto &B) { return GetValue(A) < GetValue(B); });
  };

  const AddrList &BaseInfos = AddrGroups.at(Move.Base);

  // Conceptually, there is a vector of N bytes covering the addresses
  // starting from the minimum offset (i.e. Base.Addr+Start). This vector
  // represents a contiguous memory region that spans all accessed memory
  // locations.
  // The correspondence between loaded or stored values will be expressed
  // in terms of this vector. For example, the 0th element of the vector
  // from the Base address info will start at byte Start from the beginning
  // of this conceptual vector.
  //
  // This vector will be loaded/stored starting at the nearest down-aligned
  // address and the amount of the down-alignment will be AlignVal:
  //   valign(load_vector(align_down(Base+Start)), AlignVal)
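  //
  // An illustrative example (offsets invented): for loads of v64i8 values
  // at Base+4 and Base+68, Start = 4 and the conceptual vector covers
  // bytes [4..132) of the group's memory.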
  std::set<Instruction *> TestSet(Move.Main.begin(), Move.Main.end());
  AddrList MoveInfos;
  llvm::copy_if(
      BaseInfos, std::back_inserter(MoveInfos),
      [&TestSet](const AddrInfo &AI) { return TestSet.count(AI.Inst); });

  // Maximum alignment present in the whole address group.
  const AddrInfo &WithMaxAlign =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
  Align MaxGiven = WithMaxAlign.HaveAlign;

  // Address with the minimum offset in the move address group.
  const AddrInfo &WithMinOffset =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return -AI.Offset; });

  const AddrInfo &WithMaxNeeded =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.NeedAlign; });
  Align MinNeeded = WithMaxNeeded.NeedAlign;

  // Set the builder at the top instruction in the move group.
  Instruction *TopIn = Move.IsLoad ? Move.Main.front() : Move.Main.back();
  IRBuilder<> Builder(TopIn);
  Value *AlignAddr = nullptr; // Actual aligned address.
  Value *AlignVal = nullptr;  // Right-shift amount (for valign).

  if (MinNeeded <= MaxGiven) {
    int Start = WithMinOffset.Offset;
    int OffAtMax = WithMaxAlign.Offset;
    // Shift the offset of the maximally aligned instruction (OffAtMax)
    // back by just enough multiples of the required alignment to cover the
    // distance from Start to OffAtMax.
    // Calculate the address adjustment amount based on the address with the
    // maximum alignment. This is to allow a simple gep instruction instead
    // of potential bitcasts to i8*.
    int Adjust = -alignTo(OffAtMax - Start, MinNeeded.value());
    AlignAddr = createAdjustedPointer(Builder, WithMaxAlign.Addr,
                                      WithMaxAlign.ValTy, Adjust);
    int Diff = Start - (OffAtMax + Adjust);
    AlignVal = HVC.getConstInt(Diff);
    assert(Diff >= 0);
    assert(static_cast<decltype(MinNeeded.value())>(Diff) < MinNeeded.value());
  } else {
    // WithMinOffset is the lowest address in the group,
    //   WithMinOffset.Addr = Base+Start.
    // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
    // mask off unnecessary bits, so it's ok to just use the original pointer
    // as the alignment amount.
    // Do an explicit down-alignment of the address to avoid creating an
    // aligned instruction with an address that is not really aligned.
    AlignAddr = createAlignedPointer(Builder, WithMinOffset.Addr,
                                     WithMinOffset.ValTy, MinNeeded.value());
    AlignVal = Builder.CreatePtrToInt(WithMinOffset.Addr, HVC.getIntTy());
  }
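  // An illustrative example of the aligned case above (numbers invented):
  // with Start = 5, OffAtMax = 14 and MinNeeded = 8, Adjust =
  // -alignTo(9, 8) = -16, so AlignAddr points 16 bytes below the maximally
  // aligned address, and Diff = 5 - (14 - 16) = 7: the first needed byte
  // sits 7 bytes past AlignAddr.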
  ByteSpan VSpan;
  for (const AddrInfo &AI : MoveInfos) {
    VSpan.Blocks.emplace_back(AI.Inst, HVC.getSizeOf(AI.ValTy),
                              AI.Offset - WithMinOffset.Offset);
  }

  // The aligned loads/stores will use blocks that are either scalars,
  // or HVX vectors. Let "sector" be the unified term for such a block.
  // blend(scalar, vector) -> sector...
  int ScLen = Move.IsHvx ? HVC.HST.getVectorLength()
                         : std::max<int>(MinNeeded.value(), 4);
  assert(!Move.IsHvx || ScLen == 64 || ScLen == 128);
  assert(Move.IsHvx || ScLen == 4 || ScLen == 8);

  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);

  if (Move.IsLoad) {
    ByteSpan ASpan;
    auto *True = HVC.getFullValue(HVC.getBoolTy(ScLen));
    auto *Undef = UndefValue::get(SecTy);

    for (int i = 0; i != NumSectors + DoAlign; ++i) {
      Value *Ptr = createAdjustedPointer(Builder, AlignAddr, SecTy, i * ScLen);
      // FIXME: generate a predicated load?
      Value *Load = createAlignedLoad(Builder, SecTy, Ptr, ScLen, True, Undef);
      // If vector shifting is potentially needed, accumulate metadata
      // from source sections of twice the load width.
      int Start = (i - DoAlign) * ScLen;
      int Width = (1 + DoAlign) * ScLen;
      propagateMetadata(cast<Instruction>(Load),
                        VSpan.section(Start, Width).values());
      ASpan.Blocks.emplace_back(Load, ScLen, i * ScLen);
    }

    if (DoAlign) {
      for (int j = 0; j != NumSectors; ++j) {
        ASpan[j].Seg.Val = HVC.vralignb(Builder, ASpan[j].Seg.Val,
                                        ASpan[j + 1].Seg.Val, AlignVal);
      }
    }

    for (ByteSpan::Block &B : VSpan) {
      ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size).shift(-B.Pos);
      Value *Accum = UndefValue::get(HVC.getByteTy(B.Seg.Size));
      for (ByteSpan::Block &S : ASection) {
        Value *Pay = HVC.vbytes(Builder, getPayload(S.Seg.Val));
        Accum =
            HVC.insertb(Builder, Accum, Pay, S.Seg.Start, S.Seg.Size, S.Pos);
      }
      // Instead of casting everything to bytes for the vselect, cast to the
      // original value type. This will avoid complications with casting masks.
      // For example, in cases when the original mask applied to i32, it could
      // be converted to a mask applicable to i8 via pred_typecast intrinsic,
      // but if the mask is not exactly of HVX length, extra handling would be
      // needed to make it work.
      Type *ValTy = getPayload(B.Seg.Val)->getType();
      Value *Cast = Builder.CreateBitCast(Accum, ValTy);
      Value *Sel = Builder.CreateSelect(getMask(B.Seg.Val), Cast,
                                        getPassThrough(B.Seg.Val));
      B.Seg.Val->replaceAllUsesWith(Sel);
    }
  } else {
    // Stores.
    ByteSpan ASpanV, ASpanM;

    // Return a vector value corresponding to the input value Val:
    // either <1 x Val> for scalar Val, or Val itself for vector Val.
    auto MakeVec = [](IRBuilder<> &Builder, Value *Val) -> Value * {
      Type *Ty = Val->getType();
      if (Ty->isVectorTy())
        return Val;
      auto *VecTy = VectorType::get(Ty, 1, /*Scalable*/ false);
      return Builder.CreateBitCast(Val, VecTy);
    };
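    // For example (illustrative): an i32 store value is rewritten as
    // <1 x i32>, so that the mask rescaling below can treat all payloads
    // uniformly as vectors.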
    // Create an extra "undef" sector at the beginning and at the end.
    // They will be used as the left/right filler in the vlalign step.
    for (int i = (DoAlign ? -1 : 0); i != NumSectors + DoAlign; ++i) {
      // For stores, the size of each section is an aligned vector length.
      // Adjust the store offsets relative to the section start offset.
      ByteSpan VSection = VSpan.section(i * ScLen, ScLen).shift(-i * ScLen);
      Value *AccumV = UndefValue::get(SecTy);
      Value *AccumM = HVC.getNullValue(SecTy);
      for (ByteSpan::Block &S : VSection) {
        Value *Pay = getPayload(S.Seg.Val);
        Value *Mask = HVC.rescale(Builder, MakeVec(Builder, getMask(S.Seg.Val)),
                                  Pay->getType(), HVC.getByteTy());
        AccumM = HVC.insertb(Builder, AccumM, HVC.vbytes(Builder, Mask),
                             S.Seg.Start, S.Seg.Size, S.Pos);
        AccumV = HVC.insertb(Builder, AccumV, HVC.vbytes(Builder, Pay),
                             S.Seg.Start, S.Seg.Size, S.Pos);
      }
      ASpanV.Blocks.emplace_back(AccumV, ScLen, i * ScLen);
      ASpanM.Blocks.emplace_back(AccumM, ScLen, i * ScLen);
    }

    // vlalign
    if (DoAlign) {
      for (int j = 1; j != NumSectors + 2; ++j) {
        ASpanV[j - 1].Seg.Val = HVC.vlalignb(Builder, ASpanV[j - 1].Seg.Val,
                                             ASpanV[j].Seg.Val, AlignVal);
        ASpanM[j - 1].Seg.Val = HVC.vlalignb(Builder, ASpanM[j - 1].Seg.Val,
                                             ASpanM[j].Seg.Val, AlignVal);
      }
    }

    for (int i = 0; i != NumSectors + DoAlign; ++i) {
      Value *Ptr = createAdjustedPointer(Builder, AlignAddr, SecTy, i * ScLen);
      Value *Val = ASpanV[i].Seg.Val;
      Value *Mask = ASpanM[i].Seg.Val; // bytes
      if (!HVC.isUndef(Val) && !HVC.isZero(Mask)) {
        Value *Store = createAlignedStore(Builder, Val, Ptr, ScLen,
                                          HVC.vlsb(Builder, Mask));
        // If vector shifting is potentially needed, accumulate metadata
        // from source sections of twice the store width.
        int Start = (i - DoAlign) * ScLen;
        int Width = (1 + DoAlign) * ScLen;
        propagateMetadata(cast<Instruction>(Store),
                          VSpan.section(Start, Width).values());
      }
    }
  }

  for (auto *Inst : Move.Main)
    Inst->eraseFromParent();

  return true;
}

auto AlignVectors::run() -> bool {
  if (!createAddressGroups())
    return false;

  bool Changed = false;
  MoveList LoadGroups, StoreGroups;

  for (auto &G : AddrGroups) {
    llvm::append_range(LoadGroups, createLoadGroups(G.second));
    llvm::append_range(StoreGroups, createStoreGroups(G.second));
  }

  for (auto &M : LoadGroups)
    Changed |= move(M);
  for (auto &M : StoreGroups)
    Changed |= move(M);

  for (auto &M : LoadGroups)
    Changed |= realignGroup(M);
  for (auto &M : StoreGroups)
    Changed |= realignGroup(M);

  return Changed;
}

// --- End AlignVectors

auto HexagonVectorCombine::run() -> bool {
  if (!HST.useHVXOps())
    return false;

  bool Changed = AlignVectors(*this).run();
  return Changed;
}

auto HexagonVectorCombine::getIntTy() const -> IntegerType * {
  return Type::getInt32Ty(F.getContext());
}

auto HexagonVectorCombine::getByteTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *ByteTy = Type::getInt8Ty(F.getContext());
  if (ElemCount == 0)
    return ByteTy;
  return VectorType::get(ByteTy, ElemCount, /*Scalable*/ false);
}

auto HexagonVectorCombine::getBoolTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *BoolTy = Type::getInt1Ty(F.getContext());
  if (ElemCount == 0)
    return BoolTy;
  return VectorType::get(BoolTy, ElemCount, /*Scalable*/ false);
}

auto HexagonVectorCombine::getConstInt(int Val) const -> ConstantInt * {
  return ConstantInt::getSigned(getIntTy(), Val);
}
auto HexagonVectorCombine::isZero(const Value *Val) const -> bool {
  if (auto *C = dyn_cast<Constant>(Val))
    return C->isZeroValue();
  return false;
}

auto HexagonVectorCombine::getIntValue(const Value *Val) const
    -> Optional<APInt> {
  if (auto *CI = dyn_cast<ConstantInt>(Val))
    return CI->getValue();
  return None;
}

auto HexagonVectorCombine::isUndef(const Value *Val) const -> bool {
  return isa<UndefValue>(Val);
}

auto HexagonVectorCombine::getSizeOf(const Value *Val) const -> int {
  return getSizeOf(Val->getType());
}

auto HexagonVectorCombine::getSizeOf(const Type *Ty) const -> int {
  return DL.getTypeStoreSize(const_cast<Type *>(Ty)).getFixedValue();
}

auto HexagonVectorCombine::getAllocSizeOf(const Type *Ty) const -> int {
  return DL.getTypeAllocSize(const_cast<Type *>(Ty)).getFixedValue();
}

auto HexagonVectorCombine::getTypeAlignment(Type *Ty) const -> int {
  // The actual type may be shorter than the HVX vector, so determine
  // the alignment based on subtarget info.
  if (HST.isTypeForHVX(Ty))
    return HST.getVectorLength();
  return DL.getABITypeAlign(Ty).value();
}

auto HexagonVectorCombine::getNullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Zero = ConstantInt::get(Ty->getScalarType(), 0);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Zero);
  return Zero;
}

auto HexagonVectorCombine::getFullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Minus1 = ConstantInt::get(Ty->getScalarType(), -1);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Minus1);
  return Minus1;
}

// Insert bytes [Start..Start+Length) of Src into Dst at byte Where.
auto HexagonVectorCombine::insertb(IRBuilder<> &Builder, Value *Dst, Value *Src,
                                   int Start, int Length, int Where) const
    -> Value * {
  assert(isByteVecTy(Dst->getType()) && isByteVecTy(Src->getType()));
  int SrcLen = getSizeOf(Src);
  int DstLen = getSizeOf(Dst);
  assert(0 <= Start && Start + Length <= SrcLen);
  assert(0 <= Where && Where + Length <= DstLen);

  int P2Len = PowerOf2Ceil(SrcLen | DstLen);
  auto *Undef = UndefValue::get(getByteTy());
  Value *P2Src = vresize(Builder, Src, P2Len, Undef);
  Value *P2Dst = vresize(Builder, Dst, P2Len, Undef);

  SmallVector<int, 256> SMask(P2Len);
  for (int i = 0; i != P2Len; ++i) {
    // If i is in [Where, Where+Length), pick Src[Start+(i-Where)].
    // Otherwise, pick Dst[i].
    SMask[i] =
        (Where <= i && i < Where + Length) ? P2Len + Start + (i - Where) : i;
  }

  Value *P2Insert = Builder.CreateShuffleVector(P2Dst, P2Src, SMask);
  return vresize(Builder, P2Insert, DstLen, Undef);
}
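// An illustrative example of the two byte-alignment helpers below (values
// invented): for byte vectors Lo = <0,1,2,3> and Hi = <4,5,6,7>,
// vralignb(Lo, Hi, 1) yields <1,2,3,4> and vlalignb(Lo, Hi, 1) yields
// <3,4,5,6>; both select a window from the Lo:Hi concatenation.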
auto HexagonVectorCombine::vlalignb(IRBuilder<> &Builder, Value *Lo, Value *Hi,
                                    Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  assert(isSectorTy(Hi->getType()));
  if (isZero(Amt))
    return Hi;
  int VecLen = getSizeOf(Hi);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, VecLen - IntAmt->getSExtValue(),
                           VecLen);

  if (HST.isTypeForHVX(Hi->getType())) {
    int HwLen = HST.getVectorLength();
    assert(VecLen == HwLen && "Expecting an exact HVX type");
    Intrinsic::ID V6_vlalignb = HwLen == 64
                                    ? Intrinsic::hexagon_V6_vlalignb
                                    : Intrinsic::hexagon_V6_vlalignb_128B;
    return createHvxIntrinsic(Builder, V6_vlalignb, Hi->getType(),
                              {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Builder.CreateShl(Pair, Amt), 32);
    Value *Trunc = Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Hi->getType());
  }
  if (VecLen == 8) {
    Value *Sub = Builder.CreateSub(getConstInt(VecLen), Amt);
    return vralignb(Builder, Lo, Hi, Sub);
  }
  llvm_unreachable("Unexpected vector length");
}

auto HexagonVectorCombine::vralignb(IRBuilder<> &Builder, Value *Lo, Value *Hi,
                                    Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  assert(isSectorTy(Lo->getType()));
  if (isZero(Amt))
    return Lo;
  int VecLen = getSizeOf(Lo);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, IntAmt->getSExtValue(), VecLen);

  if (HST.isTypeForHVX(Lo->getType())) {
    int HwLen = HST.getVectorLength();
    assert(VecLen == HwLen && "Expecting an exact HVX type");
    Intrinsic::ID V6_valignb = HwLen == 64 ? Intrinsic::hexagon_V6_valignb
                                           : Intrinsic::hexagon_V6_valignb_128B;
    return createHvxIntrinsic(Builder, V6_valignb, Lo->getType(),
                              {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Pair, Amt);
    Value *Trunc = Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Lo->getType());
  }
  if (VecLen == 8) {
    Type *Int64Ty = Type::getInt64Ty(F.getContext());
    Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty);
    Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty);
    Function *FI = Intrinsic::getDeclaration(F.getParent(),
                                             Intrinsic::hexagon_S2_valignrb);
    Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt});
    return Builder.CreateBitCast(Call, Lo->getType());
  }
  llvm_unreachable("Unexpected vector length");
}
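// An illustrative example of concat below (sizes invented): three <4 x i8>
// inputs are padded with an undef vector to four, joined pairwise into two
// <8 x i8> values and then one <16 x i8>, and a final shuffle keeps only
// the original 12 elements.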
// Concatenates a sequence of vectors of the same type.
auto HexagonVectorCombine::concat(IRBuilder<> &Builder,
                                  ArrayRef<Value *> Vecs) const -> Value * {
  assert(!Vecs.empty());
  SmallVector<int, 256> SMask;
  std::vector<Value *> Work[2];
  int ThisW = 0, OtherW = 1;

  Work[ThisW].assign(Vecs.begin(), Vecs.end());
  while (Work[ThisW].size() > 1) {
    auto *Ty = cast<VectorType>(Work[ThisW].front()->getType());
    int ElemCount = Ty->getElementCount().getFixedValue();
    SMask.resize(ElemCount * 2);
    std::iota(SMask.begin(), SMask.end(), 0);

    Work[OtherW].clear();
    if (Work[ThisW].size() % 2 != 0)
      Work[ThisW].push_back(UndefValue::get(Ty));
    for (int i = 0, e = Work[ThisW].size(); i < e; i += 2) {
      Value *Joined = Builder.CreateShuffleVector(Work[ThisW][i],
                                                  Work[ThisW][i + 1], SMask);
      Work[OtherW].push_back(Joined);
    }
    std::swap(ThisW, OtherW);
  }

  // Since there may have been some undefs appended to make shuffle operands
  // have the same type, perform the last shuffle to only pick the original
  // elements.
  SMask.resize(Vecs.size() * getSizeOf(Vecs.front()->getType()));
  std::iota(SMask.begin(), SMask.end(), 0);
  Value *Total = Work[OtherW].front();
  return Builder.CreateShuffleVector(Total, SMask);
}

auto HexagonVectorCombine::vresize(IRBuilder<> &Builder, Value *Val,
                                   int NewSize, Value *Pad) const -> Value * {
  assert(isa<VectorType>(Val->getType()));
  auto *ValTy = cast<VectorType>(Val->getType());
  assert(ValTy->getElementType() == Pad->getType());

  int CurSize = ValTy->getElementCount().getFixedValue();
  if (CurSize == NewSize)
    return Val;
  // Truncate?
  if (CurSize > NewSize)
    return getElementRange(Builder, Val, /*Unused*/ Val, 0, NewSize);
  // Extend.
  SmallVector<int, 128> SMask(NewSize);
  std::iota(SMask.begin(), SMask.begin() + CurSize, 0);
  std::fill(SMask.begin() + CurSize, SMask.end(), CurSize);
  Value *PadVec = Builder.CreateVectorSplat(CurSize, Pad);
  return Builder.CreateShuffleVector(Val, PadVec, SMask);
}

auto HexagonVectorCombine::rescale(IRBuilder<> &Builder, Value *Mask,
                                   Type *FromTy, Type *ToTy) const -> Value * {
  // Mask is a vector <N x i1>, where each element corresponds to an
  // element of FromTy. Remap it so that each element will correspond
  // to an element of ToTy.
  assert(isa<VectorType>(Mask->getType()));

  Type *FromSTy = FromTy->getScalarType();
  Type *ToSTy = ToTy->getScalarType();
  if (FromSTy == ToSTy)
    return Mask;

  int FromSize = getSizeOf(FromSTy);
  int ToSize = getSizeOf(ToSTy);
  assert(FromSize % ToSize == 0 || ToSize % FromSize == 0);

  auto *MaskTy = cast<VectorType>(Mask->getType());
  int FromCount = MaskTy->getElementCount().getFixedValue();
  int ToCount = (FromCount * FromSize) / ToSize;
  assert((FromCount * FromSize) % ToSize == 0);

  auto *FromITy = IntegerType::get(F.getContext(), FromSize * 8);
  auto *ToITy = IntegerType::get(F.getContext(), ToSize * 8);

  // Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
  // -> trunc to <M x i1>.
  Value *Ext = Builder.CreateSExt(
      Mask, VectorType::get(FromITy, FromCount, /*Scalable*/ false));
  Value *Cast = Builder.CreateBitCast(
      Ext, VectorType::get(ToITy, ToCount, /*Scalable*/ false));
  return Builder.CreateTrunc(
      Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable*/ false));
}

// Bitcast to bytes, and return least significant bits.
auto HexagonVectorCombine::vlsb(IRBuilder<> &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getBoolTy())
    return Val;

  Value *Bytes = vbytes(Builder, Val);
  if (auto *VecTy = dyn_cast<VectorType>(Bytes->getType()))
    return Builder.CreateTrunc(Bytes, getBoolTy(getSizeOf(VecTy)));
  // If Bytes is a scalar (i.e. Val was a scalar byte), return i1, not
  // <1 x i1>.
  return Builder.CreateTrunc(Bytes, getBoolTy());
}
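// An illustrative example (types invented): for a <4 x i32> input, vlsb
// first rewrites it as <16 x i8> via vbytes, then truncates to <16 x i1>,
// i.e. the least significant bit of every byte.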
// Bitcast to bytes for non-bool. For bool, convert i1 -> i8.
auto HexagonVectorCombine::vbytes(IRBuilder<> &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getByteTy())
    return Val;

  if (ScalarTy != getBoolTy())
    return Builder.CreateBitCast(Val, getByteTy(getSizeOf(Val)));
  // For bool, return a sext from i1 to i8.
  if (auto *VecTy = dyn_cast<VectorType>(Val->getType()))
    return Builder.CreateSExt(Val, VectorType::get(getByteTy(), VecTy));
  return Builder.CreateSExt(Val, getByteTy());
}

auto HexagonVectorCombine::createHvxIntrinsic(IRBuilder<> &Builder,
                                              Intrinsic::ID IntID, Type *RetTy,
                                              ArrayRef<Value *> Args) const
    -> Value * {
  int HwLen = HST.getVectorLength();
  Type *BoolTy = Type::getInt1Ty(F.getContext());
  Type *Int32Ty = Type::getInt32Ty(F.getContext());
  // HVX vector -> v16i32/v32i32
  // HVX vector predicate -> v512i1/v1024i1
  auto getTypeForIntrin = [&](Type *Ty) -> Type * {
    if (HST.isTypeForHVX(Ty, /*IncludeBool*/ true)) {
      Type *ElemTy = cast<VectorType>(Ty)->getElementType();
      if (ElemTy == Int32Ty)
        return Ty;
      if (ElemTy == BoolTy)
        return VectorType::get(BoolTy, 8 * HwLen, /*Scalable*/ false);
      return VectorType::get(Int32Ty, HwLen / 4, /*Scalable*/ false);
    }
    // Non-HVX type. It should be a scalar.
    assert(Ty == Int32Ty || Ty->isIntegerTy(64));
    return Ty;
  };

  auto getCast = [&](IRBuilder<> &Builder, Value *Val,
                     Type *DestTy) -> Value * {
    Type *SrcTy = Val->getType();
    if (SrcTy == DestTy)
      return Val;
    if (HST.isTypeForHVX(SrcTy, /*IncludeBool*/ true)) {
      if (cast<VectorType>(SrcTy)->getElementType() == BoolTy) {
        // This should take care of casts the other way too, for example
        // v1024i1 -> v32i1.
        Intrinsic::ID TC = HwLen == 64
                               ? Intrinsic::hexagon_V6_pred_typecast
                               : Intrinsic::hexagon_V6_pred_typecast_128B;
        Function *FI = Intrinsic::getDeclaration(F.getParent(), TC,
                                                 {DestTy, Val->getType()});
        return Builder.CreateCall(FI, {Val});
      }
      // Non-predicate HVX vector.
      return Builder.CreateBitCast(Val, DestTy);
    }
    // Non-HVX type. It should be a scalar, and it should already have
    // a valid type.
    llvm_unreachable("Unexpected type");
  };
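  // An illustrative example (for 128-byte HVX, invented for exposition):
  // a v128i8 argument is bitcast to v32i32 before the call below, and a
  // v32i32 result is bitcast back to the requested v128i8 return type.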
  SmallVector<Value *, 4> IntOps;
  for (Value *A : Args)
    IntOps.push_back(getCast(Builder, A, getTypeForIntrin(A->getType())));
  Function *FI = Intrinsic::getDeclaration(F.getParent(), IntID);
  Value *Call = Builder.CreateCall(FI, IntOps);

  Type *CallTy = Call->getType();
  if (CallTy == RetTy)
    return Call;
  // Scalar types should have RetTy matching the call return type.
  assert(HST.isTypeForHVX(CallTy, /*IncludeBool*/ true));
  if (cast<VectorType>(CallTy)->getElementType() == BoolTy)
    return getCast(Builder, Call, RetTy);
  return Builder.CreateBitCast(Call, RetTy);
}

auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
                                                      Value *Ptr1) const
    -> Optional<int> {
  struct Builder : IRBuilder<> {
    Builder(BasicBlock *B) : IRBuilder<>(B) {}
    ~Builder() {
      for (Instruction *I : llvm::reverse(ToErase))
        I->eraseFromParent();
    }
    SmallVector<Instruction *, 8> ToErase;
  };

#define CallBuilder(B, F)                                                      \
  [&](auto &B_) {                                                              \
    Value *V = B_.F;                                                           \
    if (auto *I = dyn_cast<Instruction>(V))                                    \
      B_.ToErase.push_back(I);                                                 \
    return V;                                                                  \
  }(B)

  auto Simplify = [&](Value *V) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      SimplifyQuery Q(DL, &TLI, &DT, &AC, I);
      if (Value *S = simplifyInstruction(I, Q))
        return S;
    }
    return V;
  };

  auto StripBitCast = [](Value *V) {
    while (auto *C = dyn_cast<BitCastInst>(V))
      V = C->getOperand(0);
    return V;
  };

  Ptr0 = StripBitCast(Ptr0);
  Ptr1 = StripBitCast(Ptr1);
  if (!isa<GetElementPtrInst>(Ptr0) || !isa<GetElementPtrInst>(Ptr1))
    return None;

  auto *Gep0 = cast<GetElementPtrInst>(Ptr0);
  auto *Gep1 = cast<GetElementPtrInst>(Ptr1);
  if (Gep0->getPointerOperand() != Gep1->getPointerOperand())
    return None;

  Builder B(Gep0->getParent());
  int Scale = getAllocSizeOf(Gep0->getSourceElementType());

  // FIXME: for now only check GEPs with a single index.
  if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
    return None;

  Value *Idx0 = Gep0->getOperand(1);
  Value *Idx1 = Gep1->getOperand(1);
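  // An illustrative example of the known-bits fallback below (values
  // invented): if Idx0 = A | 4 and Idx1 = A | 8, where A has unknown high
  // bits and known-zero low bits, the unknown parts cancel (A - A
  // simplifies to 0) while the known parts give 4 - 8 = -4, for a final
  // difference of -4 * Scale.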
  // First, try to simplify the subtraction directly.
  if (auto *Diff = dyn_cast<ConstantInt>(
          Simplify(CallBuilder(B, CreateSub(Idx0, Idx1)))))
    return Diff->getSExtValue() * Scale;

  KnownBits Known0 = computeKnownBits(Idx0, DL, 0, &AC, Gep0, &DT);
  KnownBits Known1 = computeKnownBits(Idx1, DL, 0, &AC, Gep1, &DT);
  APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
  if (Unknown.isAllOnes())
    return None;

  Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
  Value *AndU0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskU)));
  Value *AndU1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskU)));
  Value *SubU = Simplify(CallBuilder(B, CreateSub(AndU0, AndU1)));
  int Diff0 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubU)) {
    Diff0 = C->getSExtValue();
  } else {
    return None;
  }

  Value *MaskK = ConstantInt::get(MaskU->getType(), ~Unknown);
  Value *AndK0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskK)));
  Value *AndK1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskK)));
  Value *SubK = Simplify(CallBuilder(B, CreateSub(AndK0, AndK1)));
  int Diff1 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubK)) {
    Diff1 = C->getSExtValue();
  } else {
    return None;
  }

  return (Diff0 + Diff1) * Scale;

#undef CallBuilder
}

template <typename T>
auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
                                                  BasicBlock::const_iterator To,
                                                  const T &Ignore) const
    -> bool {
  auto getLocOrNone = [this](const Instruction &I) -> Optional<MemoryLocation> {
    if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        return MemoryLocation::getForArgument(II, 0, TLI);
      case Intrinsic::masked_store:
        return MemoryLocation::getForArgument(II, 1, TLI);
      }
    }
    return MemoryLocation::getOrNone(&I);
  };

  // The source and the destination must be in the same basic block.
  const BasicBlock &Block = *In.getParent();
  assert(Block.begin() == To || Block.end() == To || To->getParent() == &Block);
  // No PHIs.
  if (isa<PHINode>(In) || (To != Block.end() && isa<PHINode>(*To)))
    return false;

  if (!mayHaveNonDefUseDependency(In))
    return true;
  bool MayWrite = In.mayWriteToMemory();
  auto MaybeLoc = getLocOrNone(In);

  auto From = In.getIterator();
  if (From == To)
    return true;
  bool MoveUp = (To != Block.end() && To->comesBefore(&In));
  auto Range =
      MoveUp ? std::make_pair(To, From) : std::make_pair(std::next(From), To);
  for (auto It = Range.first; It != Range.second; ++It) {
    const Instruction &I = *It;
    if (llvm::is_contained(Ignore, &I))
      continue;
    // The assume intrinsic can be ignored.
    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::assume)
        continue;
    }
    // Parts based on isSafeToMoveBefore from CodeMoverUtils.cpp.
    if (I.mayThrow())
      return false;
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (!CB->hasFnAttr(Attribute::WillReturn))
        return false;
      if (!CB->hasFnAttr(Attribute::NoSync))
        return false;
    }
    if (I.mayReadOrWriteMemory()) {
      auto MaybeLocI = getLocOrNone(I);
      if (MayWrite || I.mayWriteToMemory()) {
        if (!MaybeLoc || !MaybeLocI)
          return false;
        if (!AA.isNoAlias(*MaybeLoc, *MaybeLocI))
          return false;
      }
    }
  }
  return true;
}

#ifndef NDEBUG
auto HexagonVectorCombine::isByteVecTy(Type *Ty) const -> bool {
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return VecTy->getElementType() == getByteTy();
  return false;
}

auto HexagonVectorCombine::isSectorTy(Type *Ty) const -> bool {
  if (!isByteVecTy(Ty))
    return false;
  int Size = getSizeOf(Ty);
  if (HST.isTypeForHVX(Ty))
    return Size == static_cast<int>(HST.getVectorLength());
  return Size == 4 || Size == 8;
}
#endif

auto HexagonVectorCombine::getElementRange(IRBuilder<> &Builder, Value *Lo,
                                           Value *Hi, int Start,
                                           int Length) const -> Value * {
  assert(0 <= Start && Start < Length);
  SmallVector<int, 128> SMask(Length);
  std::iota(SMask.begin(), SMask.end(), Start);
  return Builder.CreateShuffleVector(Lo, Hi, SMask);
}

// Pass management.

namespace llvm {
void initializeHexagonVectorCombineLegacyPass(PassRegistry &);
FunctionPass *createHexagonVectorCombineLegacyPass();
} // namespace llvm

namespace {
class HexagonVectorCombineLegacy : public FunctionPass {
public:
  static char ID;

  HexagonVectorCombineLegacy() : FunctionPass(ID) {}

  StringRef getPassName() const override { return "Hexagon Vector Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TM = getAnalysis<TargetPassConfig>().getTM<HexagonTargetMachine>();
    HexagonVectorCombine HVC(F, AA, AC, DT, TLI, TM);
    return HVC.run();
  }
};
} // namespace

char HexagonVectorCombineLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonVectorCombineLegacy, DEBUG_TYPE,
                      "Hexagon Vector Combine", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(HexagonVectorCombineLegacy, DEBUG_TYPE,
                    "Hexagon Vector Combine", false, false)

FunctionPass *llvm::createHexagonVectorCombineLegacyPass() {
  return new HexagonVectorCombineLegacy();
}