//===-- HexagonVectorCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// HexagonVectorCombine is a utility class implementing a variety of functions
// that assist in vector-based optimizations.
//
// AlignVectors: replace unaligned vector loads and stores with aligned ones.
// HvxIdioms: recognize various opportunities to generate HVX intrinsic code.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"

#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"

#include <algorithm>
#include <deque>
#include <map>
#include <optional>
#include <set>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-vc"

using namespace llvm;

namespace {
cl::opt<bool> DumpModule("hvc-dump-module", cl::Hidden);
cl::opt<bool> VAEnabled("hvc-va", cl::Hidden, cl::init(true)); // Align
cl::opt<bool> VIEnabled("hvc-vi", cl::Hidden, cl::init(true)); // Idioms
cl::opt<bool> VADoFullStores("hvc-va-full-stores", cl::Hidden);

cl::opt<unsigned> VAGroupCountLimit("hvc-va-group-count-limit", cl::Hidden,
                                    cl::init(~0));
cl::opt<unsigned> VAGroupSizeLimit("hvc-va-group-size-limit", cl::Hidden,
                                   cl::init(~0));

class HexagonVectorCombine {
public:
  HexagonVectorCombine(Function &F_, AliasAnalysis &AA_, AssumptionCache &AC_,
                       DominatorTree &DT_, ScalarEvolution &SE_,
                       TargetLibraryInfo &TLI_, const TargetMachine &TM_)
      : F(F_), DL(F.getParent()->getDataLayout()), AA(AA_), AC(AC_), DT(DT_),
        SE(SE_), TLI(TLI_),
        HST(static_cast<const HexagonSubtarget &>(*TM_.getSubtargetImpl(F))) {}

  bool run();

  // Common integer type.
  IntegerType *getIntTy(unsigned Width = 32) const;
  // Byte type: either scalar (when ElemCount = 0), or vector with the given
  // element count.
  Type *getByteTy(int ElemCount = 0) const;
  // Boolean type: either scalar (when ElemCount = 0), or vector with the
  // given element count.
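  // For example (illustrative): getBoolTy() returns the scalar type i1,
  // while getBoolTy(32) returns the vector type <32 x i1>.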
  Type *getBoolTy(int ElemCount = 0) const;
  // Create a ConstantInt of type returned by getIntTy with the value Val.
  ConstantInt *getConstInt(int Val, unsigned Width = 32) const;
  // Get the integer value of V, if it exists.
  std::optional<APInt> getIntValue(const Value *Val) const;
  // Is Val a constant 0, or a vector of 0s?
  bool isZero(const Value *Val) const;
  // Is Val an undef value?
  bool isUndef(const Value *Val) const;
  // Is Val a scalar (i1 true) or a vector of (i1 true)?
  bool isTrue(const Value *Val) const;
  // Is Val a scalar (i1 false) or a vector of (i1 false)?
  bool isFalse(const Value *Val) const;

  // Get HVX vector type with the given element type.
  VectorType *getHvxTy(Type *ElemTy, bool Pair = false) const;

  enum SizeKind {
    Store, // Store size
    Alloc, // Alloc size
  };
  int getSizeOf(const Value *Val, SizeKind Kind = Store) const;
  int getSizeOf(const Type *Ty, SizeKind Kind = Store) const;
  int getTypeAlignment(Type *Ty) const;
  size_t length(Value *Val) const;
  size_t length(Type *Ty) const;

  Constant *getNullValue(Type *Ty) const;
  Constant *getFullValue(Type *Ty) const;
  Constant *getConstSplat(Type *Ty, int Val) const;

  Value *simplify(Value *Val) const;

  Value *insertb(IRBuilderBase &Builder, Value *Dest, Value *Src, int Start,
                 int Length, int Where) const;
  Value *vlalignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *vralignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *concat(IRBuilderBase &Builder, ArrayRef<Value *> Vecs) const;
  Value *vresize(IRBuilderBase &Builder, Value *Val, int NewSize,
                 Value *Pad) const;
  Value *rescale(IRBuilderBase &Builder, Value *Mask, Type *FromTy,
                 Type *ToTy) const;
  Value *vlsb(IRBuilderBase &Builder, Value *Val) const;
  Value *vbytes(IRBuilderBase &Builder, Value *Val) const;
  Value *subvector(IRBuilderBase &Builder, Value *Val, unsigned Start,
                   unsigned Length) const;
  Value *sublo(IRBuilderBase &Builder, Value *Val) const;
  Value *subhi(IRBuilderBase &Builder, Value *Val) const;
  Value *vdeal(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;
  Value *vshuff(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;

  Value *createHvxIntrinsic(IRBuilderBase &Builder, Intrinsic::ID IntID,
                            Type *RetTy, ArrayRef<Value *> Args,
                            ArrayRef<Type *> ArgTys = std::nullopt,
                            ArrayRef<Value *> MDSources = std::nullopt) const;
  SmallVector<Value *> splitVectorElements(IRBuilderBase &Builder, Value *Vec,
                                           unsigned ToWidth) const;
  Value *joinVectorElements(IRBuilderBase &Builder, ArrayRef<Value *> Values,
                            VectorType *ToType) const;

  std::optional<int> calculatePointerDifference(Value *Ptr0, Value *Ptr1) const;

  unsigned getNumSignificantBits(const Value *V,
                                 const Instruction *CtxI = nullptr) const;
  KnownBits getKnownBits(const Value *V,
                         const Instruction *CtxI = nullptr) const;

  bool isSafeToClone(const Instruction &In) const;

  template <typename T = std::vector<Instruction *>>
  bool isSafeToMoveBeforeInBB(const Instruction &In,
                              BasicBlock::const_iterator To,
                              const T &IgnoreInsts = {}) const;

  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isByteVecTy(Type *Ty) const;

  Function &F;
  const DataLayout &DL;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetLibraryInfo &TLI;
  const HexagonSubtarget &HST;

private:
  Value *getElementRange(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                         int Start, int Length) const;
};

class AlignVectors {
  // This code tries to replace unaligned vector loads/stores with aligned
  // ones.
  // Consider an unaligned load:
  //   %v = original_load %some_addr, align <bad>
  //   %user = %v
  // It will generate
  //      = load ..., align <good>
  //      = load ..., align <good>
  //      = valign
  //      etc.
  //   %synthesize = combine/shuffle the loaded data so that it looks
  //                 exactly like what "original_load" has loaded.
  //   %user = %synthesize
  // Similarly for stores.
public:
  AlignVectors(const HexagonVectorCombine &HVC_) : HVC(HVC_) {}

  bool run();

private:
  using InstList = std::vector<Instruction *>;
  using InstMap = DenseMap<Instruction *, Instruction *>;

  struct AddrInfo {
    AddrInfo(const AddrInfo &) = default;
    AddrInfo(const HexagonVectorCombine &HVC, Instruction *I, Value *A, Type *T,
             Align H)
        : Inst(I), Addr(A), ValTy(T), HaveAlign(H),
          NeedAlign(HVC.getTypeAlignment(ValTy)) {}
    AddrInfo &operator=(const AddrInfo &) = default;

    // XXX: add Size member?
    Instruction *Inst;
    Value *Addr;
    Type *ValTy;
    Align HaveAlign;
    Align NeedAlign;
    int Offset = 0; // Offset (in bytes) from the first member of the
                    // containing AddrList.
  };
  using AddrList = std::vector<AddrInfo>;

  struct InstrLess {
    bool operator()(const Instruction *A, const Instruction *B) const {
      return A->comesBefore(B);
    }
  };
  using DepList = std::set<Instruction *, InstrLess>;

  struct MoveGroup {
    MoveGroup(const AddrInfo &AI, Instruction *B, bool Hvx, bool Load)
        : Base(B), Main{AI.Inst}, Clones{}, IsHvx(Hvx), IsLoad(Load) {}
    MoveGroup() = default;
    Instruction *Base; // Base instruction of the parent address group.
    InstList Main;     // Main group of instructions.
    InstList Deps;     // List of dependencies.
    InstMap Clones;    // Map from original Deps to cloned ones.
    bool IsHvx;        // Is this a group of HVX instructions?
    bool IsLoad;       // Is this a load group?
  };
  using MoveList = std::vector<MoveGroup>;

  struct ByteSpan {
    // A representation of "interesting" bytes within a given span of memory.
    // These bytes are those that are loaded or stored, and they don't have
    // to cover the entire span of memory.
    //
    // The representation works by picking a contiguous sequence of bytes
    // from somewhere within an llvm::Value, and placing it at a given offset
    // within the span.
    //
    // The sequence of bytes from an llvm::Value is represented by Segment.
    // Block is a Segment, plus where it goes in the span.
    // For example, a Block with Seg = {V, 4, 8} and Pos = 16 places bytes
    // 4..11 of V at offsets 16..23 of the span.
    //
    // An important feature of ByteSpan is being able to make a "section",
    // i.e. creating another ByteSpan corresponding to a range of offsets
    // relative to the source span.

    struct Segment {
      // Segment of a Value: 'Len' bytes starting at byte 'Begin'.
      Segment(Value *Val, int Begin, int Len)
          : Val(Val), Start(Begin), Size(Len) {}
      Segment(const Segment &Seg) = default;
      Segment &operator=(const Segment &Seg) = default;
      Value *Val; // Value representable as a sequence of bytes.
      int Start;  // First byte of the value that belongs to the segment.
      int Size;   // Number of bytes in the segment.
    };

    struct Block {
      Block(Value *Val, int Len, int Pos) : Seg(Val, 0, Len), Pos(Pos) {}
      Block(Value *Val, int Off, int Len, int Pos)
          : Seg(Val, Off, Len), Pos(Pos) {}
      Block(const Block &Blk) = default;
      Block &operator=(const Block &Blk) = default;
      Segment Seg; // Value segment.
      int Pos;     // Position (offset) of the block in the span.
    };

    int extent() const;
    ByteSpan section(int Start, int Length) const;
    ByteSpan &shift(int Offset);
    SmallVector<Value *, 8> values() const;

    int size() const { return Blocks.size(); }
    Block &operator[](int i) { return Blocks[i]; }
    const Block &operator[](int i) const { return Blocks[i]; }

    std::vector<Block> Blocks;

    using iterator = decltype(Blocks)::iterator;
    iterator begin() { return Blocks.begin(); }
    iterator end() { return Blocks.end(); }
    using const_iterator = decltype(Blocks)::const_iterator;
    const_iterator begin() const { return Blocks.begin(); }
    const_iterator end() const { return Blocks.end(); }
  };

  Align getAlignFromValue(const Value *V) const;
  std::optional<AddrInfo> getAddrInfo(Instruction &In) const;
  bool isHvx(const AddrInfo &AI) const;
  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isSectorTy(Type *Ty) const;

  Value *getPayload(Value *Val) const;
  Value *getMask(Value *Val) const;
  Value *getPassThrough(Value *Val) const;

  Value *createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                               int Adjust,
                               const InstMap &CloneMap = InstMap()) const;
  Value *createAlignedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                              int Alignment,
                              const InstMap &CloneMap = InstMap()) const;

  Value *createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                    Value *Predicate, int Alignment, Value *Mask,
                    Value *PassThru,
                    ArrayRef<Value *> MDSources = std::nullopt) const;
  Value *createSimpleLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                          int Alignment,
                          ArrayRef<Value *> MDSources = std::nullopt) const;

  Value *createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                     Value *Predicate, int Alignment, Value *Mask,
                     ArrayRef<Value *> MDSources = std::nullopt) const;
  Value *createSimpleStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                           int Alignment,
                           ArrayRef<Value *> MDSources = std::nullopt) const;

  Value *createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                              Value *Predicate, int Alignment,
                              ArrayRef<Value *> MDSources = std::nullopt) const;
  Value *
  createPredicatedStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                        Value *Predicate, int Alignment,
                        ArrayRef<Value *> MDSources = std::nullopt) const;

  DepList getUpwardDeps(Instruction *In, Instruction *Base) const;
  bool createAddressGroups();
  MoveList createLoadGroups(const AddrList &Group) const;
  MoveList createStoreGroups(const AddrList &Group) const;
  bool moveTogether(MoveGroup &Move) const;
  template <typename T> InstMap cloneBefore(Instruction *To, T &&Insts) const;

  void realignLoadGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int ScLen, Value *AlignVal, Value *AlignAddr) const;
  void realignStoreGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                         int ScLen, Value *AlignVal, Value *AlignAddr) const;
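  // Realign a single move group: compute the down-aligned base address and
  // the residual shift amount, then rewrite the group in terms of aligned
  // sectors via realignLoadGroup/realignStoreGroup.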
  bool realignGroup(const MoveGroup &Move) const;

  Value *makeTestIfUnaligned(IRBuilderBase &Builder, Value *AlignVal,
                             int Alignment) const;

  friend raw_ostream &operator<<(raw_ostream &OS, const AddrInfo &AI);
  friend raw_ostream &operator<<(raw_ostream &OS, const MoveGroup &MG);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan::Block &B);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan &BS);

  std::map<Instruction *, AddrList> AddrGroups;
  const HexagonVectorCombine &HVC;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::AddrInfo &AI) {
  OS << "Inst: " << AI.Inst << "  " << *AI.Inst << '\n';
  OS << "Addr: " << *AI.Addr << '\n';
  OS << "Type: " << *AI.ValTy << '\n';
  OS << "HaveAlign: " << AI.HaveAlign.value() << '\n';
  OS << "NeedAlign: " << AI.NeedAlign.value() << '\n';
  OS << "Offset: " << AI.Offset;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::MoveGroup &MG) {
  OS << "IsLoad:" << (MG.IsLoad ? "yes" : "no");
  OS << ", IsHvx:" << (MG.IsHvx ? "yes" : "no") << '\n';
  OS << "Main\n";
  for (Instruction *I : MG.Main)
    OS << "  " << *I << '\n';
  OS << "Deps\n";
  for (Instruction *I : MG.Deps)
    OS << "  " << *I << '\n';
  OS << "Clones\n";
  for (auto [K, V] : MG.Clones) {
    OS << "    ";
    K->printAsOperand(OS, false);
    OS << "\t-> " << *V << '\n';
  }
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS,
                        const AlignVectors::ByteSpan::Block &B) {
  OS << "  @" << B.Pos << " [" << B.Seg.Start << ',' << B.Seg.Size << "] ";
  if (B.Seg.Val == reinterpret_cast<const Value *>(&B)) {
    OS << "(self:" << B.Seg.Val << ')';
  } else if (B.Seg.Val != nullptr) {
    OS << *B.Seg.Val;
  } else {
    OS << "(null)";
  }
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::ByteSpan &BS) {
  OS << "ByteSpan[size=" << BS.size() << ", extent=" << BS.extent() << '\n';
  for (const AlignVectors::ByteSpan::Block &B : BS)
    OS << B << '\n';
  OS << ']';
  return OS;
}

class HvxIdioms {
public:
  HvxIdioms(const HexagonVectorCombine &HVC_) : HVC(HVC_) {
    auto *Int32Ty = HVC.getIntTy(32);
    HvxI32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/false);
    HvxP32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/true);
  }

  bool run();

private:
  enum Signedness { Positive, Signed, Unsigned };

  // Value + sign
  // This is to keep track of whether the value should be treated as signed
  // or unsigned, or is known to be positive.
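  // For example, a value whose known bits prove the sign bit to be zero is
  // Positive, and can safely be treated as either signed or unsigned.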
  struct SValue {
    Value *Val;
    Signedness Sgn;
  };

  struct FxpOp {
    unsigned Opcode;
    unsigned Frac; // Number of fraction bits
    SValue X, Y;
    // If present, add 1 << RoundAt before shift:
    std::optional<unsigned> RoundAt;
    VectorType *ResTy;
  };

  auto getNumSignificantBits(Value *V, Instruction *In) const
      -> std::pair<unsigned, Signedness>;
  auto canonSgn(SValue X, SValue Y) const -> std::pair<SValue, SValue>;

  auto matchFxpMul(Instruction &In) const -> std::optional<FxpOp>;
  auto processFxpMul(Instruction &In, const FxpOp &Op) const -> Value *;

  auto processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
                            const FxpOp &Op) const -> Value *;
  auto createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  auto createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  // Return {Result, Carry}, where Carry is a vector predicate.
  auto createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
                      Value *CarryIn = nullptr) const
      -> std::pair<Value *, Value *>;
  auto createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const -> Value *;
  auto createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> Value *;
  auto createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> std::pair<Value *, Value *>;
  auto createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     ArrayRef<Value *> WordY) const -> SmallVector<Value *>;
  auto createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     Signedness SgnX, ArrayRef<Value *> WordY,
                     Signedness SgnY) const -> SmallVector<Value *>;

  VectorType *HvxI32Ty;
  VectorType *HvxP32Ty;
  const HexagonVectorCombine &HVC;

  friend raw_ostream &operator<<(raw_ostream &, const FxpOp &);
};

[[maybe_unused]] raw_ostream &operator<<(raw_ostream &OS,
                                         const HvxIdioms::FxpOp &Op) {
  static const char *SgnNames[] = {"Positive", "Signed", "Unsigned"};
  OS << Instruction::getOpcodeName(Op.Opcode) << '.' << Op.Frac;
  if (Op.RoundAt.has_value()) {
    if (Op.Frac != 0 && *Op.RoundAt == Op.Frac - 1) {
      OS << ":rnd";
    } else {
      OS << " + 1<<" << *Op.RoundAt;
    }
  }
  OS << "\n X:(" << SgnNames[Op.X.Sgn] << ") " << *Op.X.Val << "\n"
     << " Y:(" << SgnNames[Op.Y.Sgn] << ") " << *Op.Y.Val;
  return OS;
}

} // namespace

namespace {

template <typename T> T *getIfUnordered(T *MaybeT) {
  return MaybeT && MaybeT->isUnordered() ? MaybeT : nullptr;
}
template <typename T> T *isCandidate(Instruction *In) {
  return dyn_cast<T>(In);
}
template <> LoadInst *isCandidate<LoadInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<LoadInst>(In));
}
template <> StoreInst *isCandidate<StoreInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<StoreInst>(In));
}

#if !defined(_MSC_VER) || _MSC_VER >= 1926
// VS2017 and some versions of VS2019 have trouble compiling this:
// error C2976: 'std::map': too few template arguments
// VS 2019 16.x is known to work, except for 16.4/16.5 (MSC_VER 1924/1925)
template <typename Pred, typename... Ts>
void erase_if(std::map<Ts...> &map, Pred p)
#else
template <typename Pred, typename T, typename U>
void erase_if(std::map<T, U> &map, Pred p)
#endif
{
  for (auto i = map.begin(), e = map.end(); i != e;) {
    if (p(*i))
      i = map.erase(i);
    else
      i = std::next(i);
  }
}

// Forward other erase_ifs to the LLVM implementations.
template <typename Pred, typename T> void erase_if(T &&container, Pred p) {
  llvm::erase_if(std::forward<T>(container), p);
}

} // namespace

// --- Begin AlignVectors

// For brevity, only consider loads. We identify a group of loads where we
// know the relative differences between their addresses, so we know how they
// are laid out in memory (relative to one another). These loads can overlap,
// and can be shorter or longer than the desired vector length.
// Ultimately we want to generate a sequence of aligned loads that will load
// every byte that the original loads loaded, and have the program use these
// loaded values instead of the original loads.
// We consider the contiguous memory area spanned by all these loads.
//
// Let's say that a single aligned vector load can load 16 bytes at a time.
// If the program wanted to use a byte at offset 13 from the beginning of the
// original span, it will be a byte at offset 13+x in the aligned data for
// some x >= 0. This may happen to be in the first aligned load, or in the
// load following it. Since we generally don't know what that alignment value
// is at compile time, we proactively do valigns on the aligned loads, so that
// the byte that was at offset 13 is still at offset 13 after the valigns.
//
// This will be the starting point for making the rest of the program use the
// data loaded by the new loads.
// For each original load, and its users:
//   %v = load ...
//   ... = %v
//   ... = %v
// we create
//   %new_v = extract/combine/shuffle data from loaded/valigned vectors so
//            it contains the same value as %v did before
// then replace all users of %v with %new_v.
//   ... = %new_v
//   ... = %new_v

auto AlignVectors::ByteSpan::extent() const -> int {
  if (size() == 0)
    return 0;
  int Min = Blocks[0].Pos;
  int Max = Blocks[0].Pos + Blocks[0].Seg.Size;
  for (int i = 1, e = size(); i != e; ++i) {
    Min = std::min(Min, Blocks[i].Pos);
    Max = std::max(Max, Blocks[i].Pos + Blocks[i].Seg.Size);
  }
  return Max - Min;
}

auto AlignVectors::ByteSpan::section(int Start, int Length) const -> ByteSpan {
  ByteSpan Section;
  for (const ByteSpan::Block &B : Blocks) {
    int L = std::max(B.Pos, Start);                       // Left end.
    int R = std::min(B.Pos + B.Seg.Size, Start + Length); // Right end+1.
    if (L < R) {
      // How much to chop off the beginning of the segment:
      int Off = L > B.Pos ? L - B.Pos : 0;
      Section.Blocks.emplace_back(B.Seg.Val, B.Seg.Start + Off, R - L, L);
    }
  }
  return Section;
}

auto AlignVectors::ByteSpan::shift(int Offset) -> ByteSpan & {
  for (Block &B : Blocks)
    B.Pos += Offset;
  return *this;
}

auto AlignVectors::ByteSpan::values() const -> SmallVector<Value *, 8> {
  SmallVector<Value *, 8> Values(Blocks.size());
  for (int i = 0, e = Blocks.size(); i != e; ++i)
    Values[i] = Blocks[i].Seg.Val;
  return Values;
}

auto AlignVectors::getAlignFromValue(const Value *V) const -> Align {
  const auto *C = dyn_cast<ConstantInt>(V);
  assert(C && "Alignment must be a compile-time constant integer");
  return C->getAlignValue();
}

auto AlignVectors::getAddrInfo(Instruction &In) const
    -> std::optional<AddrInfo> {
  if (auto *L = isCandidate<LoadInst>(&In))
    return AddrInfo(HVC, L, L->getPointerOperand(), L->getType(),
                    L->getAlign());
  if (auto *S = isCandidate<StoreInst>(&In))
    return AddrInfo(HVC, S, S->getPointerOperand(),
                    S->getValueOperand()->getType(), S->getAlign());
  if (auto *II = isCandidate<IntrinsicInst>(&In)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    switch (ID) {
    case Intrinsic::masked_load:
      return AddrInfo(HVC, II, II->getArgOperand(0), II->getType(),
                      getAlignFromValue(II->getArgOperand(1)));
    case Intrinsic::masked_store:
      return AddrInfo(HVC, II, II->getArgOperand(1),
                      II->getArgOperand(0)->getType(),
                      getAlignFromValue(II->getArgOperand(2)));
    }
  }
  return std::nullopt;
}

auto AlignVectors::isHvx(const AddrInfo &AI) const -> bool {
  return HVC.HST.isTypeForHVX(AI.ValTy);
}

auto AlignVectors::getPayload(Value *Val) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(Val)) {
    Intrinsic::ID ID = 0;
    if (auto *II = dyn_cast<IntrinsicInst>(In))
      ID = II->getIntrinsicID();
    if (isa<StoreInst>(In) || ID == Intrinsic::masked_store)
      return In->getOperand(0);
  }
  return Val;
}

auto AlignVectors::getMask(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II->getArgOperand(2);
    case Intrinsic::masked_store:
      return II->getArgOperand(3);
    }
  }

  Type *ValTy = getPayload(Val)->getType();
  if (auto *VecTy = dyn_cast<VectorType>(ValTy))
    return HVC.getFullValue(HVC.getBoolTy(HVC.length(VecTy)));
  return HVC.getFullValue(HVC.getBoolTy());
}

auto AlignVectors::getPassThrough(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load)
      return II->getArgOperand(3);
  }
  return UndefValue::get(getPayload(Val)->getType());
}

auto AlignVectors::createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr,
                                         Type *ValTy, int Adjust,
                                         const InstMap &CloneMap) const
    -> Value * {
  if (auto *I = dyn_cast<Instruction>(Ptr))
    if (Instruction *New = CloneMap.lookup(I))
      Ptr = New;
  return Builder.CreatePtrAdd(Ptr, HVC.getConstInt(Adjust), "gep");
}

auto AlignVectors::createAlignedPointer(IRBuilderBase &Builder, Value *Ptr,
                                        Type *ValTy, int Alignment,
                                        const InstMap &CloneMap) const
    -> Value * {
  auto remap = [&](Value *V) -> Value * {
    if (auto *I = dyn_cast<Instruction>(V)) {
      for (auto [Old, New] : CloneMap)
        I->replaceUsesOfWith(Old, New);
      return I;
    }
    return V;
  };
  Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy(), "pti");
  Value *Mask = HVC.getConstInt(-Alignment);
  Value *And = Builder.CreateAnd(remap(AsInt), Mask, "and");
  return Builder.CreateIntToPtr(
      And, PointerType::getUnqual(ValTy->getContext()), "itp");
}

auto AlignVectors::createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                              Value *Predicate, int Alignment, Value *Mask,
                              Value *PassThru,
                              ArrayRef<Value *> MDSources) const -> Value * {
  bool HvxHasPredLoad = HVC.HST.useHVXV62Ops();
  // Predicate is nullptr if not creating a predicated load.
  if (Predicate) {
    assert(!Predicate->getType()->isVectorTy() &&
           "Expecting scalar predicate");
    if (HVC.isFalse(Predicate))
      return UndefValue::get(ValTy);
    if (!HVC.isTrue(Predicate) && HvxHasPredLoad) {
      Value *Load = createPredicatedLoad(Builder, ValTy, Ptr, Predicate,
                                         Alignment, MDSources);
      return Builder.CreateSelect(Mask, Load, PassThru);
    }
    // Predicate == true here.
  }
  assert(!HVC.isUndef(Mask)); // Should this be allowed?
  if (HVC.isZero(Mask))
    return PassThru;
  if (HVC.isTrue(Mask))
    return createSimpleLoad(Builder, ValTy, Ptr, Alignment, MDSources);

  Instruction *Load = Builder.CreateMaskedLoad(ValTy, Ptr, Align(Alignment),
                                               Mask, PassThru, "mld");
  propagateMetadata(Load, MDSources);
  return Load;
}

auto AlignVectors::createSimpleLoad(IRBuilderBase &Builder, Type *ValTy,
                                    Value *Ptr, int Alignment,
                                    ArrayRef<Value *> MDSources) const
    -> Value * {
  Instruction *Load =
      Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment), "ald");
  propagateMetadata(Load, MDSources);
  return Load;
}

auto AlignVectors::createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy,
                                        Value *Ptr, Value *Predicate,
                                        int Alignment,
                                        ArrayRef<Value *> MDSources) const
    -> Value * {
  assert(HVC.HST.isTypeForHVX(ValTy) &&
         "Predicated 'scalar' vector loads not yet supported");
  assert(Predicate);
  assert(!Predicate->getType()->isVectorTy() && "Expecting scalar predicate");
  assert(HVC.getSizeOf(ValTy, HVC.Alloc) % Alignment == 0);
  if (HVC.isFalse(Predicate))
    return UndefValue::get(ValTy);
  if (HVC.isTrue(Predicate))
    return createSimpleLoad(Builder, ValTy, Ptr, Alignment, MDSources);

  auto V6_vL32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vL32b_pred_ai);
  // FIXME: This may not put the offset from Ptr into the vmem offset.
  return HVC.createHvxIntrinsic(Builder, V6_vL32b_pred_ai, ValTy,
                                {Predicate, Ptr, HVC.getConstInt(0)},
                                std::nullopt, MDSources);
}

auto AlignVectors::createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                               Value *Predicate, int Alignment, Value *Mask,
                               ArrayRef<Value *> MDSources) const -> Value * {
  if (HVC.isZero(Mask) || HVC.isUndef(Val) || HVC.isUndef(Mask))
    return UndefValue::get(Val->getType());
  assert(!Predicate || (!Predicate->getType()->isVectorTy() &&
                        "Expecting scalar predicate"));
  if (Predicate) {
    if (HVC.isFalse(Predicate))
      return UndefValue::get(Val->getType());
    if (HVC.isTrue(Predicate))
      Predicate = nullptr;
  }
  // Here both Predicate and Mask are true or unknown.
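  // Recap of the remaining cases handled below (illustrative summary only):
  //   Mask == true, Predicate unknown -> predicated store
  //   Mask == true, Predicate absent  -> simple store
  //   Mask unknown, Predicate absent  -> masked store
  //   Mask unknown, Predicate unknown -> predicated load + mux +
  //                                      predicated store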

  if (HVC.isTrue(Mask)) {
    if (Predicate) { // Predicate unknown
      return createPredicatedStore(Builder, Val, Ptr, Predicate, Alignment,
                                   MDSources);
    }
    // Predicate is true:
    return createSimpleStore(Builder, Val, Ptr, Alignment, MDSources);
  }

  // Mask is unknown
  if (!Predicate) {
    Instruction *Store =
        Builder.CreateMaskedStore(Val, Ptr, Align(Alignment), Mask);
    propagateMetadata(Store, MDSources);
    return Store;
  }

  // Both Predicate and Mask are unknown.
  // Emulate masked store with predicated-load + mux + predicated-store.
  Value *PredLoad = createPredicatedLoad(Builder, Val->getType(), Ptr,
                                         Predicate, Alignment, MDSources);
  Value *Mux = Builder.CreateSelect(Mask, Val, PredLoad);
  return createPredicatedStore(Builder, Mux, Ptr, Predicate, Alignment,
                               MDSources);
}

auto AlignVectors::createSimpleStore(IRBuilderBase &Builder, Value *Val,
                                     Value *Ptr, int Alignment,
                                     ArrayRef<Value *> MDSources) const
    -> Value * {
  Instruction *Store = Builder.CreateAlignedStore(Val, Ptr, Align(Alignment));
  propagateMetadata(Store, MDSources);
  return Store;
}

auto AlignVectors::createPredicatedStore(IRBuilderBase &Builder, Value *Val,
                                         Value *Ptr, Value *Predicate,
                                         int Alignment,
                                         ArrayRef<Value *> MDSources) const
    -> Value * {
  assert(HVC.HST.isTypeForHVX(Val->getType()) &&
         "Predicated 'scalar' vector stores not yet supported");
  assert(Predicate);
  if (HVC.isFalse(Predicate))
    return UndefValue::get(Val->getType());
  if (HVC.isTrue(Predicate))
    return createSimpleStore(Builder, Val, Ptr, Alignment, MDSources);

  assert(HVC.getSizeOf(Val, HVC.Alloc) % Alignment == 0);
  auto V6_vS32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vS32b_pred_ai);
  // FIXME: This may not put the offset from Ptr into the vmem offset.
  return HVC.createHvxIntrinsic(Builder, V6_vS32b_pred_ai, nullptr,
                                {Predicate, Ptr, HVC.getConstInt(0), Val},
                                std::nullopt, MDSources);
}

auto AlignVectors::getUpwardDeps(Instruction *In, Instruction *Base) const
    -> DepList {
  BasicBlock *Parent = Base->getParent();
  assert(In->getParent() == Parent &&
         "Base and In should be in the same block");
  assert(Base->comesBefore(In) && "Base should come before In");

  DepList Deps;
  std::deque<Instruction *> WorkQ = {In};
  while (!WorkQ.empty()) {
    Instruction *D = WorkQ.front();
    WorkQ.pop_front();
    if (D != In)
      Deps.insert(D);
    for (Value *Op : D->operands()) {
      if (auto *I = dyn_cast<Instruction>(Op)) {
        if (I->getParent() == Parent && Base->comesBefore(I))
          WorkQ.push_back(I);
      }
    }
  }
  return Deps;
}

auto AlignVectors::createAddressGroups() -> bool {
  // An address group created here may contain instructions spanning
  // multiple basic blocks.
  AddrList WorkStack;

  auto findBaseAndOffset = [&](AddrInfo &AI) -> std::pair<Instruction *, int> {
    for (AddrInfo &W : WorkStack) {
      if (auto D = HVC.calculatePointerDifference(AI.Addr, W.Addr))
        return std::make_pair(W.Inst, *D);
    }
    return std::make_pair(nullptr, 0);
  };

  auto traverseBlock = [&](DomTreeNode *DomN, auto Visit) -> void {
    BasicBlock &Block = *DomN->getBlock();
    for (Instruction &I : Block) {
      auto AI = this->getAddrInfo(I); // Use this-> for gcc6.
      if (!AI)
        continue;
      auto F = findBaseAndOffset(*AI);
      Instruction *GroupInst;
      if (Instruction *BI = F.first) {
        AI->Offset = F.second;
        GroupInst = BI;
      } else {
        WorkStack.push_back(*AI);
        GroupInst = AI->Inst;
      }
      AddrGroups[GroupInst].push_back(*AI);
    }

    for (DomTreeNode *C : DomN->children())
      Visit(C, Visit);

    while (!WorkStack.empty() && WorkStack.back().Inst->getParent() == &Block)
      WorkStack.pop_back();
  };

  traverseBlock(HVC.DT.getRootNode(), traverseBlock);
  assert(WorkStack.empty());

  // AddrGroups are formed.

  // Remove groups of size 1.
  erase_if(AddrGroups, [](auto &G) { return G.second.size() == 1; });
  // Remove groups that don't use HVX types.
  erase_if(AddrGroups, [&](auto &G) {
    return llvm::none_of(
        G.second, [&](auto &I) { return HVC.HST.isTypeForHVX(I.ValTy); });
  });

  return !AddrGroups.empty();
}

auto AlignVectors::createLoadGroups(const AddrList &Group) const -> MoveList {
  // Form load groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.
  unsigned SizeLimit = VAGroupSizeLimit;
  if (SizeLimit == 0)
    return {};

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    if (Move.Main.size() >= SizeLimit)
      return false;
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // Leading instruction in the load group.
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    // Check if it's safe to move the load.
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator()))
      return false;
    // And if it's safe to clone the dependencies.
    auto isSafeToCopyAtBase = [&](const Instruction *I) {
      return HVC.isSafeToMoveBeforeInBB(*I, Base->getIterator()) &&
             HVC.isSafeToClone(*I);
    };
    DepList Deps = getUpwardDeps(Info.Inst, Base);
    if (!llvm::all_of(Deps, isSafeToCopyAtBase))
      return false;

    Move.Main.push_back(Info.Inst);
    llvm::append_range(Move.Deps, Deps);
    return true;
  };

  MoveList LoadGroups;

  for (const AddrInfo &Info : Group) {
    if (!Info.Inst->mayReadFromMemory())
      continue;
    if (LoadGroups.empty() || !tryAddTo(Info, LoadGroups.back()))
      LoadGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), true);
  }

  // Erase singleton groups.
  erase_if(LoadGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });

  // Erase HVX groups on targets < HvxV62 (due to lack of predicated loads).
  if (!HVC.HST.useHVXV62Ops())
    erase_if(LoadGroups, [](const MoveGroup &G) { return G.IsHvx; });

  return LoadGroups;
}

auto AlignVectors::createStoreGroups(const AddrList &Group) const -> MoveList {
  // Form store groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.
  unsigned SizeLimit = VAGroupSizeLimit;
  if (SizeLimit == 0)
    return {};

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    if (Move.Main.size() >= SizeLimit)
      return false;
    // For stores with return values we'd have to collect downward
    // dependencies.
    // There are no such stores that we handle at the moment, so omit that.
    assert(Info.Inst->getType()->isVoidTy() &&
           "Not handling stores with return values");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // For stores we need to be careful whether it's safe to move them.
    // Stores that are otherwise safe to move together may not appear safe
    // to move over one another (i.e. isSafeToMoveBefore may return false).
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator(),
                                    Move.Main))
      return false;
    Move.Main.push_back(Info.Inst);
    return true;
  };

  MoveList StoreGroups;

  for (auto I = Group.rbegin(), E = Group.rend(); I != E; ++I) {
    const AddrInfo &Info = *I;
    if (!Info.Inst->mayWriteToMemory())
      continue;
    if (StoreGroups.empty() || !tryAddTo(Info, StoreGroups.back()))
      StoreGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), false);
  }

  // Erase singleton groups.
  erase_if(StoreGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });

  // Erase HVX groups on targets < HvxV62 (due to lack of predicated loads).
  if (!HVC.HST.useHVXV62Ops())
    erase_if(StoreGroups, [](const MoveGroup &G) { return G.IsHvx; });

  // Erase groups where every store is a full HVX vector. The reason is that
  // aligning predicated stores generates complex code that may be less
  // efficient than a sequence of unaligned vector stores.
  if (!VADoFullStores) {
    erase_if(StoreGroups, [this](const MoveGroup &G) {
      return G.IsHvx && llvm::all_of(G.Main, [this](Instruction *S) {
               auto MaybeInfo = this->getAddrInfo(*S);
               assert(MaybeInfo.has_value());
               return HVC.HST.isHVXVectorType(
                   EVT::getEVT(MaybeInfo->ValTy, false));
             });
    });
  }

  return StoreGroups;
}

auto AlignVectors::moveTogether(MoveGroup &Move) const -> bool {
  // Move all instructions to be adjacent.
  assert(!Move.Main.empty() && "Move group should have non-empty Main");
  Instruction *Where = Move.Main.front();

  if (Move.IsLoad) {
    // Move all the loads (and dependencies) to where the first load is.
    // Clone all deps to before Where, keeping order.
    Move.Clones = cloneBefore(Where, Move.Deps);
    // Move all main instructions to after Where, keeping order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main) {
      if (M != Where)
        M->moveAfter(Where);
      for (auto [Old, New] : Move.Clones)
        M->replaceUsesOfWith(Old, New);
      Where = M;
    }
    // Replace Deps with the clones.
    for (int i = 0, e = Move.Deps.size(); i != e; ++i)
      Move.Deps[i] = Move.Clones[Move.Deps[i]];
  } else {
    // Move all the stores to where the last store is.
    // NOTE: Deps are empty for "store" groups. If they need to be
    // non-empty, decide on the order.
    assert(Move.Deps.empty());
    // Move all main instructions to before Where, inverting order.
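    // For example (illustrative): if Move.Main is [s3, s2, s1] (stores are
    // collected in reverse program order, so s3 is the last store), the loop
    // below leaves them adjacent as s1, s2, s3 at s3's position.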
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveBefore(Where);
      Where = M;
    }
  }

  return Move.Main.size() + Move.Deps.size() > 1;
}

template <typename T>
auto AlignVectors::cloneBefore(Instruction *To, T &&Insts) const -> InstMap {
  InstMap Map;

  for (Instruction *I : Insts) {
    assert(HVC.isSafeToClone(*I));
    Instruction *C = I->clone();
    C->setName(Twine("c.") + I->getName() + ".");
    C->insertBefore(To);

    for (auto [Old, New] : Map)
      C->replaceUsesOfWith(Old, New);
    Map.insert(std::make_pair(I, C));
  }
  return Map;
}

auto AlignVectors::realignLoadGroup(IRBuilderBase &Builder,
                                    const ByteSpan &VSpan, int ScLen,
                                    Value *AlignVal, Value *AlignAddr) const
    -> void {
  LLVM_DEBUG(dbgs() << __func__ << "\n");

  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);
  BasicBlock::iterator BasePos = Builder.GetInsertPoint();
  BasicBlock *BaseBlock = Builder.GetInsertBlock();

  ByteSpan ASpan;
  auto *True = HVC.getFullValue(HVC.getBoolTy(ScLen));
  auto *Undef = UndefValue::get(SecTy);

  // A created load does not have to be an Instruction (e.g. it may be
  // "undef").
  SmallVector<Value *> Loads(NumSectors + DoAlign, nullptr);

  // We could create all of the aligned loads, and generate the valigns
  // at the location of the first load, but for large load groups, this
  // could create highly suboptimal code (there have been groups of 140+
  // loads in real code).
  // Instead, place the loads/valigns as close to the users as possible.
  // In any case we need to have a mapping from the blocks of VSpan (the
  // span covered by the pre-existing loads) to ASpan (the span covered
  // by the aligned loads). There is a small problem, though: ASpan needs
  // to have pointers to the loads/valigns, but we don't have these loads
  // because we don't know where to put them yet. We find out by creating
  // a section of ASpan that corresponds to values (blocks) from VSpan,
  // and checking where the new load should be placed. We need to attach
  // this location information to each block in ASpan somehow, so we put
  // distinct values for Seg.Val in each ASpan.Blocks[i], and use a map
  // to store the location for each Seg.Val.
  // The distinct values happen to be Blocks[i].Seg.Val = &Blocks[i],
  // which helps with printing ByteSpans without crashing when printing
  // Segments with these temporary identifiers in place of Val.

  // Populate the blocks first, to avoid reallocations of the vector
  // interfering with generating the placeholder addresses.
  for (int Index = 0; Index != NumSectors; ++Index)
    ASpan.Blocks.emplace_back(nullptr, ScLen, Index * ScLen);
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan.Blocks[Index].Seg.Val =
        reinterpret_cast<Value *>(&ASpan.Blocks[Index]);
  }

  // Multiple values from VSpan can map to the same value in ASpan. Since we
  // try to create loads lazily, we need to find the earliest use for each
  // value from ASpan.
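  // For example, if two VSpan blocks overlap the same ASpan sector, the
  // entry recorded for that sector will be the earlier of the two blocks'
  // earliest users.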
  DenseMap<void *, Instruction *> EarliestUser;
  auto isEarlier = [](Instruction *A, Instruction *B) {
    if (B == nullptr)
      return true;
    if (A == nullptr)
      return false;
    assert(A->getParent() == B->getParent());
    return A->comesBefore(B);
  };
  auto earliestUser = [&](const auto &Uses) {
    Instruction *User = nullptr;
    for (const Use &U : Uses) {
      auto *I = dyn_cast<Instruction>(U.getUser());
      assert(I != nullptr && "Load used in a non-instruction?");
      // Make sure we only consider users in this block, but we need
      // to remember if there were users outside the block too. This is
      // because if no users are found, aligned loads will not be created.
      if (I->getParent() == BaseBlock) {
        if (!isa<PHINode>(I))
          User = std::min(User, I, isEarlier);
      } else {
        User = std::min(User, BaseBlock->getTerminator(), isEarlier);
      }
    }
    return User;
  };

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size);
    for (const ByteSpan::Block &S : ASection) {
      EarliestUser[S.Seg.Val] = std::min(
          EarliestUser[S.Seg.Val], earliestUser(B.Seg.Val->uses()), isEarlier);
    }
  }

  LLVM_DEBUG({
    dbgs() << "ASpan:\n" << ASpan << '\n';
    dbgs() << "Earliest users of ASpan:\n";
    for (auto &[Val, User] : EarliestUser) {
      dbgs() << Val << "\n ->" << *User << '\n';
    }
  });

  auto createLoad = [&](IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int Index, bool MakePred) {
    Value *Ptr =
        createAdjustedPointer(Builder, AlignAddr, SecTy, Index * ScLen);
    Value *Predicate =
        MakePred ? makeTestIfUnaligned(Builder, AlignVal, ScLen) : nullptr;

    // If vector shifting is potentially needed, accumulate metadata
    // from source sections of twice the load width.
    int Start = (Index - DoAlign) * ScLen;
    int Width = (1 + DoAlign) * ScLen;
    return this->createLoad(Builder, SecTy, Ptr, Predicate, ScLen, True, Undef,
                            VSpan.section(Start, Width).values());
  };

  auto moveBefore = [this](Instruction *In, Instruction *To) {
    // Move In and its upward dependencies to before To.
    assert(In->getParent() == To->getParent());
    DepList Deps = getUpwardDeps(In, To);
    In->moveBefore(To);
    // DepList is sorted with respect to positions in the basic block.
    InstMap Map = cloneBefore(In, Deps);
    for (auto [Old, New] : Map)
      In->replaceUsesOfWith(Old, New);
  };

  // Generate necessary loads at appropriate locations.
  LLVM_DEBUG(dbgs() << "Creating loads for ASpan sectors\n");
  for (int Index = 0; Index != NumSectors + 1; ++Index) {
    // In ASpan, each block will be either a single aligned load, or a
    // valign of a pair of loads. In the latter case, an aligned load j
    // will belong to the current valign, and the one in the previous
    // block (for j > 0).
    // Place the load at a location which will dominate the valign, assuming
    // the valign will be placed right before the earliest user.
    Instruction *PrevAt =
        DoAlign && Index > 0 ? EarliestUser[&ASpan[Index - 1]] : nullptr;
    Instruction *ThisAt =
        Index < NumSectors ? EarliestUser[&ASpan[Index]] : nullptr;
    if (auto *Where = std::min(PrevAt, ThisAt, isEarlier)) {
      Builder.SetInsertPoint(Where);
      Loads[Index] =
          createLoad(Builder, VSpan, Index, DoAlign && Index == NumSectors);
      // We know it's safe to put the load at BasePos, but we'd prefer to put
      // it at "Where". To see if the load is safe to be placed at Where, put
      // it there first and then check if it's safe to move it to BasePos.
      // If not, then the load needs to be placed at BasePos.
      // We can't do this check proactively because we need the load to exist
      // in order to check legality.
      if (auto *Load = dyn_cast<Instruction>(Loads[Index])) {
        if (!HVC.isSafeToMoveBeforeInBB(*Load, BasePos))
          moveBefore(Load, &*BasePos);
      }
      LLVM_DEBUG(dbgs() << "Loads[" << Index << "]:" << *Loads[Index] << '\n');
    }
  }

  // Generate valigns if needed, and fill in proper values in ASpan.
  LLVM_DEBUG(dbgs() << "Creating values for ASpan sectors\n");
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan[Index].Seg.Val = nullptr;
    if (auto *Where = EarliestUser[&ASpan[Index]]) {
      Builder.SetInsertPoint(Where);
      Value *Val = Loads[Index];
      assert(Val != nullptr);
      if (DoAlign) {
        Value *NextLoad = Loads[Index + 1];
        assert(NextLoad != nullptr);
        Val = HVC.vralignb(Builder, Val, NextLoad, AlignVal);
      }
      ASpan[Index].Seg.Val = Val;
      LLVM_DEBUG(dbgs() << "ASpan[" << Index << "]:" << *Val << '\n');
    }
  }

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size).shift(-B.Pos);
    Value *Accum = UndefValue::get(HVC.getByteTy(B.Seg.Size));
    Builder.SetInsertPoint(cast<Instruction>(B.Seg.Val));

    // We're generating a reduction, where each instruction depends on
    // the previous one, so we need to order them according to the position
    // of their inputs in the code.
    std::vector<ByteSpan::Block *> ABlocks;
    for (ByteSpan::Block &S : ASection) {
      if (S.Seg.Val != nullptr)
        ABlocks.push_back(&S);
    }
    llvm::sort(ABlocks,
               [&](const ByteSpan::Block *A, const ByteSpan::Block *B) {
                 return isEarlier(cast<Instruction>(A->Seg.Val),
                                  cast<Instruction>(B->Seg.Val));
               });
    for (ByteSpan::Block *S : ABlocks) {
      // The processing of the data loaded by the aligned loads
      // needs to be inserted after the data is available.
      Instruction *SegI = cast<Instruction>(S->Seg.Val);
      Builder.SetInsertPoint(&*std::next(SegI->getIterator()));
      Value *Pay = HVC.vbytes(Builder, getPayload(S->Seg.Val));
      Accum =
          HVC.insertb(Builder, Accum, Pay, S->Seg.Start, S->Seg.Size, S->Pos);
    }
    // Instead of casting everything to bytes for the vselect, cast to the
    // original value type. This will avoid complications with casting masks.
    // For example, in cases when the original mask applied to i32, it could
    // be converted to a mask applicable to i8 via pred_typecast intrinsic,
    // but if the mask is not exactly of HVX length, extra handling would be
    // needed to make it work.
    Type *ValTy = getPayload(B.Seg.Val)->getType();
    Value *Cast = Builder.CreateBitCast(Accum, ValTy, "cst");
    Value *Sel = Builder.CreateSelect(getMask(B.Seg.Val), Cast,
                                      getPassThrough(B.Seg.Val), "sel");
    B.Seg.Val->replaceAllUsesWith(Sel);
  }
}

auto AlignVectors::realignStoreGroup(IRBuilderBase &Builder,
                                     const ByteSpan &VSpan, int ScLen,
                                     Value *AlignVal, Value *AlignAddr) const
    -> void {
  LLVM_DEBUG(dbgs() << __func__ << "\n");

  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);

  // Stores.
  ByteSpan ASpanV, ASpanM;

  // Return a vector value corresponding to the input value Val:
  // either <1 x Val> for scalar Val, or Val itself for vector Val.
  auto MakeVec = [](IRBuilderBase &Builder, Value *Val) -> Value * {
    Type *Ty = Val->getType();
    if (Ty->isVectorTy())
      return Val;
    auto *VecTy = VectorType::get(Ty, 1, /*Scalable=*/false);
    return Builder.CreateBitCast(Val, VecTy, "cst");
  };

  // Create an extra "undef" sector at the beginning and at the end.
  // They will be used as the left/right filler in the vlalign step.
  for (int Index = (DoAlign ? -1 : 0); Index != NumSectors + DoAlign;
       ++Index) {
    // For stores, the size of each section is an aligned vector length.
    // Adjust the store offsets relative to the section start offset.
    ByteSpan VSection =
        VSpan.section(Index * ScLen, ScLen).shift(-Index * ScLen);
    Value *Undef = UndefValue::get(SecTy);
    Value *Zero = HVC.getNullValue(SecTy);
    Value *AccumV = Undef;
    Value *AccumM = Zero;
    for (ByteSpan::Block &S : VSection) {
      Value *Pay = getPayload(S.Seg.Val);
      Value *Mask = HVC.rescale(Builder, MakeVec(Builder, getMask(S.Seg.Val)),
                                Pay->getType(), HVC.getByteTy());
      Value *PartM = HVC.insertb(Builder, Zero, HVC.vbytes(Builder, Mask),
                                 S.Seg.Start, S.Seg.Size, S.Pos);
      AccumM = Builder.CreateOr(AccumM, PartM);

      Value *PartV = HVC.insertb(Builder, Undef, HVC.vbytes(Builder, Pay),
                                 S.Seg.Start, S.Seg.Size, S.Pos);

      AccumV = Builder.CreateSelect(
          Builder.CreateICmp(CmpInst::ICMP_NE, PartM, Zero), PartV, AccumV);
    }
    ASpanV.Blocks.emplace_back(AccumV, ScLen, Index * ScLen);
    ASpanM.Blocks.emplace_back(AccumM, ScLen, Index * ScLen);
  }

  LLVM_DEBUG({
    dbgs() << "ASpanV before vlalign:\n" << ASpanV << '\n';
    dbgs() << "ASpanM before vlalign:\n" << ASpanM << '\n';
  });

  // vlalign
  if (DoAlign) {
    for (int Index = 1; Index != NumSectors + 2; ++Index) {
      Value *PrevV = ASpanV[Index - 1].Seg.Val, *ThisV = ASpanV[Index].Seg.Val;
      Value *PrevM = ASpanM[Index - 1].Seg.Val, *ThisM = ASpanM[Index].Seg.Val;
      assert(isSectorTy(PrevV->getType()) && isSectorTy(PrevM->getType()));
      ASpanV[Index - 1].Seg.Val = HVC.vlalignb(Builder, PrevV, ThisV, AlignVal);
      ASpanM[Index - 1].Seg.Val = HVC.vlalignb(Builder, PrevM, ThisM, AlignVal);
    }
  }

  LLVM_DEBUG({
    dbgs() << "ASpanV after vlalign:\n" << ASpanV << '\n';
    dbgs() << "ASpanM after vlalign:\n" << ASpanM << '\n';
  });

  auto createStore = [&](IRBuilderBase &Builder, const ByteSpan &ASpanV,
                         const ByteSpan &ASpanM, int Index, bool MakePred) {
    Value *Val = ASpanV[Index].Seg.Val;
    Value *Mask = ASpanM[Index].Seg.Val; // bytes
    if (HVC.isUndef(Val) || HVC.isZero(Mask))
      return;
    Value *Ptr =
        createAdjustedPointer(Builder, AlignAddr, SecTy, Index * ScLen);
    Value *Predicate =
        MakePred ? makeTestIfUnaligned(Builder, AlignVal, ScLen) : nullptr;

    // If vector shifting is potentially needed, accumulate metadata
    // from source sections of twice the store width.
    int Start = (Index - DoAlign) * ScLen;
    int Width = (1 + DoAlign) * ScLen;
    this->createStore(Builder, Val, Ptr, Predicate, ScLen,
                      HVC.vlsb(Builder, Mask),
                      VSpan.section(Start, Width).values());
  };

  for (int Index = 0; Index != NumSectors + DoAlign; ++Index) {
    createStore(Builder, ASpanV, ASpanM, Index,
                DoAlign && Index == NumSectors);
  }
}

auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
  LLVM_DEBUG(dbgs() << "Realigning group:\n" << Move << '\n');

  // TODO: Needs support for masked loads/stores of "scalar" vectors.
  if (!Move.IsHvx)
    return false;

  // Return the element with the maximum alignment from Range,
  // where GetValue obtains the value to compare from an element.
  auto getMaxOf = [](auto Range, auto GetValue) {
    return *std::max_element(
        Range.begin(), Range.end(),
        [&GetValue](auto &A, auto &B) { return GetValue(A) < GetValue(B); });
  };

  const AddrList &BaseInfos = AddrGroups.at(Move.Base);

  // Conceptually, there is a vector of N bytes covering the addresses
  // starting from the minimum offset (i.e. Base.Addr+Start). This vector
  // represents a contiguous memory region that spans all accessed memory
  // locations.
  // The correspondence between loaded or stored values will be expressed
  // in terms of this vector. For example, the 0th element of the vector
  // from the Base address info will start at byte Start from the beginning
  // of this conceptual vector.
  //
  // This vector will be loaded/stored starting at the nearest down-aligned
  // address, and the amount of the down-alignment will be AlignVal:
  //   valign(load_vector(align_down(Base+Start)), AlignVal)

  std::set<Instruction *> TestSet(Move.Main.begin(), Move.Main.end());
  AddrList MoveInfos;
  llvm::copy_if(
      BaseInfos, std::back_inserter(MoveInfos),
      [&TestSet](const AddrInfo &AI) { return TestSet.count(AI.Inst); });

  // Maximum alignment present in the whole address group.
  const AddrInfo &WithMaxAlign =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
  Align MaxGiven = WithMaxAlign.HaveAlign;

  // Element with the minimum offset in the move address group (maximizing
  // -Offset minimizes Offset).
  const AddrInfo &WithMinOffset =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return -AI.Offset; });

  const AddrInfo &WithMaxNeeded =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.NeedAlign; });
  Align MinNeeded = WithMaxNeeded.NeedAlign;

  // Set the builder's insertion point right before the load group, or
  // immediately after the store group. (Instructions in a store group are
  // listed in reverse order.)
  Instruction *InsertAt = Move.Main.front();
  if (!Move.IsLoad) {
    // There should be a terminator (which a store isn't, but check anyway).
    assert(InsertAt->getIterator() != InsertAt->getParent()->end());
    InsertAt = &*std::next(InsertAt->getIterator());
  }

  IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
                    InstSimplifyFolder(HVC.DL));
  Value *AlignAddr = nullptr; // Actual aligned address.
  Value *AlignVal = nullptr;  // Right-shift amount (for valign).

  if (MinNeeded <= MaxGiven) {
    int Start = WithMinOffset.Offset;
    int OffAtMax = WithMaxAlign.Offset;
    // Shift the offset of the maximally aligned instruction (OffAtMax)
    // back by just enough multiples of the required alignment to cover the
    // distance from Start to OffAtMax.
    // Calculate the address adjustment amount based on the address with the
    // maximum alignment. This is to allow a simple gep instruction instead
    // of potential bitcasts to i8*.
    int Adjust = -alignTo(OffAtMax - Start, MinNeeded.value());
    AlignAddr = createAdjustedPointer(Builder, WithMaxAlign.Addr,
                                      WithMaxAlign.ValTy, Adjust, Move.Clones);
    int Diff = Start - (OffAtMax + Adjust);
    AlignVal = HVC.getConstInt(Diff);
    assert(Diff >= 0);
    assert(static_cast<decltype(MinNeeded.value())>(Diff) < MinNeeded.value());
  } else {
    // WithMinOffset is the lowest address in the group,
    // WithMinOffset.Addr = Base+Start.
    // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
    // mask off unnecessary bits, so it's ok to use just the original pointer
    // as the alignment amount.
    // Do an explicit down-alignment of the address to avoid creating an
    // aligned instruction with an address that is not really aligned.
    AlignAddr =
        createAlignedPointer(Builder, WithMinOffset.Addr, WithMinOffset.ValTy,
                             MinNeeded.value(), Move.Clones);
    AlignVal =
        Builder.CreatePtrToInt(WithMinOffset.Addr, HVC.getIntTy(), "pti");
    if (auto *I = dyn_cast<Instruction>(AlignVal)) {
      for (auto [Old, New] : Move.Clones)
        I->replaceUsesOfWith(Old, New);
    }
  }

  ByteSpan VSpan;
  for (const AddrInfo &AI : MoveInfos) {
    VSpan.Blocks.emplace_back(AI.Inst, HVC.getSizeOf(AI.ValTy),
                              AI.Offset - WithMinOffset.Offset);
  }

  // The aligned loads/stores will use blocks that are either scalars,
  // or HVX vectors. Let "sector" be the unified term for such a block.
  //   blend(scalar, vector) -> sector...
  int ScLen = Move.IsHvx ? HVC.HST.getVectorLength()
                         : std::max<int>(MinNeeded.value(), 4);
  assert(!Move.IsHvx || ScLen == 64 || ScLen == 128);
  assert(Move.IsHvx || ScLen == 4 || ScLen == 8);

  LLVM_DEBUG({
    dbgs() << "ScLen: " << ScLen << "\n";
    dbgs() << "AlignVal:" << *AlignVal << "\n";
    dbgs() << "AlignAddr:" << *AlignAddr << "\n";
    dbgs() << "VSpan:\n" << VSpan << '\n';
  });

  if (Move.IsLoad)
    realignLoadGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);
  else
    realignStoreGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);

  for (auto *Inst : Move.Main)
    Inst->eraseFromParent();

  return true;
}

auto AlignVectors::makeTestIfUnaligned(IRBuilderBase &Builder, Value *AlignVal,
                                       int Alignment) const -> Value * {
  auto *AlignTy = AlignVal->getType();
  Value *And = Builder.CreateAnd(
      AlignVal, ConstantInt::get(AlignTy, Alignment - 1), "and");
  Value *Zero = ConstantInt::get(AlignTy, 0);
  return Builder.CreateICmpNE(And, Zero, "isz");
}

auto AlignVectors::isSectorTy(Type *Ty) const -> bool {
  if (!HVC.isByteVecTy(Ty))
    return false;
  int Size = HVC.getSizeOf(Ty);
  if (HVC.HST.isTypeForHVX(Ty))
    return Size == static_cast<int>(HVC.HST.getVectorLength());
  return Size == 4 || Size == 8;
}

auto AlignVectors::run() -> bool {
  LLVM_DEBUG(dbgs() << "Running HVC::AlignVectors on " << HVC.F.getName()
                    << '\n');
  if (!createAddressGroups())
    return false;

  LLVM_DEBUG({
    dbgs() << "Address groups(" << AddrGroups.size() << "):\n";
    for (auto &[In, AL] : AddrGroups) {
      for (const AddrInfo &AI : AL)
        dbgs() << "---\n" << AI << '\n';
    }
  });

  bool Changed = false;
  MoveList LoadGroups, StoreGroups;

  for (auto &G : AddrGroups) {
    llvm::append_range(LoadGroups, createLoadGroups(G.second));
    llvm::append_range(StoreGroups, createStoreGroups(G.second));
  }

  LLVM_DEBUG({
    dbgs() << "\nLoad groups(" << LoadGroups.size() << "):\n";
    for (const MoveGroup &G : LoadGroups)
      dbgs() << G << "\n";
    dbgs() << "Store groups(" << StoreGroups.size() << "):\n";
    for (const MoveGroup &G : StoreGroups)
      dbgs() << G << "\n";
  });

  // Cumulative limit on the number of groups.
  unsigned CountLimit = VAGroupCountLimit;
  if (CountLimit == 0)
    return false;

  if (LoadGroups.size() > CountLimit) {
    LoadGroups.resize(CountLimit);
    StoreGroups.clear();
  } else {
    unsigned StoreLimit = CountLimit - LoadGroups.size();
    if (StoreGroups.size() > StoreLimit)
      StoreGroups.resize(StoreLimit);
  }

  for (auto &M : LoadGroups)
    Changed |= moveTogether(M);
  for (auto &M : StoreGroups)
    Changed |= moveTogether(M);

  LLVM_DEBUG(dbgs() << "After moveTogether:\n" << HVC.F);

  for (auto &M : LoadGroups)
    Changed |= realignGroup(M);
  for (auto &M : StoreGroups)
    Changed |= realignGroup(M);

  return Changed;
}

// --- End AlignVectors

// --- Begin HvxIdioms

auto HvxIdioms::getNumSignificantBits(Value *V, Instruction *In) const
    -> std::pair<unsigned, Signedness> {
  unsigned Bits = HVC.getNumSignificantBits(V, In);
  // The significant bits are calculated including the sign bit. This may
  // add an extra bit for zero-extended values, e.g. (zext i32 to i64) may
  // result in 33 significant bits. To avoid extra words, skip the extra
  // sign bit, but keep information that the value is to be treated as
  // unsigned.
  KnownBits Known = HVC.getKnownBits(V, In);
  Signedness Sign = Signed;
  unsigned NumToTest = 0; // Number of bits used in test for unsignedness.
  if (isPowerOf2_32(Bits))
    NumToTest = Bits;
  else if (Bits > 1 && isPowerOf2_32(Bits - 1))
    NumToTest = Bits - 1;

  if (NumToTest != 0 && Known.Zero.ashr(NumToTest).isAllOnes()) {
    Sign = Unsigned;
    Bits = NumToTest;
  }

  // If the top bit of the nearest power-of-2 is zero, this value is
  // positive. It could be treated as either signed or unsigned.
  if (unsigned Pow2 = PowerOf2Ceil(Bits); Pow2 != Bits) {
    if (Known.Zero.ashr(Pow2 - 1).isAllOnes())
      Sign = Positive;
  }
  return {Bits, Sign};
}

auto HvxIdioms::canonSgn(SValue X, SValue Y) const
    -> std::pair<SValue, SValue> {
  // Canonicalize the signedness of X and Y, so that the result is one of:
  //   S, S
  //   U/P, S
  //   U/P, U/P
  if (X.Sgn == Signed && Y.Sgn != Signed)
    std::swap(X, Y);
  return {X, Y};
}

// Match
//   (X * Y) [>> N], or
//   ((X * Y) + (1 << M)) >> N
auto HvxIdioms::matchFxpMul(Instruction &In) const -> std::optional<FxpOp> {
  using namespace PatternMatch;
  auto *Ty = In.getType();

  if (!Ty->isVectorTy() || !Ty->getScalarType()->isIntegerTy())
    return std::nullopt;

  unsigned Width = cast<IntegerType>(Ty->getScalarType())->getBitWidth();

  FxpOp Op;
  Value *Exp = &In;

  // Fixed-point multiplication is always shifted right (except when the
  // fraction is 0 bits).
  auto m_Shr = [](auto &&V, auto &&S) {
    return m_CombineOr(m_LShr(V, S), m_AShr(V, S));
  };

  const APInt *Qn = nullptr;
  if (Value *T; match(Exp, m_Shr(m_Value(T), m_APInt(Qn)))) {
    Op.Frac = Qn->getZExtValue();
    Exp = T;
  } else {
    Op.Frac = 0;
  }

  if (Op.Frac > Width)
    return std::nullopt;

  // Check if there is rounding added.
  const APInt *C = nullptr;
  if (Value *T; Op.Frac > 0 && match(Exp, m_Add(m_Value(T), m_APInt(C)))) {
    uint64_t CV = C->getZExtValue();
    if (CV != 0 && !isPowerOf2_64(CV))
      return std::nullopt;
    if (CV != 0)
      Op.RoundAt = Log2_64(CV);
    Exp = T;
  }

  // Check if the rest is a multiplication.
  if (match(Exp, m_Mul(m_Value(Op.X.Val), m_Value(Op.Y.Val)))) {
    Op.Opcode = Instruction::Mul;
    // FIXME: The information below is recomputed.
    Op.X.Sgn = getNumSignificantBits(Op.X.Val, &In).second;
    Op.Y.Sgn = getNumSignificantBits(Op.Y.Val, &In).second;
    Op.ResTy = cast<VectorType>(Ty);
    return Op;
  }

  return std::nullopt;
}

auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
    -> Value * {
  assert(Op.X.Val->getType() == Op.Y.Val->getType());

  auto *VecTy = dyn_cast<VectorType>(Op.X.Val->getType());
  if (VecTy == nullptr)
    return nullptr;
  auto *ElemTy = cast<IntegerType>(VecTy->getElementType());
  unsigned ElemWidth = ElemTy->getBitWidth();

  // TODO: This can be relaxed after legalization is done pre-isel.
  if ((HVC.length(VecTy) * ElemWidth) % (8 * HVC.HST.getVectorLength()) != 0)
    return nullptr;

  // There are no special intrinsics that should be used for multiplying
  // signed 8-bit values, so just skip them. Normal codegen should handle
  // this just fine.
  if (ElemWidth <= 8)
    return nullptr;
  // Similarly, if this is just a multiplication that can be handled without
  // intervention, then leave it alone.
  if (ElemWidth <= 32 && Op.Frac == 0)
    return nullptr;

  auto [BitsX, SignX] = getNumSignificantBits(Op.X.Val, &In);
  auto [BitsY, SignY] = getNumSignificantBits(Op.Y.Val, &In);

  // TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).

  Value *X = Op.X.Val, *Y = Op.Y.Val;
  IRBuilder Builder(In.getParent(), In.getIterator(),
                    InstSimplifyFolder(HVC.DL));

  auto roundUpWidth = [](unsigned Width) -> unsigned {
    if (Width <= 32 && !isPowerOf2_32(Width)) {
      // If the element width is not a power of 2, round it up
      // to the next one. Do this for widths not exceeding 32.
      return PowerOf2Ceil(Width);
    }
    if (Width > 32 && Width % 32 != 0) {
      // For wider elements, round it up to the nearest multiple of 32.
      return alignTo(Width, 32u);
    }
    return Width;
  };

  BitsX = roundUpWidth(BitsX);
  BitsY = roundUpWidth(BitsY);

  // For elementwise multiplication the vectors must have the same lengths,
  // so resize the elements of both inputs to the same width: the maximum of
  // the calculated significant bits.
  unsigned Width = std::max(BitsX, BitsY);

  auto *ResizeTy = VectorType::get(HVC.getIntTy(Width), VecTy);
  if (Width < ElemWidth) {
    X = Builder.CreateTrunc(X, ResizeTy, "trn");
    Y = Builder.CreateTrunc(Y, ResizeTy, "trn");
  } else if (Width > ElemWidth) {
    X = SignX == Signed ? Builder.CreateSExt(X, ResizeTy, "sxt")
                        : Builder.CreateZExt(X, ResizeTy, "zxt");
    Y = SignY == Signed ? Builder.CreateSExt(Y, ResizeTy, "sxt")
                        : Builder.CreateZExt(Y, ResizeTy, "zxt");
  }

  assert(X->getType() == Y->getType() && X->getType() == ResizeTy);

  unsigned VecLen = HVC.length(ResizeTy);
  unsigned ChopLen = (8 * HVC.HST.getVectorLength()) / std::min(Width, 32u);

  SmallVector<Value *> Results;
  FxpOp ChopOp = Op;
  ChopOp.ResTy = VectorType::get(Op.ResTy->getElementType(), ChopLen, false);

  for (unsigned V = 0; V != VecLen / ChopLen; ++V) {
    ChopOp.X.Val = HVC.subvector(Builder, X, V * ChopLen, ChopLen);
    ChopOp.Y.Val = HVC.subvector(Builder, Y, V * ChopLen, ChopLen);
    Results.push_back(processFxpMulChopped(Builder, In, ChopOp));
    if (Results.back() == nullptr)
      break;
  }

  if (Results.empty() || Results.back() == nullptr)
    return nullptr;

  Value *Cat = HVC.concat(Builder, Results);
  Value *Ext = SignX == Signed || SignY == Signed
                   ? Builder.CreateSExt(Cat, VecTy, "sxt")
                   : Builder.CreateZExt(Cat, VecTy, "zxt");
  return Ext;
}

auto HvxIdioms::processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
                                     const FxpOp &Op) const -> Value * {
  assert(Op.X.Val->getType() == Op.Y.Val->getType());
  auto *InpTy = cast<VectorType>(Op.X.Val->getType());
  unsigned Width = InpTy->getScalarSizeInBits();
  bool Rounding = Op.RoundAt.has_value();

  if (!Op.RoundAt || *Op.RoundAt == Op.Frac - 1) {
    // The fixed-point intrinsics do signed multiplication.
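    // For example, a Q15 multiply reaches this point as Width == 16 with
    // Op.Frac == 15 (i.e. Width == Op.Frac + 1), and its rounding term,
    // if present, is 1 << 14 == 1 << (Op.Frac - 1). Q31 is the i32 analog.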
    if (Width == Op.Frac + 1 && Op.X.Sgn != Unsigned && Op.Y.Sgn != Unsigned) {
      Value *QMul = nullptr;
      if (Width == 16) {
        QMul = createMulQ15(Builder, Op.X, Op.Y, Rounding);
      } else if (Width == 32) {
        QMul = createMulQ31(Builder, Op.X, Op.Y, Rounding);
      }
      if (QMul != nullptr)
        return QMul;
    }
  }

  assert(Width >= 32 || isPowerOf2_32(Width)); // Width <= 32 => Width is 2^n
  assert(Width < 32 || Width % 32 == 0);       // Width > 32 => Width is 32*k

  // If Width < 32, then it should really be 16.
  if (Width < 32) {
    if (Width < 16)
      return nullptr;
    // Getting here with Op.Frac == 0 isn't wrong, but suboptimal: here we
    // generate a full-precision product, which is unnecessary if there is
    // no shift.
    assert(Width == 16);
    assert(Op.Frac != 0 && "Unshifted mul should have been skipped");
    if (Op.Frac == 16) {
      // Multiply high.
      if (Value *MulH = createMulH16(Builder, Op.X, Op.Y))
        return MulH;
    }
    // Do full-precision multiply and shift.
    Value *Prod32 = createMul16(Builder, Op.X, Op.Y);
    if (Rounding) {
      Value *RoundVal = HVC.getConstSplat(Prod32->getType(), 1 << *Op.RoundAt);
      Prod32 = Builder.CreateAdd(Prod32, RoundVal, "add");
    }

    Value *ShiftAmt = HVC.getConstSplat(Prod32->getType(), Op.Frac);
    Value *Shifted = Op.X.Sgn == Signed || Op.Y.Sgn == Signed
                         ? Builder.CreateAShr(Prod32, ShiftAmt, "asr")
                         : Builder.CreateLShr(Prod32, ShiftAmt, "lsr");
    return Builder.CreateTrunc(Shifted, InpTy, "trn");
  }

  // Width >= 32

  // Break up the arguments Op.X and Op.Y into vectors of smaller widths
  // in preparation of doing the multiplication by 32-bit parts.
  auto WordX = HVC.splitVectorElements(Builder, Op.X.Val, /*ToWidth=*/32);
  auto WordY = HVC.splitVectorElements(Builder, Op.Y.Val, /*ToWidth=*/32);
  auto WordP = createMulLong(Builder, WordX, Op.X.Sgn, WordY, Op.Y.Sgn);

  auto *HvxWordTy = cast<VectorType>(WordP.front()->getType());

  // Add the optional rounding to the proper word.
  if (Op.RoundAt.has_value()) {
    Value *Zero = HVC.getNullValue(WordX[0]->getType());
    SmallVector<Value *> RoundV(WordP.size(), Zero);
    RoundV[*Op.RoundAt / 32] =
        HVC.getConstSplat(HvxWordTy, 1 << (*Op.RoundAt % 32));
    WordP = createAddLong(Builder, WordP, RoundV);
  }

  // TODO: This could be factored into a createRightShiftLong helper.

  // Shift all products right by Op.Frac.
  unsigned SkipWords = Op.Frac / 32;
  Constant *ShiftAmt = HVC.getConstSplat(HvxWordTy, Op.Frac % 32);

  for (int Dst = 0, End = WordP.size() - SkipWords; Dst != End; ++Dst) {
    int Src = Dst + SkipWords;
    Value *Lo = WordP[Src];
    if (Src + 1 < End) {
      Value *Hi = WordP[Src + 1];
      WordP[Dst] = Builder.CreateIntrinsic(HvxWordTy, Intrinsic::fshr,
                                           {Hi, Lo, ShiftAmt},
                                           /*FMFSource*/ nullptr, "int");
    } else {
      // The shift of the most significant word.
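      // Only the topmost word carries the sign, so an arithmetic shift
      // extends it correctly; all lower words received their upper bits
      // from the next word via the funnel shift above.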
      WordP[Dst] = Builder.CreateAShr(Lo, ShiftAmt, "asr");
    }
  }
  if (SkipWords != 0)
    WordP.resize(WordP.size() - SkipWords);

  return HVC.joinVectorElements(Builder, WordP, Op.ResTy);
}

auto HvxIdioms::createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y,
                             bool Rounding) const -> Value * {
  assert(X.Val->getType() == Y.Val->getType());
  assert(X.Val->getType()->getScalarType() == HVC.getIntTy(16));
  assert(HVC.HST.isHVXVectorType(EVT::getEVT(X.Val->getType(), false)));

  // There is no non-rounding intrinsic for i16.
  if (!Rounding || X.Sgn == Unsigned || Y.Sgn == Unsigned)
    return nullptr;

  auto V6_vmpyhvsrs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhvsrs);
  return HVC.createHvxIntrinsic(Builder, V6_vmpyhvsrs, X.Val->getType(),
                                {X.Val, Y.Val});
}

auto HvxIdioms::createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y,
                             bool Rounding) const -> Value * {
  Type *InpTy = X.Val->getType();
  assert(InpTy == Y.Val->getType());
  assert(InpTy->getScalarType() == HVC.getIntTy(32));
  assert(HVC.HST.isHVXVectorType(EVT::getEVT(InpTy, false)));

  if (X.Sgn == Unsigned || Y.Sgn == Unsigned)
    return nullptr;

  auto V6_vmpyewuh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyewuh);
  auto V6_vmpyo_acc = Rounding
                          ? HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_rnd_sacc)
                          : HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_sacc);
  Value *V1 =
      HVC.createHvxIntrinsic(Builder, V6_vmpyewuh, InpTy, {X.Val, Y.Val});
  return HVC.createHvxIntrinsic(Builder, V6_vmpyo_acc, InpTy,
                                {V1, X.Val, Y.Val});
}

auto HvxIdioms::createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
                               Value *CarryIn) const
    -> std::pair<Value *, Value *> {
  assert(X->getType() == Y->getType());
  auto VecTy = cast<VectorType>(X->getType());
  if (VecTy == HvxI32Ty && HVC.HST.useHVXV62Ops()) {
    SmallVector<Value *> Args = {X, Y};
    Intrinsic::ID AddCarry;
    if (CarryIn == nullptr && HVC.HST.useHVXV66Ops()) {
      AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarryo);
    } else {
      AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarry);
      if (CarryIn == nullptr)
        CarryIn = HVC.getNullValue(HVC.getBoolTy(HVC.length(VecTy)));
      Args.push_back(CarryIn);
    }
    Value *Ret = HVC.createHvxIntrinsic(Builder, AddCarry,
                                        /*RetTy=*/nullptr, Args);
    Value *Result = Builder.CreateExtractValue(Ret, {0}, "ext");
    Value *CarryOut = Builder.CreateExtractValue(Ret, {1}, "ext");
    return {Result, CarryOut};
  }

  // In other cases, do a regular add, and unsigned compare-less-than.
  // The carry-out can originate in two places: adding the carry-in or adding
  // the two input values.
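  // For unsigned values, A + B wraps iff the result is less than either
  // operand (e.g. 0xFFFFFFFF + 2 wraps to 1, and 1 ult 2). At most one of
  // the two additions below can wrap for a given element: if adding the
  // carry-in wraps, the intermediate sum is 0 and adding Y cannot wrap
  // again. The OR of the two comparisons is therefore the carry-out.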
  Value *Result1 = X; // Result1 = X + CarryIn
  if (CarryIn != nullptr) {
    unsigned Width = VecTy->getScalarSizeInBits();
    uint32_t Mask = 1;
    if (Width < 32) {
      for (unsigned i = 0, e = 32 / Width; i != e; ++i)
        Mask = (Mask << Width) | 1;
    }
    auto V6_vandqrt = HVC.HST.getIntrinsicId(Hexagon::V6_vandqrt);
    Value *ValueIn =
        HVC.createHvxIntrinsic(Builder, V6_vandqrt, /*RetTy=*/nullptr,
                               {CarryIn, HVC.getConstInt(Mask)});
    Result1 = Builder.CreateAdd(X, ValueIn, "add");
  }

  Value *CarryOut1 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result1, X, "cmp");
  Value *Result2 = Builder.CreateAdd(Result1, Y, "add");
  Value *CarryOut2 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result2, Y, "cmp");
  return {Result2, Builder.CreateOr(CarryOut1, CarryOut2, "orb")};
}

auto HvxIdioms::createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> Value * {
  Intrinsic::ID V6_vmpyh = 0;
  std::tie(X, Y) = canonSgn(X, Y);

  if (X.Sgn == Signed) {
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhv);
  } else if (Y.Sgn == Signed) {
    // In vmpyhus the second operand is unsigned.
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhus);
  } else {
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhv);
  }

  // i16*i16 -> i32 / interleaved
  Value *P =
      HVC.createHvxIntrinsic(Builder, V6_vmpyh, HvxP32Ty, {Y.Val, X.Val});
  // Deinterleave.
  return HVC.vshuff(Builder, HVC.sublo(Builder, P), HVC.subhi(Builder, P));
}

auto HvxIdioms::createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> Value * {
  Type *HvxI16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/false);

  if (HVC.HST.useHVXV69Ops()) {
    if (X.Sgn != Signed && Y.Sgn != Signed) {
      auto V6_vmpyuhvs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhvs);
      return HVC.createHvxIntrinsic(Builder, V6_vmpyuhvs, HvxI16Ty,
                                    {X.Val, Y.Val});
    }
  }

  Type *HvxP16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/true);
  Value *Pair16 =
      Builder.CreateBitCast(createMul16(Builder, X, Y), HvxP16Ty, "cst");
  unsigned Len = HVC.length(HvxP16Ty) / 2;

  SmallVector<int, 128> PickOdd(Len);
  for (int i = 0; i != static_cast<int>(Len); ++i)
    PickOdd[i] = 2 * i + 1;

  return Builder.CreateShuffleVector(
      HVC.sublo(Builder, Pair16), HVC.subhi(Builder, Pair16), PickOdd, "shf");
}

auto HvxIdioms::createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> std::pair<Value *, Value *> {
  assert(X.Val->getType() == Y.Val->getType());
  assert(X.Val->getType() == HvxI32Ty);

  Intrinsic::ID V6_vmpy_parts;
  std::tie(X, Y) = canonSgn(X, Y);

  if (X.Sgn == Signed) {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyss_parts;
  } else if (Y.Sgn == Signed) {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyus_parts;
  } else {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyuu_parts;
  }

  Value *Parts = HVC.createHvxIntrinsic(Builder, V6_vmpy_parts, nullptr,
                                        {X.Val, Y.Val}, {HvxI32Ty});
  Value *Hi = Builder.CreateExtractValue(Parts, {0}, "ext");
  Value *Lo = Builder.CreateExtractValue(Parts, {1}, "ext");
  return {Lo, Hi};
}

auto HvxIdioms::createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                              ArrayRef<Value *> WordY) const
    -> SmallVector<Value *> {
  assert(WordX.size() == WordY.size());
  unsigned Idx = 0, Length = WordX.size();
  SmallVector<Value *> Sum(Length);

  while (Idx != Length) {
    if (HVC.isZero(WordX[Idx]))
      Sum[Idx] = WordY[Idx];
    else if (HVC.isZero(WordY[Idx]))
      Sum[Idx] = WordX[Idx];
    else
      break;
    ++Idx;
  }

  Value *Carry = nullptr;
  for (; Idx != Length; ++Idx) {
    std::tie(Sum[Idx], Carry) =
        createAddCarry(Builder, WordX[Idx], WordY[Idx], Carry);
  }

  // This drops the final carry beyond the highest word.
  return Sum;
}

auto HvxIdioms::createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                              Signedness SgnX, ArrayRef<Value *> WordY,
                              Signedness SgnY) const -> SmallVector<Value *> {
  SmallVector<SmallVector<Value *>> Products(WordX.size() + WordY.size());

  // WordX[i] * WordY[j] produces words i+j and i+j+1 of the result,
  // that is halves 2(i+j), 2(i+j)+1, 2(i+j)+2, 2(i+j)+3.
  for (int i = 0, e = WordX.size(); i != e; ++i) {
    for (int j = 0, f = WordY.size(); j != f; ++j) {
      // Check the 4 halves that this multiplication can generate.
      Signedness SX = (i + 1 == e) ? SgnX : Unsigned;
      Signedness SY = (j + 1 == f) ? SgnY : Unsigned;
      auto [Lo, Hi] = createMul32(Builder, {WordX[i], SX}, {WordY[j], SY});
      Products[i + j + 0].push_back(Lo);
      Products[i + j + 1].push_back(Hi);
    }
  }

  Value *Zero = HVC.getNullValue(WordX[0]->getType());

  auto pop_back_or_zero = [Zero](auto &Vector) -> Value * {
    if (Vector.empty())
      return Zero;
    auto Last = Vector.back();
    Vector.pop_back();
    return Last;
  };

  for (int i = 0, e = Products.size(); i != e; ++i) {
    while (Products[i].size() > 1) {
      Value *Carry = nullptr; // No carry-in.
      for (int j = i; j != e; ++j) {
        auto &ProdJ = Products[j];
        auto [Sum, CarryOut] = createAddCarry(Builder, pop_back_or_zero(ProdJ),
                                              pop_back_or_zero(ProdJ), Carry);
        ProdJ.insert(ProdJ.begin(), Sum);
        Carry = CarryOut;
      }
    }
  }

  SmallVector<Value *> WordP;
  for (auto &P : Products) {
    assert(P.size() == 1 && "Should have been added together");
    WordP.push_back(P.front());
  }

  return WordP;
}

auto HvxIdioms::run() -> bool {
  bool Changed = false;

  for (BasicBlock &B : HVC.F) {
    for (auto It = B.rbegin(); It != B.rend(); ++It) {
      if (auto Fxm = matchFxpMul(*It)) {
        Value *New = processFxpMul(*It, *Fxm);
        // Always report "changed" for now.
        Changed = true;
        if (!New)
          continue;
        bool StartOver = !isa<Instruction>(New);
        It->replaceAllUsesWith(New);
        RecursivelyDeleteTriviallyDeadInstructions(&*It, &HVC.TLI);
        It = StartOver ? B.rbegin()
                       : cast<Instruction>(New)->getReverseIterator();
        Changed = true;
      }
    }
  }

  return Changed;
}

// --- End HvxIdioms

auto HexagonVectorCombine::run() -> bool {
  if (DumpModule)
    dbgs() << "Module before HexagonVectorCombine\n" << *F.getParent();

  bool Changed = false;
  if (HST.useHVXOps()) {
    if (VAEnabled)
      Changed |= AlignVectors(*this).run();
    if (VIEnabled)
      Changed |= HvxIdioms(*this).run();
  }

  if (DumpModule) {
    dbgs() << "Module " << (Changed ? "(modified)" : "(unchanged)")
           << " after HexagonVectorCombine\n"
           << *F.getParent();
  }
  return Changed;
}

auto HexagonVectorCombine::getIntTy(unsigned Width) const -> IntegerType * {
  return IntegerType::get(F.getContext(), Width);
}

auto HexagonVectorCombine::getByteTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *ByteTy = Type::getInt8Ty(F.getContext());
  if (ElemCount == 0)
    return ByteTy;
  return VectorType::get(ByteTy, ElemCount, /*Scalable=*/false);
}

auto HexagonVectorCombine::getBoolTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *BoolTy = Type::getInt1Ty(F.getContext());
  if (ElemCount == 0)
    return BoolTy;
  return VectorType::get(BoolTy, ElemCount, /*Scalable=*/false);
}

auto HexagonVectorCombine::getConstInt(int Val, unsigned Width) const
    -> ConstantInt * {
  return ConstantInt::getSigned(getIntTy(Width), Val);
}

auto HexagonVectorCombine::isZero(const Value *Val) const -> bool {
  if (auto *C = dyn_cast<Constant>(Val))
    return C->isZeroValue();
  return false;
}

auto HexagonVectorCombine::getIntValue(const Value *Val) const
    -> std::optional<APInt> {
  if (auto *CI = dyn_cast<ConstantInt>(Val))
    return CI->getValue();
  return std::nullopt;
}

auto HexagonVectorCombine::isUndef(const Value *Val) const -> bool {
  return isa<UndefValue>(Val);
}

auto HexagonVectorCombine::isTrue(const Value *Val) const -> bool {
  return Val == ConstantInt::getTrue(Val->getType());
}

auto HexagonVectorCombine::isFalse(const Value *Val) const -> bool {
  return isZero(Val);
}

auto HexagonVectorCombine::getHvxTy(Type *ElemTy, bool Pair) const
    -> VectorType * {
  EVT ETy = EVT::getEVT(ElemTy, false);
  assert(ETy.isSimple() && "Invalid HVX element type");
  // Do not allow boolean types here: they don't have a fixed length.
  assert(HST.isHVXElementType(ETy.getSimpleVT(), /*IncludeBool=*/false) &&
         "Invalid HVX element type");
  unsigned HwLen = HST.getVectorLength();
  unsigned NumElems = (8 * HwLen) / ETy.getSizeInBits();
  return VectorType::get(ElemTy, Pair ? 2 * NumElems : NumElems,
                         /*Scalable=*/false);
}

auto HexagonVectorCombine::getSizeOf(const Value *Val, SizeKind Kind) const
    -> int {
  return getSizeOf(Val->getType(), Kind);
}

auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
    -> int {
  auto *NcTy = const_cast<Type *>(Ty);
  switch (Kind) {
  case Store:
    return DL.getTypeStoreSize(NcTy).getFixedValue();
  case Alloc:
    return DL.getTypeAllocSize(NcTy).getFixedValue();
  }
  llvm_unreachable("Unhandled SizeKind enum");
}

auto HexagonVectorCombine::getTypeAlignment(Type *Ty) const -> int {
  // The actual type may be shorter than the HVX vector, so determine
  // the alignment based on subtarget info.
  if (HST.isTypeForHVX(Ty))
    return HST.getVectorLength();
  return DL.getABITypeAlign(Ty).value();
}

auto HexagonVectorCombine::length(Value *Val) const -> size_t {
  return length(Val->getType());
}

auto HexagonVectorCombine::length(Type *Ty) const -> size_t {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  assert(VecTy && "Must be a vector type");
  return VecTy->getElementCount().getFixedValue();
}

auto HexagonVectorCombine::getNullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Zero = ConstantInt::get(Ty->getScalarType(), 0);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Zero);
  return Zero;
}

auto HexagonVectorCombine::getFullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Minus1 = ConstantInt::get(Ty->getScalarType(), -1);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Minus1);
  return Minus1;
}

auto HexagonVectorCombine::getConstSplat(Type *Ty, int Val) const
    -> Constant * {
  assert(Ty->isVectorTy());
  auto VecTy = cast<VectorType>(Ty);
  Type *ElemTy = VecTy->getElementType();
  // TODO: Add support for floats if needed.
  auto *Splat = ConstantVector::getSplat(VecTy->getElementCount(),
                                         ConstantInt::get(ElemTy, Val));
  return Splat;
}

auto HexagonVectorCombine::simplify(Value *V) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(V)) {
    SimplifyQuery Q(DL, &TLI, &DT, &AC, In);
    return simplifyInstruction(In, Q);
  }
  return nullptr;
}

// Insert bytes [Start..Start+Length) of Src into Dst at byte Where.
auto HexagonVectorCombine::insertb(IRBuilderBase &Builder, Value *Dst,
                                   Value *Src, int Start, int Length,
                                   int Where) const -> Value * {
  assert(isByteVecTy(Dst->getType()) && isByteVecTy(Src->getType()));
  int SrcLen = getSizeOf(Src);
  int DstLen = getSizeOf(Dst);
  assert(0 <= Start && Start + Length <= SrcLen);
  assert(0 <= Where && Where + Length <= DstLen);

  int P2Len = PowerOf2Ceil(SrcLen | DstLen);
  auto *Undef = UndefValue::get(getByteTy());
  Value *P2Src = vresize(Builder, Src, P2Len, Undef);
  Value *P2Dst = vresize(Builder, Dst, P2Len, Undef);

  SmallVector<int, 256> SMask(P2Len);
  for (int i = 0; i != P2Len; ++i) {
    // If i is in [Where, Where+Length), pick Src[Start+(i-Where)].
    // Otherwise, pick Dst[i].
    SMask[i] =
        (Where <= i && i < Where + Length) ? P2Len + Start + (i - Where) : i;
  }

  Value *P2Insert = Builder.CreateShuffleVector(P2Dst, P2Src, SMask, "shf");
  return vresize(Builder, P2Insert, DstLen, Undef);
}

auto HexagonVectorCombine::vlalignb(IRBuilderBase &Builder, Value *Lo,
                                    Value *Hi, Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  if (isZero(Amt))
    return Hi;
  int VecLen = getSizeOf(Hi);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, VecLen - IntAmt->getSExtValue(),
                           VecLen);

  if (HST.isTypeForHVX(Hi->getType())) {
    assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
           "Expecting an exact HVX type");
    return createHvxIntrinsic(Builder,
                              HST.getIntrinsicId(Hexagon::V6_vlalignb),
                              Hi->getType(), {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift =
        Builder.CreateLShr(Builder.CreateShl(Pair, Amt, "shl"), 32, "lsr");
    Value *Trunc =
        Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()), "trn");
    return Builder.CreateBitCast(Trunc, Hi->getType(), "cst");
  }
  if (VecLen == 8) {
    Value *Sub = Builder.CreateSub(getConstInt(VecLen), Amt, "sub");
    return vralignb(Builder, Lo, Hi, Sub);
  }
  llvm_unreachable("Unexpected vector length");
}

auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo,
                                    Value *Hi, Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  if (isZero(Amt))
    return Lo;
  int VecLen = getSizeOf(Lo);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, IntAmt->getSExtValue(), VecLen);

  if (HST.isTypeForHVX(Lo->getType())) {
    assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
           "Expecting an exact HVX type");
    return createHvxIntrinsic(Builder, HST.getIntrinsicId(Hexagon::V6_valignb),
                              Lo->getType(), {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Pair, Amt, "lsr");
    Value *Trunc =
        Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()), "trn");
    return Builder.CreateBitCast(Trunc, Lo->getType(), "cst");
  }
  if (VecLen == 8) {
    Type *Int64Ty = Type::getInt64Ty(F.getContext());
    Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty, "cst");
    Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty, "cst");
    Function *FI = Intrinsic::getDeclaration(F.getParent(),
                                             Intrinsic::hexagon_S2_valignrb);
    Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt}, "cup");
    return Builder.CreateBitCast(Call, Lo->getType(), "cst");
  }
  llvm_unreachable("Unexpected vector length");
}

// Concatenates a sequence of vectors of the same type.
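// E.g. concat of three <4 x i8> values pads the list to {V0, V1, V2, undef},
// joins adjacent pairs into <8 x i8> shuffles, repeats until a single value
// remains, and finally trims the result back to <12 x i8>.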
auto HexagonVectorCombine::concat(IRBuilderBase &Builder,
                                  ArrayRef<Value *> Vecs) const -> Value * {
  assert(!Vecs.empty());
  SmallVector<int, 256> SMask;
  std::vector<Value *> Work[2];
  int ThisW = 0, OtherW = 1;

  Work[ThisW].assign(Vecs.begin(), Vecs.end());
  while (Work[ThisW].size() > 1) {
    auto *Ty = cast<VectorType>(Work[ThisW].front()->getType());
    SMask.resize(length(Ty) * 2);
    std::iota(SMask.begin(), SMask.end(), 0);

    Work[OtherW].clear();
    if (Work[ThisW].size() % 2 != 0)
      Work[ThisW].push_back(UndefValue::get(Ty));
    for (int i = 0, e = Work[ThisW].size(); i < e; i += 2) {
      Value *Joined = Builder.CreateShuffleVector(
          Work[ThisW][i], Work[ThisW][i + 1], SMask, "shf");
      Work[OtherW].push_back(Joined);
    }
    std::swap(ThisW, OtherW);
  }

  // Since there may have been some undefs appended to make shuffle operands
  // have the same type, perform the last shuffle to only pick the original
  // elements.
  SMask.resize(Vecs.size() * length(Vecs.front()->getType()));
  std::iota(SMask.begin(), SMask.end(), 0);
  Value *Total = Work[ThisW].front();
  return Builder.CreateShuffleVector(Total, SMask, "shf");
}

auto HexagonVectorCombine::vresize(IRBuilderBase &Builder, Value *Val,
                                   int NewSize, Value *Pad) const -> Value * {
  assert(isa<VectorType>(Val->getType()));
  auto *ValTy = cast<VectorType>(Val->getType());
  assert(ValTy->getElementType() == Pad->getType());

  int CurSize = length(ValTy);
  if (CurSize == NewSize)
    return Val;
  // Truncate?
  if (CurSize > NewSize)
    return getElementRange(Builder, Val, /*Ignored*/ Val, 0, NewSize);
  // Extend.
  SmallVector<int, 128> SMask(NewSize);
  std::iota(SMask.begin(), SMask.begin() + CurSize, 0);
  std::fill(SMask.begin() + CurSize, SMask.end(), CurSize);
  Value *PadVec = Builder.CreateVectorSplat(CurSize, Pad, "spt");
  return Builder.CreateShuffleVector(Val, PadVec, SMask, "shf");
}

auto HexagonVectorCombine::rescale(IRBuilderBase &Builder, Value *Mask,
                                   Type *FromTy, Type *ToTy) const -> Value * {
  // Mask is a vector <N x i1>, where each element corresponds to an
  // element of FromTy. Remap it so that each element will correspond
  // to an element of ToTy.
  assert(isa<VectorType>(Mask->getType()));

  Type *FromSTy = FromTy->getScalarType();
  Type *ToSTy = ToTy->getScalarType();
  if (FromSTy == ToSTy)
    return Mask;

  int FromSize = getSizeOf(FromSTy);
  int ToSize = getSizeOf(ToSTy);
  assert(FromSize % ToSize == 0 || ToSize % FromSize == 0);

  auto *MaskTy = cast<VectorType>(Mask->getType());
  int FromCount = length(MaskTy);
  int ToCount = (FromCount * FromSize) / ToSize;
  assert((FromCount * FromSize) % ToSize == 0);

  auto *FromITy = getIntTy(FromSize * 8);
  auto *ToITy = getIntTy(ToSize * 8);

  // Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
  // -> trunc to <M x i1>.
  Value *Ext = Builder.CreateSExt(
      Mask, VectorType::get(FromITy, FromCount, /*Scalable=*/false), "sxt");
  Value *Cast = Builder.CreateBitCast(
      Ext, VectorType::get(ToITy, ToCount, /*Scalable=*/false), "cst");
  return Builder.CreateTrunc(
      Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable=*/false), "trn");
}

// Bitcast to bytes, and return least significant bits.
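// E.g. a <4 x i32> input is viewed as <16 x i8> and truncated to <16 x i1>,
// keeping bit 0 of each byte.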
auto HexagonVectorCombine::vlsb(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getBoolTy())
    return Val;

  Value *Bytes = vbytes(Builder, Val);
  if (auto *VecTy = dyn_cast<VectorType>(Bytes->getType()))
    return Builder.CreateTrunc(Bytes, getBoolTy(getSizeOf(VecTy)), "trn");
  // If Bytes is a scalar (i.e. Val was a scalar byte), return i1, not
  // <1 x i1>.
  return Builder.CreateTrunc(Bytes, getBoolTy(), "trn");
}

// Bitcast to bytes for non-bool. For bool, convert i1 -> i8.
auto HexagonVectorCombine::vbytes(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getByteTy())
    return Val;

  if (ScalarTy != getBoolTy())
    return Builder.CreateBitCast(Val, getByteTy(getSizeOf(Val)), "cst");
  // For bool, return a sext from i1 to i8.
  if (auto *VecTy = dyn_cast<VectorType>(Val->getType()))
    return Builder.CreateSExt(Val, VectorType::get(getByteTy(), VecTy), "sxt");
  return Builder.CreateSExt(Val, getByteTy(), "sxt");
}

auto HexagonVectorCombine::subvector(IRBuilderBase &Builder, Value *Val,
                                     unsigned Start, unsigned Length) const
    -> Value * {
  assert(Start + Length <= length(Val));
  return getElementRange(Builder, Val, /*Ignored*/ Val, Start, Length);
}

auto HexagonVectorCombine::sublo(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  size_t Len = length(Val);
  assert(Len % 2 == 0 && "Length should be even");
  return subvector(Builder, Val, 0, Len / 2);
}

auto HexagonVectorCombine::subhi(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  size_t Len = length(Val);
  assert(Len % 2 == 0 && "Length should be even");
  return subvector(Builder, Val, Len / 2, Len / 2);
}

auto HexagonVectorCombine::vdeal(IRBuilderBase &Builder, Value *Val0,
                                 Value *Val1) const -> Value * {
  assert(Val0->getType() == Val1->getType());
  int Len = length(Val0);
  SmallVector<int, 128> Mask(2 * Len);

  for (int i = 0; i != Len; ++i) {
    Mask[i] = 2 * i;           // Even
    Mask[i + Len] = 2 * i + 1; // Odd
  }
  return Builder.CreateShuffleVector(Val0, Val1, Mask, "shf");
}

auto HexagonVectorCombine::vshuff(IRBuilderBase &Builder, Value *Val0,
                                  Value *Val1) const -> Value * { //
  assert(Val0->getType() == Val1->getType());
  int Len = length(Val0);
  SmallVector<int, 128> Mask(2 * Len);

  for (int i = 0; i != Len; ++i) {
    Mask[2 * i + 0] = i;       // Val0
    Mask[2 * i + 1] = i + Len; // Val1
  }
  return Builder.CreateShuffleVector(Val0, Val1, Mask, "shf");
}

auto HexagonVectorCombine::createHvxIntrinsic(IRBuilderBase &Builder,
                                              Intrinsic::ID IntID, Type *RetTy,
                                              ArrayRef<Value *> Args,
                                              ArrayRef<Type *> ArgTys,
                                              ArrayRef<Value *> MDSources) const
    -> Value * {
  auto getCast = [&](IRBuilderBase &Builder, Value *Val,
                     Type *DestTy) -> Value * {
    Type *SrcTy = Val->getType();
    if (SrcTy == DestTy)
      return Val;

    // Non-HVX type. It should be a scalar, and it should already have
    // a valid type.
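    // (In other words, a cast can only be needed between HVX vector types;
    // scalar arguments are expected to match the parameter type exactly.)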
    assert(HST.isTypeForHVX(SrcTy, /*IncludeBool=*/true));

    Type *BoolTy = Type::getInt1Ty(F.getContext());
    if (cast<VectorType>(SrcTy)->getElementType() != BoolTy)
      return Builder.CreateBitCast(Val, DestTy, "cst");

    // Predicate HVX vector.
    unsigned HwLen = HST.getVectorLength();
    Intrinsic::ID TC = HwLen == 64 ? Intrinsic::hexagon_V6_pred_typecast
                                   : Intrinsic::hexagon_V6_pred_typecast_128B;
    Function *FI =
        Intrinsic::getDeclaration(F.getParent(), TC, {DestTy, Val->getType()});
    return Builder.CreateCall(FI, {Val}, "cup");
  };

  Function *IntrFn = Intrinsic::getDeclaration(F.getParent(), IntID, ArgTys);
  FunctionType *IntrTy = IntrFn->getFunctionType();

  SmallVector<Value *, 4> IntrArgs;
  for (int i = 0, e = Args.size(); i != e; ++i) {
    Value *A = Args[i];
    Type *T = IntrTy->getParamType(i);
    if (A->getType() != T) {
      IntrArgs.push_back(getCast(Builder, A, T));
    } else {
      IntrArgs.push_back(A);
    }
  }
  StringRef MaybeName = !IntrTy->getReturnType()->isVoidTy() ? "cup" : "";
  CallInst *Call = Builder.CreateCall(IntrFn, IntrArgs, MaybeName);

  MemoryEffects ME = Call->getAttributes().getMemoryEffects();
  if (!ME.doesNotAccessMemory() && !ME.onlyAccessesInaccessibleMem())
    propagateMetadata(Call, MDSources);

  Type *CallTy = Call->getType();
  if (RetTy == nullptr || CallTy == RetTy)
    return Call;
  // Scalar types should have RetTy matching the call return type.
  assert(HST.isTypeForHVX(CallTy, /*IncludeBool=*/true));
  return getCast(Builder, Call, RetTy);
}

auto HexagonVectorCombine::splitVectorElements(IRBuilderBase &Builder,
                                               Value *Vec,
                                               unsigned ToWidth) const
    -> SmallVector<Value *> {
  // Break a vector of wide elements into a series of vectors with narrow
  // elements:
  //   (...c0:b0:a0, ...c1:b1:a1, ...c2:b2:a2, ...)
  // -->
  //   (a0, a1, a2, ...)  // lowest "ToWidth" bits
  //   (b0, b1, b2, ...)  // the next lowest...
  //   (c0, c1, c2, ...)  // ...
  //   ...
  //
  // The number of elements in each resulting vector is the same as
  // in the original vector.

  auto *VecTy = cast<VectorType>(Vec->getType());
  assert(VecTy->getElementType()->isIntegerTy());
  unsigned FromWidth = VecTy->getScalarSizeInBits();
  assert(isPowerOf2_32(ToWidth) && isPowerOf2_32(FromWidth));
  assert(ToWidth <= FromWidth && "Breaking up into wider elements?");
  unsigned NumResults = FromWidth / ToWidth;

  SmallVector<Value *> Results(NumResults);
  Results[0] = Vec;
  unsigned Length = length(VecTy);

  // Do it by splitting in half, since those operations correspond to deal
  // instructions.
  auto splitInHalf = [&](unsigned Begin, unsigned End, auto splitFunc) -> void {
    // Take V = Results[Begin], split it in L, H.
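    // (The vdeal below separates even and odd half-width elements: with the
    // little-endian element layout, L collects the low halves of the
    // original elements and H the high halves.)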
    // Store Results[Begin] = L, Results[(Begin+End)/2] = H.
    // Call itself recursively: split(Begin, Half), split(Half, End).
    if (Begin + 1 == End)
      return;

    Value *Val = Results[Begin];
    unsigned Width = Val->getType()->getScalarSizeInBits();

    auto *VTy = VectorType::get(getIntTy(Width / 2), 2 * Length, false);
    Value *VVal = Builder.CreateBitCast(Val, VTy, "cst");

    Value *Res = vdeal(Builder, sublo(Builder, VVal), subhi(Builder, VVal));

    unsigned Half = (Begin + End) / 2;
    Results[Begin] = sublo(Builder, Res);
    Results[Half] = subhi(Builder, Res);

    splitFunc(Begin, Half, splitFunc);
    splitFunc(Half, End, splitFunc);
  };

  splitInHalf(0, NumResults, splitInHalf);
  return Results;
}

auto HexagonVectorCombine::joinVectorElements(IRBuilderBase &Builder,
                                              ArrayRef<Value *> Values,
                                              VectorType *ToType) const
    -> Value * {
  assert(ToType->getElementType()->isIntegerTy());

  // If the list of values does not have power-of-2 elements, append copies
  // of the sign bit to it, to make the size be 2^n.
  // The reason for this is that the values will be joined in pairs, because
  // otherwise the shuffles will result in convoluted code. With pairwise
  // joins, the shuffles will hopefully be folded into a perfect shuffle.
  // The output will need to be sign-extended to a type with element width
  // being a power-of-2 anyway.
  SmallVector<Value *> Inputs(Values.begin(), Values.end());

  unsigned ToWidth = ToType->getScalarSizeInBits();
  unsigned Width = Inputs.front()->getType()->getScalarSizeInBits();
  assert(Width <= ToWidth);
  assert(isPowerOf2_32(Width) && isPowerOf2_32(ToWidth));
  unsigned Length = length(Inputs.front()->getType());

  unsigned NeedInputs = ToWidth / Width;
  if (Inputs.size() != NeedInputs) {
    // Having too many inputs is ok: drop the high bits (usual wrap-around).
    // If there are too few, fill them with the sign bit.
    Value *Last = Inputs.back();
    Value *Sign = Builder.CreateAShr(
        Last, getConstSplat(Last->getType(), Width - 1), "asr");
    Inputs.resize(NeedInputs, Sign);
  }

  while (Inputs.size() > 1) {
    Width *= 2;
    auto *VTy = VectorType::get(getIntTy(Width), Length, false);
    for (int i = 0, e = Inputs.size(); i < e; i += 2) {
      Value *Res = vshuff(Builder, Inputs[i], Inputs[i + 1]);
      Inputs[i / 2] = Builder.CreateBitCast(Res, VTy, "cst");
    }
    Inputs.resize(Inputs.size() / 2);
  }

  assert(Inputs.front()->getType() == ToType);
  return Inputs.front();
}

auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
                                                      Value *Ptr1) const
    -> std::optional<int> {
  // Try SCEV first.
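  // SCEV can fold a difference such as (Base + C0) - (Base + C1) to the
  // constant C0 - C1 even when Base itself is not a compile-time constant.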
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  const SCEV *Scev1 = SE.getSCEV(Ptr1);
  const SCEV *ScevDiff = SE.getMinusSCEV(Scev0, Scev1);
  if (auto *Const = dyn_cast<SCEVConstant>(ScevDiff)) {
    APInt V = Const->getAPInt();
    if (V.isSignedIntN(8 * sizeof(int)))
      return static_cast<int>(V.getSExtValue());
  }

  struct Builder : IRBuilder<> {
    Builder(BasicBlock *B) : IRBuilder<>(B->getTerminator()) {}
    ~Builder() {
      for (Instruction *I : llvm::reverse(ToErase))
        I->eraseFromParent();
    }
    SmallVector<Instruction *, 8> ToErase;
  };

#define CallBuilder(B, F)                                                      \
  [&](auto &B_) {                                                              \
    Value *V = B_.F;                                                           \
    if (auto *I = dyn_cast<Instruction>(V))                                    \
      B_.ToErase.push_back(I);                                                 \
    return V;                                                                  \
  }(B)

  auto Simplify = [this](Value *V) {
    if (Value *S = simplify(V))
      return S;
    return V;
  };

  auto StripBitCast = [](Value *V) {
    while (auto *C = dyn_cast<BitCastInst>(V))
      V = C->getOperand(0);
    return V;
  };

  Ptr0 = StripBitCast(Ptr0);
  Ptr1 = StripBitCast(Ptr1);
  if (!isa<GetElementPtrInst>(Ptr0) || !isa<GetElementPtrInst>(Ptr1))
    return std::nullopt;

  auto *Gep0 = cast<GetElementPtrInst>(Ptr0);
  auto *Gep1 = cast<GetElementPtrInst>(Ptr1);
  if (Gep0->getPointerOperand() != Gep1->getPointerOperand())
    return std::nullopt;
  if (Gep0->getSourceElementType() != Gep1->getSourceElementType())
    return std::nullopt;

  Builder B(Gep0->getParent());
  int Scale = getSizeOf(Gep0->getSourceElementType(), Alloc);

  // FIXME: for now only check GEPs with a single index.
  if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
    return std::nullopt;

  Value *Idx0 = Gep0->getOperand(1);
  Value *Idx1 = Gep1->getOperand(1);

  // First, try to simplify the subtraction directly.
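  // E.g. for indices i+2 and i, the subtraction simplifies to the constant
  // 2, and the byte difference is 2 * Scale.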
  if (auto *Diff = dyn_cast<ConstantInt>(
          Simplify(CallBuilder(B, CreateSub(Idx0, Idx1)))))
    return Diff->getSExtValue() * Scale;

  KnownBits Known0 = getKnownBits(Idx0, Gep0);
  KnownBits Known1 = getKnownBits(Idx1, Gep1);
  APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
  if (Unknown.isAllOnes())
    return std::nullopt;

  Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
  Value *AndU0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskU)));
  Value *AndU1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskU)));
  Value *SubU = Simplify(CallBuilder(B, CreateSub(AndU0, AndU1)));
  int Diff0 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubU)) {
    Diff0 = C->getSExtValue();
  } else {
    return std::nullopt;
  }

  Value *MaskK = ConstantInt::get(MaskU->getType(), ~Unknown);
  Value *AndK0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskK)));
  Value *AndK1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskK)));
  Value *SubK = Simplify(CallBuilder(B, CreateSub(AndK0, AndK1)));
  int Diff1 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubK)) {
    Diff1 = C->getSExtValue();
  } else {
    return std::nullopt;
  }

  return (Diff0 + Diff1) * Scale;

#undef CallBuilder
}

auto HexagonVectorCombine::getNumSignificantBits(const Value *V,
                                                 const Instruction *CtxI) const
    -> unsigned {
  return ComputeMaxSignificantBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
}

auto HexagonVectorCombine::getKnownBits(const Value *V,
                                        const Instruction *CtxI) const
    -> KnownBits {
  return computeKnownBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
}

auto HexagonVectorCombine::isSafeToClone(const Instruction &In) const -> bool {
  if (In.mayHaveSideEffects() || In.isAtomic() || In.isVolatile() ||
      In.isFenceLike() || In.mayReadOrWriteMemory()) {
    return false;
  }
  if (isa<CallBase>(In) || isa<AllocaInst>(In))
    return false;
  return true;
}

template <typename T>
auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
                                                  BasicBlock::const_iterator To,
                                                  const T &IgnoreInsts) const
    -> bool {
  auto getLocOrNone =
      [this](const Instruction &I) -> std::optional<MemoryLocation> {
    if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        return MemoryLocation::getForArgument(II, 0, TLI);
      case Intrinsic::masked_store:
        return MemoryLocation::getForArgument(II, 1, TLI);
      }
    }
    return MemoryLocation::getOrNone(&I);
  };

  // The source and the destination must be in the same basic block.
  const BasicBlock &Block = *In.getParent();
  assert(Block.begin() == To || Block.end() == To || To->getParent() == &Block);
  // No PHIs.
  if (isa<PHINode>(In) || (To != Block.end() && isa<PHINode>(*To)))
    return false;

  if (!mayHaveNonDefUseDependency(In))
    return true;
  bool MayWrite = In.mayWriteToMemory();
  auto MaybeLoc = getLocOrNone(In);

  auto From = In.getIterator();
  if (From == To)
    return true;
  bool MoveUp = (To != Block.end() && To->comesBefore(&In));
  auto Range =
      MoveUp ? std::make_pair(To, From) : std::make_pair(std::next(From), To);
  for (auto It = Range.first; It != Range.second; ++It) {
    const Instruction &I = *It;
    if (llvm::is_contained(IgnoreInsts, &I))
      continue;
    // The assume intrinsic can be ignored.
    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::assume)
        continue;
    }
    // Parts based on isSafeToMoveBefore from CodeMoverUtils.cpp.
    if (I.mayThrow())
      return false;
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (!CB->hasFnAttr(Attribute::WillReturn))
        return false;
      if (!CB->hasFnAttr(Attribute::NoSync))
        return false;
    }
    if (I.mayReadOrWriteMemory()) {
      auto MaybeLocI = getLocOrNone(I);
      if (MayWrite || I.mayWriteToMemory()) {
        if (!MaybeLoc || !MaybeLocI)
          return false;
        if (!AA.isNoAlias(*MaybeLoc, *MaybeLocI))
          return false;
      }
    }
  }
  return true;
}

auto HexagonVectorCombine::isByteVecTy(Type *Ty) const -> bool {
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return VecTy->getElementType() == getByteTy();
  return false;
}

auto HexagonVectorCombine::getElementRange(IRBuilderBase &Builder, Value *Lo,
                                           Value *Hi, int Start,
                                           int Length) const -> Value * {
  assert(0 <= Start && size_t(Start + Length) < length(Lo) + length(Hi));
  SmallVector<int, 128> SMask(Length);
  std::iota(SMask.begin(), SMask.end(), Start);
  return Builder.CreateShuffleVector(Lo, Hi, SMask, "shf");
}

// Pass management.

namespace llvm {
void initializeHexagonVectorCombineLegacyPass(PassRegistry &);
FunctionPass *createHexagonVectorCombineLegacyPass();
} // namespace llvm

namespace {
class HexagonVectorCombineLegacy : public FunctionPass {
public:
  static char ID;

  HexagonVectorCombineLegacy() : FunctionPass(ID) {}

  StringRef getPassName() const override { return "Hexagon Vector Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TM = getAnalysis<TargetPassConfig>().getTM<HexagonTargetMachine>();
    HexagonVectorCombine HVC(F, AA, AC, DT, SE, TLI, TM);
    return HVC.run();
  }
};
} // namespace

char HexagonVectorCombineLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonVectorCombineLegacy, DEBUG_TYPE,
                      "Hexagon Vector Combine", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(HexagonVectorCombineLegacy, DEBUG_TYPE,
                    "Hexagon Vector Combine", false, false)

FunctionPass *llvm::createHexagonVectorCombineLegacyPass() {
  return new HexagonVectorCombineLegacy();
}