//===-- HexagonVectorCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// HexagonVectorCombine is a utility class implementing a variety of functions
// that assist in vector-based optimizations.
//
// AlignVectors: replace unaligned vector loads and stores with aligned ones.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"

#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"

#include <algorithm>
#include <deque>
#include <map>
#include <optional>
#include <set>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-vc"

using namespace llvm;

namespace {
class HexagonVectorCombine {
public:
  HexagonVectorCombine(Function &F_, AliasAnalysis &AA_, AssumptionCache &AC_,
                       DominatorTree &DT_, TargetLibraryInfo &TLI_,
                       const TargetMachine &TM_)
      : F(F_), DL(F.getParent()->getDataLayout()), AA(AA_), AC(AC_), DT(DT_),
        TLI(TLI_),
        HST(static_cast<const HexagonSubtarget &>(*TM_.getSubtargetImpl(F))) {}

  bool run();

  // Common integer type.
  IntegerType *getIntTy(unsigned Width = 32) const;
  // Byte type: either scalar (when ElemCount = 0), or vector with the given
  // element count.
  Type *getByteTy(int ElemCount = 0) const;
  // Boolean type: either scalar (when ElemCount = 0), or vector with the
  // given element count.
  Type *getBoolTy(int ElemCount = 0) const;
  // Create a ConstantInt of type returned by getIntTy with the value Val.
  ConstantInt *getConstInt(int Val, unsigned Width = 32) const;
  // Get the integer value of Val, if it exists.
  std::optional<APInt> getIntValue(const Value *Val) const;
  // Is Val a constant 0, or a vector of 0s?
  bool isZero(const Value *Val) const;
  // Is Val an undef value?
  bool isUndef(const Value *Val) const;

  // Get HVX vector type with the given element type.
  VectorType *getHvxTy(Type *ElemTy, bool Pair = false) const;

  enum SizeKind {
    Store, // Store size
    Alloc, // Alloc size
  };
  int getSizeOf(const Value *Val, SizeKind Kind = Store) const;
  int getSizeOf(const Type *Ty, SizeKind Kind = Store) const;
  int getTypeAlignment(Type *Ty) const;
  size_t length(Value *Val) const;
  size_t length(Type *Ty) const;

  Constant *getNullValue(Type *Ty) const;
  Constant *getFullValue(Type *Ty) const;
  Constant *getConstSplat(Type *Ty, int Val) const;

  Value *simplify(Value *Val) const;

  Value *insertb(IRBuilderBase &Builder, Value *Dest, Value *Src, int Start,
                 int Length, int Where) const;
  Value *vlalignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *vralignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *concat(IRBuilderBase &Builder, ArrayRef<Value *> Vecs) const;
  Value *vresize(IRBuilderBase &Builder, Value *Val, int NewSize,
                 Value *Pad) const;
  Value *rescale(IRBuilderBase &Builder, Value *Mask, Type *FromTy,
                 Type *ToTy) const;
  Value *vlsb(IRBuilderBase &Builder, Value *Val) const;
  Value *vbytes(IRBuilderBase &Builder, Value *Val) const;
  Value *subvector(IRBuilderBase &Builder, Value *Val, unsigned Start,
                   unsigned Length) const;
  Value *sublo(IRBuilderBase &Builder, Value *Val) const;
  Value *subhi(IRBuilderBase &Builder, Value *Val) const;
  Value *vdeal(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;
  Value *vshuff(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;

  Value *createHvxIntrinsic(IRBuilderBase &Builder, Intrinsic::ID IntID,
                            Type *RetTy, ArrayRef<Value *> Args,
                            ArrayRef<Type *> ArgTys = std::nullopt) const;
  SmallVector<Value *> splitVectorElements(IRBuilderBase &Builder, Value *Vec,
                                           unsigned ToWidth) const;
  Value *joinVectorElements(IRBuilderBase &Builder, ArrayRef<Value *> Values,
                            VectorType *ToType) const;

  std::optional<int> calculatePointerDifference(Value *Ptr0, Value *Ptr1) const;

  unsigned getNumSignificantBits(const Value *V,
                                 const Instruction *CtxI = nullptr) const;
  KnownBits getKnownBits(const Value *V,
                         const Instruction *CtxI = nullptr) const;

  template <typename T = std::vector<Instruction *>>
  bool isSafeToMoveBeforeInBB(const Instruction &In,
                              BasicBlock::const_iterator To,
                              const T &IgnoreInsts = {}) const;

  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isByteVecTy(Type *Ty) const;

  Function &F;
  const DataLayout &DL;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  TargetLibraryInfo &TLI;
  const HexagonSubtarget &HST;

private:
  Value *getElementRange(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                         int Start, int Length) const;
};

class AlignVectors {
public:
  AlignVectors(const HexagonVectorCombine &HVC_) : HVC(HVC_) {}

  bool run();

private:
  using InstList = std::vector<Instruction *>;

  struct Segment {
    void *Data;
    int Start;
    int Size;
  };

  struct AddrInfo {
    AddrInfo(const AddrInfo &) = default;
    AddrInfo(const HexagonVectorCombine &HVC, Instruction *I, Value *A, Type *T,
             Align H)
        : Inst(I), Addr(A), ValTy(T), HaveAlign(H),
          NeedAlign(HVC.getTypeAlignment(ValTy)) {}
    AddrInfo &operator=(const AddrInfo &) = default;

    // XXX: add Size member?
    Instruction *Inst;
    Value *Addr;
    Type *ValTy;
    Align HaveAlign;
    Align NeedAlign;
    int Offset = 0; // Offset (in bytes) from the first member of the
                    // containing AddrList.
  };
  using AddrList = std::vector<AddrInfo>;

  struct InstrLess {
    bool operator()(const Instruction *A, const Instruction *B) const {
      return A->comesBefore(B);
    }
  };
  using DepList = std::set<Instruction *, InstrLess>;

  struct MoveGroup {
    MoveGroup(const AddrInfo &AI, Instruction *B, bool Hvx, bool Load)
        : Base(B), Main{AI.Inst}, IsHvx(Hvx), IsLoad(Load) {}
    Instruction *Base; // Base instruction of the parent address group.
    InstList Main;     // Main group of instructions.
    InstList Deps;     // List of dependencies.
    bool IsHvx;        // Is this a group of HVX instructions?
    bool IsLoad;       // Is this a load group?
  };
  using MoveList = std::vector<MoveGroup>;

  struct ByteSpan {
    struct Segment {
      // Segment of a Value: 'Len' bytes starting at byte 'Begin'.
      Segment(Value *Val, int Begin, int Len)
          : Val(Val), Start(Begin), Size(Len) {}
      Segment(const Segment &Seg) = default;
      Segment &operator=(const Segment &Seg) = default;
      Value *Val; // Value representable as a sequence of bytes.
      int Start;  // First byte of the value that belongs to the segment.
      int Size;   // Number of bytes in the segment.
    };

    struct Block {
      Block(Value *Val, int Len, int Pos) : Seg(Val, 0, Len), Pos(Pos) {}
      Block(Value *Val, int Off, int Len, int Pos)
          : Seg(Val, Off, Len), Pos(Pos) {}
      Block(const Block &Blk) = default;
      Block &operator=(const Block &Blk) = default;
      Segment Seg; // Value segment.
      int Pos;     // Position (offset) of the segment in the Block.
    };

    int extent() const;
    ByteSpan section(int Start, int Length) const;
    ByteSpan &shift(int Offset);
    SmallVector<Value *, 8> values() const;

    int size() const { return Blocks.size(); }
    Block &operator[](int i) { return Blocks[i]; }

    std::vector<Block> Blocks;

    using iterator = decltype(Blocks)::iterator;
    iterator begin() { return Blocks.begin(); }
    iterator end() { return Blocks.end(); }
    using const_iterator = decltype(Blocks)::const_iterator;
    const_iterator begin() const { return Blocks.begin(); }
    const_iterator end() const { return Blocks.end(); }
  };
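
  // For illustration only (values invented): a ByteSpan describing two
  // adjacent 64-byte loads, the second starting 64 bytes into the span,
  // would contain two blocks:
  //   Blocks[0] = { Seg = {Val = load0, Start = 0, Size = 64}, Pos = 0 }
  //   Blocks[1] = { Seg = {Val = load1, Start = 0, Size = 64}, Pos = 64 }
  // extent() would be 128, and section(32, 64) would return a span holding
  // the last 32 bytes of load0 (at Pos 32) and the first 32 bytes of load1
  // (at Pos 64).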

  Align getAlignFromValue(const Value *V) const;
  std::optional<MemoryLocation> getLocation(const Instruction &In) const;
  std::optional<AddrInfo> getAddrInfo(Instruction &In) const;
  bool isHvx(const AddrInfo &AI) const;
  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isSectorTy(Type *Ty) const;

  Value *getPayload(Value *Val) const;
  Value *getMask(Value *Val) const;
  Value *getPassThrough(Value *Val) const;

  Value *createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                               int Adjust) const;
  Value *createAlignedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                              int Alignment) const;
  Value *createAlignedLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                           int Alignment, Value *Mask, Value *PassThru) const;
  Value *createAlignedStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                            int Alignment, Value *Mask) const;

  DepList getUpwardDeps(Instruction *In, Instruction *Base) const;
  bool createAddressGroups();
  MoveList createLoadGroups(const AddrList &Group) const;
  MoveList createStoreGroups(const AddrList &Group) const;
  bool move(const MoveGroup &Move) const;
  void realignLoadGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int ScLen, Value *AlignVal, Value *AlignAddr) const;
  void realignStoreGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                         int ScLen, Value *AlignVal, Value *AlignAddr) const;
  bool realignGroup(const MoveGroup &Move) const;

  friend raw_ostream &operator<<(raw_ostream &OS, const AddrInfo &AI);
  friend raw_ostream &operator<<(raw_ostream &OS, const MoveGroup &MG);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan::Block &B);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan &BS);

  std::map<Instruction *, AddrList> AddrGroups;
  const HexagonVectorCombine &HVC;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::AddrInfo &AI) {
  OS << "Inst: " << AI.Inst << " " << *AI.Inst << '\n';
  OS << "Addr: " << *AI.Addr << '\n';
  OS << "Type: " << *AI.ValTy << '\n';
  OS << "HaveAlign: " << AI.HaveAlign.value() << '\n';
  OS << "NeedAlign: " << AI.NeedAlign.value() << '\n';
  OS << "Offset: " << AI.Offset;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::MoveGroup &MG) {
  OS << "Main\n";
  for (Instruction *I : MG.Main)
    OS << " " << *I << '\n';
  OS << "Deps\n";
  for (Instruction *I : MG.Deps)
    OS << " " << *I << '\n';
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS,
                        const AlignVectors::ByteSpan::Block &B) {
  OS << " @" << B.Pos << " [" << B.Seg.Start << ',' << B.Seg.Size << "] "
     << *B.Seg.Val;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::ByteSpan &BS) {
  OS << "ByteSpan[size=" << BS.size() << ", extent=" << BS.extent() << '\n';
  for (const AlignVectors::ByteSpan::Block &B : BS)
    OS << B << '\n';
  OS << ']';
  return OS;
}

class HvxIdioms {
public:
  HvxIdioms(const HexagonVectorCombine &HVC_) : HVC(HVC_) {
    auto *Int32Ty = HVC.getIntTy(32);
    HvxI32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/false);
    HvxP32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/true);
  }

  bool run();

private:
  enum Signedness { Positive, Signed, Unsigned };

  // Value + sign
  // This is to keep track of whether the value should be treated as signed
  // or unsigned, or is known to be positive.
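  // For example (illustrative only): an i16 vector whose elements are known
  // to be in [0, 255] gets 8 significant bits and is Unsigned; one with
  // elements in [0, 63] gets 7 significant bits and is Positive, since the
  // top bit of its rounded-up 8-bit form is known zero, so it can be
  // multiplied with either signed or unsigned instructions.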
  struct SValue {
    Value *Val;
    Signedness Sgn;
  };

  struct FxpOp {
    unsigned Opcode;
    unsigned Frac; // Number of fraction bits
    SValue X, Y;
    // If present, add 1 << RoundAt before shift:
    std::optional<unsigned> RoundAt;
    VectorType *ResTy;
  };

  auto getNumSignificantBits(Value *V, Instruction *In) const
      -> std::pair<unsigned, Signedness>;
  auto canonSgn(SValue X, SValue Y) const -> std::pair<SValue, SValue>;

  auto matchFxpMul(Instruction &In) const -> std::optional<FxpOp>;
  auto processFxpMul(Instruction &In, const FxpOp &Op) const -> Value *;

  auto processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
                            const FxpOp &Op) const -> Value *;
  auto createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  auto createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  // Return {Result, Carry}, where Carry is a vector predicate.
  auto createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
                      Value *CarryIn = nullptr) const
      -> std::pair<Value *, Value *>;
  auto createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const -> Value *;
  auto createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> Value *;
  auto createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> std::pair<Value *, Value *>;
  auto createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     ArrayRef<Value *> WordY) const -> SmallVector<Value *>;
  auto createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     Signedness SgnX, ArrayRef<Value *> WordY,
                     Signedness SgnY) const -> SmallVector<Value *>;

  VectorType *HvxI32Ty;
  VectorType *HvxP32Ty;
  const HexagonVectorCombine &HVC;

  friend raw_ostream &operator<<(raw_ostream &, const FxpOp &);
};

[[maybe_unused]] raw_ostream &operator<<(raw_ostream &OS,
                                         const HvxIdioms::FxpOp &Op) {
  static const char *SgnNames[] = {"Positive", "Signed", "Unsigned"};
  OS << Instruction::getOpcodeName(Op.Opcode) << '.' << Op.Frac;
  if (Op.RoundAt.has_value()) {
    if (Op.Frac != 0 && *Op.RoundAt == Op.Frac - 1) {
      OS << ":rnd";
    } else {
      OS << " + 1<<" << *Op.RoundAt;
    }
  }
  OS << "\n X:(" << SgnNames[Op.X.Sgn] << ") " << *Op.X.Val << "\n"
     << " Y:(" << SgnNames[Op.Y.Sgn] << ") " << *Op.Y.Val;
  return OS;
}

} // namespace

namespace {

template <typename T> T *getIfUnordered(T *MaybeT) {
  return MaybeT && MaybeT->isUnordered() ? MaybeT : nullptr;
}
template <typename T> T *isCandidate(Instruction *In) {
  return dyn_cast<T>(In);
}
template <> LoadInst *isCandidate<LoadInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<LoadInst>(In));
}
template <> StoreInst *isCandidate<StoreInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<StoreInst>(In));
}

#if !defined(_MSC_VER) || _MSC_VER >= 1926
// VS2017 and some versions of VS2019 have trouble compiling this:
//   error C2976: 'std::map': too few template arguments
// VS 2019 16.x is known to work, except for 16.4/16.5 (MSC_VER 1924/1925)
template <typename Pred, typename... Ts>
void erase_if(std::map<Ts...> &map, Pred p)
#else
template <typename Pred, typename T, typename U>
void erase_if(std::map<T, U> &map, Pred p)
#endif
{
  for (auto i = map.begin(), e = map.end(); i != e;) {
    if (p(*i))
      i = map.erase(i);
    else
      i = std::next(i);
  }
}

// Forward other erase_ifs to the LLVM implementations.
template <typename Pred, typename T> void erase_if(T &&container, Pred p) {
  llvm::erase_if(std::forward<T>(container), p);
}

} // namespace

// --- Begin AlignVectors

auto AlignVectors::ByteSpan::extent() const -> int {
  if (size() == 0)
    return 0;
  int Min = Blocks[0].Pos;
  int Max = Blocks[0].Pos + Blocks[0].Seg.Size;
  for (int i = 1, e = size(); i != e; ++i) {
    Min = std::min(Min, Blocks[i].Pos);
    Max = std::max(Max, Blocks[i].Pos + Blocks[i].Seg.Size);
  }
  return Max - Min;
}

auto AlignVectors::ByteSpan::section(int Start, int Length) const -> ByteSpan {
  ByteSpan Section;
  for (const ByteSpan::Block &B : Blocks) {
    int L = std::max(B.Pos, Start);                       // Left end.
    int R = std::min(B.Pos + B.Seg.Size, Start + Length); // Right end+1.
    if (L < R) {
      // How much to chop off the beginning of the segment:
      int Off = L > B.Pos ? L - B.Pos : 0;
      Section.Blocks.emplace_back(B.Seg.Val, B.Seg.Start + Off, R - L, L);
    }
  }
  return Section;
}

auto AlignVectors::ByteSpan::shift(int Offset) -> ByteSpan & {
  for (Block &B : Blocks)
    B.Pos += Offset;
  return *this;
}

auto AlignVectors::ByteSpan::values() const -> SmallVector<Value *, 8> {
  SmallVector<Value *, 8> Values(Blocks.size());
  for (int i = 0, e = Blocks.size(); i != e; ++i)
    Values[i] = Blocks[i].Seg.Val;
  return Values;
}

auto AlignVectors::getAlignFromValue(const Value *V) const -> Align {
  const auto *C = dyn_cast<ConstantInt>(V);
  assert(C && "Alignment must be a compile-time constant integer");
  return C->getAlignValue();
}

auto AlignVectors::getAddrInfo(Instruction &In) const
    -> std::optional<AddrInfo> {
  if (auto *L = isCandidate<LoadInst>(&In))
    return AddrInfo(HVC, L, L->getPointerOperand(), L->getType(),
                    L->getAlign());
  if (auto *S = isCandidate<StoreInst>(&In))
    return AddrInfo(HVC, S, S->getPointerOperand(),
                    S->getValueOperand()->getType(), S->getAlign());
  if (auto *II = isCandidate<IntrinsicInst>(&In)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    switch (ID) {
    case Intrinsic::masked_load:
      return AddrInfo(HVC, II, II->getArgOperand(0), II->getType(),
                      getAlignFromValue(II->getArgOperand(1)));
    case Intrinsic::masked_store:
      return AddrInfo(HVC, II, II->getArgOperand(1),
                      II->getArgOperand(0)->getType(),
                      getAlignFromValue(II->getArgOperand(2)));
    }
  }
  return std::nullopt;
}

auto AlignVectors::isHvx(const AddrInfo &AI) const -> bool {
  return HVC.HST.isTypeForHVX(AI.ValTy);
}

auto AlignVectors::getPayload(Value *Val) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(Val)) {
    Intrinsic::ID ID = 0;
    if (auto *II = dyn_cast<IntrinsicInst>(In))
      ID = II->getIntrinsicID();
    if (isa<StoreInst>(In) || ID == Intrinsic::masked_store)
      return In->getOperand(0);
  }
  return Val;
}
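
// Operand layout of the masked intrinsics, for reference (this matches the
// LLVM definitions; getMask/getPassThrough below index into it):
//   llvm.masked.load(%ptr, i32 %align, <N x i1> %mask, <N x T> %passthru)
//   llvm.masked.store(<N x T> %value, %ptr, i32 %align, <N x i1> %mask)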

auto AlignVectors::getMask(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II->getArgOperand(2);
    case Intrinsic::masked_store:
      return II->getArgOperand(3);
    }
  }

  Type *ValTy = getPayload(Val)->getType();
  if (auto *VecTy = dyn_cast<VectorType>(ValTy))
    return HVC.getFullValue(HVC.getBoolTy(HVC.length(VecTy)));
  return HVC.getFullValue(HVC.getBoolTy());
}

auto AlignVectors::getPassThrough(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load)
      return II->getArgOperand(3);
  }
  return UndefValue::get(getPayload(Val)->getType());
}

auto AlignVectors::createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr,
                                         Type *ValTy, int Adjust) const
    -> Value * {
  // The adjustment is in bytes, but if it's a multiple of the type size,
  // we don't need to do pointer casts.
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  if (!PtrTy->isOpaque()) {
    Type *ElemTy = PtrTy->getNonOpaquePointerElementType();
    int ElemSize = HVC.getSizeOf(ElemTy, HVC.Alloc);
    if (Adjust % ElemSize == 0 && Adjust != 0) {
      Value *Tmp0 =
          Builder.CreateGEP(ElemTy, Ptr, HVC.getConstInt(Adjust / ElemSize));
      return Builder.CreatePointerCast(Tmp0, ValTy->getPointerTo());
    }
  }

  PointerType *CharPtrTy = Type::getInt8PtrTy(HVC.F.getContext());
  Value *Tmp0 = Builder.CreatePointerCast(Ptr, CharPtrTy);
  Value *Tmp1 = Builder.CreateGEP(Type::getInt8Ty(HVC.F.getContext()), Tmp0,
                                  HVC.getConstInt(Adjust));
  return Builder.CreatePointerCast(Tmp1, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedPointer(IRBuilderBase &Builder, Value *Ptr,
                                        Type *ValTy, int Alignment) const
    -> Value * {
  Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy());
  Value *Mask = HVC.getConstInt(-Alignment);
  Value *And = Builder.CreateAnd(AsInt, Mask);
  return Builder.CreateIntToPtr(And, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedLoad(IRBuilderBase &Builder, Type *ValTy,
                                     Value *Ptr, int Alignment, Value *Mask,
                                     Value *PassThru) const -> Value * {
  assert(!HVC.isUndef(Mask)); // Should this be allowed?
  if (HVC.isZero(Mask))
    return PassThru;
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment));
  return Builder.CreateMaskedLoad(ValTy, Ptr, Align(Alignment), Mask, PassThru);
}

auto AlignVectors::createAlignedStore(IRBuilderBase &Builder, Value *Val,
                                      Value *Ptr, int Alignment,
                                      Value *Mask) const -> Value * {
  if (HVC.isZero(Mask) || HVC.isUndef(Val) || HVC.isUndef(Mask))
    return UndefValue::get(Val->getType());
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedStore(Val, Ptr, Align(Alignment));
  return Builder.CreateMaskedStore(Val, Ptr, Align(Alignment), Mask);
}

auto AlignVectors::getUpwardDeps(Instruction *In, Instruction *Base) const
    -> DepList {
  BasicBlock *Parent = Base->getParent();
  assert(In->getParent() == Parent &&
         "Base and In should be in the same block");
  assert(Base->comesBefore(In) && "Base should come before In");

  DepList Deps;
  std::deque<Instruction *> WorkQ = {In};
  while (!WorkQ.empty()) {
    Instruction *D = WorkQ.front();
    WorkQ.pop_front();
    Deps.insert(D);
    for (Value *Op : D->operands()) {
      if (auto *I = dyn_cast<Instruction>(Op)) {
        if (I->getParent() == Parent && Base->comesBefore(I))
          WorkQ.push_back(I);
      }
    }
  }
  return Deps;
}

auto AlignVectors::createAddressGroups() -> bool {
  // An address group created here may contain instructions spanning
  // multiple basic blocks.
  AddrList WorkStack;

  auto findBaseAndOffset = [&](AddrInfo &AI) -> std::pair<Instruction *, int> {
    for (AddrInfo &W : WorkStack) {
      if (auto D = HVC.calculatePointerDifference(AI.Addr, W.Addr))
        return std::make_pair(W.Inst, *D);
    }
    return std::make_pair(nullptr, 0);
  };

  auto traverseBlock = [&](DomTreeNode *DomN, auto Visit) -> void {
    BasicBlock &Block = *DomN->getBlock();
    for (Instruction &I : Block) {
      auto AI = this->getAddrInfo(I); // Use this-> for gcc6.
      if (!AI)
        continue;
      auto F = findBaseAndOffset(*AI);
      Instruction *GroupInst;
      if (Instruction *BI = F.first) {
        AI->Offset = F.second;
        GroupInst = BI;
      } else {
        WorkStack.push_back(*AI);
        GroupInst = AI->Inst;
      }
      AddrGroups[GroupInst].push_back(*AI);
    }

    for (DomTreeNode *C : DomN->children())
      Visit(C, Visit);

    while (!WorkStack.empty() && WorkStack.back().Inst->getParent() == &Block)
      WorkStack.pop_back();
  };

  traverseBlock(HVC.DT.getRootNode(), traverseBlock);
  assert(WorkStack.empty());

  // AddrGroups are formed.

  // Remove groups of size 1.
  erase_if(AddrGroups, [](auto &G) { return G.second.size() == 1; });
  // Remove groups that don't use HVX types.
  erase_if(AddrGroups, [&](auto &G) {
    return llvm::none_of(
        G.second, [&](auto &I) { return HVC.HST.isTypeForHVX(I.ValTy); });
  });

  return !AddrGroups.empty();
}

auto AlignVectors::createLoadGroups(const AddrList &Group) const -> MoveList {
  // Form load groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // Leading instruction in the load group.
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;

    auto isSafeToMoveToBase = [&](const Instruction *I) {
      return HVC.isSafeToMoveBeforeInBB(*I, Base->getIterator());
    };
    DepList Deps = getUpwardDeps(Info.Inst, Base);
    if (!llvm::all_of(Deps, isSafeToMoveToBase))
      return false;

    // The dependencies will be moved together with the load, so make sure
    // that none of them could be moved independently in another group.
    Deps.erase(Info.Inst);
    auto inAddrMap = [&](Instruction *I) { return AddrGroups.count(I) > 0; };
    if (llvm::any_of(Deps, inAddrMap))
      return false;
    Move.Main.push_back(Info.Inst);
    llvm::append_range(Move.Deps, Deps);
    return true;
  };

  MoveList LoadGroups;

  for (const AddrInfo &Info : Group) {
    if (!Info.Inst->mayReadFromMemory())
      continue;
    if (LoadGroups.empty() || !tryAddTo(Info, LoadGroups.back()))
      LoadGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), true);
  }

  // Erase singleton groups.
  erase_if(LoadGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return LoadGroups;
}

auto AlignVectors::createStoreGroups(const AddrList &Group) const -> MoveList {
  // Form store groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // For stores with return values we'd have to collect downward
    // dependencies. There are no such stores that we handle at the moment,
    // so omit that.
    assert(Info.Inst->getType()->isVoidTy() &&
           "Not handling stores with return values");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // For stores we need to be careful whether it's safe to move them.
    // Stores that are otherwise safe to move together may not appear safe
    // to move over one another (i.e. isSafeToMoveBefore may return false).
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator(), Move.Main))
      return false;
    Move.Main.push_back(Info.Inst);
    return true;
  };

  MoveList StoreGroups;

  for (auto I = Group.rbegin(), E = Group.rend(); I != E; ++I) {
    const AddrInfo &Info = *I;
    if (!Info.Inst->mayWriteToMemory())
      continue;
    if (StoreGroups.empty() || !tryAddTo(Info, StoreGroups.back()))
      StoreGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), false);
  }

  // Erase singleton groups.
  erase_if(StoreGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return StoreGroups;
}

auto AlignVectors::move(const MoveGroup &Move) const -> bool {
  assert(!Move.Main.empty() && "Move group should have non-empty Main");
  Instruction *Where = Move.Main.front();

  if (Move.IsLoad) {
    // Move all deps to before Where, keeping order.
    for (Instruction *D : Move.Deps)
      D->moveBefore(Where);
    // Move all main instructions to after Where, keeping order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveAfter(Where);
      Where = M;
    }
  } else {
    // NOTE: Deps are empty for "store" groups. If they need to be
    // non-empty, decide on the order.
    assert(Move.Deps.empty());
    // Move all main instructions to before Where, inverting order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveBefore(Where);
      Where = M;
    }
  }

  return Move.Main.size() + Move.Deps.size() > 1;
}

auto AlignVectors::realignLoadGroup(IRBuilderBase &Builder,
                                    const ByteSpan &VSpan, int ScLen,
                                    Value *AlignVal, Value *AlignAddr) const
    -> void {
  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);
  BasicBlock::iterator BasePos = Builder.GetInsertPoint();
  BasicBlock *BaseBlock = Builder.GetInsertBlock();

  ByteSpan ASpan;
  auto *True = HVC.getFullValue(HVC.getBoolTy(ScLen));
  auto *Undef = UndefValue::get(SecTy);

  SmallVector<Instruction *> Loads(NumSectors + DoAlign, nullptr);

  // We could create all of the aligned loads, and generate the valigns
  // at the location of the first load, but for large load groups, this
  // could create highly suboptimal code (there have been groups of 140+
  // loads in real code).
  // Instead, place the loads/valigns as close to the users as possible.
  // In any case we need to have a mapping from the blocks of VSpan (the
  // span covered by the pre-existing loads) to ASpan (the span covered
  // by the aligned loads). There is a small problem, though: ASpan needs
  // to have pointers to the loads/valigns, but we don't know where to put
  // them yet. We can't use nullptr, because when we create sections of
  // ASpan (corresponding to blocks from VSpan), for each block in the
  // section we need to know which blocks of ASpan they are a part of.
  // To have a 1-1 mapping between blocks of ASpan and the temporary value
  // pointers, use the addresses of the blocks themselves.

  // Populate the blocks first, to avoid reallocations of the vector
  // interfering with generating the placeholder addresses.
  for (int Index = 0; Index != NumSectors; ++Index)
    ASpan.Blocks.emplace_back(nullptr, ScLen, Index * ScLen);
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan.Blocks[Index].Seg.Val =
        reinterpret_cast<Value *>(&ASpan.Blocks[Index]);
  }

  // Multiple values from VSpan can map to the same value in ASpan. Since we
  // try to create loads lazily, we need to find the earliest use for each
  // value from ASpan.
  DenseMap<void *, Instruction *> EarliestUser;
  auto isEarlier = [](Instruction *A, Instruction *B) {
    if (B == nullptr)
      return true;
    if (A == nullptr)
      return false;
    assert(A->getParent() == B->getParent());
    return A->comesBefore(B);
  };
  auto earliestUser = [&](const auto &Uses) {
    Instruction *User = nullptr;
    for (const Use &U : Uses) {
      auto *I = dyn_cast<Instruction>(U.getUser());
      assert(I != nullptr && "Load used in a non-instruction?");
      // Make sure we only consider users in this block, but we need
      // to remember if there were users outside the block too. This is
      // because if there are no users, aligned loads will not be created.
      if (I->getParent() == BaseBlock) {
        if (!isa<PHINode>(I))
          User = std::min(User, I, isEarlier);
      } else {
        User = std::min(User, BaseBlock->getTerminator(), isEarlier);
      }
    }
    return User;
  };

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size);
    for (const ByteSpan::Block &S : ASection) {
      EarliestUser[S.Seg.Val] = std::min(
          EarliestUser[S.Seg.Val], earliestUser(B.Seg.Val->uses()), isEarlier);
    }
  }

  auto createLoad = [&](IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int Index) {
    Value *Ptr =
        createAdjustedPointer(Builder, AlignAddr, SecTy, Index * ScLen);
    // FIXME: generate a predicated load?
    Value *Load = createAlignedLoad(Builder, SecTy, Ptr, ScLen, True, Undef);
    // If vector shifting is potentially needed, accumulate metadata
    // from source sections of twice the load width.
    int Start = (Index - DoAlign) * ScLen;
    int Width = (1 + DoAlign) * ScLen;
    propagateMetadata(cast<Instruction>(Load),
                      VSpan.section(Start, Width).values());
    return cast<Instruction>(Load);
  };

  auto moveBefore = [this](Instruction *In, Instruction *To) {
    // Move In and its upward dependencies to before To.
    assert(In->getParent() == To->getParent());
    DepList Deps = getUpwardDeps(In, To);
    // DepList is sorted with respect to positions in the basic block.
    for (Instruction *I : Deps)
      I->moveBefore(To);
  };

  // Generate necessary loads at appropriate locations.
  for (int Index = 0; Index != NumSectors + 1; ++Index) {
    // In ASpan, each block will be either a single aligned load, or a
    // valign of a pair of loads. In the latter case, an aligned load j
    // will belong to the current valign, and the one in the previous
    // block (for j > 0).
    Instruction *PrevAt =
        DoAlign && Index > 0 ? EarliestUser[&ASpan[Index - 1]] : nullptr;
    Instruction *ThisAt =
        Index < NumSectors ? EarliestUser[&ASpan[Index]] : nullptr;
    if (auto *Where = std::min(PrevAt, ThisAt, isEarlier)) {
      Builder.SetInsertPoint(Where);
      Loads[Index] = createLoad(Builder, VSpan, Index);
      // We know it's safe to put the load at BasePos, so if it's not safe
      // to move it from this location to BasePos, then the current location
      // is not valid.
      // We can't do this check proactively because we need the load to exist
      // in order to check legality.
      if (!HVC.isSafeToMoveBeforeInBB(*Loads[Index], BasePos))
        moveBefore(Loads[Index], &*BasePos);
    }
  }
  // Generate valigns if needed, and fill in proper values in ASpan.
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan[Index].Seg.Val = nullptr;
    if (auto *Where = EarliestUser[&ASpan[Index]]) {
      Builder.SetInsertPoint(Where);
      Value *Val = Loads[Index];
      assert(Val != nullptr);
      if (DoAlign) {
        Value *NextLoad = Loads[Index + 1];
        assert(NextLoad != nullptr);
        Val = HVC.vralignb(Builder, Val, NextLoad, AlignVal);
      }
      ASpan[Index].Seg.Val = Val;
    }
  }

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size).shift(-B.Pos);
    Value *Accum = UndefValue::get(HVC.getByteTy(B.Seg.Size));
    Builder.SetInsertPoint(cast<Instruction>(B.Seg.Val));

    for (ByteSpan::Block &S : ASection) {
      if (S.Seg.Val == nullptr)
        continue;
      // The processing of the data loaded by the aligned loads
      // needs to be inserted after the data is available.
      Instruction *SegI = cast<Instruction>(S.Seg.Val);
      Builder.SetInsertPoint(&*std::next(SegI->getIterator()));
      Value *Pay = HVC.vbytes(Builder, getPayload(S.Seg.Val));
      Accum = HVC.insertb(Builder, Accum, Pay, S.Seg.Start, S.Seg.Size, S.Pos);
    }
    // Instead of casting everything to bytes for the vselect, cast to the
    // original value type. This will avoid complications with casting masks.
    // For example, in cases when the original mask applied to i32, it could
    // be converted to a mask applicable to i8 via pred_typecast intrinsic,
    // but if the mask is not exactly of HVX length, extra handling would be
    // needed to make it work.
    Type *ValTy = getPayload(B.Seg.Val)->getType();
    Value *Cast = Builder.CreateBitCast(Accum, ValTy);
    Value *Sel = Builder.CreateSelect(getMask(B.Seg.Val), Cast,
                                      getPassThrough(B.Seg.Val));
    B.Seg.Val->replaceAllUsesWith(Sel);
  }
}

auto AlignVectors::realignStoreGroup(IRBuilderBase &Builder,
                                     const ByteSpan &VSpan, int ScLen,
                                     Value *AlignVal, Value *AlignAddr) const
    -> void {
  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);

  // Stores.
  ByteSpan ASpanV, ASpanM;

  // Return a vector value corresponding to the input value Val:
  // either <1 x Val> for scalar Val, or Val itself for vector Val.
  auto MakeVec = [](IRBuilderBase &Builder, Value *Val) -> Value * {
    Type *Ty = Val->getType();
    if (Ty->isVectorTy())
      return Val;
    auto *VecTy = VectorType::get(Ty, 1, /*Scalable=*/false);
    return Builder.CreateBitCast(Val, VecTy);
  };

  // Create an extra "undef" sector at the beginning and at the end.
  // They will be used as the left/right filler in the vlalign step.
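  //
  // For illustration (made-up sizes): with NumSectors = 2 and DoAlign set,
  // the loop below builds sectors for i = -1, 0, 1, 2. The two extra
  // sectors start out as undef payload with an all-zero mask, and the
  // vlalign step shifts the whole span so that bytes crossing a sector
  // boundary land in them. Any sector whose mask remains all-zero (or
  // whose payload remains undef) is simply not stored at the end.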
  for (int i = (DoAlign ? -1 : 0); i != NumSectors + DoAlign; ++i) {
    // For stores, the size of each section is an aligned vector length.
    // Adjust the store offsets relative to the section start offset.
    ByteSpan VSection = VSpan.section(i * ScLen, ScLen).shift(-i * ScLen);
    Value *AccumV = UndefValue::get(SecTy);
    Value *AccumM = HVC.getNullValue(SecTy);
    for (ByteSpan::Block &S : VSection) {
      Value *Pay = getPayload(S.Seg.Val);
      Value *Mask = HVC.rescale(Builder, MakeVec(Builder, getMask(S.Seg.Val)),
                                Pay->getType(), HVC.getByteTy());
      AccumM = HVC.insertb(Builder, AccumM, HVC.vbytes(Builder, Mask),
                           S.Seg.Start, S.Seg.Size, S.Pos);
      AccumV = HVC.insertb(Builder, AccumV, HVC.vbytes(Builder, Pay),
                           S.Seg.Start, S.Seg.Size, S.Pos);
    }
    ASpanV.Blocks.emplace_back(AccumV, ScLen, i * ScLen);
    ASpanM.Blocks.emplace_back(AccumM, ScLen, i * ScLen);
  }

  // vlalign
  if (DoAlign) {
    for (int j = 1; j != NumSectors + 2; ++j) {
      Value *PrevV = ASpanV[j - 1].Seg.Val, *ThisV = ASpanV[j].Seg.Val;
      Value *PrevM = ASpanM[j - 1].Seg.Val, *ThisM = ASpanM[j].Seg.Val;
      assert(isSectorTy(PrevV->getType()) && isSectorTy(PrevM->getType()));
      ASpanV[j - 1].Seg.Val = HVC.vlalignb(Builder, PrevV, ThisV, AlignVal);
      ASpanM[j - 1].Seg.Val = HVC.vlalignb(Builder, PrevM, ThisM, AlignVal);
    }
  }

  for (int i = 0; i != NumSectors + DoAlign; ++i) {
    Value *Ptr = createAdjustedPointer(Builder, AlignAddr, SecTy, i * ScLen);
    Value *Val = ASpanV[i].Seg.Val;
    Value *Mask = ASpanM[i].Seg.Val; // bytes
    if (!HVC.isUndef(Val) && !HVC.isZero(Mask)) {
      Value *Store =
          createAlignedStore(Builder, Val, Ptr, ScLen, HVC.vlsb(Builder, Mask));
      // If vector shifting is potentially needed, accumulate metadata
      // from source sections of twice the store width.
      int Start = (i - DoAlign) * ScLen;
      int Width = (1 + DoAlign) * ScLen;
      propagateMetadata(cast<Instruction>(Store),
                        VSpan.section(Start, Width).values());
    }
  }
}

auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
  // TODO: Needs support for masked loads/stores of "scalar" vectors.
  if (!Move.IsHvx)
    return false;

  // Return the element with the maximum alignment from Range,
  // where GetValue obtains the value to compare from an element.
  auto getMaxOf = [](auto Range, auto GetValue) {
    return *std::max_element(
        Range.begin(), Range.end(),
        [&GetValue](auto &A, auto &B) { return GetValue(A) < GetValue(B); });
  };

  const AddrList &BaseInfos = AddrGroups.at(Move.Base);

  // Conceptually, there is a vector of N bytes covering the addresses
  // starting from the minimum offset (i.e. Base.Addr+Start). This vector
  // represents a contiguous memory region that spans all accessed memory
  // locations.
  // The correspondence between loaded or stored values will be expressed
  // in terms of this vector. For example, the 0th element of the vector
  // from the Base address info will start at byte Start from the beginning
  // of this conceptual vector.
  //
  // This vector will be loaded/stored starting at the nearest down-aligned
  // address and the amount of the down-alignment will be AlignVal:
  //   valign(load_vector(align_down(Base+Start)), AlignVal)

  std::set<Instruction *> TestSet(Move.Main.begin(), Move.Main.end());
  AddrList MoveInfos;
  llvm::copy_if(
      BaseInfos, std::back_inserter(MoveInfos),
      [&TestSet](const AddrInfo &AI) { return TestSet.count(AI.Inst); });

  // Maximum alignment present in the whole address group.
  const AddrInfo &WithMaxAlign =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
  Align MaxGiven = WithMaxAlign.HaveAlign;

  // Element with the minimum offset in the move address group.
  const AddrInfo &WithMinOffset =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return -AI.Offset; });

  const AddrInfo &WithMaxNeeded =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.NeedAlign; });
  Align MinNeeded = WithMaxNeeded.NeedAlign;

  // Set the builder's insertion point right before the load group, or
  // immediately after the store group. (Instructions in a store group are
  // listed in reverse order.)
  Instruction *InsertAt = Move.Main.front();
  if (!Move.IsLoad) {
    // There should be a terminator (which store isn't, but check anyway).
    assert(InsertAt->getIterator() != InsertAt->getParent()->end());
    InsertAt = &*std::next(InsertAt->getIterator());
  }

  IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
                    InstSimplifyFolder(HVC.DL));
  Value *AlignAddr = nullptr; // Actual aligned address.
  Value *AlignVal = nullptr;  // Right-shift amount (for valign).

  if (MinNeeded <= MaxGiven) {
    int Start = WithMinOffset.Offset;
    int OffAtMax = WithMaxAlign.Offset;
    // Shift the offset of the maximally aligned instruction (OffAtMax)
    // back by just enough multiples of the required alignment to cover the
    // distance from Start to OffAtMax.
    // Calculate the address adjustment amount based on the address with the
    // maximum alignment. This is to allow a simple gep instruction instead
    // of potential bitcasts to i8*.
    int Adjust = -alignTo(OffAtMax - Start, MinNeeded.value());
    AlignAddr = createAdjustedPointer(Builder, WithMaxAlign.Addr,
                                      WithMaxAlign.ValTy, Adjust);
    int Diff = Start - (OffAtMax + Adjust);
    AlignVal = HVC.getConstInt(Diff);
    assert(Diff >= 0);
    assert(static_cast<decltype(MinNeeded.value())>(Diff) < MinNeeded.value());
  } else {
    // WithMinOffset is the lowest address in the group,
    // WithMinOffset.Addr = Base+Start.
    // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
    // mask off unnecessary bits, so it's ok to just use the original pointer
    // as the alignment amount.
    // Do an explicit down-alignment of the address to avoid creating an
    // aligned instruction with an address that is not really aligned.
    AlignAddr = createAlignedPointer(Builder, WithMinOffset.Addr,
                                     WithMinOffset.ValTy, MinNeeded.value());
    AlignVal = Builder.CreatePtrToInt(WithMinOffset.Addr, HVC.getIntTy());
  }

  ByteSpan VSpan;
  for (const AddrInfo &AI : MoveInfos) {
    VSpan.Blocks.emplace_back(AI.Inst, HVC.getSizeOf(AI.ValTy),
                              AI.Offset - WithMinOffset.Offset);
  }

  // The aligned loads/stores will use blocks that are either scalars,
  // or HVX vectors. Let "sector" be the unified term for such a block.
  // blend(scalar, vector) -> sector...
  int ScLen = Move.IsHvx ? HVC.HST.getVectorLength()
                         : std::max<int>(MinNeeded.value(), 4);
  assert(!Move.IsHvx || ScLen == 64 || ScLen == 128);
  assert(Move.IsHvx || ScLen == 4 || ScLen == 8);

  if (Move.IsLoad)
    realignLoadGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);
  else
    realignStoreGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);

  for (auto *Inst : Move.Main)
    Inst->eraseFromParent();

  return true;
}

auto AlignVectors::isSectorTy(Type *Ty) const -> bool {
  if (!HVC.isByteVecTy(Ty))
    return false;
  int Size = HVC.getSizeOf(Ty);
  if (HVC.HST.isTypeForHVX(Ty))
    return Size == static_cast<int>(HVC.HST.getVectorLength());
  return Size == 4 || Size == 8;
}

auto AlignVectors::run() -> bool {
  if (!createAddressGroups())
    return false;

  bool Changed = false;
  MoveList LoadGroups, StoreGroups;

  for (auto &G : AddrGroups) {
    llvm::append_range(LoadGroups, createLoadGroups(G.second));
    llvm::append_range(StoreGroups, createStoreGroups(G.second));
  }

  for (auto &M : LoadGroups)
    Changed |= move(M);
  for (auto &M : StoreGroups)
    Changed |= move(M);

  for (auto &M : LoadGroups)
    Changed |= realignGroup(M);
  for (auto &M : StoreGroups)
    Changed |= realignGroup(M);

  return Changed;
}

// --- End AlignVectors

// --- Begin HvxIdioms

auto HvxIdioms::getNumSignificantBits(Value *V, Instruction *In) const
    -> std::pair<unsigned, Signedness> {
  unsigned Bits = HVC.getNumSignificantBits(V, In);
  // The significant bits are calculated including the sign bit. This may
  // add an extra bit for zero-extended values, e.g. (zext i32 to i64) may
  // result in 33 significant bits. To avoid extra words, skip the extra
  // sign bit, but keep information that the value is to be treated as
  // unsigned.
  KnownBits Known = HVC.getKnownBits(V, In);
  Signedness Sign = Signed;
  unsigned NumToTest = 0; // Number of bits used in test for unsignedness.
  if (isPowerOf2_32(Bits))
    NumToTest = Bits;
  else if (Bits > 1 && isPowerOf2_32(Bits - 1))
    NumToTest = Bits - 1;

  if (NumToTest != 0 && Known.Zero.ashr(NumToTest).isAllOnes()) {
    Sign = Unsigned;
    Bits = NumToTest;
  }

  // If the top bit of the nearest power-of-2 is zero, this value is
  // positive. It could be treated as either signed or unsigned.
  if (unsigned Pow2 = PowerOf2Ceil(Bits); Pow2 != Bits) {
    if (Known.Zero.ashr(Pow2 - 1).isAllOnes())
      Sign = Positive;
  }
  return {Bits, Sign};
}

auto HvxIdioms::canonSgn(SValue X, SValue Y) const
    -> std::pair<SValue, SValue> {
  // Canonicalize the signedness of X and Y, so that the result is one of:
  //   S, S
  //   U/P, S
  //   U/P, U/P
  if (X.Sgn == Signed && Y.Sgn != Signed)
    std::swap(X, Y);
  return {X, Y};
}

// Match
//   (X * Y) [>> N], or
//   ((X * Y) + (1 << M)) >> N
auto HvxIdioms::matchFxpMul(Instruction &In) const -> std::optional<FxpOp> {
  using namespace PatternMatch;
  auto *Ty = In.getType();

  if (!Ty->isVectorTy() || !Ty->getScalarType()->isIntegerTy())
    return std::nullopt;

  unsigned Width = cast<IntegerType>(Ty->getScalarType())->getBitWidth();

  FxpOp Op;
  Value *Exp = &In;

  // Fixed-point multiplication is always shifted right (except when the
  // fraction is 0 bits).
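  // An illustrative instance of the matched pattern (hypothetical IR),
  // with N = 15 and M = 14:
  //   %m = mul <64 x i32> %x, %y
  //   %a = add <64 x i32> %m, <i32 16384, i32 16384, ...>
  //   %r = ashr <64 x i32> %a, <i32 15, i32 15, ...>
  // This yields FxpOp{Opcode = Mul, Frac = 15, RoundAt = 14}; since
  // RoundAt == Frac - 1, the printer above would render it as "mul.15:rnd".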
  auto m_Shr = [](auto &&V, auto &&S) {
    return m_CombineOr(m_LShr(V, S), m_AShr(V, S));
  };

  const APInt *Qn = nullptr;
  if (Value * T; match(Exp, m_Shr(m_Value(T), m_APInt(Qn)))) {
    Op.Frac = Qn->getZExtValue();
    Exp = T;
  } else {
    Op.Frac = 0;
  }

  if (Op.Frac > Width)
    return std::nullopt;

  // Check if there is rounding added.
  const APInt *C = nullptr;
  if (Value * T; Op.Frac > 0 && match(Exp, m_Add(m_Value(T), m_APInt(C)))) {
    uint64_t CV = C->getZExtValue();
    if (CV != 0 && !isPowerOf2_64(CV))
      return std::nullopt;
    if (CV != 0)
      Op.RoundAt = Log2_64(CV);
    Exp = T;
  }

  // Check if the rest is a multiplication.
  if (match(Exp, m_Mul(m_Value(Op.X.Val), m_Value(Op.Y.Val)))) {
    Op.Opcode = Instruction::Mul;
    // FIXME: The information below is recomputed.
    Op.X.Sgn = getNumSignificantBits(Op.X.Val, &In).second;
    Op.Y.Sgn = getNumSignificantBits(Op.Y.Val, &In).second;
    Op.ResTy = cast<VectorType>(Ty);
    return Op;
  }

  return std::nullopt;
}

auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
    -> Value * {
  assert(Op.X.Val->getType() == Op.Y.Val->getType());

  auto *VecTy = dyn_cast<VectorType>(Op.X.Val->getType());
  if (VecTy == nullptr)
    return nullptr;
  auto *ElemTy = cast<IntegerType>(VecTy->getElementType());
  unsigned ElemWidth = ElemTy->getBitWidth();

  // TODO: This can be relaxed after legalization is done pre-isel.
  if ((HVC.length(VecTy) * ElemWidth) % (8 * HVC.HST.getVectorLength()) != 0)
    return nullptr;

  // There are no special intrinsics that should be used for multiplying
  // signed 8-bit values, so just skip them. Normal codegen should handle
  // this just fine.
  if (ElemWidth <= 8)
    return nullptr;
  // Similarly, if this is just a multiplication that can be handled without
  // intervention, then leave it alone.
  if (ElemWidth <= 32 && Op.Frac == 0)
    return nullptr;

  auto [BitsX, SignX] = getNumSignificantBits(Op.X.Val, &In);
  auto [BitsY, SignY] = getNumSignificantBits(Op.Y.Val, &In);

  // TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).

  Value *X = Op.X.Val, *Y = Op.Y.Val;
  IRBuilder Builder(In.getParent(), In.getIterator(),
                    InstSimplifyFolder(HVC.DL));

  auto roundUpWidth = [](unsigned Width) -> unsigned {
    if (Width <= 32 && !isPowerOf2_32(Width)) {
      // If the element width is not a power of 2, round it up
      // to the next one. Do this for widths not exceeding 32.
      return PowerOf2Ceil(Width);
    }
    if (Width > 32 && Width % 32 != 0) {
      // For wider elements, round it up to the multiple of 32.
      return alignTo(Width, 32u);
    }
    return Width;
  };

  BitsX = roundUpWidth(BitsX);
  BitsY = roundUpWidth(BitsY);

  // For elementwise multiplication, vectors must have the same lengths, so
  // resize the elements of both inputs to the same width, the max of the
  // calculated significant bits.
  unsigned Width = std::max(BitsX, BitsY);

  auto *ResizeTy = VectorType::get(HVC.getIntTy(Width), VecTy);
  if (Width < ElemWidth) {
    X = Builder.CreateTrunc(X, ResizeTy);
    Y = Builder.CreateTrunc(Y, ResizeTy);
  } else if (Width > ElemWidth) {
    X = SignX == Signed ? Builder.CreateSExt(X, ResizeTy)
                        : Builder.CreateZExt(X, ResizeTy);
    Y = SignY == Signed ? Builder.CreateSExt(Y, ResizeTy)
                        : Builder.CreateZExt(Y, ResizeTy);
  }

  assert(X->getType() == Y->getType() && X->getType() == ResizeTy);

  unsigned VecLen = HVC.length(ResizeTy);
  unsigned ChopLen = (8 * HVC.HST.getVectorLength()) / std::min(Width, 32u);

  SmallVector<Value *> Results;
  FxpOp ChopOp = Op;
  ChopOp.ResTy = VectorType::get(Op.ResTy->getElementType(), ChopLen, false);

  for (unsigned V = 0; V != VecLen / ChopLen; ++V) {
    ChopOp.X.Val = HVC.subvector(Builder, X, V * ChopLen, ChopLen);
    ChopOp.Y.Val = HVC.subvector(Builder, Y, V * ChopLen, ChopLen);
    Results.push_back(processFxpMulChopped(Builder, In, ChopOp));
    if (Results.back() == nullptr)
      break;
  }

  if (Results.empty() || Results.back() == nullptr)
    return nullptr;

  Value *Cat = HVC.concat(Builder, Results);
  Value *Ext = SignX == Signed || SignY == Signed
                   ? Builder.CreateSExt(Cat, VecTy)
                   : Builder.CreateZExt(Cat, VecTy);
  return Ext;
}

auto HvxIdioms::processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
                                     const FxpOp &Op) const -> Value * {
  assert(Op.X.Val->getType() == Op.Y.Val->getType());
  auto *InpTy = cast<VectorType>(Op.X.Val->getType());
  unsigned Width = InpTy->getScalarSizeInBits();
  bool Rounding = Op.RoundAt.has_value();

  if (!Op.RoundAt || *Op.RoundAt == Op.Frac - 1) {
    // The fixed-point intrinsics do signed multiplication.
    if (Width == Op.Frac + 1 && Op.X.Sgn != Unsigned && Op.Y.Sgn != Unsigned) {
      Value *QMul = nullptr;
      if (Width == 16) {
        QMul = createMulQ15(Builder, Op.X, Op.Y, Rounding);
      } else if (Width == 32) {
        QMul = createMulQ31(Builder, Op.X, Op.Y, Rounding);
      }
      if (QMul != nullptr)
        return QMul;
    }
  }

  assert(Width >= 32 || isPowerOf2_32(Width)); // Width <= 32 => Width is 2^n
  assert(Width < 32 || Width % 32 == 0);       // Width > 32  => Width is 32*k

  // If Width < 32, then it should really be 16.
  if (Width < 32) {
    if (Width < 16)
      return nullptr;
    // Getting here with Op.Frac == 0 isn't wrong, but suboptimal: here we
    // generate a full-precision product, which is unnecessary if there is
    // no shift.
    assert(Width == 16);
    assert(Op.Frac != 0 && "Unshifted mul should have been skipped");
    if (Op.Frac == 16) {
      // Multiply high.
      if (Value *MulH = createMulH16(Builder, Op.X, Op.Y))
        return MulH;
    }
    // Do full-precision multiply and shift.
    Value *Prod32 = createMul16(Builder, Op.X, Op.Y);
    if (Rounding) {
      Value *RoundVal = HVC.getConstSplat(Prod32->getType(), 1 << *Op.RoundAt);
      Prod32 = Builder.CreateAdd(Prod32, RoundVal);
    }

    Value *ShiftAmt = HVC.getConstSplat(Prod32->getType(), Op.Frac);
    Value *Shifted = Op.X.Sgn == Signed || Op.Y.Sgn == Signed
                         ? Builder.CreateAShr(Prod32, ShiftAmt)
                         : Builder.CreateLShr(Prod32, ShiftAmt);
    return Builder.CreateTrunc(Shifted, InpTy);
  }

  // Width >= 32

  // Break up the arguments Op.X and Op.Y into vectors of smaller widths
  // in preparation of doing the multiplication by 32-bit parts.
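  //
  // Illustration with invented values: a <32 x i64> operand is split into
  // WordX = {lo32, hi32}, two HVX-sized vectors of i32 words (least
  // significant word first). The words are then multiplied pairwise,
  // schoolbook style, where WordX[i] * WordY[j] contributes to result
  // words i+j and i+j+1, and the partial products are combined with carry
  // propagation in createMulLong/createAddLong below.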
1451 auto WordX = HVC.splitVectorElements(Builder, Op.X.Val, /*ToWidth=*/32); 1452 auto WordY = HVC.splitVectorElements(Builder, Op.Y.Val, /*ToWidth=*/32); 1453 auto WordP = createMulLong(Builder, WordX, Op.X.Sgn, WordY, Op.Y.Sgn); 1454 1455 auto *HvxWordTy = cast<VectorType>(WordP.front()->getType()); 1456 1457 // Add the optional rounding to the proper word. 1458 if (Op.RoundAt.has_value()) { 1459 Value *Zero = HVC.getNullValue(WordX[0]->getType()); 1460 SmallVector<Value *> RoundV(WordP.size(), Zero); 1461 RoundV[*Op.RoundAt / 32] = 1462 HVC.getConstSplat(HvxWordTy, 1 << (*Op.RoundAt % 32)); 1463 WordP = createAddLong(Builder, WordP, RoundV); 1464 } 1465 1466 // createRightShiftLong? 1467 1468 // Shift all products right by Op.Frac. 1469 unsigned SkipWords = Op.Frac / 32; 1470 Constant *ShiftAmt = HVC.getConstSplat(HvxWordTy, Op.Frac % 32); 1471 1472 for (int Dst = 0, End = WordP.size() - SkipWords; Dst != End; ++Dst) { 1473 int Src = Dst + SkipWords; 1474 Value *Lo = WordP[Src]; 1475 if (Src + 1 < End) { 1476 Value *Hi = WordP[Src + 1]; 1477 WordP[Dst] = Builder.CreateIntrinsic(HvxWordTy, Intrinsic::fshr, 1478 {Hi, Lo, ShiftAmt}); 1479 } else { 1480 // The shift of the most significant word. 1481 WordP[Dst] = Builder.CreateAShr(Lo, ShiftAmt); 1482 } 1483 } 1484 if (SkipWords != 0) 1485 WordP.resize(WordP.size() - SkipWords); 1486 1487 return HVC.joinVectorElements(Builder, WordP, Op.ResTy); 1488 } 1489 1490 auto HvxIdioms::createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y, 1491 bool Rounding) const -> Value * { 1492 assert(X.Val->getType() == Y.Val->getType()); 1493 assert(X.Val->getType()->getScalarType() == HVC.getIntTy(16)); 1494 assert(HVC.HST.isHVXVectorType(EVT::getEVT(X.Val->getType(), false))); 1495 1496 // There is no non-rounding intrinsic for i16. 1497 if (!Rounding || X.Sgn == Unsigned || Y.Sgn == Unsigned) 1498 return nullptr; 1499 1500 auto V6_vmpyhvsrs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhvsrs); 1501 return HVC.createHvxIntrinsic(Builder, V6_vmpyhvsrs, X.Val->getType(), 1502 {X.Val, Y.Val}); 1503 } 1504 1505 auto HvxIdioms::createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y, 1506 bool Rounding) const -> Value * { 1507 Type *InpTy = X.Val->getType(); 1508 assert(InpTy == Y.Val->getType()); 1509 assert(InpTy->getScalarType() == HVC.getIntTy(32)); 1510 assert(HVC.HST.isHVXVectorType(EVT::getEVT(InpTy, false))); 1511 1512 if (X.Sgn == Unsigned || Y.Sgn == Unsigned) 1513 return nullptr; 1514 1515 auto V6_vmpyewuh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyewuh); 1516 auto V6_vmpyo_acc = Rounding 1517 ? 
      Rounding ? HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_rnd_sacc)
               : HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_sacc);
  Value *V1 =
      HVC.createHvxIntrinsic(Builder, V6_vmpyewuh, InpTy, {X.Val, Y.Val});
  return HVC.createHvxIntrinsic(Builder, V6_vmpyo_acc, InpTy,
                                {V1, X.Val, Y.Val});
}

auto HvxIdioms::createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
                               Value *CarryIn) const
    -> std::pair<Value *, Value *> {
  assert(X->getType() == Y->getType());
  auto VecTy = cast<VectorType>(X->getType());
  if (VecTy == HvxI32Ty && HVC.HST.useHVXV62Ops()) {
    SmallVector<Value *> Args = {X, Y};
    Intrinsic::ID AddCarry;
    if (CarryIn == nullptr && HVC.HST.useHVXV66Ops()) {
      AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarryo);
    } else {
      AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarry);
      if (CarryIn == nullptr)
        CarryIn = HVC.getNullValue(HVC.getBoolTy(HVC.length(VecTy)));
      Args.push_back(CarryIn);
    }
    Value *Ret = HVC.createHvxIntrinsic(Builder, AddCarry,
                                        /*RetTy=*/nullptr, Args);
    Value *Result = Builder.CreateExtractValue(Ret, {0});
    Value *CarryOut = Builder.CreateExtractValue(Ret, {1});
    return {Result, CarryOut};
  }

  // In other cases, do a regular add, and unsigned compare-less-than.
  // The carry-out can originate in two places: adding the carry-in or adding
  // the two input values.
  Value *Result1 = X; // Result1 = X + CarryIn
  if (CarryIn != nullptr) {
    unsigned Width = VecTy->getScalarSizeInBits();
    uint32_t Mask = 1;
    if (Width < 32) {
      for (unsigned i = 0, e = 32 / Width; i != e; ++i)
        Mask = (Mask << Width) | 1;
    }
    auto V6_vandqrt = HVC.HST.getIntrinsicId(Hexagon::V6_vandqrt);
    Value *ValueIn =
        HVC.createHvxIntrinsic(Builder, V6_vandqrt, /*RetTy=*/nullptr,
                               {CarryIn, HVC.getConstInt(Mask)});
    Result1 = Builder.CreateAdd(X, ValueIn);
  }

  Value *CarryOut1 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result1, X);
  Value *Result2 = Builder.CreateAdd(Result1, Y);
  Value *CarryOut2 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result2, Y);
  return {Result2, Builder.CreateOr(CarryOut1, CarryOut2)};
}

auto HvxIdioms::createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> Value * {
  Intrinsic::ID V6_vmpyh = 0;
  std::tie(X, Y) = canonSgn(X, Y);

  if (X.Sgn == Signed) {
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhv);
  } else if (Y.Sgn == Signed) {
    // In vmpyhus the second operand is unsigned.
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhus);
  } else {
    V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhv);
  }

  // i16*i16 -> i32 / interleaved
  Value *P =
      HVC.createHvxIntrinsic(Builder, V6_vmpyh, HvxP32Ty, {Y.Val, X.Val});
  // Deinterleave
  return HVC.vshuff(Builder, HVC.sublo(Builder, P), HVC.subhi(Builder, P));
}

auto HvxIdioms::createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> Value * {
  Type *HvxI16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/false);

  if (HVC.HST.useHVXV69Ops()) {
    if (X.Sgn != Signed && Y.Sgn != Signed) {
      auto V6_vmpyuhvs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhvs);
      return HVC.createHvxIntrinsic(Builder, V6_vmpyuhvs, HvxI16Ty,
                                    {X.Val, Y.Val});
    }
  }

  Type *HvxP16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/true);
  Value *Pair16 =
      Builder.CreateBitCast(createMul16(Builder, X, Y), HvxP16Ty);
  unsigned Len = HVC.length(HvxP16Ty) / 2;

  SmallVector<int, 128> PickOdd(Len);
  for (int i = 0; i != static_cast<int>(Len); ++i)
    PickOdd[i] = 2 * i + 1;

  return Builder.CreateShuffleVector(HVC.sublo(Builder, Pair16),
                                     HVC.subhi(Builder, Pair16), PickOdd);
}

auto HvxIdioms::createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
    -> std::pair<Value *, Value *> {
  assert(X.Val->getType() == Y.Val->getType());
  assert(X.Val->getType() == HvxI32Ty);

  Intrinsic::ID V6_vmpy_parts;
  std::tie(X, Y) = canonSgn(X, Y);

  if (X.Sgn == Signed) {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyss_parts;
  } else if (Y.Sgn == Signed) {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyus_parts;
  } else {
    V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyuu_parts;
  }

  Value *Parts = HVC.createHvxIntrinsic(Builder, V6_vmpy_parts, nullptr,
                                        {X.Val, Y.Val}, {HvxI32Ty});
  Value *Hi = Builder.CreateExtractValue(Parts, {0});
  Value *Lo = Builder.CreateExtractValue(Parts, {1});
  return {Lo, Hi};
}

auto HvxIdioms::createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                              ArrayRef<Value *> WordY) const
    -> SmallVector<Value *> {
  assert(WordX.size() == WordY.size());
  unsigned Idx = 0, Length = WordX.size();
  SmallVector<Value *> Sum(Length);

  while (Idx != Length) {
    if (HVC.isZero(WordX[Idx]))
      Sum[Idx] = WordY[Idx];
    else if (HVC.isZero(WordY[Idx]))
      Sum[Idx] = WordX[Idx];
    else
      break;
    ++Idx;
  }

  Value *Carry = nullptr;
  for (; Idx != Length; ++Idx) {
    std::tie(Sum[Idx], Carry) =
        createAddCarry(Builder, WordX[Idx], WordY[Idx], Carry);
  }

  // This drops the final carry beyond the highest word.
  return Sum;
}

auto HvxIdioms::createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                              Signedness SgnX, ArrayRef<Value *> WordY,
                              Signedness SgnY) const -> SmallVector<Value *> {
  SmallVector<SmallVector<Value *>> Products(WordX.size() + WordY.size());

  // WordX[i] * WordY[j] produces words i+j and i+j+1 of the results,
  // that is halves 2(i+j), 2(i+j)+1, 2(i+j)+2, 2(i+j)+3.
  for (int i = 0, e = WordX.size(); i != e; ++i) {
    for (int j = 0, f = WordY.size(); j != f; ++j) {
      // Check the 4 halves that this multiplication can generate.
      Signedness SX = (i + 1 == e) ? SgnX : Unsigned;
      Signedness SY = (j + 1 == f) ? SgnY : Unsigned;
      auto [Lo, Hi] = createMul32(Builder, {WordX[i], SX}, {WordY[j], SY});
      Products[i + j + 0].push_back(Lo);
      Products[i + j + 1].push_back(Hi);
    }
  }

  Value *Zero = HVC.getNullValue(WordX[0]->getType());

  auto pop_back_or_zero = [Zero](auto &Vector) -> Value * {
    if (Vector.empty())
      return Zero;
    auto Last = Vector.back();
    Vector.pop_back();
    return Last;
  };

  for (int i = 0, e = Products.size(); i != e; ++i) {
    while (Products[i].size() > 1) {
      Value *Carry = nullptr; // no carry-in
      for (int j = i; j != e; ++j) {
        auto &ProdJ = Products[j];
        auto [Sum, CarryOut] = createAddCarry(Builder, pop_back_or_zero(ProdJ),
                                              pop_back_or_zero(ProdJ), Carry);
        ProdJ.insert(ProdJ.begin(), Sum);
        Carry = CarryOut;
      }
    }
  }

  SmallVector<Value *> WordP;
  for (auto &P : Products) {
    assert(P.size() == 1 && "Should have been added together");
    WordP.push_back(P.front());
  }

  return WordP;
}

auto HvxIdioms::run() -> bool {
  bool Changed = false;

  for (BasicBlock &B : HVC.F) {
    for (auto It = B.rbegin(); It != B.rend(); ++It) {
      if (auto Fxm = matchFxpMul(*It)) {
        Value *New = processFxpMul(*It, *Fxm);
        // Always report "changed" for now.
        Changed = true;
        if (!New)
          continue;
        bool StartOver = !isa<Instruction>(New);
        It->replaceAllUsesWith(New);
        RecursivelyDeleteTriviallyDeadInstructions(&*It, &HVC.TLI);
        It = StartOver ? B.rbegin()
                       : cast<Instruction>(New)->getReverseIterator();
      }
    }
  }

  return Changed;
}

// --- End HvxIdioms

auto HexagonVectorCombine::run() -> bool {
  if (!HST.useHVXOps())
    return false;

  bool Changed = false;
  Changed |= AlignVectors(*this).run();
  Changed |= HvxIdioms(*this).run();

  return Changed;
}

auto HexagonVectorCombine::getIntTy(unsigned Width) const -> IntegerType * {
  return IntegerType::get(F.getContext(), Width);
}

auto HexagonVectorCombine::getByteTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *ByteTy = Type::getInt8Ty(F.getContext());
  if (ElemCount == 0)
    return ByteTy;
  return VectorType::get(ByteTy, ElemCount, /*Scalable=*/false);
}

auto HexagonVectorCombine::getBoolTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *BoolTy = Type::getInt1Ty(F.getContext());
  if (ElemCount == 0)
    return BoolTy;
  return VectorType::get(BoolTy, ElemCount, /*Scalable=*/false);
}

auto HexagonVectorCombine::getConstInt(int Val, unsigned Width) const
    -> ConstantInt * {
  return ConstantInt::getSigned(getIntTy(Width), Val);
}

auto HexagonVectorCombine::isZero(const Value *Val) const -> bool {
  if (auto *C = dyn_cast<Constant>(Val))
    return C->isZeroValue();
  return false;
}

auto HexagonVectorCombine::getIntValue(const Value *Val) const
    -> std::optional<APInt> {
  if (auto *CI = dyn_cast<ConstantInt>(Val))
    return CI->getValue();
  return std::nullopt;
}

auto HexagonVectorCombine::isUndef(const Value *Val) const -> bool {
  return isa<UndefValue>(Val);
}

auto HexagonVectorCombine::getHvxTy(Type *ElemTy, bool Pair) const
    -> VectorType * {
  EVT ETy = EVT::getEVT(ElemTy, false);
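  // HVX registers hold HwLen bytes (64 or 128 depending on the subtarget
  // mode), so the element count computed below is 8*HwLen divided by the
  // element width in bits; a "pair" type doubles that count.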
  assert(ETy.isSimple() && "Invalid HVX element type");
  // Do not allow boolean types here: they don't have a fixed length.
  assert(HST.isHVXElementType(ETy.getSimpleVT(), /*IncludeBool=*/false) &&
         "Invalid HVX element type");
  unsigned HwLen = HST.getVectorLength();
  unsigned NumElems = (8 * HwLen) / ETy.getSizeInBits();
  return VectorType::get(ElemTy, Pair ? 2 * NumElems : NumElems,
                         /*Scalable=*/false);
}

auto HexagonVectorCombine::getSizeOf(const Value *Val, SizeKind Kind) const
    -> int {
  return getSizeOf(Val->getType(), Kind);
}

auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
    -> int {
  auto *NcTy = const_cast<Type *>(Ty);
  switch (Kind) {
  case Store:
    return DL.getTypeStoreSize(NcTy).getFixedValue();
  case Alloc:
    return DL.getTypeAllocSize(NcTy).getFixedValue();
  }
  llvm_unreachable("Unhandled SizeKind enum");
}

auto HexagonVectorCombine::getTypeAlignment(Type *Ty) const -> int {
  // The actual type may be shorter than the HVX vector, so determine
  // the alignment based on subtarget info.
  if (HST.isTypeForHVX(Ty))
    return HST.getVectorLength();
  return DL.getABITypeAlign(Ty).value();
}

auto HexagonVectorCombine::length(Value *Val) const -> size_t {
  return length(Val->getType());
}

auto HexagonVectorCombine::length(Type *Ty) const -> size_t {
  auto *VecTy = dyn_cast<VectorType>(Ty);
  assert(VecTy && "Must be a vector type");
  return VecTy->getElementCount().getFixedValue();
}

auto HexagonVectorCombine::getNullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Zero = ConstantInt::get(Ty->getScalarType(), 0);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Zero);
  return Zero;
}

auto HexagonVectorCombine::getFullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Minus1 = ConstantInt::get(Ty->getScalarType(), -1);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Minus1);
  return Minus1;
}

auto HexagonVectorCombine::getConstSplat(Type *Ty, int Val) const
    -> Constant * {
  assert(Ty->isVectorTy());
  auto VecTy = cast<VectorType>(Ty);
  Type *ElemTy = VecTy->getElementType();
  // Add support for floats if needed.
  auto *Splat = ConstantVector::getSplat(VecTy->getElementCount(),
                                         ConstantInt::get(ElemTy, Val));
  return Splat;
}

auto HexagonVectorCombine::simplify(Value *V) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(V)) {
    SimplifyQuery Q(DL, &TLI, &DT, &AC, In);
    return simplifyInstruction(In, Q);
  }
  return nullptr;
}

// Insert bytes [Start..Start+Length) of Src into Dst at byte Where.
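// For example (illustrative values): with Dst = d0..d7, Src = s0..s7,
// Start = 2, Length = 3, Where = 4, the result is d0 d1 d2 d3 s2 s3 s4 d7.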
auto HexagonVectorCombine::insertb(IRBuilderBase &Builder, Value *Dst,
                                   Value *Src, int Start, int Length,
                                   int Where) const -> Value * {
  assert(isByteVecTy(Dst->getType()) && isByteVecTy(Src->getType()));
  int SrcLen = getSizeOf(Src);
  int DstLen = getSizeOf(Dst);
  assert(0 <= Start && Start + Length <= SrcLen);
  assert(0 <= Where && Where + Length <= DstLen);

  int P2Len = PowerOf2Ceil(SrcLen | DstLen);
  auto *Undef = UndefValue::get(getByteTy());
  Value *P2Src = vresize(Builder, Src, P2Len, Undef);
  Value *P2Dst = vresize(Builder, Dst, P2Len, Undef);

  SmallVector<int, 256> SMask(P2Len);
  for (int i = 0; i != P2Len; ++i) {
    // If i is in [Where, Where+Length), pick Src[Start+(i-Where)].
    // Otherwise, pick Dst[i].
    SMask[i] =
        (Where <= i && i < Where + Length) ? P2Len + Start + (i - Where) : i;
  }

  Value *P2Insert = Builder.CreateShuffleVector(P2Dst, P2Src, SMask);
  return vresize(Builder, P2Insert, DstLen, Undef);
}

auto HexagonVectorCombine::vlalignb(IRBuilderBase &Builder, Value *Lo,
                                    Value *Hi, Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  if (isZero(Amt))
    return Hi;
  int VecLen = getSizeOf(Hi);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, VecLen - IntAmt->getSExtValue(),
                           VecLen);

  if (HST.isTypeForHVX(Hi->getType())) {
    assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
           "Expecting an exact HVX type");
    return createHvxIntrinsic(Builder, HST.getIntrinsicId(Hexagon::V6_vlalignb),
                              Hi->getType(), {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Builder.CreateShl(Pair, Amt), 32);
    Value *Trunc =
        Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Hi->getType());
  }
  if (VecLen == 8) {
    Value *Sub = Builder.CreateSub(getConstInt(VecLen), Amt);
    return vralignb(Builder, Lo, Hi, Sub);
  }
  llvm_unreachable("Unexpected vector length");
}

auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo,
                                    Value *Hi, Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  if (isZero(Amt))
    return Lo;
  int VecLen = getSizeOf(Lo);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, IntAmt->getSExtValue(), VecLen);

  if (HST.isTypeForHVX(Lo->getType())) {
    assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
           "Expecting an exact HVX type");
    return createHvxIntrinsic(Builder, HST.getIntrinsicId(Hexagon::V6_valignb),
                              Lo->getType(), {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Pair, Amt);
    Value *Trunc =
        Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Lo->getType());
  }
  if (VecLen == 8) {
    Type *Int64Ty = Type::getInt64Ty(F.getContext());
    Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty);
    Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty);
    Function *FI = Intrinsic::getDeclaration(F.getParent(),
                                             Intrinsic::hexagon_S2_valignrb);
    Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt});
    return Builder.CreateBitCast(Call, Lo->getType());
  }
  llvm_unreachable("Unexpected vector length");
}

// Concatenates a sequence of vectors of the same type.
auto HexagonVectorCombine::concat(IRBuilderBase &Builder,
                                  ArrayRef<Value *> Vecs) const -> Value * {
  assert(!Vecs.empty());
  SmallVector<int, 256> SMask;
  std::vector<Value *> Work[2];
  int ThisW = 0, OtherW = 1;

  Work[ThisW].assign(Vecs.begin(), Vecs.end());
  while (Work[ThisW].size() > 1) {
    auto *Ty = cast<VectorType>(Work[ThisW].front()->getType());
    SMask.resize(length(Ty) * 2);
    std::iota(SMask.begin(), SMask.end(), 0);

    Work[OtherW].clear();
    if (Work[ThisW].size() % 2 != 0)
      Work[ThisW].push_back(UndefValue::get(Ty));
    for (int i = 0, e = Work[ThisW].size(); i < e; i += 2) {
      Value *Joined = Builder.CreateShuffleVector(Work[ThisW][i],
                                                  Work[ThisW][i + 1], SMask);
      Work[OtherW].push_back(Joined);
    }
    std::swap(ThisW, OtherW);
  }

  // Since there may have been some undefs appended to make shuffle operands
  // have the same type, perform the last shuffle to only pick the original
  // elements.
  SMask.resize(Vecs.size() * length(Vecs.front()->getType()));
  std::iota(SMask.begin(), SMask.end(), 0);
  Value *Total = Work[ThisW].front();
  return Builder.CreateShuffleVector(Total, SMask);
}

auto HexagonVectorCombine::vresize(IRBuilderBase &Builder, Value *Val,
                                   int NewSize, Value *Pad) const -> Value * {
  assert(isa<VectorType>(Val->getType()));
  auto *ValTy = cast<VectorType>(Val->getType());
  assert(ValTy->getElementType() == Pad->getType());

  int CurSize = length(ValTy);
  if (CurSize == NewSize)
    return Val;
  // Truncate.
  if (CurSize > NewSize)
    return getElementRange(Builder, Val, /*Ignored*/ Val, 0, NewSize);
  // Extend.
  SmallVector<int, 128> SMask(NewSize);
  std::iota(SMask.begin(), SMask.begin() + CurSize, 0);
  std::fill(SMask.begin() + CurSize, SMask.end(), CurSize);
  Value *PadVec = Builder.CreateVectorSplat(CurSize, Pad);
  return Builder.CreateShuffleVector(Val, PadVec, SMask);
}

auto HexagonVectorCombine::rescale(IRBuilderBase &Builder, Value *Mask,
                                   Type *FromTy, Type *ToTy) const -> Value * {
  // Mask is a vector <N x i1>, where each element corresponds to an
  // element of FromTy. Remap it so that each element will correspond
  // to an element of ToTy.
  assert(isa<VectorType>(Mask->getType()));

  Type *FromSTy = FromTy->getScalarType();
  Type *ToSTy = ToTy->getScalarType();
  if (FromSTy == ToSTy)
    return Mask;

  int FromSize = getSizeOf(FromSTy);
  int ToSize = getSizeOf(ToSTy);
  assert(FromSize % ToSize == 0 || ToSize % FromSize == 0);

  auto *MaskTy = cast<VectorType>(Mask->getType());
  int FromCount = length(MaskTy);
  int ToCount = (FromCount * FromSize) / ToSize;
  assert((FromCount * FromSize) % ToSize == 0);

  auto *FromITy = getIntTy(FromSize * 8);
  auto *ToITy = getIntTy(ToSize * 8);

  // Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
  // -> trunc to <M x i1>.
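  // For example (illustrative types), remapping a mask from <8 x i32> to
  // <16 x i16> duplicates each mask bit, since every i32 lane covers two
  // i16 lanes. In the narrowing direction, on little-endian Hexagon the
  // truncation keeps the bit corresponding to the low half of each wider
  // lane.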
  Value *Ext = Builder.CreateSExt(
      Mask, VectorType::get(FromITy, FromCount, /*Scalable=*/false));
  Value *Cast = Builder.CreateBitCast(
      Ext, VectorType::get(ToITy, ToCount, /*Scalable=*/false));
  return Builder.CreateTrunc(
      Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable=*/false));
}

// Bitcast to bytes, and return least significant bits.
auto HexagonVectorCombine::vlsb(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getBoolTy())
    return Val;

  Value *Bytes = vbytes(Builder, Val);
  if (auto *VecTy = dyn_cast<VectorType>(Bytes->getType()))
    return Builder.CreateTrunc(Bytes, getBoolTy(getSizeOf(VecTy)));
  // If Bytes is a scalar (i.e. Val was a scalar byte), return i1, not
  // <1 x i1>.
  return Builder.CreateTrunc(Bytes, getBoolTy());
}

// Bitcast to bytes for non-bool. For bool, convert i1 -> i8.
auto HexagonVectorCombine::vbytes(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getByteTy())
    return Val;

  if (ScalarTy != getBoolTy())
    return Builder.CreateBitCast(Val, getByteTy(getSizeOf(Val)));
  // For bool, return a sext from i1 to i8.
  if (auto *VecTy = dyn_cast<VectorType>(Val->getType()))
    return Builder.CreateSExt(Val, VectorType::get(getByteTy(), VecTy));
  return Builder.CreateSExt(Val, getByteTy());
}

auto HexagonVectorCombine::subvector(IRBuilderBase &Builder, Value *Val,
                                     unsigned Start, unsigned Length) const
    -> Value * {
  assert(Start + Length <= length(Val));
  return getElementRange(Builder, Val, /*Ignored*/ Val, Start, Length);
}

auto HexagonVectorCombine::sublo(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  size_t Len = length(Val);
  assert(Len % 2 == 0 && "Length should be even");
  return subvector(Builder, Val, 0, Len / 2);
}

auto HexagonVectorCombine::subhi(IRBuilderBase &Builder, Value *Val) const
    -> Value * {
  size_t Len = length(Val);
  assert(Len % 2 == 0 && "Length should be even");
  return subvector(Builder, Val, Len / 2, Len / 2);
}

auto HexagonVectorCombine::vdeal(IRBuilderBase &Builder, Value *Val0,
                                 Value *Val1) const -> Value * {
  assert(Val0->getType() == Val1->getType());
  int Len = length(Val0);
  SmallVector<int, 128> Mask(2 * Len);

  for (int i = 0; i != Len; ++i) {
    Mask[i] = 2 * i;           // Even
    Mask[i + Len] = 2 * i + 1; // Odd
  }
  return Builder.CreateShuffleVector(Val0, Val1, Mask);
}

auto HexagonVectorCombine::vshuff(IRBuilderBase &Builder, Value *Val0,
                                  Value *Val1) const -> Value * {
  assert(Val0->getType() == Val1->getType());
  int Len = length(Val0);
  SmallVector<int, 128> Mask(2 * Len);

  for (int i = 0; i != Len; ++i) {
    Mask[2 * i + 0] = i;       // Val0
    Mask[2 * i + 1] = i + Len; // Val1
  }
  return Builder.CreateShuffleVector(Val0, Val1, Mask);
}

auto HexagonVectorCombine::createHvxIntrinsic(IRBuilderBase &Builder,
                                              Intrinsic::ID IntID, Type *RetTy,
                                              ArrayRef<Value *> Args,
                                              ArrayRef<Type *> ArgTys) const
    -> Value * {
  auto getCast = [&](IRBuilderBase &Builder, Value *Val,
                     Type *DestTy) -> Value * {
    Type *SrcTy = Val->getType();
    if (SrcTy == DestTy)
      return Val;

    // A non-HVX type would have to be a scalar that already has the right
    // type, so at this point SrcTy must be an HVX vector type.
    assert(HST.isTypeForHVX(SrcTy, /*IncludeBool=*/true));

    Type *BoolTy = Type::getInt1Ty(F.getContext());
    if (cast<VectorType>(SrcTy)->getElementType() != BoolTy)
      return Builder.CreateBitCast(Val, DestTy);

    // Predicate HVX vector.
    unsigned HwLen = HST.getVectorLength();
    Intrinsic::ID TC = HwLen == 64 ? Intrinsic::hexagon_V6_pred_typecast
                                   : Intrinsic::hexagon_V6_pred_typecast_128B;
    Function *FI =
        Intrinsic::getDeclaration(F.getParent(), TC, {DestTy, Val->getType()});
    return Builder.CreateCall(FI, {Val});
  };

  Function *IntrFn = Intrinsic::getDeclaration(F.getParent(), IntID, ArgTys);
  FunctionType *IntrTy = IntrFn->getFunctionType();

  SmallVector<Value *, 4> IntrArgs;
  for (int i = 0, e = Args.size(); i != e; ++i) {
    Value *A = Args[i];
    Type *T = IntrTy->getParamType(i);
    if (A->getType() != T) {
      IntrArgs.push_back(getCast(Builder, A, T));
    } else {
      IntrArgs.push_back(A);
    }
  }
  Value *Call = Builder.CreateCall(IntrFn, IntrArgs);

  Type *CallTy = Call->getType();
  if (RetTy == nullptr || CallTy == RetTy)
    return Call;
  // Scalar types should have RetTy matching the call return type.
  assert(HST.isTypeForHVX(CallTy, /*IncludeBool=*/true));
  return getCast(Builder, Call, RetTy);
}

auto HexagonVectorCombine::splitVectorElements(IRBuilderBase &Builder,
                                               Value *Vec,
                                               unsigned ToWidth) const
    -> SmallVector<Value *> {
  // Break a vector of wide elements into a series of vectors with narrow
  // elements:
  //   (...c0:b0:a0, ...c1:b1:a1, ...c2:b2:a2, ...)
  // -->
  //   (a0, a1, a2, ...)  // lowest "ToWidth" bits
  //   (b0, b1, b2, ...)  // the next lowest...
  //   (c0, c1, c2, ...)  // ...
  //   ...
  //
  // The number of elements in each resulting vector is the same as
  // in the original vector.

  auto *VecTy = cast<VectorType>(Vec->getType());
  assert(VecTy->getElementType()->isIntegerTy());
  unsigned FromWidth = VecTy->getScalarSizeInBits();
  assert(isPowerOf2_32(ToWidth) && isPowerOf2_32(FromWidth));
  assert(ToWidth <= FromWidth && "Breaking up into wider elements?");
  unsigned NumResults = FromWidth / ToWidth;

  SmallVector<Value *> Results(NumResults);
  Results[0] = Vec;
  unsigned Length = length(VecTy);

  // Do it by splitting in half, since those operations correspond to deal
  // instructions.
  auto splitInHalf = [&](unsigned Begin, unsigned End, auto splitFunc) -> void {
    // Take V = Results[Begin], split it in L, H.
    // Store Results[Begin] = L, Results[(Begin+End)/2] = H.
    // Call itself recursively: split(Begin, Half), split(Half, End).
    if (Begin + 1 == End)
      return;

    Value *Val = Results[Begin];
    unsigned Width = Val->getType()->getScalarSizeInBits();

    auto *VTy = VectorType::get(getIntTy(Width / 2), 2 * Length, false);
    Value *VVal = Builder.CreateBitCast(Val, VTy);

    Value *Res = vdeal(Builder, sublo(Builder, VVal), subhi(Builder, VVal));

    unsigned Half = (Begin + End) / 2;
    Results[Begin] = sublo(Builder, Res);
    Results[Half] = subhi(Builder, Res);

    splitFunc(Begin, Half, splitFunc);
    splitFunc(Half, End, splitFunc);
  };

  splitInHalf(0, NumResults, splitInHalf);
  return Results;
}

auto HexagonVectorCombine::joinVectorElements(IRBuilderBase &Builder,
                                              ArrayRef<Value *> Values,
                                              VectorType *ToType) const
    -> Value * {
  assert(ToType->getElementType()->isIntegerTy());

  // If the list of values does not have power-of-2 elements, append copies
  // of the sign bit to it, to make the size be 2^n.
  // The reason for this is that the values will be joined in pairs, because
  // otherwise the shuffles will result in convoluted code. With pairwise
  // joins, the shuffles will hopefully be folded into a perfect shuffle.
  // The output will need to be sign-extended to a type with element width
  // being a power of 2 anyway.
  SmallVector<Value *> Inputs(Values.begin(), Values.end());

  unsigned ToWidth = ToType->getScalarSizeInBits();
  unsigned Width = Inputs.front()->getType()->getScalarSizeInBits();
  assert(Width <= ToWidth);
  assert(isPowerOf2_32(Width) && isPowerOf2_32(ToWidth));
  unsigned Length = length(Inputs.front()->getType());

  unsigned NeedInputs = ToWidth / Width;
  if (Inputs.size() != NeedInputs) {
    // Having too many inputs is ok: drop the high bits (usual wrap-around).
    // If there are too few, fill them with the sign bit.
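    // For example (a hypothetical case): joining two 32-bit word vectors
    // into a type with 128-bit elements needs four inputs, so the two
    // missing high words are filled with copies of the sign word computed
    // below.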
    Value *Last = Inputs.back();
    Value *Sign =
        Builder.CreateAShr(Last, getConstSplat(Last->getType(), Width - 1));
    Inputs.resize(NeedInputs, Sign);
  }

  while (Inputs.size() > 1) {
    Width *= 2;
    auto *VTy = VectorType::get(getIntTy(Width), Length, false);
    for (int i = 0, e = Inputs.size(); i < e; i += 2) {
      Value *Res = vshuff(Builder, Inputs[i], Inputs[i + 1]);
      Inputs[i / 2] = Builder.CreateBitCast(Res, VTy);
    }
    Inputs.resize(Inputs.size() / 2);
  }

  assert(Inputs.front()->getType() == ToType);
  return Inputs.front();
}

auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
                                                      Value *Ptr1) const
    -> std::optional<int> {
  struct Builder : IRBuilder<> {
    Builder(BasicBlock *B) : IRBuilder<>(B->getTerminator()) {}
    ~Builder() {
      for (Instruction *I : llvm::reverse(ToErase))
        I->eraseFromParent();
    }
    SmallVector<Instruction *, 8> ToErase;
  };

#define CallBuilder(B, F)                                                      \
  [&](auto &B_) {                                                              \
    Value *V = B_.F;                                                           \
    if (auto *I = dyn_cast<Instruction>(V))                                    \
      B_.ToErase.push_back(I);                                                 \
    return V;                                                                  \
  }(B)

  auto Simplify = [this](Value *V) {
    if (Value *S = simplify(V))
      return S;
    return V;
  };

  auto StripBitCast = [](Value *V) {
    while (auto *C = dyn_cast<BitCastInst>(V))
      V = C->getOperand(0);
    return V;
  };

  Ptr0 = StripBitCast(Ptr0);
  Ptr1 = StripBitCast(Ptr1);
  if (!isa<GetElementPtrInst>(Ptr0) || !isa<GetElementPtrInst>(Ptr1))
    return std::nullopt;

  auto *Gep0 = cast<GetElementPtrInst>(Ptr0);
  auto *Gep1 = cast<GetElementPtrInst>(Ptr1);
  if (Gep0->getPointerOperand() != Gep1->getPointerOperand())
    return std::nullopt;
  if (Gep0->getSourceElementType() != Gep1->getSourceElementType())
    return std::nullopt;

  Builder B(Gep0->getParent());
  int Scale = getSizeOf(Gep0->getSourceElementType(), Alloc);

  // FIXME: for now only check GEPs with a single index.
  if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
    return std::nullopt;

  Value *Idx0 = Gep0->getOperand(1);
  Value *Idx1 = Gep1->getOperand(1);

  // First, try to simplify the subtraction directly.
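  // Failing that, split each index into its known and unknown bits (per
  // known-bits analysis) and subtract the two parts separately: if both
  // partial differences fold to constants, their sum, scaled by the GEP
  // element size, is the pointer difference.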
  if (auto *Diff = dyn_cast<ConstantInt>(
          Simplify(CallBuilder(B, CreateSub(Idx0, Idx1)))))
    return Diff->getSExtValue() * Scale;

  KnownBits Known0 = getKnownBits(Idx0, Gep0);
  KnownBits Known1 = getKnownBits(Idx1, Gep1);
  APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
  if (Unknown.isAllOnes())
    return std::nullopt;

  Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
  Value *AndU0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskU)));
  Value *AndU1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskU)));
  Value *SubU = Simplify(CallBuilder(B, CreateSub(AndU0, AndU1)));
  int Diff0 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubU)) {
    Diff0 = C->getSExtValue();
  } else {
    return std::nullopt;
  }

  Value *MaskK = ConstantInt::get(MaskU->getType(), ~Unknown);
  Value *AndK0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskK)));
  Value *AndK1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskK)));
  Value *SubK = Simplify(CallBuilder(B, CreateSub(AndK0, AndK1)));
  int Diff1 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubK)) {
    Diff1 = C->getSExtValue();
  } else {
    return std::nullopt;
  }

  return (Diff0 + Diff1) * Scale;

#undef CallBuilder
}

auto HexagonVectorCombine::getNumSignificantBits(const Value *V,
                                                 const Instruction *CtxI) const
    -> unsigned {
  return ComputeMaxSignificantBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
}

auto HexagonVectorCombine::getKnownBits(const Value *V,
                                        const Instruction *CtxI) const
    -> KnownBits {
  return computeKnownBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT, /*ORE=*/nullptr,
                          /*UseInstrInfo=*/true);
}

template <typename T>
auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
                                                  BasicBlock::const_iterator To,
                                                  const T &IgnoreInsts) const
    -> bool {
  auto getLocOrNone =
      [this](const Instruction &I) -> std::optional<MemoryLocation> {
    if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        return MemoryLocation::getForArgument(II, 0, TLI);
      case Intrinsic::masked_store:
        return MemoryLocation::getForArgument(II, 1, TLI);
      }
    }
    return MemoryLocation::getOrNone(&I);
  };

  // The source and the destination must be in the same basic block.
  const BasicBlock &Block = *In.getParent();
  assert(Block.begin() == To || Block.end() == To || To->getParent() == &Block);
  // No PHIs.
  if (isa<PHINode>(In) || (To != Block.end() && isa<PHINode>(*To)))
    return false;

  if (!mayHaveNonDefUseDependency(In))
    return true;
  bool MayWrite = In.mayWriteToMemory();
  auto MaybeLoc = getLocOrNone(In);

  auto From = In.getIterator();
  if (From == To)
    return true;
  bool MoveUp = (To != Block.end() && To->comesBefore(&In));
  auto Range =
      MoveUp ? std::make_pair(To, From) : std::make_pair(std::next(From), To);
  for (auto It = Range.first; It != Range.second; ++It) {
    const Instruction &I = *It;
    if (llvm::is_contained(IgnoreInsts, &I))
      continue;
    // The assume intrinsic can be ignored.
    if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
      if (II->getIntrinsicID() == Intrinsic::assume)
        continue;
    }
    // Parts based on isSafeToMoveBefore from CodeMoverUtils.cpp.
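    // Any instruction in the crossed-over range that may throw, may not
    // return, or may conflict with the memory accessed by In blocks the
    // motion.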
    if (I.mayThrow())
      return false;
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (!CB->hasFnAttr(Attribute::WillReturn))
        return false;
      if (!CB->hasFnAttr(Attribute::NoSync))
        return false;
    }
    if (I.mayReadOrWriteMemory()) {
      auto MaybeLocI = getLocOrNone(I);
      if (MayWrite || I.mayWriteToMemory()) {
        if (!MaybeLoc || !MaybeLocI)
          return false;
        if (!AA.isNoAlias(*MaybeLoc, *MaybeLocI))
          return false;
      }
    }
  }
  return true;
}

auto HexagonVectorCombine::isByteVecTy(Type *Ty) const -> bool {
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return VecTy->getElementType() == getByteTy();
  return false;
}

auto HexagonVectorCombine::getElementRange(IRBuilderBase &Builder, Value *Lo,
                                           Value *Hi, int Start,
                                           int Length) const -> Value * {
  assert(0 <= Start && size_t(Start + Length) < length(Lo) + length(Hi));
  SmallVector<int, 128> SMask(Length);
  std::iota(SMask.begin(), SMask.end(), Start);
  return Builder.CreateShuffleVector(Lo, Hi, SMask);
}

// Pass management.

namespace llvm {
void initializeHexagonVectorCombineLegacyPass(PassRegistry &);
FunctionPass *createHexagonVectorCombineLegacyPass();
} // namespace llvm

namespace {
class HexagonVectorCombineLegacy : public FunctionPass {
public:
  static char ID;

  HexagonVectorCombineLegacy() : FunctionPass(ID) {}

  StringRef getPassName() const override { return "Hexagon Vector Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TM = getAnalysis<TargetPassConfig>().getTM<HexagonTargetMachine>();
    HexagonVectorCombine HVC(F, AA, AC, DT, TLI, TM);
    return HVC.run();
  }
};
} // namespace

char HexagonVectorCombineLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonVectorCombineLegacy, DEBUG_TYPE,
                      "Hexagon Vector Combine", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(HexagonVectorCombineLegacy, DEBUG_TYPE,
                    "Hexagon Vector Combine", false, false)

FunctionPass *llvm::createHexagonVectorCombineLegacyPass() {
  return new HexagonVectorCombineLegacy();
}