1 //===- MVEGatherScatterLowering.cpp - Gather/Scatter lowering -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// This pass custom lowers llvm.gather and llvm.scatter instructions to 10 /// arm.mve.gather and arm.mve.scatter intrinsics, optimising the code to 11 /// produce a better final result as we go. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "ARM.h" 16 #include "ARMBaseInstrInfo.h" 17 #include "ARMSubtarget.h" 18 #include "llvm/Analysis/LoopInfo.h" 19 #include "llvm/Analysis/TargetTransformInfo.h" 20 #include "llvm/Analysis/ValueTracking.h" 21 #include "llvm/CodeGen/TargetLowering.h" 22 #include "llvm/CodeGen/TargetPassConfig.h" 23 #include "llvm/CodeGen/TargetSubtargetInfo.h" 24 #include "llvm/InitializePasses.h" 25 #include "llvm/IR/BasicBlock.h" 26 #include "llvm/IR/Constant.h" 27 #include "llvm/IR/Constants.h" 28 #include "llvm/IR/DerivedTypes.h" 29 #include "llvm/IR/Function.h" 30 #include "llvm/IR/InstrTypes.h" 31 #include "llvm/IR/Instruction.h" 32 #include "llvm/IR/Instructions.h" 33 #include "llvm/IR/IntrinsicInst.h" 34 #include "llvm/IR/Intrinsics.h" 35 #include "llvm/IR/IntrinsicsARM.h" 36 #include "llvm/IR/IRBuilder.h" 37 #include "llvm/IR/PatternMatch.h" 38 #include "llvm/IR/Type.h" 39 #include "llvm/IR/Value.h" 40 #include "llvm/Pass.h" 41 #include "llvm/Support/Casting.h" 42 #include "llvm/Transforms/Utils/Local.h" 43 #include <algorithm> 44 #include <cassert> 45 46 using namespace llvm; 47 48 #define DEBUG_TYPE "arm-mve-gather-scatter-lowering" 49 50 cl::opt<bool> EnableMaskedGatherScatters( 51 "enable-arm-maskedgatscat", cl::Hidden, cl::init(true), 52 cl::desc("Enable the generation of masked gathers and scatters")); 53 54 namespace { 55 56 class MVEGatherScatterLowering : public FunctionPass { 57 public: 58 static char ID; // Pass identification, replacement for typeid 59 60 explicit MVEGatherScatterLowering() : FunctionPass(ID) { 61 initializeMVEGatherScatterLoweringPass(*PassRegistry::getPassRegistry()); 62 } 63 64 bool runOnFunction(Function &F) override; 65 66 StringRef getPassName() const override { 67 return "MVE gather/scatter lowering"; 68 } 69 70 void getAnalysisUsage(AnalysisUsage &AU) const override { 71 AU.setPreservesCFG(); 72 AU.addRequired<TargetPassConfig>(); 73 AU.addRequired<LoopInfoWrapperPass>(); 74 FunctionPass::getAnalysisUsage(AU); 75 } 76 77 private: 78 LoopInfo *LI = nullptr; 79 const DataLayout *DL; 80 81 // Check this is a valid gather with correct alignment 82 bool isLegalTypeAndAlignment(unsigned NumElements, unsigned ElemSize, 83 Align Alignment); 84 // Check whether Ptr is hidden behind a bitcast and look through it 85 void lookThroughBitcast(Value *&Ptr); 86 // Decompose a ptr into Base and Offsets, potentially using a GEP to return a 87 // scalar base and vector offsets, or else fallback to using a base of 0 and 88 // offset of Ptr where possible. 89 Value *decomposePtr(Value *Ptr, Value *&Offsets, int &Scale, 90 FixedVectorType *Ty, Type *MemoryTy, 91 IRBuilder<> &Builder); 92 // Check for a getelementptr and deduce base and offsets from it, on success 93 // returning the base directly and the offsets indirectly using the Offsets 94 // argument 95 Value *decomposeGEP(Value *&Offsets, FixedVectorType *Ty, 96 GetElementPtrInst *GEP, IRBuilder<> &Builder); 97 // Compute the scale of this gather/scatter instruction 98 int computeScale(unsigned GEPElemSize, unsigned MemoryElemSize); 99 // If the value is a constant, or derived from constants via additions 100 // and multilications, return its numeric value 101 Optional<int64_t> getIfConst(const Value *V); 102 // If Inst is an add instruction, check whether one summand is a 103 // constant. If so, scale this constant and return it together with 104 // the other summand. 105 std::pair<Value *, int64_t> getVarAndConst(Value *Inst, int TypeScale); 106 107 Instruction *lowerGather(IntrinsicInst *I); 108 // Create a gather from a base + vector of offsets 109 Instruction *tryCreateMaskedGatherOffset(IntrinsicInst *I, Value *Ptr, 110 Instruction *&Root, 111 IRBuilder<> &Builder); 112 // Create a gather from a vector of pointers 113 Instruction *tryCreateMaskedGatherBase(IntrinsicInst *I, Value *Ptr, 114 IRBuilder<> &Builder, 115 int64_t Increment = 0); 116 // Create an incrementing gather from a vector of pointers 117 Instruction *tryCreateMaskedGatherBaseWB(IntrinsicInst *I, Value *Ptr, 118 IRBuilder<> &Builder, 119 int64_t Increment = 0); 120 121 Instruction *lowerScatter(IntrinsicInst *I); 122 // Create a scatter to a base + vector of offsets 123 Instruction *tryCreateMaskedScatterOffset(IntrinsicInst *I, Value *Offsets, 124 IRBuilder<> &Builder); 125 // Create a scatter to a vector of pointers 126 Instruction *tryCreateMaskedScatterBase(IntrinsicInst *I, Value *Ptr, 127 IRBuilder<> &Builder, 128 int64_t Increment = 0); 129 // Create an incrementing scatter from a vector of pointers 130 Instruction *tryCreateMaskedScatterBaseWB(IntrinsicInst *I, Value *Ptr, 131 IRBuilder<> &Builder, 132 int64_t Increment = 0); 133 134 // QI gathers and scatters can increment their offsets on their own if 135 // the increment is a constant value (digit) 136 Instruction *tryCreateIncrementingGatScat(IntrinsicInst *I, Value *Ptr, 137 IRBuilder<> &Builder); 138 // QI gathers/scatters can increment their offsets on their own if the 139 // increment is a constant value (digit) - this creates a writeback QI 140 // gather/scatter 141 Instruction *tryCreateIncrementingWBGatScat(IntrinsicInst *I, Value *BasePtr, 142 Value *Ptr, unsigned TypeScale, 143 IRBuilder<> &Builder); 144 145 // Optimise the base and offsets of the given address 146 bool optimiseAddress(Value *Address, BasicBlock *BB, LoopInfo *LI); 147 // Try to fold consecutive geps together into one 148 Value *foldGEP(GetElementPtrInst *GEP, Value *&Offsets, IRBuilder<> &Builder); 149 // Check whether these offsets could be moved out of the loop they're in 150 bool optimiseOffsets(Value *Offsets, BasicBlock *BB, LoopInfo *LI); 151 // Pushes the given add out of the loop 152 void pushOutAdd(PHINode *&Phi, Value *OffsSecondOperand, unsigned StartIndex); 153 // Pushes the given mul or shl out of the loop 154 void pushOutMulShl(unsigned Opc, PHINode *&Phi, Value *IncrementPerRound, 155 Value *OffsSecondOperand, unsigned LoopIncrement, 156 IRBuilder<> &Builder); 157 }; 158 159 } // end anonymous namespace 160 161 char MVEGatherScatterLowering::ID = 0; 162 163 INITIALIZE_PASS(MVEGatherScatterLowering, DEBUG_TYPE, 164 "MVE gather/scattering lowering pass", false, false) 165 166 Pass *llvm::createMVEGatherScatterLoweringPass() { 167 return new MVEGatherScatterLowering(); 168 } 169 170 bool MVEGatherScatterLowering::isLegalTypeAndAlignment(unsigned NumElements, 171 unsigned ElemSize, 172 Align Alignment) { 173 if (((NumElements == 4 && 174 (ElemSize == 32 || ElemSize == 16 || ElemSize == 8)) || 175 (NumElements == 8 && (ElemSize == 16 || ElemSize == 8)) || 176 (NumElements == 16 && ElemSize == 8)) && 177 Alignment >= ElemSize / 8) 178 return true; 179 LLVM_DEBUG(dbgs() << "masked gathers/scatters: instruction does not have " 180 << "valid alignment or vector type \n"); 181 return false; 182 } 183 184 static bool checkOffsetSize(Value *Offsets, unsigned TargetElemCount) { 185 // Offsets that are not of type <N x i32> are sign extended by the 186 // getelementptr instruction, and MVE gathers/scatters treat the offset as 187 // unsigned. Thus, if the element size is smaller than 32, we can only allow 188 // positive offsets - i.e., the offsets are not allowed to be variables we 189 // can't look into. 190 // Additionally, <N x i32> offsets have to either originate from a zext of a 191 // vector with element types smaller or equal the type of the gather we're 192 // looking at, or consist of constants that we can check are small enough 193 // to fit into the gather type. 194 // Thus we check that 0 < value < 2^TargetElemSize. 195 unsigned TargetElemSize = 128 / TargetElemCount; 196 unsigned OffsetElemSize = cast<FixedVectorType>(Offsets->getType()) 197 ->getElementType() 198 ->getScalarSizeInBits(); 199 if (OffsetElemSize != TargetElemSize || OffsetElemSize != 32) { 200 Constant *ConstOff = dyn_cast<Constant>(Offsets); 201 if (!ConstOff) 202 return false; 203 int64_t TargetElemMaxSize = (1ULL << TargetElemSize); 204 auto CheckValueSize = [TargetElemMaxSize](Value *OffsetElem) { 205 ConstantInt *OConst = dyn_cast<ConstantInt>(OffsetElem); 206 if (!OConst) 207 return false; 208 int SExtValue = OConst->getSExtValue(); 209 if (SExtValue >= TargetElemMaxSize || SExtValue < 0) 210 return false; 211 return true; 212 }; 213 if (isa<FixedVectorType>(ConstOff->getType())) { 214 for (unsigned i = 0; i < TargetElemCount; i++) { 215 if (!CheckValueSize(ConstOff->getAggregateElement(i))) 216 return false; 217 } 218 } else { 219 if (!CheckValueSize(ConstOff)) 220 return false; 221 } 222 } 223 return true; 224 } 225 226 Value *MVEGatherScatterLowering::decomposePtr(Value *Ptr, Value *&Offsets, 227 int &Scale, FixedVectorType *Ty, 228 Type *MemoryTy, 229 IRBuilder<> &Builder) { 230 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 231 if (Value *V = decomposeGEP(Offsets, Ty, GEP, Builder)) { 232 Scale = 233 computeScale(GEP->getSourceElementType()->getPrimitiveSizeInBits(), 234 MemoryTy->getScalarSizeInBits()); 235 return Scale == -1 ? nullptr : V; 236 } 237 } 238 239 // If we couldn't use the GEP (or it doesn't exist), attempt to use a 240 // BasePtr of 0 with Ptr as the Offsets, so long as there are only 4 241 // elements. 242 FixedVectorType *PtrTy = cast<FixedVectorType>(Ptr->getType()); 243 if (PtrTy->getNumElements() != 4 || MemoryTy->getScalarSizeInBits() == 32) 244 return nullptr; 245 Value *Zero = ConstantInt::get(Builder.getInt32Ty(), 0); 246 Value *BasePtr = Builder.CreateIntToPtr(Zero, Builder.getInt8PtrTy()); 247 Offsets = Builder.CreatePtrToInt( 248 Ptr, FixedVectorType::get(Builder.getInt32Ty(), 4)); 249 Scale = 0; 250 return BasePtr; 251 } 252 253 Value *MVEGatherScatterLowering::decomposeGEP(Value *&Offsets, 254 FixedVectorType *Ty, 255 GetElementPtrInst *GEP, 256 IRBuilder<> &Builder) { 257 if (!GEP) { 258 LLVM_DEBUG(dbgs() << "masked gathers/scatters: no getelementpointer " 259 << "found\n"); 260 return nullptr; 261 } 262 LLVM_DEBUG(dbgs() << "masked gathers/scatters: getelementpointer found." 263 << " Looking at intrinsic for base + vector of offsets\n"); 264 Value *GEPPtr = GEP->getPointerOperand(); 265 Offsets = GEP->getOperand(1); 266 if (GEPPtr->getType()->isVectorTy() || 267 !isa<FixedVectorType>(Offsets->getType())) 268 return nullptr; 269 270 if (GEP->getNumOperands() != 2) { 271 LLVM_DEBUG(dbgs() << "masked gathers/scatters: getelementptr with too many" 272 << " operands. Expanding.\n"); 273 return nullptr; 274 } 275 Offsets = GEP->getOperand(1); 276 unsigned OffsetsElemCount = 277 cast<FixedVectorType>(Offsets->getType())->getNumElements(); 278 // Paranoid check whether the number of parallel lanes is the same 279 assert(Ty->getNumElements() == OffsetsElemCount); 280 281 ZExtInst *ZextOffs = dyn_cast<ZExtInst>(Offsets); 282 if (ZextOffs) 283 Offsets = ZextOffs->getOperand(0); 284 FixedVectorType *OffsetType = cast<FixedVectorType>(Offsets->getType()); 285 286 // If the offsets are already being zext-ed to <N x i32>, that relieves us of 287 // having to make sure that they won't overflow. 288 if (!ZextOffs || cast<FixedVectorType>(ZextOffs->getDestTy()) 289 ->getElementType() 290 ->getScalarSizeInBits() != 32) 291 if (!checkOffsetSize(Offsets, OffsetsElemCount)) 292 return nullptr; 293 294 // The offset sizes have been checked; if any truncating or zext-ing is 295 // required to fix them, do that now 296 if (Ty != Offsets->getType()) { 297 if ((Ty->getElementType()->getScalarSizeInBits() < 298 OffsetType->getElementType()->getScalarSizeInBits())) { 299 Offsets = Builder.CreateTrunc(Offsets, Ty); 300 } else { 301 Offsets = Builder.CreateZExt(Offsets, VectorType::getInteger(Ty)); 302 } 303 } 304 // If none of the checks failed, return the gep's base pointer 305 LLVM_DEBUG(dbgs() << "masked gathers/scatters: found correct offsets\n"); 306 return GEPPtr; 307 } 308 309 void MVEGatherScatterLowering::lookThroughBitcast(Value *&Ptr) { 310 // Look through bitcast instruction if #elements is the same 311 if (auto *BitCast = dyn_cast<BitCastInst>(Ptr)) { 312 auto *BCTy = cast<FixedVectorType>(BitCast->getType()); 313 auto *BCSrcTy = cast<FixedVectorType>(BitCast->getOperand(0)->getType()); 314 if (BCTy->getNumElements() == BCSrcTy->getNumElements()) { 315 LLVM_DEBUG(dbgs() << "masked gathers/scatters: looking through " 316 << "bitcast\n"); 317 Ptr = BitCast->getOperand(0); 318 } 319 } 320 } 321 322 int MVEGatherScatterLowering::computeScale(unsigned GEPElemSize, 323 unsigned MemoryElemSize) { 324 // This can be a 32bit load/store scaled by 4, a 16bit load/store scaled by 2, 325 // or a 8bit, 16bit or 32bit load/store scaled by 1 326 if (GEPElemSize == 32 && MemoryElemSize == 32) 327 return 2; 328 else if (GEPElemSize == 16 && MemoryElemSize == 16) 329 return 1; 330 else if (GEPElemSize == 8) 331 return 0; 332 LLVM_DEBUG(dbgs() << "masked gathers/scatters: incorrect scale. Can't " 333 << "create intrinsic\n"); 334 return -1; 335 } 336 337 Optional<int64_t> MVEGatherScatterLowering::getIfConst(const Value *V) { 338 const Constant *C = dyn_cast<Constant>(V); 339 if (C && C->getSplatValue()) 340 return Optional<int64_t>{C->getUniqueInteger().getSExtValue()}; 341 if (!isa<Instruction>(V)) 342 return Optional<int64_t>{}; 343 344 const Instruction *I = cast<Instruction>(V); 345 if (I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Or || 346 I->getOpcode() == Instruction::Mul || 347 I->getOpcode() == Instruction::Shl) { 348 Optional<int64_t> Op0 = getIfConst(I->getOperand(0)); 349 Optional<int64_t> Op1 = getIfConst(I->getOperand(1)); 350 if (!Op0 || !Op1) 351 return Optional<int64_t>{}; 352 if (I->getOpcode() == Instruction::Add) 353 return Optional<int64_t>{Op0.getValue() + Op1.getValue()}; 354 if (I->getOpcode() == Instruction::Mul) 355 return Optional<int64_t>{Op0.getValue() * Op1.getValue()}; 356 if (I->getOpcode() == Instruction::Shl) 357 return Optional<int64_t>{Op0.getValue() << Op1.getValue()}; 358 if (I->getOpcode() == Instruction::Or) 359 return Optional<int64_t>{Op0.getValue() | Op1.getValue()}; 360 } 361 return Optional<int64_t>{}; 362 } 363 364 // Return true if I is an Or instruction that is equivalent to an add, due to 365 // the operands having no common bits set. 366 static bool isAddLikeOr(Instruction *I, const DataLayout &DL) { 367 return I->getOpcode() == Instruction::Or && 368 haveNoCommonBitsSet(I->getOperand(0), I->getOperand(1), DL); 369 } 370 371 std::pair<Value *, int64_t> 372 MVEGatherScatterLowering::getVarAndConst(Value *Inst, int TypeScale) { 373 std::pair<Value *, int64_t> ReturnFalse = 374 std::pair<Value *, int64_t>(nullptr, 0); 375 // At this point, the instruction we're looking at must be an add or an 376 // add-like-or. 377 Instruction *Add = dyn_cast<Instruction>(Inst); 378 if (Add == nullptr || 379 (Add->getOpcode() != Instruction::Add && !isAddLikeOr(Add, *DL))) 380 return ReturnFalse; 381 382 Value *Summand; 383 Optional<int64_t> Const; 384 // Find out which operand the value that is increased is 385 if ((Const = getIfConst(Add->getOperand(0)))) 386 Summand = Add->getOperand(1); 387 else if ((Const = getIfConst(Add->getOperand(1)))) 388 Summand = Add->getOperand(0); 389 else 390 return ReturnFalse; 391 392 // Check that the constant is small enough for an incrementing gather 393 int64_t Immediate = Const.getValue() << TypeScale; 394 if (Immediate > 512 || Immediate < -512 || Immediate % 4 != 0) 395 return ReturnFalse; 396 397 return std::pair<Value *, int64_t>(Summand, Immediate); 398 } 399 400 Instruction *MVEGatherScatterLowering::lowerGather(IntrinsicInst *I) { 401 using namespace PatternMatch; 402 LLVM_DEBUG(dbgs() << "masked gathers: checking transform preconditions\n" 403 << *I << "\n"); 404 405 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) 406 // Attempt to turn the masked gather in I into a MVE intrinsic 407 // Potentially optimising the addressing modes as we do so. 408 auto *Ty = cast<FixedVectorType>(I->getType()); 409 Value *Ptr = I->getArgOperand(0); 410 Align Alignment = cast<ConstantInt>(I->getArgOperand(1))->getAlignValue(); 411 Value *Mask = I->getArgOperand(2); 412 Value *PassThru = I->getArgOperand(3); 413 414 if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(), 415 Alignment)) 416 return nullptr; 417 lookThroughBitcast(Ptr); 418 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); 419 420 IRBuilder<> Builder(I->getContext()); 421 Builder.SetInsertPoint(I); 422 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 423 424 Instruction *Root = I; 425 426 Instruction *Load = tryCreateIncrementingGatScat(I, Ptr, Builder); 427 if (!Load) 428 Load = tryCreateMaskedGatherOffset(I, Ptr, Root, Builder); 429 if (!Load) 430 Load = tryCreateMaskedGatherBase(I, Ptr, Builder); 431 if (!Load) 432 return nullptr; 433 434 if (!isa<UndefValue>(PassThru) && !match(PassThru, m_Zero())) { 435 LLVM_DEBUG(dbgs() << "masked gathers: found non-trivial passthru - " 436 << "creating select\n"); 437 Load = SelectInst::Create(Mask, Load, PassThru); 438 Builder.Insert(Load); 439 } 440 441 Root->replaceAllUsesWith(Load); 442 Root->eraseFromParent(); 443 if (Root != I) 444 // If this was an extending gather, we need to get rid of the sext/zext 445 // sext/zext as well as of the gather itself 446 I->eraseFromParent(); 447 448 LLVM_DEBUG(dbgs() << "masked gathers: successfully built masked gather\n" 449 << *Load << "\n"); 450 return Load; 451 } 452 453 Instruction *MVEGatherScatterLowering::tryCreateMaskedGatherBase( 454 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder, int64_t Increment) { 455 using namespace PatternMatch; 456 auto *Ty = cast<FixedVectorType>(I->getType()); 457 LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers\n"); 458 if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32) 459 // Can't build an intrinsic for this 460 return nullptr; 461 Value *Mask = I->getArgOperand(2); 462 if (match(Mask, m_One())) 463 return Builder.CreateIntrinsic(Intrinsic::arm_mve_vldr_gather_base, 464 {Ty, Ptr->getType()}, 465 {Ptr, Builder.getInt32(Increment)}); 466 else 467 return Builder.CreateIntrinsic( 468 Intrinsic::arm_mve_vldr_gather_base_predicated, 469 {Ty, Ptr->getType(), Mask->getType()}, 470 {Ptr, Builder.getInt32(Increment), Mask}); 471 } 472 473 Instruction *MVEGatherScatterLowering::tryCreateMaskedGatherBaseWB( 474 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder, int64_t Increment) { 475 using namespace PatternMatch; 476 auto *Ty = cast<FixedVectorType>(I->getType()); 477 LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers with " 478 << "writeback\n"); 479 if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32) 480 // Can't build an intrinsic for this 481 return nullptr; 482 Value *Mask = I->getArgOperand(2); 483 if (match(Mask, m_One())) 484 return Builder.CreateIntrinsic(Intrinsic::arm_mve_vldr_gather_base_wb, 485 {Ty, Ptr->getType()}, 486 {Ptr, Builder.getInt32(Increment)}); 487 else 488 return Builder.CreateIntrinsic( 489 Intrinsic::arm_mve_vldr_gather_base_wb_predicated, 490 {Ty, Ptr->getType(), Mask->getType()}, 491 {Ptr, Builder.getInt32(Increment), Mask}); 492 } 493 494 Instruction *MVEGatherScatterLowering::tryCreateMaskedGatherOffset( 495 IntrinsicInst *I, Value *Ptr, Instruction *&Root, IRBuilder<> &Builder) { 496 using namespace PatternMatch; 497 498 Type *MemoryTy = I->getType(); 499 Type *ResultTy = MemoryTy; 500 501 unsigned Unsigned = 1; 502 // The size of the gather was already checked in isLegalTypeAndAlignment; 503 // if it was not a full vector width an appropriate extend should follow. 504 auto *Extend = Root; 505 bool TruncResult = false; 506 if (MemoryTy->getPrimitiveSizeInBits() < 128) { 507 if (I->hasOneUse()) { 508 // If the gather has a single extend of the correct type, use an extending 509 // gather and replace the ext. In which case the correct root to replace 510 // is not the CallInst itself, but the instruction which extends it. 511 Instruction* User = cast<Instruction>(*I->users().begin()); 512 if (isa<SExtInst>(User) && 513 User->getType()->getPrimitiveSizeInBits() == 128) { 514 LLVM_DEBUG(dbgs() << "masked gathers: Incorporating extend: " 515 << *User << "\n"); 516 Extend = User; 517 ResultTy = User->getType(); 518 Unsigned = 0; 519 } else if (isa<ZExtInst>(User) && 520 User->getType()->getPrimitiveSizeInBits() == 128) { 521 LLVM_DEBUG(dbgs() << "masked gathers: Incorporating extend: " 522 << *ResultTy << "\n"); 523 Extend = User; 524 ResultTy = User->getType(); 525 } 526 } 527 528 // If an extend hasn't been found and the type is an integer, create an 529 // extending gather and truncate back to the original type. 530 if (ResultTy->getPrimitiveSizeInBits() < 128 && 531 ResultTy->isIntOrIntVectorTy()) { 532 ResultTy = ResultTy->getWithNewBitWidth( 533 128 / cast<FixedVectorType>(ResultTy)->getNumElements()); 534 TruncResult = true; 535 LLVM_DEBUG(dbgs() << "masked gathers: Small input type, truncing to: " 536 << *ResultTy << "\n"); 537 } 538 539 // The final size of the gather must be a full vector width 540 if (ResultTy->getPrimitiveSizeInBits() != 128) { 541 LLVM_DEBUG(dbgs() << "masked gathers: Extend needed but not provided " 542 "from the correct type. Expanding\n"); 543 return nullptr; 544 } 545 } 546 547 Value *Offsets; 548 int Scale; 549 Value *BasePtr = decomposePtr( 550 Ptr, Offsets, Scale, cast<FixedVectorType>(ResultTy), MemoryTy, Builder); 551 if (!BasePtr) 552 return nullptr; 553 554 Root = Extend; 555 Value *Mask = I->getArgOperand(2); 556 Instruction *Load = nullptr; 557 if (!match(Mask, m_One())) 558 Load = Builder.CreateIntrinsic( 559 Intrinsic::arm_mve_vldr_gather_offset_predicated, 560 {ResultTy, BasePtr->getType(), Offsets->getType(), Mask->getType()}, 561 {BasePtr, Offsets, Builder.getInt32(MemoryTy->getScalarSizeInBits()), 562 Builder.getInt32(Scale), Builder.getInt32(Unsigned), Mask}); 563 else 564 Load = Builder.CreateIntrinsic( 565 Intrinsic::arm_mve_vldr_gather_offset, 566 {ResultTy, BasePtr->getType(), Offsets->getType()}, 567 {BasePtr, Offsets, Builder.getInt32(MemoryTy->getScalarSizeInBits()), 568 Builder.getInt32(Scale), Builder.getInt32(Unsigned)}); 569 570 if (TruncResult) { 571 Load = TruncInst::Create(Instruction::Trunc, Load, MemoryTy); 572 Builder.Insert(Load); 573 } 574 return Load; 575 } 576 577 Instruction *MVEGatherScatterLowering::lowerScatter(IntrinsicInst *I) { 578 using namespace PatternMatch; 579 LLVM_DEBUG(dbgs() << "masked scatters: checking transform preconditions\n" 580 << *I << "\n"); 581 582 // @llvm.masked.scatter.*(data, ptrs, alignment, mask) 583 // Attempt to turn the masked scatter in I into a MVE intrinsic 584 // Potentially optimising the addressing modes as we do so. 585 Value *Input = I->getArgOperand(0); 586 Value *Ptr = I->getArgOperand(1); 587 Align Alignment = cast<ConstantInt>(I->getArgOperand(2))->getAlignValue(); 588 auto *Ty = cast<FixedVectorType>(Input->getType()); 589 590 if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(), 591 Alignment)) 592 return nullptr; 593 594 lookThroughBitcast(Ptr); 595 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); 596 597 IRBuilder<> Builder(I->getContext()); 598 Builder.SetInsertPoint(I); 599 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 600 601 Instruction *Store = tryCreateIncrementingGatScat(I, Ptr, Builder); 602 if (!Store) 603 Store = tryCreateMaskedScatterOffset(I, Ptr, Builder); 604 if (!Store) 605 Store = tryCreateMaskedScatterBase(I, Ptr, Builder); 606 if (!Store) 607 return nullptr; 608 609 LLVM_DEBUG(dbgs() << "masked scatters: successfully built masked scatter\n" 610 << *Store << "\n"); 611 I->eraseFromParent(); 612 return Store; 613 } 614 615 Instruction *MVEGatherScatterLowering::tryCreateMaskedScatterBase( 616 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder, int64_t Increment) { 617 using namespace PatternMatch; 618 Value *Input = I->getArgOperand(0); 619 auto *Ty = cast<FixedVectorType>(Input->getType()); 620 // Only QR variants allow truncating 621 if (!(Ty->getNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) { 622 // Can't build an intrinsic for this 623 return nullptr; 624 } 625 Value *Mask = I->getArgOperand(3); 626 // int_arm_mve_vstr_scatter_base(_predicated) addr, offset, data(, mask) 627 LLVM_DEBUG(dbgs() << "masked scatters: storing to a vector of pointers\n"); 628 if (match(Mask, m_One())) 629 return Builder.CreateIntrinsic(Intrinsic::arm_mve_vstr_scatter_base, 630 {Ptr->getType(), Input->getType()}, 631 {Ptr, Builder.getInt32(Increment), Input}); 632 else 633 return Builder.CreateIntrinsic( 634 Intrinsic::arm_mve_vstr_scatter_base_predicated, 635 {Ptr->getType(), Input->getType(), Mask->getType()}, 636 {Ptr, Builder.getInt32(Increment), Input, Mask}); 637 } 638 639 Instruction *MVEGatherScatterLowering::tryCreateMaskedScatterBaseWB( 640 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder, int64_t Increment) { 641 using namespace PatternMatch; 642 Value *Input = I->getArgOperand(0); 643 auto *Ty = cast<FixedVectorType>(Input->getType()); 644 LLVM_DEBUG(dbgs() << "masked scatters: storing to a vector of pointers " 645 << "with writeback\n"); 646 if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32) 647 // Can't build an intrinsic for this 648 return nullptr; 649 Value *Mask = I->getArgOperand(3); 650 if (match(Mask, m_One())) 651 return Builder.CreateIntrinsic(Intrinsic::arm_mve_vstr_scatter_base_wb, 652 {Ptr->getType(), Input->getType()}, 653 {Ptr, Builder.getInt32(Increment), Input}); 654 else 655 return Builder.CreateIntrinsic( 656 Intrinsic::arm_mve_vstr_scatter_base_wb_predicated, 657 {Ptr->getType(), Input->getType(), Mask->getType()}, 658 {Ptr, Builder.getInt32(Increment), Input, Mask}); 659 } 660 661 Instruction *MVEGatherScatterLowering::tryCreateMaskedScatterOffset( 662 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder) { 663 using namespace PatternMatch; 664 Value *Input = I->getArgOperand(0); 665 Value *Mask = I->getArgOperand(3); 666 Type *InputTy = Input->getType(); 667 Type *MemoryTy = InputTy; 668 669 LLVM_DEBUG(dbgs() << "masked scatters: getelementpointer found. Storing" 670 << " to base + vector of offsets\n"); 671 // If the input has been truncated, try to integrate that trunc into the 672 // scatter instruction (we don't care about alignment here) 673 if (TruncInst *Trunc = dyn_cast<TruncInst>(Input)) { 674 Value *PreTrunc = Trunc->getOperand(0); 675 Type *PreTruncTy = PreTrunc->getType(); 676 if (PreTruncTy->getPrimitiveSizeInBits() == 128) { 677 Input = PreTrunc; 678 InputTy = PreTruncTy; 679 } 680 } 681 bool ExtendInput = false; 682 if (InputTy->getPrimitiveSizeInBits() < 128 && 683 InputTy->isIntOrIntVectorTy()) { 684 // If we can't find a trunc to incorporate into the instruction, create an 685 // implicit one with a zext, so that we can still create a scatter. We know 686 // that the input type is 4x/8x/16x and of type i8/i16/i32, so any type 687 // smaller than 128 bits will divide evenly into a 128bit vector. 688 InputTy = InputTy->getWithNewBitWidth( 689 128 / cast<FixedVectorType>(InputTy)->getNumElements()); 690 ExtendInput = true; 691 LLVM_DEBUG(dbgs() << "masked scatters: Small input type, will extend:\n" 692 << *Input << "\n"); 693 } 694 if (InputTy->getPrimitiveSizeInBits() != 128) { 695 LLVM_DEBUG(dbgs() << "masked scatters: cannot create scatters for " 696 "non-standard input types. Expanding.\n"); 697 return nullptr; 698 } 699 700 Value *Offsets; 701 int Scale; 702 Value *BasePtr = decomposePtr( 703 Ptr, Offsets, Scale, cast<FixedVectorType>(InputTy), MemoryTy, Builder); 704 if (!BasePtr) 705 return nullptr; 706 707 if (ExtendInput) 708 Input = Builder.CreateZExt(Input, InputTy); 709 if (!match(Mask, m_One())) 710 return Builder.CreateIntrinsic( 711 Intrinsic::arm_mve_vstr_scatter_offset_predicated, 712 {BasePtr->getType(), Offsets->getType(), Input->getType(), 713 Mask->getType()}, 714 {BasePtr, Offsets, Input, 715 Builder.getInt32(MemoryTy->getScalarSizeInBits()), 716 Builder.getInt32(Scale), Mask}); 717 else 718 return Builder.CreateIntrinsic( 719 Intrinsic::arm_mve_vstr_scatter_offset, 720 {BasePtr->getType(), Offsets->getType(), Input->getType()}, 721 {BasePtr, Offsets, Input, 722 Builder.getInt32(MemoryTy->getScalarSizeInBits()), 723 Builder.getInt32(Scale)}); 724 } 725 726 Instruction *MVEGatherScatterLowering::tryCreateIncrementingGatScat( 727 IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder) { 728 FixedVectorType *Ty; 729 if (I->getIntrinsicID() == Intrinsic::masked_gather) 730 Ty = cast<FixedVectorType>(I->getType()); 731 else 732 Ty = cast<FixedVectorType>(I->getArgOperand(0)->getType()); 733 734 // Incrementing gathers only exist for v4i32 735 if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32) 736 return nullptr; 737 // Incrementing gathers are not beneficial outside of a loop 738 Loop *L = LI->getLoopFor(I->getParent()); 739 if (L == nullptr) 740 return nullptr; 741 742 // Decompose the GEP into Base and Offsets 743 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 744 Value *Offsets; 745 Value *BasePtr = decomposeGEP(Offsets, Ty, GEP, Builder); 746 if (!BasePtr) 747 return nullptr; 748 749 LLVM_DEBUG(dbgs() << "masked gathers/scatters: trying to build incrementing " 750 "wb gather/scatter\n"); 751 752 // The gep was in charge of making sure the offsets are scaled correctly 753 // - calculate that factor so it can be applied by hand 754 int TypeScale = 755 computeScale(DL->getTypeSizeInBits(GEP->getOperand(0)->getType()), 756 DL->getTypeSizeInBits(GEP->getType()) / 757 cast<FixedVectorType>(GEP->getType())->getNumElements()); 758 if (TypeScale == -1) 759 return nullptr; 760 761 if (GEP->hasOneUse()) { 762 // Only in this case do we want to build a wb gather, because the wb will 763 // change the phi which does affect other users of the gep (which will still 764 // be using the phi in the old way) 765 if (auto *Load = tryCreateIncrementingWBGatScat(I, BasePtr, Offsets, 766 TypeScale, Builder)) 767 return Load; 768 } 769 770 LLVM_DEBUG(dbgs() << "masked gathers/scatters: trying to build incrementing " 771 "non-wb gather/scatter\n"); 772 773 std::pair<Value *, int64_t> Add = getVarAndConst(Offsets, TypeScale); 774 if (Add.first == nullptr) 775 return nullptr; 776 Value *OffsetsIncoming = Add.first; 777 int64_t Immediate = Add.second; 778 779 // Make sure the offsets are scaled correctly 780 Instruction *ScaledOffsets = BinaryOperator::Create( 781 Instruction::Shl, OffsetsIncoming, 782 Builder.CreateVectorSplat(Ty->getNumElements(), Builder.getInt32(TypeScale)), 783 "ScaledIndex", I); 784 // Add the base to the offsets 785 OffsetsIncoming = BinaryOperator::Create( 786 Instruction::Add, ScaledOffsets, 787 Builder.CreateVectorSplat( 788 Ty->getNumElements(), 789 Builder.CreatePtrToInt( 790 BasePtr, 791 cast<VectorType>(ScaledOffsets->getType())->getElementType())), 792 "StartIndex", I); 793 794 if (I->getIntrinsicID() == Intrinsic::masked_gather) 795 return tryCreateMaskedGatherBase(I, OffsetsIncoming, Builder, Immediate); 796 else 797 return tryCreateMaskedScatterBase(I, OffsetsIncoming, Builder, Immediate); 798 } 799 800 Instruction *MVEGatherScatterLowering::tryCreateIncrementingWBGatScat( 801 IntrinsicInst *I, Value *BasePtr, Value *Offsets, unsigned TypeScale, 802 IRBuilder<> &Builder) { 803 // Check whether this gather's offset is incremented by a constant - if so, 804 // and the load is of the right type, we can merge this into a QI gather 805 Loop *L = LI->getLoopFor(I->getParent()); 806 // Offsets that are worth merging into this instruction will be incremented 807 // by a constant, thus we're looking for an add of a phi and a constant 808 PHINode *Phi = dyn_cast<PHINode>(Offsets); 809 if (Phi == nullptr || Phi->getNumIncomingValues() != 2 || 810 Phi->getParent() != L->getHeader() || Phi->getNumUses() != 2) 811 // No phi means no IV to write back to; if there is a phi, we expect it 812 // to have exactly two incoming values; the only phis we are interested in 813 // will be loop IV's and have exactly two uses, one in their increment and 814 // one in the gather's gep 815 return nullptr; 816 817 unsigned IncrementIndex = 818 Phi->getIncomingBlock(0) == L->getLoopLatch() ? 0 : 1; 819 // Look through the phi to the phi increment 820 Offsets = Phi->getIncomingValue(IncrementIndex); 821 822 std::pair<Value *, int64_t> Add = getVarAndConst(Offsets, TypeScale); 823 if (Add.first == nullptr) 824 return nullptr; 825 Value *OffsetsIncoming = Add.first; 826 int64_t Immediate = Add.second; 827 if (OffsetsIncoming != Phi) 828 // Then the increment we are looking at is not an increment of the 829 // induction variable, and we don't want to do a writeback 830 return nullptr; 831 832 Builder.SetInsertPoint(&Phi->getIncomingBlock(1 - IncrementIndex)->back()); 833 unsigned NumElems = 834 cast<FixedVectorType>(OffsetsIncoming->getType())->getNumElements(); 835 836 // Make sure the offsets are scaled correctly 837 Instruction *ScaledOffsets = BinaryOperator::Create( 838 Instruction::Shl, Phi->getIncomingValue(1 - IncrementIndex), 839 Builder.CreateVectorSplat(NumElems, Builder.getInt32(TypeScale)), 840 "ScaledIndex", &Phi->getIncomingBlock(1 - IncrementIndex)->back()); 841 // Add the base to the offsets 842 OffsetsIncoming = BinaryOperator::Create( 843 Instruction::Add, ScaledOffsets, 844 Builder.CreateVectorSplat( 845 NumElems, 846 Builder.CreatePtrToInt( 847 BasePtr, 848 cast<VectorType>(ScaledOffsets->getType())->getElementType())), 849 "StartIndex", &Phi->getIncomingBlock(1 - IncrementIndex)->back()); 850 // The gather is pre-incrementing 851 OffsetsIncoming = BinaryOperator::Create( 852 Instruction::Sub, OffsetsIncoming, 853 Builder.CreateVectorSplat(NumElems, Builder.getInt32(Immediate)), 854 "PreIncrementStartIndex", 855 &Phi->getIncomingBlock(1 - IncrementIndex)->back()); 856 Phi->setIncomingValue(1 - IncrementIndex, OffsetsIncoming); 857 858 Builder.SetInsertPoint(I); 859 860 Instruction *EndResult; 861 Instruction *NewInduction; 862 if (I->getIntrinsicID() == Intrinsic::masked_gather) { 863 // Build the incrementing gather 864 Value *Load = tryCreateMaskedGatherBaseWB(I, Phi, Builder, Immediate); 865 // One value to be handed to whoever uses the gather, one is the loop 866 // increment 867 EndResult = ExtractValueInst::Create(Load, 0, "Gather"); 868 NewInduction = ExtractValueInst::Create(Load, 1, "GatherIncrement"); 869 Builder.Insert(EndResult); 870 Builder.Insert(NewInduction); 871 } else { 872 // Build the incrementing scatter 873 EndResult = NewInduction = 874 tryCreateMaskedScatterBaseWB(I, Phi, Builder, Immediate); 875 } 876 Instruction *AddInst = cast<Instruction>(Offsets); 877 AddInst->replaceAllUsesWith(NewInduction); 878 AddInst->eraseFromParent(); 879 Phi->setIncomingValue(IncrementIndex, NewInduction); 880 881 return EndResult; 882 } 883 884 void MVEGatherScatterLowering::pushOutAdd(PHINode *&Phi, 885 Value *OffsSecondOperand, 886 unsigned StartIndex) { 887 LLVM_DEBUG(dbgs() << "masked gathers/scatters: optimising add instruction\n"); 888 Instruction *InsertionPoint = 889 &cast<Instruction>(Phi->getIncomingBlock(StartIndex)->back()); 890 // Initialize the phi with a vector that contains a sum of the constants 891 Instruction *NewIndex = BinaryOperator::Create( 892 Instruction::Add, Phi->getIncomingValue(StartIndex), OffsSecondOperand, 893 "PushedOutAdd", InsertionPoint); 894 unsigned IncrementIndex = StartIndex == 0 ? 1 : 0; 895 896 // Order such that start index comes first (this reduces mov's) 897 Phi->addIncoming(NewIndex, Phi->getIncomingBlock(StartIndex)); 898 Phi->addIncoming(Phi->getIncomingValue(IncrementIndex), 899 Phi->getIncomingBlock(IncrementIndex)); 900 Phi->removeIncomingValue(IncrementIndex); 901 Phi->removeIncomingValue(StartIndex); 902 } 903 904 void MVEGatherScatterLowering::pushOutMulShl(unsigned Opcode, PHINode *&Phi, 905 Value *IncrementPerRound, 906 Value *OffsSecondOperand, 907 unsigned LoopIncrement, 908 IRBuilder<> &Builder) { 909 LLVM_DEBUG(dbgs() << "masked gathers/scatters: optimising mul instruction\n"); 910 911 // Create a new scalar add outside of the loop and transform it to a splat 912 // by which loop variable can be incremented 913 Instruction *InsertionPoint = &cast<Instruction>( 914 Phi->getIncomingBlock(LoopIncrement == 1 ? 0 : 1)->back()); 915 916 // Create a new index 917 Value *StartIndex = 918 BinaryOperator::Create((Instruction::BinaryOps)Opcode, 919 Phi->getIncomingValue(LoopIncrement == 1 ? 0 : 1), 920 OffsSecondOperand, "PushedOutMul", InsertionPoint); 921 922 Instruction *Product = 923 BinaryOperator::Create((Instruction::BinaryOps)Opcode, IncrementPerRound, 924 OffsSecondOperand, "Product", InsertionPoint); 925 // Increment NewIndex by Product instead of the multiplication 926 Instruction *NewIncrement = BinaryOperator::Create( 927 Instruction::Add, Phi, Product, "IncrementPushedOutMul", 928 cast<Instruction>(Phi->getIncomingBlock(LoopIncrement)->back()) 929 .getPrevNode()); 930 931 Phi->addIncoming(StartIndex, 932 Phi->getIncomingBlock(LoopIncrement == 1 ? 0 : 1)); 933 Phi->addIncoming(NewIncrement, Phi->getIncomingBlock(LoopIncrement)); 934 Phi->removeIncomingValue((unsigned)0); 935 Phi->removeIncomingValue((unsigned)0); 936 } 937 938 // Check whether all usages of this instruction are as offsets of 939 // gathers/scatters or simple arithmetics only used by gathers/scatters 940 static bool hasAllGatScatUsers(Instruction *I, const DataLayout &DL) { 941 if (I->hasNUses(0)) { 942 return false; 943 } 944 bool Gatscat = true; 945 for (User *U : I->users()) { 946 if (!isa<Instruction>(U)) 947 return false; 948 if (isa<GetElementPtrInst>(U) || 949 isGatherScatter(dyn_cast<IntrinsicInst>(U))) { 950 return Gatscat; 951 } else { 952 unsigned OpCode = cast<Instruction>(U)->getOpcode(); 953 if ((OpCode == Instruction::Add || OpCode == Instruction::Mul || 954 OpCode == Instruction::Shl || 955 isAddLikeOr(cast<Instruction>(U), DL)) && 956 hasAllGatScatUsers(cast<Instruction>(U), DL)) { 957 continue; 958 } 959 return false; 960 } 961 } 962 return Gatscat; 963 } 964 965 bool MVEGatherScatterLowering::optimiseOffsets(Value *Offsets, BasicBlock *BB, 966 LoopInfo *LI) { 967 LLVM_DEBUG(dbgs() << "masked gathers/scatters: trying to optimize\n" 968 << *Offsets << "\n"); 969 // Optimise the addresses of gathers/scatters by moving invariant 970 // calculations out of the loop 971 if (!isa<Instruction>(Offsets)) 972 return false; 973 Instruction *Offs = cast<Instruction>(Offsets); 974 if (Offs->getOpcode() != Instruction::Add && !isAddLikeOr(Offs, *DL) && 975 Offs->getOpcode() != Instruction::Mul && 976 Offs->getOpcode() != Instruction::Shl) 977 return false; 978 Loop *L = LI->getLoopFor(BB); 979 if (L == nullptr) 980 return false; 981 if (!Offs->hasOneUse()) { 982 if (!hasAllGatScatUsers(Offs, *DL)) 983 return false; 984 } 985 986 // Find out which, if any, operand of the instruction 987 // is a phi node 988 PHINode *Phi; 989 int OffsSecondOp; 990 if (isa<PHINode>(Offs->getOperand(0))) { 991 Phi = cast<PHINode>(Offs->getOperand(0)); 992 OffsSecondOp = 1; 993 } else if (isa<PHINode>(Offs->getOperand(1))) { 994 Phi = cast<PHINode>(Offs->getOperand(1)); 995 OffsSecondOp = 0; 996 } else { 997 bool Changed = false; 998 if (isa<Instruction>(Offs->getOperand(0)) && 999 L->contains(cast<Instruction>(Offs->getOperand(0)))) 1000 Changed |= optimiseOffsets(Offs->getOperand(0), BB, LI); 1001 if (isa<Instruction>(Offs->getOperand(1)) && 1002 L->contains(cast<Instruction>(Offs->getOperand(1)))) 1003 Changed |= optimiseOffsets(Offs->getOperand(1), BB, LI); 1004 if (!Changed) 1005 return false; 1006 if (isa<PHINode>(Offs->getOperand(0))) { 1007 Phi = cast<PHINode>(Offs->getOperand(0)); 1008 OffsSecondOp = 1; 1009 } else if (isa<PHINode>(Offs->getOperand(1))) { 1010 Phi = cast<PHINode>(Offs->getOperand(1)); 1011 OffsSecondOp = 0; 1012 } else { 1013 return false; 1014 } 1015 } 1016 // A phi node we want to perform this function on should be from the 1017 // loop header. 1018 if (Phi->getParent() != L->getHeader()) 1019 return false; 1020 1021 // We're looking for a simple add recurrence. 1022 BinaryOperator *IncInstruction; 1023 Value *Start, *IncrementPerRound; 1024 if (!matchSimpleRecurrence(Phi, IncInstruction, Start, IncrementPerRound) || 1025 IncInstruction->getOpcode() != Instruction::Add) 1026 return false; 1027 1028 int IncrementingBlock = Phi->getIncomingValue(0) == IncInstruction ? 0 : 1; 1029 1030 // Get the value that is added to/multiplied with the phi 1031 Value *OffsSecondOperand = Offs->getOperand(OffsSecondOp); 1032 1033 if (IncrementPerRound->getType() != OffsSecondOperand->getType() || 1034 !L->isLoopInvariant(OffsSecondOperand)) 1035 // Something has gone wrong, abort 1036 return false; 1037 1038 // Only proceed if the increment per round is a constant or an instruction 1039 // which does not originate from within the loop 1040 if (!isa<Constant>(IncrementPerRound) && 1041 !(isa<Instruction>(IncrementPerRound) && 1042 !L->contains(cast<Instruction>(IncrementPerRound)))) 1043 return false; 1044 1045 // If the phi is not used by anything else, we can just adapt it when 1046 // replacing the instruction; if it is, we'll have to duplicate it 1047 PHINode *NewPhi; 1048 if (Phi->getNumUses() == 2) { 1049 // No other users -> reuse existing phi (One user is the instruction 1050 // we're looking at, the other is the phi increment) 1051 if (IncInstruction->getNumUses() != 1) { 1052 // If the incrementing instruction does have more users than 1053 // our phi, we need to copy it 1054 IncInstruction = BinaryOperator::Create( 1055 Instruction::BinaryOps(IncInstruction->getOpcode()), Phi, 1056 IncrementPerRound, "LoopIncrement", IncInstruction); 1057 Phi->setIncomingValue(IncrementingBlock, IncInstruction); 1058 } 1059 NewPhi = Phi; 1060 } else { 1061 // There are other users -> create a new phi 1062 NewPhi = PHINode::Create(Phi->getType(), 2, "NewPhi", Phi); 1063 // Copy the incoming values of the old phi 1064 NewPhi->addIncoming(Phi->getIncomingValue(IncrementingBlock == 1 ? 0 : 1), 1065 Phi->getIncomingBlock(IncrementingBlock == 1 ? 0 : 1)); 1066 IncInstruction = BinaryOperator::Create( 1067 Instruction::BinaryOps(IncInstruction->getOpcode()), NewPhi, 1068 IncrementPerRound, "LoopIncrement", IncInstruction); 1069 NewPhi->addIncoming(IncInstruction, 1070 Phi->getIncomingBlock(IncrementingBlock)); 1071 IncrementingBlock = 1; 1072 } 1073 1074 IRBuilder<> Builder(BB->getContext()); 1075 Builder.SetInsertPoint(Phi); 1076 Builder.SetCurrentDebugLocation(Offs->getDebugLoc()); 1077 1078 switch (Offs->getOpcode()) { 1079 case Instruction::Add: 1080 case Instruction::Or: 1081 pushOutAdd(NewPhi, OffsSecondOperand, IncrementingBlock == 1 ? 0 : 1); 1082 break; 1083 case Instruction::Mul: 1084 case Instruction::Shl: 1085 pushOutMulShl(Offs->getOpcode(), NewPhi, IncrementPerRound, 1086 OffsSecondOperand, IncrementingBlock, Builder); 1087 break; 1088 default: 1089 return false; 1090 } 1091 LLVM_DEBUG(dbgs() << "masked gathers/scatters: simplified loop variable " 1092 << "add/mul\n"); 1093 1094 // The instruction has now been "absorbed" into the phi value 1095 Offs->replaceAllUsesWith(NewPhi); 1096 if (Offs->hasNUses(0)) 1097 Offs->eraseFromParent(); 1098 // Clean up the old increment in case it's unused because we built a new 1099 // one 1100 if (IncInstruction->hasNUses(0)) 1101 IncInstruction->eraseFromParent(); 1102 1103 return true; 1104 } 1105 1106 static Value *CheckAndCreateOffsetAdd(Value *X, Value *Y, Value *GEP, 1107 IRBuilder<> &Builder) { 1108 // Splat the non-vector value to a vector of the given type - if the value is 1109 // a constant (and its value isn't too big), we can even use this opportunity 1110 // to scale it to the size of the vector elements 1111 auto FixSummands = [&Builder](FixedVectorType *&VT, Value *&NonVectorVal) { 1112 ConstantInt *Const; 1113 if ((Const = dyn_cast<ConstantInt>(NonVectorVal)) && 1114 VT->getElementType() != NonVectorVal->getType()) { 1115 unsigned TargetElemSize = VT->getElementType()->getPrimitiveSizeInBits(); 1116 uint64_t N = Const->getZExtValue(); 1117 if (N < (unsigned)(1 << (TargetElemSize - 1))) { 1118 NonVectorVal = Builder.CreateVectorSplat( 1119 VT->getNumElements(), Builder.getIntN(TargetElemSize, N)); 1120 return; 1121 } 1122 } 1123 NonVectorVal = 1124 Builder.CreateVectorSplat(VT->getNumElements(), NonVectorVal); 1125 }; 1126 1127 FixedVectorType *XElType = dyn_cast<FixedVectorType>(X->getType()); 1128 FixedVectorType *YElType = dyn_cast<FixedVectorType>(Y->getType()); 1129 // If one of X, Y is not a vector, we have to splat it in order 1130 // to add the two of them. 1131 if (XElType && !YElType) { 1132 FixSummands(XElType, Y); 1133 YElType = cast<FixedVectorType>(Y->getType()); 1134 } else if (YElType && !XElType) { 1135 FixSummands(YElType, X); 1136 XElType = cast<FixedVectorType>(X->getType()); 1137 } 1138 assert(XElType && YElType && "Unknown vector types"); 1139 // Check that the summands are of compatible types 1140 if (XElType != YElType) { 1141 LLVM_DEBUG(dbgs() << "masked gathers/scatters: incompatible gep offsets\n"); 1142 return nullptr; 1143 } 1144 1145 if (XElType->getElementType()->getScalarSizeInBits() != 32) { 1146 // Check that by adding the vectors we do not accidentally 1147 // create an overflow 1148 Constant *ConstX = dyn_cast<Constant>(X); 1149 Constant *ConstY = dyn_cast<Constant>(Y); 1150 if (!ConstX || !ConstY) 1151 return nullptr; 1152 unsigned TargetElemSize = 128 / XElType->getNumElements(); 1153 for (unsigned i = 0; i < XElType->getNumElements(); i++) { 1154 ConstantInt *ConstXEl = 1155 dyn_cast<ConstantInt>(ConstX->getAggregateElement(i)); 1156 ConstantInt *ConstYEl = 1157 dyn_cast<ConstantInt>(ConstY->getAggregateElement(i)); 1158 if (!ConstXEl || !ConstYEl || 1159 ConstXEl->getZExtValue() + ConstYEl->getZExtValue() >= 1160 (unsigned)(1 << (TargetElemSize - 1))) 1161 return nullptr; 1162 } 1163 } 1164 1165 Value *Add = Builder.CreateAdd(X, Y); 1166 1167 FixedVectorType *GEPType = cast<FixedVectorType>(GEP->getType()); 1168 if (checkOffsetSize(Add, GEPType->getNumElements())) 1169 return Add; 1170 else 1171 return nullptr; 1172 } 1173 1174 Value *MVEGatherScatterLowering::foldGEP(GetElementPtrInst *GEP, 1175 Value *&Offsets, 1176 IRBuilder<> &Builder) { 1177 Value *GEPPtr = GEP->getPointerOperand(); 1178 Offsets = GEP->getOperand(1); 1179 // We only merge geps with constant offsets, because only for those 1180 // we can make sure that we do not cause an overflow 1181 if (!isa<Constant>(Offsets)) 1182 return nullptr; 1183 GetElementPtrInst *BaseGEP; 1184 if ((BaseGEP = dyn_cast<GetElementPtrInst>(GEPPtr))) { 1185 // Merge the two geps into one 1186 Value *BaseBasePtr = foldGEP(BaseGEP, Offsets, Builder); 1187 if (!BaseBasePtr) 1188 return nullptr; 1189 Offsets = 1190 CheckAndCreateOffsetAdd(Offsets, GEP->getOperand(1), GEP, Builder); 1191 if (Offsets == nullptr) 1192 return nullptr; 1193 return BaseBasePtr; 1194 } 1195 return GEPPtr; 1196 } 1197 1198 bool MVEGatherScatterLowering::optimiseAddress(Value *Address, BasicBlock *BB, 1199 LoopInfo *LI) { 1200 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Address); 1201 if (!GEP) 1202 return false; 1203 bool Changed = false; 1204 if (GEP->hasOneUse() && isa<GetElementPtrInst>(GEP->getPointerOperand())) { 1205 IRBuilder<> Builder(GEP->getContext()); 1206 Builder.SetInsertPoint(GEP); 1207 Builder.SetCurrentDebugLocation(GEP->getDebugLoc()); 1208 Value *Offsets; 1209 Value *Base = foldGEP(GEP, Offsets, Builder); 1210 // We only want to merge the geps if there is a real chance that they can be 1211 // used by an MVE gather; thus the offset has to have the correct size 1212 // (always i32 if it is not of vector type) and the base has to be a 1213 // pointer. 1214 if (Offsets && Base && Base != GEP) { 1215 GetElementPtrInst *NewAddress = GetElementPtrInst::Create( 1216 GEP->getSourceElementType(), Base, Offsets, "gep.merged", GEP); 1217 GEP->replaceAllUsesWith(NewAddress); 1218 GEP = NewAddress; 1219 Changed = true; 1220 } 1221 } 1222 Changed |= optimiseOffsets(GEP->getOperand(1), GEP->getParent(), LI); 1223 return Changed; 1224 } 1225 1226 bool MVEGatherScatterLowering::runOnFunction(Function &F) { 1227 if (!EnableMaskedGatherScatters) 1228 return false; 1229 auto &TPC = getAnalysis<TargetPassConfig>(); 1230 auto &TM = TPC.getTM<TargetMachine>(); 1231 auto *ST = &TM.getSubtarget<ARMSubtarget>(F); 1232 if (!ST->hasMVEIntegerOps()) 1233 return false; 1234 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 1235 DL = &F.getParent()->getDataLayout(); 1236 SmallVector<IntrinsicInst *, 4> Gathers; 1237 SmallVector<IntrinsicInst *, 4> Scatters; 1238 1239 bool Changed = false; 1240 1241 for (BasicBlock &BB : F) { 1242 Changed |= SimplifyInstructionsInBlock(&BB); 1243 1244 for (Instruction &I : BB) { 1245 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I); 1246 if (II && II->getIntrinsicID() == Intrinsic::masked_gather && 1247 isa<FixedVectorType>(II->getType())) { 1248 Gathers.push_back(II); 1249 Changed |= optimiseAddress(II->getArgOperand(0), II->getParent(), LI); 1250 } else if (II && II->getIntrinsicID() == Intrinsic::masked_scatter && 1251 isa<FixedVectorType>(II->getArgOperand(0)->getType())) { 1252 Scatters.push_back(II); 1253 Changed |= optimiseAddress(II->getArgOperand(1), II->getParent(), LI); 1254 } 1255 } 1256 } 1257 for (unsigned i = 0; i < Gathers.size(); i++) { 1258 IntrinsicInst *I = Gathers[i]; 1259 Instruction *L = lowerGather(I); 1260 if (L == nullptr) 1261 continue; 1262 1263 // Get rid of any now dead instructions 1264 SimplifyInstructionsInBlock(L->getParent()); 1265 Changed = true; 1266 } 1267 1268 for (unsigned i = 0; i < Scatters.size(); i++) { 1269 IntrinsicInst *I = Scatters[i]; 1270 Instruction *S = lowerScatter(I); 1271 if (S == nullptr) 1272 continue; 1273 1274 // Get rid of any now dead instructions 1275 SimplifyInstructionsInBlock(S->getParent()); 1276 Changed = true; 1277 } 1278 return Changed; 1279 } 1280