//===- RISCVGatherScatterLowering.cpp - Gather/Scatter lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass custom lowers llvm.masked.gather and llvm.masked.scatter
// instructions to RISC-V intrinsics.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVTargetMachine.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "riscv-gather-scatter-lowering"

namespace {

class RISCVGatherScatterLowering : public FunctionPass {
  const RISCVSubtarget *ST = nullptr;
  const RISCVTargetLowering *TLI = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL = nullptr;

  SmallVector<WeakTrackingVH> MaybeDeadPHIs;

  // Cache of the BasePtr and Stride determined from this GEP. When a GEP is
  // used by multiple gathers/scatters, this allows us to reuse the scalar
  // instructions we created for the first gather/scatter for the others.
  DenseMap<GetElementPtrInst *, std::pair<Value *, Value *>> StridedAddrs;

public:
  static char ID; // Pass identification, replacement for typeid

  RISCVGatherScatterLowering() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

  StringRef getPassName() const override {
    return "RISC-V gather/scatter lowering";
  }

private:
  bool tryCreateStridedLoadStore(IntrinsicInst *II, Type *DataType, Value *Ptr,
                                 Value *AlignOp);

  std::pair<Value *, Value *> determineBaseAndStride(Instruction *Ptr,
                                                     IRBuilderBase &Builder);

  bool matchStridedRecurrence(Value *Index, Loop *L, Value *&Stride,
                              PHINode *&BasePtr, BinaryOperator *&Inc,
                              IRBuilderBase &Builder);
};

} // end anonymous namespace

char RISCVGatherScatterLowering::ID = 0;

INITIALIZE_PASS(RISCVGatherScatterLowering, DEBUG_TYPE,
                "RISC-V gather/scatter lowering pass", false, false)

FunctionPass *llvm::createRISCVGatherScatterLoweringPass() {
  return new RISCVGatherScatterLowering();
}

// Try to interpret the elements of the constant vector StartC as an arithmetic
// sequence. Returns the <start element, stride> pair of scalar constants on
// success, or <nullptr, nullptr> otherwise.
// TODO: Should we consider the mask when looking for a stride?
static std::pair<Value *, Value *> matchStridedConstant(Constant *StartC) {
  if (!isa<FixedVectorType>(StartC->getType()))
    return std::make_pair(nullptr, nullptr);

  unsigned NumElts = cast<FixedVectorType>(StartC->getType())->getNumElements();

  // Check that the start value is a strided constant.
  auto *StartVal =
      dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement((unsigned)0));
  if (!StartVal)
    return std::make_pair(nullptr, nullptr);
  APInt StrideVal(StartVal->getValue().getBitWidth(), 0);
  ConstantInt *Prev = StartVal;
  for (unsigned i = 1; i != NumElts; ++i) {
    auto *C = dyn_cast_or_null<ConstantInt>(StartC->getAggregateElement(i));
    if (!C)
      return std::make_pair(nullptr, nullptr);

    APInt LocalStride = C->getValue() - Prev->getValue();
    if (i == 1)
      StrideVal = LocalStride;
    else if (StrideVal != LocalStride)
      return std::make_pair(nullptr, nullptr);

    Prev = C;
  }

  Value *Stride = ConstantInt::get(StartVal->getType(), StrideVal);

  return std::make_pair(StartVal, Stride);
}

// Try to decompose the vector value Start into a scalar start value and a
// scalar stride, i.e. lane I of the vector equals start + I * stride. Any
// scalar arithmetic required is created with Builder.
static std::pair<Value *, Value *> matchStridedStart(Value *Start,
                                                     IRBuilderBase &Builder) {
  // Base case, start is a strided constant.
  auto *StartC = dyn_cast<Constant>(Start);
  if (StartC)
    return matchStridedConstant(StartC);

  // Base case, start is a stepvector.
  if (match(Start, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
    auto *Ty = Start->getType()->getScalarType();
    return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
  }

  // Not a constant; maybe it's a strided value combined with a splat via
  // add, mul, or shl.
  auto *BO = dyn_cast<BinaryOperator>(Start);
  if (!BO || (BO->getOpcode() != Instruction::Add &&
              BO->getOpcode() != Instruction::Shl &&
              BO->getOpcode() != Instruction::Mul))
    return std::make_pair(nullptr, nullptr);

  // Look for an operand that is splatted.
  unsigned OtherIndex = 0;
  Value *Splat = getSplatValue(BO->getOperand(1));
  if (!Splat && Instruction::isCommutative(BO->getOpcode())) {
    Splat = getSplatValue(BO->getOperand(0));
    OtherIndex = 1;
  }
  if (!Splat)
    return std::make_pair(nullptr, nullptr);

  Value *Stride;
  std::tie(Start, Stride) = matchStridedStart(BO->getOperand(OtherIndex),
                                              Builder);
  if (!Start)
    return std::make_pair(nullptr, nullptr);

  Builder.SetInsertPoint(BO);
  Builder.SetCurrentDebugLocation(DebugLoc());
  // Add the splat value to the start, or multiply/shift the start and stride
  // by the splat.
  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode");
  case Instruction::Add:
    Start = Builder.CreateAdd(Start, Splat);
    break;
  case Instruction::Mul:
    Start = Builder.CreateMul(Start, Splat);
    Stride = Builder.CreateMul(Stride, Splat);
    break;
  case Instruction::Shl:
    Start = Builder.CreateShl(Start, Splat);
    Stride = Builder.CreateShl(Stride, Splat);
    break;
  }

  return std::make_pair(Start, Stride);
}

// Recursively walk back through the use-def chain until we find a Phi with a
// strided start value. Build and update a scalar recurrence as we unwind the
// recursion. We also update the Stride as we unwind. Our goal is to move all
// of the arithmetic out of the loop.
bool RISCVGatherScatterLowering::matchStridedRecurrence(Value *Index, Loop *L,
                                                        Value *&Stride,
                                                        PHINode *&BasePtr,
                                                        BinaryOperator *&Inc,
                                                        IRBuilderBase &Builder) {
  // Our base case is a Phi.
  if (auto *Phi = dyn_cast<PHINode>(Index)) {
    // The phi node we want to rewrite must be in the loop header.
    if (Phi->getParent() != L->getHeader())
      return false;

    Value *Step, *Start;
    if (!matchSimpleRecurrence(Phi, Inc, Start, Step) ||
        Inc->getOpcode() != Instruction::Add)
      return false;
    assert(Phi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
    unsigned IncrementingBlock = Phi->getIncomingValue(0) == Inc ? 0 : 1;
    assert(Phi->getIncomingValue(IncrementingBlock) == Inc &&
           "Expected one operand of phi to be Inc");

    // Only proceed if the step is loop invariant.
    if (!L->isLoopInvariant(Step))
      return false;

    // Step should be a splat.
    Step = getSplatValue(Step);
    if (!Step)
      return false;

    std::tie(Start, Stride) = matchStridedStart(Start, Builder);
    if (!Start)
      return false;
    assert(Stride != nullptr);

    // Build scalar phi and increment.
    BasePtr =
        PHINode::Create(Start->getType(), 2, Phi->getName() + ".scalar", Phi);
    Inc = BinaryOperator::CreateAdd(BasePtr, Step, Inc->getName() + ".scalar",
                                    Inc);
    BasePtr->addIncoming(Start, Phi->getIncomingBlock(1 - IncrementingBlock));
    BasePtr->addIncoming(Inc, Phi->getIncomingBlock(IncrementingBlock));

    // Note that this Phi might be eligible for removal.
    MaybeDeadPHIs.push_back(Phi);
    return true;
  }

  // Otherwise look for a binary operator.
  auto *BO = dyn_cast<BinaryOperator>(Index);
  if (!BO)
    return false;

  switch (BO->getOpcode()) {
  default:
    return false;
  case Instruction::Or:
    // We need to be able to treat Or as Add.
    if (!haveNoCommonBitsSet(BO->getOperand(0), BO->getOperand(1), *DL))
      return false;
    break;
  case Instruction::Add:
    break;
  case Instruction::Shl:
    break;
  case Instruction::Mul:
    break;
  }

  // We should have one operand in the loop and one splat.
  Value *OtherOp;
  if (isa<Instruction>(BO->getOperand(0)) &&
      L->contains(cast<Instruction>(BO->getOperand(0)))) {
    Index = cast<Instruction>(BO->getOperand(0));
    OtherOp = BO->getOperand(1);
  } else if (isa<Instruction>(BO->getOperand(1)) &&
             L->contains(cast<Instruction>(BO->getOperand(1))) &&
             Instruction::isCommutative(BO->getOpcode())) {
    Index = cast<Instruction>(BO->getOperand(1));
    OtherOp = BO->getOperand(0);
  } else {
    return false;
  }

  // Make sure the other operand is loop invariant.
  if (!L->isLoopInvariant(OtherOp))
    return false;

  // Make sure we have a splat.
  Value *SplatOp = getSplatValue(OtherOp);
  if (!SplatOp)
    return false;

  // Recurse up the use-def chain.
  if (!matchStridedRecurrence(Index, L, Stride, BasePtr, Inc, Builder))
    return false;

  // Locate the Step and Start values from the recurrence.
  unsigned StepIndex = Inc->getOperand(0) == BasePtr ? 1 : 0;
  unsigned StartBlock = BasePtr->getOperand(0) == Inc ? 1 : 0;
  Value *Step = Inc->getOperand(StepIndex);
  Value *Start = BasePtr->getOperand(StartBlock);

  // We need to adjust the start value in the preheader.
  Builder.SetInsertPoint(
      BasePtr->getIncomingBlock(StartBlock)->getTerminator());
  Builder.SetCurrentDebugLocation(DebugLoc());

  switch (BO->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case Instruction::Add:
  case Instruction::Or: {
    // An add only affects the start value. It's ok to do this for Or because
    // we already checked that there are no common set bits.
    Start = Builder.CreateAdd(Start, SplatOp, "start");
    break;
  }
  case Instruction::Mul: {
    Start = Builder.CreateMul(Start, SplatOp, "start");
    Step = Builder.CreateMul(Step, SplatOp, "step");
    Stride = Builder.CreateMul(Stride, SplatOp, "stride");
    break;
  }
  case Instruction::Shl: {
    Start = Builder.CreateShl(Start, SplatOp, "start");
    Step = Builder.CreateShl(Step, SplatOp, "step");
    Stride = Builder.CreateShl(Stride, SplatOp, "stride");
    break;
  }
  }

  Inc->setOperand(StepIndex, Step);
  BasePtr->setIncomingValue(StartBlock, Start);
  return true;
}

// Given the pointer operand of a gather/scatter, try to rewrite the address
// computation as a scalar base pointer plus a scalar stride in bytes. Returns
// <nullptr, nullptr> if no strided form can be found.
std::pair<Value *, Value *>
RISCVGatherScatterLowering::determineBaseAndStride(Instruction *Ptr,
                                                   IRBuilderBase &Builder) {

  // A gather/scatter of a splat is a zero strided load/store.
  if (auto *BasePtr = getSplatValue(Ptr)) {
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    return std::make_pair(BasePtr, ConstantInt::get(IntPtrTy, 0));
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return std::make_pair(nullptr, nullptr);

  auto I = StridedAddrs.find(GEP);
  if (I != StridedAddrs.end())
    return I->second;

  SmallVector<Value *, 2> Ops(GEP->operands());

  // Base pointer needs to be a scalar.
  Value *ScalarBase = Ops[0];
  if (ScalarBase->getType()->isVectorTy()) {
    ScalarBase = getSplatValue(ScalarBase);
    if (!ScalarBase)
      return std::make_pair(nullptr, nullptr);
  }

  std::optional<unsigned> VecOperand;
  unsigned TypeScale = 0;

  // Look for a vector operand and scale.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    if (!Ops[i]->getType()->isVectorTy())
      continue;

    if (VecOperand)
      return std::make_pair(nullptr, nullptr);

    VecOperand = i;

    TypeSize TS = GTI.getSequentialElementStride(*DL);
    if (TS.isScalable())
      return std::make_pair(nullptr, nullptr);

    TypeScale = TS.getFixedValue();
  }

  // We need to find a vector index to simplify.
  if (!VecOperand)
    return std::make_pair(nullptr, nullptr);

  // We can't extract the stride if the arithmetic is done at a different size
  // than the pointer type. Adding the stride later may not wrap correctly.
  // Technically we could handle wider indices, but I don't expect that in
  // practice. Handle one special case here - constants. This simplifies
  // writing test cases.
  Value *VecIndex = Ops[*VecOperand];
  Type *VecIntPtrTy = DL->getIntPtrType(GEP->getType());
  if (VecIndex->getType() != VecIntPtrTy) {
    auto *VecIndexC = dyn_cast<Constant>(VecIndex);
    if (!VecIndexC)
      return std::make_pair(nullptr, nullptr);
    if (VecIndex->getType()->getScalarSizeInBits() >
        VecIntPtrTy->getScalarSizeInBits())
      VecIndex = ConstantFoldCastInstruction(Instruction::Trunc, VecIndexC,
                                             VecIntPtrTy);
    else
      VecIndex = ConstantFoldCastInstruction(Instruction::SExt, VecIndexC,
                                             VecIntPtrTy);
  }

  // Handle the non-recursive case. This is what we see if the vectorizer
  // decides to use a scalar IV + vid on demand instead of a vector IV.
  auto [Start, Stride] = matchStridedStart(VecIndex, Builder);
  if (Start) {
    assert(Stride);
    Builder.SetInsertPoint(GEP);

    // Replace the vector index with the scalar start and build a scalar GEP.
    Ops[*VecOperand] = Start;
    Type *SourceTy = GEP->getSourceElementType();
    Value *BasePtr =
        Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

    // Convert stride to pointer size if needed.
    Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
    assert(Stride->getType() == IntPtrTy && "Unexpected type");

    // Scale the stride by the size of the indexed type.
    if (TypeScale != 1)
      Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

    auto P = std::make_pair(BasePtr, Stride);
    StridedAddrs[GEP] = P;
    return P;
  }

  // Make sure we're in a loop that has a preheader and a single latch.
  Loop *L = LI->getLoopFor(GEP->getParent());
  if (!L || !L->getLoopPreheader() || !L->getLoopLatch())
    return std::make_pair(nullptr, nullptr);

  BinaryOperator *Inc;
  PHINode *BasePhi;
  if (!matchStridedRecurrence(VecIndex, L, Stride, BasePhi, Inc, Builder))
    return std::make_pair(nullptr, nullptr);

  assert(BasePhi->getNumIncomingValues() == 2 && "Expected 2 operand phi.");
  unsigned IncrementingBlock = BasePhi->getOperand(0) == Inc ? 0 : 1;
  assert(BasePhi->getIncomingValue(IncrementingBlock) == Inc &&
         "Expected one operand of phi to be Inc");

  Builder.SetInsertPoint(GEP);

  // Replace the vector index with the scalar phi and build a scalar GEP.
  Ops[*VecOperand] = BasePhi;
  Type *SourceTy = GEP->getSourceElementType();
  Value *BasePtr =
      Builder.CreateGEP(SourceTy, ScalarBase, ArrayRef(Ops).drop_front());

  // Final adjustments to stride should go in the start block.
  Builder.SetInsertPoint(
      BasePhi->getIncomingBlock(1 - IncrementingBlock)->getTerminator());

  // Convert stride to pointer size if needed.
  Type *IntPtrTy = DL->getIntPtrType(BasePtr->getType());
  assert(Stride->getType() == IntPtrTy && "Unexpected type");

  // Scale the stride by the size of the indexed type.
  if (TypeScale != 1)
    Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));

  auto P = std::make_pair(BasePtr, Stride);
  StridedAddrs[GEP] = P;
  return P;
}

// Try to rewrite the masked gather/scatter intrinsic II, which accesses memory
// through Ptr with the alignment given by AlignOp, into a RISC-V strided
// load/store intrinsic. Returns true if the rewrite succeeded.
bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II,
                                                           Type *DataType,
                                                           Value *Ptr,
                                                           Value *AlignOp) {
  // Make sure the operation will be supported by the backend.
  MaybeAlign MA = cast<ConstantInt>(AlignOp)->getMaybeAlignValue();
  EVT DataTypeVT = TLI->getValueType(*DL, DataType);
  if (!MA || !TLI->isLegalStridedLoadStore(DataTypeVT, *MA))
    return false;

  // FIXME: Let the backend type legalize by splitting/widening?
  if (!TLI->isTypeLegal(DataTypeVT))
    return false;

  // Pointer should be an instruction.
  auto *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI)
    return false;

  LLVMContext &Ctx = PtrI->getContext();
  IRBuilder<InstSimplifyFolder> Builder(Ctx, *DL);
  Builder.SetInsertPoint(PtrI);

  Value *BasePtr, *Stride;
  std::tie(BasePtr, Stride) = determineBaseAndStride(PtrI, Builder);
  if (!BasePtr)
    return false;
  assert(Stride != nullptr);

  Builder.SetInsertPoint(II);

  CallInst *Call;
  if (II->getIntrinsicID() == Intrinsic::masked_gather)
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_load,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(3), BasePtr, Stride, II->getArgOperand(2)});
  else
    Call = Builder.CreateIntrinsic(
        Intrinsic::riscv_masked_strided_store,
        {DataType, BasePtr->getType(), Stride->getType()},
        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3)});

  Call->takeName(II);
  II->replaceAllUsesWith(Call);
  II->eraseFromParent();

  if (PtrI->use_empty())
    RecursivelyDeleteTriviallyDeadInstructions(PtrI);

  return true;
}

bool RISCVGatherScatterLowering::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<RISCVTargetMachine>();
  ST = &TM.getSubtarget<RISCVSubtarget>(F);
  if (!ST->hasVInstructions() || !ST->useRVVForFixedLengthVectors())
    return false;

  TLI = ST->getTargetLowering();
  DL = &F.getParent()->getDataLayout();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  StridedAddrs.clear();

  SmallVector<IntrinsicInst *, 4> Gathers;
  SmallVector<IntrinsicInst *, 4> Scatters;

  bool Changed = false;

  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      if (II && II->getIntrinsicID() == Intrinsic::masked_gather) {
        Gathers.push_back(II);
      } else if (II && II->getIntrinsicID() == Intrinsic::masked_scatter) {
        Scatters.push_back(II);
      }
    }
  }

  // Rewrite gather/scatter to form strided load/store if possible.
  for (auto *II : Gathers)
    Changed |= tryCreateStridedLoadStore(
        II, II->getType(), II->getArgOperand(0), II->getArgOperand(1));
  for (auto *II : Scatters)
    Changed |=
        tryCreateStridedLoadStore(II, II->getArgOperand(0)->getType(),
                                  II->getArgOperand(1), II->getArgOperand(2));

  // Remove any dead phis.
  while (!MaybeDeadPHIs.empty()) {
    if (auto *Phi = dyn_cast_or_null<PHINode>(MaybeDeadPHIs.pop_back_val()))
      RecursivelyDeleteDeadPHINode(Phi);
  }

  return Changed;
}