1 //===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h" 9 #include "llvm/ADT/APFloat.h" 10 #include "llvm/ADT/STLExtras.h" 11 #include "llvm/ADT/SetVector.h" 12 #include "llvm/ADT/SmallBitVector.h" 13 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 14 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 15 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" 16 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" 17 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" 18 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 20 #include "llvm/CodeGen/GlobalISel/Utils.h" 21 #include "llvm/CodeGen/LowLevelTypeUtils.h" 22 #include "llvm/CodeGen/MachineBasicBlock.h" 23 #include "llvm/CodeGen/MachineDominators.h" 24 #include "llvm/CodeGen/MachineInstr.h" 25 #include "llvm/CodeGen/MachineMemOperand.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/RegisterBankInfo.h" 28 #include "llvm/CodeGen/TargetInstrInfo.h" 29 #include "llvm/CodeGen/TargetLowering.h" 30 #include "llvm/CodeGen/TargetOpcodes.h" 31 #include "llvm/IR/DataLayout.h" 32 #include "llvm/IR/InstrTypes.h" 33 #include "llvm/Support/Casting.h" 34 #include "llvm/Support/DivisionByConstantInfo.h" 35 #include "llvm/Support/MathExtras.h" 36 #include "llvm/Target/TargetMachine.h" 37 #include <cmath> 38 #include <optional> 39 #include <tuple> 40 41 #define DEBUG_TYPE "gi-combiner" 42 43 using namespace llvm; 44 using namespace MIPatternMatch; 45 46 // Option to allow testing of the combiner while no targets know about indexed 47 // addressing. 48 static cl::opt<bool> 49 ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), 50 cl::desc("Force all indexed operations to be " 51 "legal for the GlobalISel combiner")); 52 53 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer, 54 MachineIRBuilder &B, bool IsPreLegalize, 55 GISelKnownBits *KB, MachineDominatorTree *MDT, 56 const LegalizerInfo *LI) 57 : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB), 58 MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI), 59 RBI(Builder.getMF().getSubtarget().getRegBankInfo()), 60 TRI(Builder.getMF().getSubtarget().getRegisterInfo()) { 61 (void)this->KB; 62 } 63 64 const TargetLowering &CombinerHelper::getTargetLowering() const { 65 return *Builder.getMF().getSubtarget().getTargetLowering(); 66 } 67 68 /// \returns The little endian in-memory byte position of byte \p I in a 69 /// \p ByteWidth bytes wide type. 70 /// 71 /// E.g. Given a 4-byte type x, x[0] -> byte 0 72 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) { 73 assert(I < ByteWidth && "I must be in [0, ByteWidth)"); 74 return I; 75 } 76 77 /// Determines the LogBase2 value for a non-null input value using the 78 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V). 
79 static Register buildLogBase2(Register V, MachineIRBuilder &MIB) { 80 auto &MRI = *MIB.getMRI(); 81 LLT Ty = MRI.getType(V); 82 auto Ctlz = MIB.buildCTLZ(Ty, V); 83 auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1); 84 return MIB.buildSub(Ty, Base, Ctlz).getReg(0); 85 } 86 87 /// \returns The big endian in-memory byte position of byte \p I in a 88 /// \p ByteWidth bytes wide type. 89 /// 90 /// E.g. Given a 4-byte type x, x[0] -> byte 3 91 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) { 92 assert(I < ByteWidth && "I must be in [0, ByteWidth)"); 93 return ByteWidth - I - 1; 94 } 95 96 /// Given a map from byte offsets in memory to indices in a load/store, 97 /// determine if that map corresponds to a little or big endian byte pattern. 98 /// 99 /// \param MemOffset2Idx maps memory offsets to address offsets. 100 /// \param LowestIdx is the lowest index in \p MemOffset2Idx. 101 /// 102 /// \returns true if the map corresponds to a big endian byte pattern, false if 103 /// it corresponds to a little endian byte pattern, and std::nullopt otherwise. 104 /// 105 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns 106 /// are as follows: 107 /// 108 /// AddrOffset Little endian Big endian 109 /// 0 0 3 110 /// 1 1 2 111 /// 2 2 1 112 /// 3 3 0 113 static std::optional<bool> 114 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, 115 int64_t LowestIdx) { 116 // Need at least two byte positions to decide on endianness. 117 unsigned Width = MemOffset2Idx.size(); 118 if (Width < 2) 119 return std::nullopt; 120 bool BigEndian = true, LittleEndian = true; 121 for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) { 122 auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset); 123 if (MemOffsetAndIdx == MemOffset2Idx.end()) 124 return std::nullopt; 125 const int64_t Idx = MemOffsetAndIdx->second - LowestIdx; 126 assert(Idx >= 0 && "Expected non-negative byte offset?"); 127 LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset); 128 BigEndian &= Idx == bigEndianByteAt(Width, MemOffset); 129 if (!BigEndian && !LittleEndian) 130 return std::nullopt; 131 } 132 133 assert((BigEndian != LittleEndian) && 134 "Pattern cannot be both big and little endian!"); 135 return BigEndian; 136 } 137 138 bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; } 139 140 bool CombinerHelper::isLegal(const LegalityQuery &Query) const { 141 assert(LI && "Must have LegalizerInfo to query isLegal!"); 142 return LI->getAction(Query).Action == LegalizeActions::Legal; 143 } 144 145 bool CombinerHelper::isLegalOrBeforeLegalizer( 146 const LegalityQuery &Query) const { 147 return isPreLegalize() || isLegal(Query); 148 } 149 150 bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const { 151 if (!Ty.isVector()) 152 return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}}); 153 // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs. 
154 if (isPreLegalize()) 155 return true; 156 LLT EltTy = Ty.getElementType(); 157 return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) && 158 isLegal({TargetOpcode::G_CONSTANT, {EltTy}}); 159 } 160 161 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, 162 Register ToReg) const { 163 Observer.changingAllUsesOfReg(MRI, FromReg); 164 165 if (MRI.constrainRegAttrs(ToReg, FromReg)) 166 MRI.replaceRegWith(FromReg, ToReg); 167 else 168 Builder.buildCopy(ToReg, FromReg); 169 170 Observer.finishedChangingAllUsesOfReg(); 171 } 172 173 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI, 174 MachineOperand &FromRegOp, 175 Register ToReg) const { 176 assert(FromRegOp.getParent() && "Expected an operand in an MI"); 177 Observer.changingInstr(*FromRegOp.getParent()); 178 179 FromRegOp.setReg(ToReg); 180 181 Observer.changedInstr(*FromRegOp.getParent()); 182 } 183 184 void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI, 185 unsigned ToOpcode) const { 186 Observer.changingInstr(FromMI); 187 188 FromMI.setDesc(Builder.getTII().get(ToOpcode)); 189 190 Observer.changedInstr(FromMI); 191 } 192 193 const RegisterBank *CombinerHelper::getRegBank(Register Reg) const { 194 return RBI->getRegBank(Reg, MRI, *TRI); 195 } 196 197 void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) { 198 if (RegBank) 199 MRI.setRegBank(Reg, *RegBank); 200 } 201 202 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { 203 if (matchCombineCopy(MI)) { 204 applyCombineCopy(MI); 205 return true; 206 } 207 return false; 208 } 209 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { 210 if (MI.getOpcode() != TargetOpcode::COPY) 211 return false; 212 Register DstReg = MI.getOperand(0).getReg(); 213 Register SrcReg = MI.getOperand(1).getReg(); 214 return canReplaceReg(DstReg, SrcReg, MRI); 215 } 216 void CombinerHelper::applyCombineCopy(MachineInstr &MI) { 217 Register DstReg = MI.getOperand(0).getReg(); 218 Register SrcReg = MI.getOperand(1).getReg(); 219 MI.eraseFromParent(); 220 replaceRegWith(MRI, DstReg, SrcReg); 221 } 222 223 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) { 224 bool IsUndef = false; 225 SmallVector<Register, 4> Ops; 226 if (matchCombineConcatVectors(MI, IsUndef, Ops)) { 227 applyCombineConcatVectors(MI, IsUndef, Ops); 228 return true; 229 } 230 return false; 231 } 232 233 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef, 234 SmallVectorImpl<Register> &Ops) { 235 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && 236 "Invalid instruction"); 237 IsUndef = true; 238 MachineInstr *Undef = nullptr; 239 240 // Walk over all the operands of concat vectors and check if they are 241 // build_vector themselves or undef. 242 // Then collect their operands in Ops. 243 for (const MachineOperand &MO : MI.uses()) { 244 Register Reg = MO.getReg(); 245 MachineInstr *Def = MRI.getVRegDef(Reg); 246 assert(Def && "Operand not defined"); 247 switch (Def->getOpcode()) { 248 case TargetOpcode::G_BUILD_VECTOR: 249 IsUndef = false; 250 // Remember the operands of the build_vector to fold 251 // them into the yet-to-build flattened concat vectors. 252 for (const MachineOperand &BuildVecMO : Def->uses()) 253 Ops.push_back(BuildVecMO.getReg()); 254 break; 255 case TargetOpcode::G_IMPLICIT_DEF: { 256 LLT OpType = MRI.getType(Reg); 257 // Keep one undef value for all the undef operands. 
258 if (!Undef) { 259 Builder.setInsertPt(*MI.getParent(), MI); 260 Undef = Builder.buildUndef(OpType.getScalarType()); 261 } 262 assert(MRI.getType(Undef->getOperand(0).getReg()) == 263 OpType.getScalarType() && 264 "All undefs should have the same type"); 265 // Break the undef vector in as many scalar elements as needed 266 // for the flattening. 267 for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements(); 268 EltIdx != EltEnd; ++EltIdx) 269 Ops.push_back(Undef->getOperand(0).getReg()); 270 break; 271 } 272 default: 273 return false; 274 } 275 } 276 return true; 277 } 278 void CombinerHelper::applyCombineConcatVectors( 279 MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) { 280 // We determined that the concat_vectors can be flatten. 281 // Generate the flattened build_vector. 282 Register DstReg = MI.getOperand(0).getReg(); 283 Builder.setInsertPt(*MI.getParent(), MI); 284 Register NewDstReg = MRI.cloneVirtualRegister(DstReg); 285 286 // Note: IsUndef is sort of redundant. We could have determine it by 287 // checking that at all Ops are undef. Alternatively, we could have 288 // generate a build_vector of undefs and rely on another combine to 289 // clean that up. For now, given we already gather this information 290 // in tryCombineConcatVectors, just save compile time and issue the 291 // right thing. 292 if (IsUndef) 293 Builder.buildUndef(NewDstReg); 294 else 295 Builder.buildBuildVector(NewDstReg, Ops); 296 MI.eraseFromParent(); 297 replaceRegWith(MRI, DstReg, NewDstReg); 298 } 299 300 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) { 301 SmallVector<Register, 4> Ops; 302 if (matchCombineShuffleVector(MI, Ops)) { 303 applyCombineShuffleVector(MI, Ops); 304 return true; 305 } 306 return false; 307 } 308 309 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI, 310 SmallVectorImpl<Register> &Ops) { 311 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && 312 "Invalid instruction kind"); 313 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 314 Register Src1 = MI.getOperand(1).getReg(); 315 LLT SrcType = MRI.getType(Src1); 316 // As bizarre as it may look, shuffle vector can actually produce 317 // scalar! This is because at the IR level a <1 x ty> shuffle 318 // vector is perfectly valid. 319 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1; 320 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1; 321 322 // If the resulting vector is smaller than the size of the source 323 // vectors being concatenated, we won't be able to replace the 324 // shuffle vector into a concat_vectors. 325 // 326 // Note: We may still be able to produce a concat_vectors fed by 327 // extract_vector_elt and so on. It is less clear that would 328 // be better though, so don't bother for now. 329 // 330 // If the destination is a scalar, the size of the sources doesn't 331 // matter. we will lower the shuffle to a plain copy. This will 332 // work only if the source and destination have the same size. But 333 // that's covered by the next condition. 334 // 335 // TODO: If the size between the source and destination don't match 336 // we could still emit an extract vector element in that case. 337 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1) 338 return false; 339 340 // Check that the shuffle mask can be broken evenly between the 341 // different sources. 342 if (DstNumElts % SrcNumElts != 0) 343 return false; 344 345 // Mask length is a multiple of the source vector length. 
346 // Check if the shuffle is some kind of concatenation of the input 347 // vectors. 348 unsigned NumConcat = DstNumElts / SrcNumElts; 349 SmallVector<int, 8> ConcatSrcs(NumConcat, -1); 350 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); 351 for (unsigned i = 0; i != DstNumElts; ++i) { 352 int Idx = Mask[i]; 353 // Undef value. 354 if (Idx < 0) 355 continue; 356 // Ensure the indices in each SrcType sized piece are sequential and that 357 // the same source is used for the whole piece. 358 if ((Idx % SrcNumElts != (i % SrcNumElts)) || 359 (ConcatSrcs[i / SrcNumElts] >= 0 && 360 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) 361 return false; 362 // Remember which source this index came from. 363 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; 364 } 365 366 // The shuffle is concatenating multiple vectors together. 367 // Collect the different operands for that. 368 Register UndefReg; 369 Register Src2 = MI.getOperand(2).getReg(); 370 for (auto Src : ConcatSrcs) { 371 if (Src < 0) { 372 if (!UndefReg) { 373 Builder.setInsertPt(*MI.getParent(), MI); 374 UndefReg = Builder.buildUndef(SrcType).getReg(0); 375 } 376 Ops.push_back(UndefReg); 377 } else if (Src == 0) 378 Ops.push_back(Src1); 379 else 380 Ops.push_back(Src2); 381 } 382 return true; 383 } 384 385 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI, 386 const ArrayRef<Register> Ops) { 387 Register DstReg = MI.getOperand(0).getReg(); 388 Builder.setInsertPt(*MI.getParent(), MI); 389 Register NewDstReg = MRI.cloneVirtualRegister(DstReg); 390 391 if (Ops.size() == 1) 392 Builder.buildCopy(NewDstReg, Ops[0]); 393 else 394 Builder.buildMergeLikeInstr(NewDstReg, Ops); 395 396 MI.eraseFromParent(); 397 replaceRegWith(MRI, DstReg, NewDstReg); 398 } 399 400 bool CombinerHelper::matchShuffleToExtract(MachineInstr &MI) { 401 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && 402 "Invalid instruction kind"); 403 404 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); 405 return Mask.size() == 1; 406 } 407 408 void CombinerHelper::applyShuffleToExtract(MachineInstr &MI) { 409 Register DstReg = MI.getOperand(0).getReg(); 410 Builder.setInsertPt(*MI.getParent(), MI); 411 412 int I = MI.getOperand(3).getShuffleMask()[0]; 413 Register Src1 = MI.getOperand(1).getReg(); 414 LLT Src1Ty = MRI.getType(Src1); 415 int Src1NumElts = Src1Ty.isVector() ? Src1Ty.getNumElements() : 1; 416 Register SrcReg; 417 if (I >= Src1NumElts) { 418 SrcReg = MI.getOperand(2).getReg(); 419 I -= Src1NumElts; 420 } else if (I >= 0) 421 SrcReg = Src1; 422 423 if (I < 0) 424 Builder.buildUndef(DstReg); 425 else if (!MRI.getType(SrcReg).isVector()) 426 Builder.buildCopy(DstReg, SrcReg); 427 else 428 Builder.buildExtractVectorElementConstant(DstReg, SrcReg, I); 429 430 MI.eraseFromParent(); 431 } 432 433 namespace { 434 435 /// Select a preference between two uses. CurrentUse is the current preference 436 /// while *ForCandidate is attributes of the candidate under consideration. 437 PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI, 438 PreferredTuple &CurrentUse, 439 const LLT TyForCandidate, 440 unsigned OpcodeForCandidate, 441 MachineInstr *MIForCandidate) { 442 if (!CurrentUse.Ty.isValid()) { 443 if (CurrentUse.ExtendOpcode == OpcodeForCandidate || 444 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT) 445 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 446 return CurrentUse; 447 } 448 449 // We permit the extend to hoist through basic blocks but this is only 450 // sensible if the target has extending loads. 
If you end up lowering back 451 // into a load and extend during the legalizer then the end result is 452 // hoisting the extend up to the load. 453 454 // Prefer defined extensions to undefined extensions as these are more 455 // likely to reduce the number of instructions. 456 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT && 457 CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT) 458 return CurrentUse; 459 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT && 460 OpcodeForCandidate != TargetOpcode::G_ANYEXT) 461 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 462 463 // Prefer sign extensions to zero extensions as sign-extensions tend to be 464 // more expensive. Don't do this if the load is already a zero-extend load 465 // though, otherwise we'll rewrite a zero-extend load into a sign-extend 466 // later. 467 if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) { 468 if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT && 469 OpcodeForCandidate == TargetOpcode::G_ZEXT) 470 return CurrentUse; 471 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT && 472 OpcodeForCandidate == TargetOpcode::G_SEXT) 473 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 474 } 475 476 // This is potentially target specific. We've chosen the largest type 477 // because G_TRUNC is usually free. One potential catch with this is that 478 // some targets have a reduced number of larger registers than smaller 479 // registers and this choice potentially increases the live-range for the 480 // larger value. 481 if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) { 482 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 483 } 484 return CurrentUse; 485 } 486 487 /// Find a suitable place to insert some instructions and insert them. This 488 /// function accounts for special cases like inserting before a PHI node. 489 /// The current strategy for inserting before PHI's is to duplicate the 490 /// instructions for each predecessor. However, while that's ok for G_TRUNC 491 /// on most targets since it generally requires no code, other targets/cases may 492 /// want to try harder to find a dominating block. 493 static void InsertInsnsWithoutSideEffectsBeforeUse( 494 MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO, 495 std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator, 496 MachineOperand &UseMO)> 497 Inserter) { 498 MachineInstr &UseMI = *UseMO.getParent(); 499 500 MachineBasicBlock *InsertBB = UseMI.getParent(); 501 502 // If the use is a PHI then we want the predecessor block instead. 503 if (UseMI.isPHI()) { 504 MachineOperand *PredBB = std::next(&UseMO); 505 InsertBB = PredBB->getMBB(); 506 } 507 508 // If the block is the same block as the def then we want to insert just after 509 // the def instead of at the start of the block. 
510 if (InsertBB == DefMI.getParent()) { 511 MachineBasicBlock::iterator InsertPt = &DefMI; 512 Inserter(InsertBB, std::next(InsertPt), UseMO); 513 return; 514 } 515 516 // Otherwise we want the start of the BB 517 Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO); 518 } 519 } // end anonymous namespace 520 521 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) { 522 PreferredTuple Preferred; 523 if (matchCombineExtendingLoads(MI, Preferred)) { 524 applyCombineExtendingLoads(MI, Preferred); 525 return true; 526 } 527 return false; 528 } 529 530 static unsigned getExtLoadOpcForExtend(unsigned ExtOpc) { 531 unsigned CandidateLoadOpc; 532 switch (ExtOpc) { 533 case TargetOpcode::G_ANYEXT: 534 CandidateLoadOpc = TargetOpcode::G_LOAD; 535 break; 536 case TargetOpcode::G_SEXT: 537 CandidateLoadOpc = TargetOpcode::G_SEXTLOAD; 538 break; 539 case TargetOpcode::G_ZEXT: 540 CandidateLoadOpc = TargetOpcode::G_ZEXTLOAD; 541 break; 542 default: 543 llvm_unreachable("Unexpected extend opc"); 544 } 545 return CandidateLoadOpc; 546 } 547 548 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI, 549 PreferredTuple &Preferred) { 550 // We match the loads and follow the uses to the extend instead of matching 551 // the extends and following the def to the load. This is because the load 552 // must remain in the same position for correctness (unless we also add code 553 // to find a safe place to sink it) whereas the extend is freely movable. 554 // It also prevents us from duplicating the load for the volatile case or just 555 // for performance. 556 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI); 557 if (!LoadMI) 558 return false; 559 560 Register LoadReg = LoadMI->getDstReg(); 561 562 LLT LoadValueTy = MRI.getType(LoadReg); 563 if (!LoadValueTy.isScalar()) 564 return false; 565 566 // Most architectures are going to legalize <s8 loads into at least a 1 byte 567 // load, and the MMOs can only describe memory accesses in multiples of bytes. 568 // If we try to perform extload combining on those, we can end up with 569 // %a(s8) = extload %ptr (load 1 byte from %ptr) 570 // ... which is an illegal extload instruction. 571 if (LoadValueTy.getSizeInBits() < 8) 572 return false; 573 574 // For non power-of-2 types, they will very likely be legalized into multiple 575 // loads. Don't bother trying to match them into extending loads. 576 if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits())) 577 return false; 578 579 // Find the preferred type aside from the any-extends (unless it's the only 580 // one) and non-extending ops. We'll emit an extending load to that type and 581 // and emit a variant of (extend (trunc X)) for the others according to the 582 // relative type sizes. At the same time, pick an extend to use based on the 583 // extend involved in the chosen type. 584 unsigned PreferredOpcode = 585 isa<GLoad>(&MI) 586 ? TargetOpcode::G_ANYEXT 587 : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; 588 Preferred = {LLT(), PreferredOpcode, nullptr}; 589 for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) { 590 if (UseMI.getOpcode() == TargetOpcode::G_SEXT || 591 UseMI.getOpcode() == TargetOpcode::G_ZEXT || 592 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) { 593 const auto &MMO = LoadMI->getMMO(); 594 // For atomics, only form anyextending loads. 595 if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT) 596 continue; 597 // Check for legality. 
598 if (!isPreLegalize()) { 599 LegalityQuery::MemDesc MMDesc(MMO); 600 unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode()); 601 LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg()); 602 LLT SrcTy = MRI.getType(LoadMI->getPointerReg()); 603 if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}}) 604 .Action != LegalizeActions::Legal) 605 continue; 606 } 607 Preferred = ChoosePreferredUse(MI, Preferred, 608 MRI.getType(UseMI.getOperand(0).getReg()), 609 UseMI.getOpcode(), &UseMI); 610 } 611 } 612 613 // There were no extends 614 if (!Preferred.MI) 615 return false; 616 // It should be impossible to chose an extend without selecting a different 617 // type since by definition the result of an extend is larger. 618 assert(Preferred.Ty != LoadValueTy && "Extending to same type?"); 619 620 LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI); 621 return true; 622 } 623 624 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI, 625 PreferredTuple &Preferred) { 626 // Rewrite the load to the chosen extending load. 627 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg(); 628 629 // Inserter to insert a truncate back to the original type at a given point 630 // with some basic CSE to limit truncate duplication to one per BB. 631 DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns; 632 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB, 633 MachineBasicBlock::iterator InsertBefore, 634 MachineOperand &UseMO) { 635 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB); 636 if (PreviouslyEmitted) { 637 Observer.changingInstr(*UseMO.getParent()); 638 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg()); 639 Observer.changedInstr(*UseMO.getParent()); 640 return; 641 } 642 643 Builder.setInsertPt(*InsertIntoBB, InsertBefore); 644 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg()); 645 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg); 646 EmittedInsns[InsertIntoBB] = NewMI; 647 replaceRegOpWith(MRI, UseMO, NewDstReg); 648 }; 649 650 Observer.changingInstr(MI); 651 unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode); 652 MI.setDesc(Builder.getTII().get(LoadOpc)); 653 654 // Rewrite all the uses to fix up the types. 655 auto &LoadValue = MI.getOperand(0); 656 SmallVector<MachineOperand *, 4> Uses; 657 for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) 658 Uses.push_back(&UseMO); 659 660 for (auto *UseMO : Uses) { 661 MachineInstr *UseMI = UseMO->getParent(); 662 663 // If the extend is compatible with the preferred extend then we should fix 664 // up the type and extend so that it uses the preferred use. 665 if (UseMI->getOpcode() == Preferred.ExtendOpcode || 666 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) { 667 Register UseDstReg = UseMI->getOperand(0).getReg(); 668 MachineOperand &UseSrcMO = UseMI->getOperand(1); 669 const LLT UseDstTy = MRI.getType(UseDstReg); 670 if (UseDstReg != ChosenDstReg) { 671 if (Preferred.Ty == UseDstTy) { 672 // If the use has the same type as the preferred use, then merge 673 // the vregs and erase the extend. For example: 674 // %1:_(s8) = G_LOAD ... 675 // %2:_(s32) = G_SEXT %1(s8) 676 // %3:_(s32) = G_ANYEXT %1(s8) 677 // ... = ... %3(s32) 678 // rewrites to: 679 // %2:_(s32) = G_SEXTLOAD ... 680 // ... = ... 
%2(s32) 681 replaceRegWith(MRI, UseDstReg, ChosenDstReg); 682 Observer.erasingInstr(*UseMO->getParent()); 683 UseMO->getParent()->eraseFromParent(); 684 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) { 685 // If the preferred size is smaller, then keep the extend but extend 686 // from the result of the extending load. For example: 687 // %1:_(s8) = G_LOAD ... 688 // %2:_(s32) = G_SEXT %1(s8) 689 // %3:_(s64) = G_ANYEXT %1(s8) 690 // ... = ... %3(s64) 691 /// rewrites to: 692 // %2:_(s32) = G_SEXTLOAD ... 693 // %3:_(s64) = G_ANYEXT %2:_(s32) 694 // ... = ... %3(s64) 695 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg); 696 } else { 697 // If the preferred size is large, then insert a truncate. For 698 // example: 699 // %1:_(s8) = G_LOAD ... 700 // %2:_(s64) = G_SEXT %1(s8) 701 // %3:_(s32) = G_ZEXT %1(s8) 702 // ... = ... %3(s32) 703 /// rewrites to: 704 // %2:_(s64) = G_SEXTLOAD ... 705 // %4:_(s8) = G_TRUNC %2:_(s32) 706 // %3:_(s64) = G_ZEXT %2:_(s8) 707 // ... = ... %3(s64) 708 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, 709 InsertTruncAt); 710 } 711 continue; 712 } 713 // The use is (one of) the uses of the preferred use we chose earlier. 714 // We're going to update the load to def this value later so just erase 715 // the old extend. 716 Observer.erasingInstr(*UseMO->getParent()); 717 UseMO->getParent()->eraseFromParent(); 718 continue; 719 } 720 721 // The use isn't an extend. Truncate back to the type we originally loaded. 722 // This is free on many targets. 723 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt); 724 } 725 726 MI.getOperand(0).setReg(ChosenDstReg); 727 Observer.changedInstr(MI); 728 } 729 730 bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI, 731 BuildFnTy &MatchInfo) { 732 assert(MI.getOpcode() == TargetOpcode::G_AND); 733 734 // If we have the following code: 735 // %mask = G_CONSTANT 255 736 // %ld = G_LOAD %ptr, (load s16) 737 // %and = G_AND %ld, %mask 738 // 739 // Try to fold it into 740 // %ld = G_ZEXTLOAD %ptr, (load s8) 741 742 Register Dst = MI.getOperand(0).getReg(); 743 if (MRI.getType(Dst).isVector()) 744 return false; 745 746 auto MaybeMask = 747 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 748 if (!MaybeMask) 749 return false; 750 751 APInt MaskVal = MaybeMask->Value; 752 753 if (!MaskVal.isMask()) 754 return false; 755 756 Register SrcReg = MI.getOperand(1).getReg(); 757 // Don't use getOpcodeDef() here since intermediate instructions may have 758 // multiple users. 759 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg)); 760 if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg())) 761 return false; 762 763 Register LoadReg = LoadMI->getDstReg(); 764 LLT RegTy = MRI.getType(LoadReg); 765 Register PtrReg = LoadMI->getPointerReg(); 766 unsigned RegSize = RegTy.getSizeInBits(); 767 uint64_t LoadSizeBits = LoadMI->getMemSizeInBits(); 768 unsigned MaskSizeBits = MaskVal.countr_one(); 769 770 // The mask may not be larger than the in-memory type, as it might cover sign 771 // extended bits 772 if (MaskSizeBits > LoadSizeBits) 773 return false; 774 775 // If the mask covers the whole destination register, there's nothing to 776 // extend 777 if (MaskSizeBits >= RegSize) 778 return false; 779 780 // Most targets cannot deal with loads of size < 8 and need to re-legalize to 781 // at least byte loads. 
Avoid creating such loads here 782 if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits)) 783 return false; 784 785 const MachineMemOperand &MMO = LoadMI->getMMO(); 786 LegalityQuery::MemDesc MemDesc(MMO); 787 788 // Don't modify the memory access size if this is atomic/volatile, but we can 789 // still adjust the opcode to indicate the high bit behavior. 790 if (LoadMI->isSimple()) 791 MemDesc.MemoryTy = LLT::scalar(MaskSizeBits); 792 else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize) 793 return false; 794 795 // TODO: Could check if it's legal with the reduced or original memory size. 796 if (!isLegalOrBeforeLegalizer( 797 {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}})) 798 return false; 799 800 MatchInfo = [=](MachineIRBuilder &B) { 801 B.setInstrAndDebugLoc(*LoadMI); 802 auto &MF = B.getMF(); 803 auto PtrInfo = MMO.getPointerInfo(); 804 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy); 805 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO); 806 LoadMI->eraseFromParent(); 807 }; 808 return true; 809 } 810 811 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI, 812 const MachineInstr &UseMI) { 813 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && 814 "shouldn't consider debug uses"); 815 assert(DefMI.getParent() == UseMI.getParent()); 816 if (&DefMI == &UseMI) 817 return true; 818 const MachineBasicBlock &MBB = *DefMI.getParent(); 819 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) { 820 return &MI == &DefMI || &MI == &UseMI; 821 }); 822 if (DefOrUse == MBB.end()) 823 llvm_unreachable("Block must contain both DefMI and UseMI!"); 824 return &*DefOrUse == &DefMI; 825 } 826 827 bool CombinerHelper::dominates(const MachineInstr &DefMI, 828 const MachineInstr &UseMI) { 829 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && 830 "shouldn't consider debug uses"); 831 if (MDT) 832 return MDT->dominates(&DefMI, &UseMI); 833 else if (DefMI.getParent() != UseMI.getParent()) 834 return false; 835 836 return isPredecessor(DefMI, UseMI); 837 } 838 839 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) { 840 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 841 Register SrcReg = MI.getOperand(1).getReg(); 842 Register LoadUser = SrcReg; 843 844 if (MRI.getType(SrcReg).isVector()) 845 return false; 846 847 Register TruncSrc; 848 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) 849 LoadUser = TruncSrc; 850 851 uint64_t SizeInBits = MI.getOperand(2).getImm(); 852 // If the source is a G_SEXTLOAD from the same bit width, then we don't 853 // need any extend at all, just a truncate. 854 if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) { 855 // If truncating more than the original extended value, abort. 
856 auto LoadSizeBits = LoadMI->getMemSizeInBits(); 857 if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits) 858 return false; 859 if (LoadSizeBits == SizeInBits) 860 return true; 861 } 862 return false; 863 } 864 865 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) { 866 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 867 Builder.setInstrAndDebugLoc(MI); 868 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg()); 869 MI.eraseFromParent(); 870 } 871 872 bool CombinerHelper::matchSextInRegOfLoad( 873 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 874 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 875 876 Register DstReg = MI.getOperand(0).getReg(); 877 LLT RegTy = MRI.getType(DstReg); 878 879 // Only supports scalars for now. 880 if (RegTy.isVector()) 881 return false; 882 883 Register SrcReg = MI.getOperand(1).getReg(); 884 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI); 885 if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg)) 886 return false; 887 888 uint64_t MemBits = LoadDef->getMemSizeInBits(); 889 890 // If the sign extend extends from a narrower width than the load's width, 891 // then we can narrow the load width when we combine to a G_SEXTLOAD. 892 // Avoid widening the load at all. 893 unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits); 894 895 // Don't generate G_SEXTLOADs with a < 1 byte width. 896 if (NewSizeBits < 8) 897 return false; 898 // Don't bother creating a non-power-2 sextload, it will likely be broken up 899 // anyway for most targets. 900 if (!isPowerOf2_32(NewSizeBits)) 901 return false; 902 903 const MachineMemOperand &MMO = LoadDef->getMMO(); 904 LegalityQuery::MemDesc MMDesc(MMO); 905 906 // Don't modify the memory access size if this is atomic/volatile, but we can 907 // still adjust the opcode to indicate the high bit behavior. 908 if (LoadDef->isSimple()) 909 MMDesc.MemoryTy = LLT::scalar(NewSizeBits); 910 else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits()) 911 return false; 912 913 // TODO: Could check if it's legal with the reduced or original memory size. 
914 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD, 915 {MRI.getType(LoadDef->getDstReg()), 916 MRI.getType(LoadDef->getPointerReg())}, 917 {MMDesc}})) 918 return false; 919 920 MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits); 921 return true; 922 } 923 924 void CombinerHelper::applySextInRegOfLoad( 925 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 926 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 927 Register LoadReg; 928 unsigned ScalarSizeBits; 929 std::tie(LoadReg, ScalarSizeBits) = MatchInfo; 930 GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg)); 931 932 // If we have the following: 933 // %ld = G_LOAD %ptr, (load 2) 934 // %ext = G_SEXT_INREG %ld, 8 935 // ==> 936 // %ld = G_SEXTLOAD %ptr (load 1) 937 938 auto &MMO = LoadDef->getMMO(); 939 Builder.setInstrAndDebugLoc(*LoadDef); 940 auto &MF = Builder.getMF(); 941 auto PtrInfo = MMO.getPointerInfo(); 942 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8); 943 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(), 944 LoadDef->getPointerReg(), *NewMMO); 945 MI.eraseFromParent(); 946 } 947 948 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) { 949 if (Ty.isVector()) 950 return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()), 951 Ty.getNumElements()); 952 return IntegerType::get(C, Ty.getSizeInBits()); 953 } 954 955 /// Return true if 'MI' is a load or a store that may be fold it's address 956 /// operand into the load / store addressing mode. 957 static bool canFoldInAddressingMode(GLoadStore *MI, const TargetLowering &TLI, 958 MachineRegisterInfo &MRI) { 959 TargetLowering::AddrMode AM; 960 auto *MF = MI->getMF(); 961 auto *Addr = getOpcodeDef<GPtrAdd>(MI->getPointerReg(), MRI); 962 if (!Addr) 963 return false; 964 965 AM.HasBaseReg = true; 966 if (auto CstOff = getIConstantVRegVal(Addr->getOffsetReg(), MRI)) 967 AM.BaseOffs = CstOff->getSExtValue(); // [reg +/- imm] 968 else 969 AM.Scale = 1; // [reg +/- reg] 970 971 return TLI.isLegalAddressingMode( 972 MF->getDataLayout(), AM, 973 getTypeForLLT(MI->getMMO().getMemoryType(), 974 MF->getFunction().getContext()), 975 MI->getMMO().getAddrSpace()); 976 } 977 978 static unsigned getIndexedOpc(unsigned LdStOpc) { 979 switch (LdStOpc) { 980 case TargetOpcode::G_LOAD: 981 return TargetOpcode::G_INDEXED_LOAD; 982 case TargetOpcode::G_STORE: 983 return TargetOpcode::G_INDEXED_STORE; 984 case TargetOpcode::G_ZEXTLOAD: 985 return TargetOpcode::G_INDEXED_ZEXTLOAD; 986 case TargetOpcode::G_SEXTLOAD: 987 return TargetOpcode::G_INDEXED_SEXTLOAD; 988 default: 989 llvm_unreachable("Unexpected opcode"); 990 } 991 } 992 993 bool CombinerHelper::isIndexedLoadStoreLegal(GLoadStore &LdSt) const { 994 // Check for legality. 
995 LLT PtrTy = MRI.getType(LdSt.getPointerReg()); 996 LLT Ty = MRI.getType(LdSt.getReg(0)); 997 LLT MemTy = LdSt.getMMO().getMemoryType(); 998 SmallVector<LegalityQuery::MemDesc, 2> MemDescrs( 999 {{MemTy, MemTy.getSizeInBits(), AtomicOrdering::NotAtomic}}); 1000 unsigned IndexedOpc = getIndexedOpc(LdSt.getOpcode()); 1001 SmallVector<LLT> OpTys; 1002 if (IndexedOpc == TargetOpcode::G_INDEXED_STORE) 1003 OpTys = {PtrTy, Ty, Ty}; 1004 else 1005 OpTys = {Ty, PtrTy}; // For G_INDEXED_LOAD, G_INDEXED_[SZ]EXTLOAD 1006 1007 LegalityQuery Q(IndexedOpc, OpTys, MemDescrs); 1008 return isLegal(Q); 1009 } 1010 1011 static cl::opt<unsigned> PostIndexUseThreshold( 1012 "post-index-use-threshold", cl::Hidden, cl::init(32), 1013 cl::desc("Number of uses of a base pointer to check before it is no longer " 1014 "considered for post-indexing.")); 1015 1016 bool CombinerHelper::findPostIndexCandidate(GLoadStore &LdSt, Register &Addr, 1017 Register &Base, Register &Offset, 1018 bool &RematOffset) { 1019 // We're looking for the following pattern, for either load or store: 1020 // %baseptr:_(p0) = ... 1021 // G_STORE %val(s64), %baseptr(p0) 1022 // %offset:_(s64) = G_CONSTANT i64 -256 1023 // %new_addr:_(p0) = G_PTR_ADD %baseptr, %offset(s64) 1024 const auto &TLI = getTargetLowering(); 1025 1026 Register Ptr = LdSt.getPointerReg(); 1027 // If the store is the only use, don't bother. 1028 if (MRI.hasOneNonDBGUse(Ptr)) 1029 return false; 1030 1031 if (!isIndexedLoadStoreLegal(LdSt)) 1032 return false; 1033 1034 if (getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Ptr, MRI)) 1035 return false; 1036 1037 MachineInstr *StoredValDef = getDefIgnoringCopies(LdSt.getReg(0), MRI); 1038 auto *PtrDef = MRI.getVRegDef(Ptr); 1039 1040 unsigned NumUsesChecked = 0; 1041 for (auto &Use : MRI.use_nodbg_instructions(Ptr)) { 1042 if (++NumUsesChecked > PostIndexUseThreshold) 1043 return false; // Try to avoid exploding compile time. 1044 1045 auto *PtrAdd = dyn_cast<GPtrAdd>(&Use); 1046 // The use itself might be dead. This can happen during combines if DCE 1047 // hasn't had a chance to run yet. Don't allow it to form an indexed op. 1048 if (!PtrAdd || MRI.use_nodbg_empty(PtrAdd->getReg(0))) 1049 continue; 1050 1051 // Check the user of this isn't the store, otherwise we'd be generate a 1052 // indexed store defining its own use. 1053 if (StoredValDef == &Use) 1054 continue; 1055 1056 Offset = PtrAdd->getOffsetReg(); 1057 if (!ForceLegalIndexing && 1058 !TLI.isIndexingLegal(LdSt, PtrAdd->getBaseReg(), Offset, 1059 /*IsPre*/ false, MRI)) 1060 continue; 1061 1062 // Make sure the offset calculation is before the potentially indexed op. 1063 MachineInstr *OffsetDef = MRI.getVRegDef(Offset); 1064 RematOffset = false; 1065 if (!dominates(*OffsetDef, LdSt)) { 1066 // If the offset however is just a G_CONSTANT, we can always just 1067 // rematerialize it where we need it. 1068 if (OffsetDef->getOpcode() != TargetOpcode::G_CONSTANT) 1069 continue; 1070 RematOffset = true; 1071 } 1072 1073 for (auto &BasePtrUse : MRI.use_nodbg_instructions(PtrAdd->getBaseReg())) { 1074 if (&BasePtrUse == PtrDef) 1075 continue; 1076 1077 // If the user is a later load/store that can be post-indexed, then don't 1078 // combine this one. 
1079 auto *BasePtrLdSt = dyn_cast<GLoadStore>(&BasePtrUse); 1080 if (BasePtrLdSt && BasePtrLdSt != &LdSt && 1081 dominates(LdSt, *BasePtrLdSt) && 1082 isIndexedLoadStoreLegal(*BasePtrLdSt)) 1083 return false; 1084 1085 // Now we're looking for the key G_PTR_ADD instruction, which contains 1086 // the offset add that we want to fold. 1087 if (auto *BasePtrUseDef = dyn_cast<GPtrAdd>(&BasePtrUse)) { 1088 Register PtrAddDefReg = BasePtrUseDef->getReg(0); 1089 for (auto &BaseUseUse : MRI.use_nodbg_instructions(PtrAddDefReg)) { 1090 // If the use is in a different block, then we may produce worse code 1091 // due to the extra register pressure. 1092 if (BaseUseUse.getParent() != LdSt.getParent()) 1093 return false; 1094 1095 if (auto *UseUseLdSt = dyn_cast<GLoadStore>(&BaseUseUse)) 1096 if (canFoldInAddressingMode(UseUseLdSt, TLI, MRI)) 1097 return false; 1098 } 1099 if (!dominates(LdSt, BasePtrUse)) 1100 return false; // All use must be dominated by the load/store. 1101 } 1102 } 1103 1104 Addr = PtrAdd->getReg(0); 1105 Base = PtrAdd->getBaseReg(); 1106 return true; 1107 } 1108 1109 return false; 1110 } 1111 1112 bool CombinerHelper::findPreIndexCandidate(GLoadStore &LdSt, Register &Addr, 1113 Register &Base, Register &Offset) { 1114 auto &MF = *LdSt.getParent()->getParent(); 1115 const auto &TLI = *MF.getSubtarget().getTargetLowering(); 1116 1117 Addr = LdSt.getPointerReg(); 1118 if (!mi_match(Addr, MRI, m_GPtrAdd(m_Reg(Base), m_Reg(Offset))) || 1119 MRI.hasOneNonDBGUse(Addr)) 1120 return false; 1121 1122 if (!ForceLegalIndexing && 1123 !TLI.isIndexingLegal(LdSt, Base, Offset, /*IsPre*/ true, MRI)) 1124 return false; 1125 1126 if (!isIndexedLoadStoreLegal(LdSt)) 1127 return false; 1128 1129 MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); 1130 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) 1131 return false; 1132 1133 if (auto *St = dyn_cast<GStore>(&LdSt)) { 1134 // Would require a copy. 1135 if (Base == St->getValueReg()) 1136 return false; 1137 1138 // We're expecting one use of Addr in MI, but it could also be the 1139 // value stored, which isn't actually dominated by the instruction. 1140 if (St->getValueReg() == Addr) 1141 return false; 1142 } 1143 1144 // Avoid increasing cross-block register pressure. 1145 for (auto &AddrUse : MRI.use_nodbg_instructions(Addr)) 1146 if (AddrUse.getParent() != LdSt.getParent()) 1147 return false; 1148 1149 // FIXME: check whether all uses of the base pointer are constant PtrAdds. 1150 // That might allow us to end base's liveness here by adjusting the constant. 1151 bool RealUse = false; 1152 for (auto &AddrUse : MRI.use_nodbg_instructions(Addr)) { 1153 if (!dominates(LdSt, AddrUse)) 1154 return false; // All use must be dominated by the load/store. 1155 1156 // If Ptr may be folded in addressing mode of other use, then it's 1157 // not profitable to do this transformation. 1158 if (auto *UseLdSt = dyn_cast<GLoadStore>(&AddrUse)) { 1159 if (!canFoldInAddressingMode(UseLdSt, TLI, MRI)) 1160 RealUse = true; 1161 } else { 1162 RealUse = true; 1163 } 1164 } 1165 return RealUse; 1166 } 1167 1168 bool CombinerHelper::matchCombineExtractedVectorLoad(MachineInstr &MI, 1169 BuildFnTy &MatchInfo) { 1170 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT); 1171 1172 // Check if there is a load that defines the vector being extracted from. 
1173 auto *LoadMI = getOpcodeDef<GLoad>(MI.getOperand(1).getReg(), MRI); 1174 if (!LoadMI) 1175 return false; 1176 1177 Register Vector = MI.getOperand(1).getReg(); 1178 LLT VecEltTy = MRI.getType(Vector).getElementType(); 1179 1180 assert(MRI.getType(MI.getOperand(0).getReg()) == VecEltTy); 1181 1182 // Checking whether we should reduce the load width. 1183 if (!MRI.hasOneNonDBGUse(Vector)) 1184 return false; 1185 1186 // Check if the defining load is simple. 1187 if (!LoadMI->isSimple()) 1188 return false; 1189 1190 // If the vector element type is not a multiple of a byte then we are unable 1191 // to correctly compute an address to load only the extracted element as a 1192 // scalar. 1193 if (!VecEltTy.isByteSized()) 1194 return false; 1195 1196 // Check if the new load that we are going to create is legal 1197 // if we are in the post-legalization phase. 1198 MachineMemOperand MMO = LoadMI->getMMO(); 1199 Align Alignment = MMO.getAlign(); 1200 MachinePointerInfo PtrInfo; 1201 uint64_t Offset; 1202 1203 // Finding the appropriate PtrInfo if offset is a known constant. 1204 // This is required to create the memory operand for the narrowed load. 1205 // This machine memory operand object helps us infer about legality 1206 // before we proceed to combine the instruction. 1207 if (auto CVal = getIConstantVRegVal(Vector, MRI)) { 1208 int Elt = CVal->getZExtValue(); 1209 // FIXME: should be (ABI size)*Elt. 1210 Offset = VecEltTy.getSizeInBits() * Elt / 8; 1211 PtrInfo = MMO.getPointerInfo().getWithOffset(Offset); 1212 } else { 1213 // Discard the pointer info except the address space because the memory 1214 // operand can't represent this new access since the offset is variable. 1215 Offset = VecEltTy.getSizeInBits() / 8; 1216 PtrInfo = MachinePointerInfo(MMO.getPointerInfo().getAddrSpace()); 1217 } 1218 1219 Alignment = commonAlignment(Alignment, Offset); 1220 1221 Register VecPtr = LoadMI->getPointerReg(); 1222 LLT PtrTy = MRI.getType(VecPtr); 1223 1224 MachineFunction &MF = *MI.getMF(); 1225 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, VecEltTy); 1226 1227 LegalityQuery::MemDesc MMDesc(*NewMMO); 1228 1229 LegalityQuery Q = {TargetOpcode::G_LOAD, {VecEltTy, PtrTy}, {MMDesc}}; 1230 1231 if (!isLegalOrBeforeLegalizer(Q)) 1232 return false; 1233 1234 // Load must be allowed and fast on the target. 1235 LLVMContext &C = MF.getFunction().getContext(); 1236 auto &DL = MF.getDataLayout(); 1237 unsigned Fast = 0; 1238 if (!getTargetLowering().allowsMemoryAccess(C, DL, VecEltTy, *NewMMO, 1239 &Fast) || 1240 !Fast) 1241 return false; 1242 1243 Register Result = MI.getOperand(0).getReg(); 1244 Register Index = MI.getOperand(2).getReg(); 1245 1246 MatchInfo = [=](MachineIRBuilder &B) { 1247 GISelObserverWrapper DummyObserver; 1248 LegalizerHelper Helper(B.getMF(), DummyObserver, B); 1249 //// Get pointer to the vector element. 1250 Register finalPtr = Helper.getVectorElementPointer( 1251 LoadMI->getPointerReg(), MRI.getType(LoadMI->getOperand(0).getReg()), 1252 Index); 1253 // New G_LOAD instruction. 1254 B.buildLoad(Result, finalPtr, PtrInfo, Alignment); 1255 // Remove original GLOAD instruction. 
1256 LoadMI->eraseFromParent(); 1257 }; 1258 1259 return true; 1260 } 1261 1262 bool CombinerHelper::matchCombineIndexedLoadStore( 1263 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { 1264 auto &LdSt = cast<GLoadStore>(MI); 1265 1266 if (LdSt.isAtomic()) 1267 return false; 1268 1269 MatchInfo.IsPre = findPreIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base, 1270 MatchInfo.Offset); 1271 if (!MatchInfo.IsPre && 1272 !findPostIndexCandidate(LdSt, MatchInfo.Addr, MatchInfo.Base, 1273 MatchInfo.Offset, MatchInfo.RematOffset)) 1274 return false; 1275 1276 return true; 1277 } 1278 1279 void CombinerHelper::applyCombineIndexedLoadStore( 1280 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { 1281 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr); 1282 Builder.setInstrAndDebugLoc(MI); 1283 unsigned Opcode = MI.getOpcode(); 1284 bool IsStore = Opcode == TargetOpcode::G_STORE; 1285 unsigned NewOpcode = getIndexedOpc(Opcode); 1286 1287 // If the offset constant didn't happen to dominate the load/store, we can 1288 // just clone it as needed. 1289 if (MatchInfo.RematOffset) { 1290 auto *OldCst = MRI.getVRegDef(MatchInfo.Offset); 1291 auto NewCst = Builder.buildConstant(MRI.getType(MatchInfo.Offset), 1292 *OldCst->getOperand(1).getCImm()); 1293 MatchInfo.Offset = NewCst.getReg(0); 1294 } 1295 1296 auto MIB = Builder.buildInstr(NewOpcode); 1297 if (IsStore) { 1298 MIB.addDef(MatchInfo.Addr); 1299 MIB.addUse(MI.getOperand(0).getReg()); 1300 } else { 1301 MIB.addDef(MI.getOperand(0).getReg()); 1302 MIB.addDef(MatchInfo.Addr); 1303 } 1304 1305 MIB.addUse(MatchInfo.Base); 1306 MIB.addUse(MatchInfo.Offset); 1307 MIB.addImm(MatchInfo.IsPre); 1308 MIB->cloneMemRefs(*MI.getMF(), MI); 1309 MI.eraseFromParent(); 1310 AddrDef.eraseFromParent(); 1311 1312 LLVM_DEBUG(dbgs() << " Combinined to indexed operation"); 1313 } 1314 1315 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI, 1316 MachineInstr *&OtherMI) { 1317 unsigned Opcode = MI.getOpcode(); 1318 bool IsDiv, IsSigned; 1319 1320 switch (Opcode) { 1321 default: 1322 llvm_unreachable("Unexpected opcode!"); 1323 case TargetOpcode::G_SDIV: 1324 case TargetOpcode::G_UDIV: { 1325 IsDiv = true; 1326 IsSigned = Opcode == TargetOpcode::G_SDIV; 1327 break; 1328 } 1329 case TargetOpcode::G_SREM: 1330 case TargetOpcode::G_UREM: { 1331 IsDiv = false; 1332 IsSigned = Opcode == TargetOpcode::G_SREM; 1333 break; 1334 } 1335 } 1336 1337 Register Src1 = MI.getOperand(1).getReg(); 1338 unsigned DivOpcode, RemOpcode, DivremOpcode; 1339 if (IsSigned) { 1340 DivOpcode = TargetOpcode::G_SDIV; 1341 RemOpcode = TargetOpcode::G_SREM; 1342 DivremOpcode = TargetOpcode::G_SDIVREM; 1343 } else { 1344 DivOpcode = TargetOpcode::G_UDIV; 1345 RemOpcode = TargetOpcode::G_UREM; 1346 DivremOpcode = TargetOpcode::G_UDIVREM; 1347 } 1348 1349 if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}})) 1350 return false; 1351 1352 // Combine: 1353 // %div:_ = G_[SU]DIV %src1:_, %src2:_ 1354 // %rem:_ = G_[SU]REM %src1:_, %src2:_ 1355 // into: 1356 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_ 1357 1358 // Combine: 1359 // %rem:_ = G_[SU]REM %src1:_, %src2:_ 1360 // %div:_ = G_[SU]DIV %src1:_, %src2:_ 1361 // into: 1362 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_ 1363 1364 for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) { 1365 if (MI.getParent() == UseMI.getParent() && 1366 ((IsDiv && UseMI.getOpcode() == RemOpcode) || 1367 (!IsDiv && UseMI.getOpcode() == DivOpcode)) && 1368 matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) && 1369 
matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) { 1370 OtherMI = &UseMI; 1371 return true; 1372 } 1373 } 1374 1375 return false; 1376 } 1377 1378 void CombinerHelper::applyCombineDivRem(MachineInstr &MI, 1379 MachineInstr *&OtherMI) { 1380 unsigned Opcode = MI.getOpcode(); 1381 assert(OtherMI && "OtherMI shouldn't be empty."); 1382 1383 Register DestDivReg, DestRemReg; 1384 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) { 1385 DestDivReg = MI.getOperand(0).getReg(); 1386 DestRemReg = OtherMI->getOperand(0).getReg(); 1387 } else { 1388 DestDivReg = OtherMI->getOperand(0).getReg(); 1389 DestRemReg = MI.getOperand(0).getReg(); 1390 } 1391 1392 bool IsSigned = 1393 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM; 1394 1395 // Check which instruction is first in the block so we don't break def-use 1396 // deps by "moving" the instruction incorrectly. Also keep track of which 1397 // instruction is first so we pick it's operands, avoiding use-before-def 1398 // bugs. 1399 MachineInstr *FirstInst; 1400 if (dominates(MI, *OtherMI)) { 1401 Builder.setInstrAndDebugLoc(MI); 1402 FirstInst = &MI; 1403 } else { 1404 Builder.setInstrAndDebugLoc(*OtherMI); 1405 FirstInst = OtherMI; 1406 } 1407 1408 Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM 1409 : TargetOpcode::G_UDIVREM, 1410 {DestDivReg, DestRemReg}, 1411 { FirstInst->getOperand(1), FirstInst->getOperand(2) }); 1412 MI.eraseFromParent(); 1413 OtherMI->eraseFromParent(); 1414 } 1415 1416 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI, 1417 MachineInstr *&BrCond) { 1418 assert(MI.getOpcode() == TargetOpcode::G_BR); 1419 1420 // Try to match the following: 1421 // bb1: 1422 // G_BRCOND %c1, %bb2 1423 // G_BR %bb3 1424 // bb2: 1425 // ... 1426 // bb3: 1427 1428 // The above pattern does not have a fall through to the successor bb2, always 1429 // resulting in a branch no matter which path is taken. Here we try to find 1430 // and replace that pattern with conditional branch to bb3 and otherwise 1431 // fallthrough to bb2. This is generally better for branch predictors. 1432 1433 MachineBasicBlock *MBB = MI.getParent(); 1434 MachineBasicBlock::iterator BrIt(MI); 1435 if (BrIt == MBB->begin()) 1436 return false; 1437 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator"); 1438 1439 BrCond = &*std::prev(BrIt); 1440 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND) 1441 return false; 1442 1443 // Check that the next block is the conditional branch target. Also make sure 1444 // that it isn't the same as the G_BR's target (otherwise, this will loop.) 1445 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB(); 1446 return BrCondTarget != MI.getOperand(0).getMBB() && 1447 MBB->isLayoutSuccessor(BrCondTarget); 1448 } 1449 1450 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI, 1451 MachineInstr *&BrCond) { 1452 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB(); 1453 Builder.setInstrAndDebugLoc(*BrCond); 1454 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg()); 1455 // FIXME: Does int/fp matter for this? If so, we might need to restrict 1456 // this to i1 only since we might not know for sure what kind of 1457 // compare generated the condition value. 
1458 auto True = Builder.buildConstant( 1459 Ty, getICmpTrueVal(getTargetLowering(), false, false)); 1460 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True); 1461 1462 auto *FallthroughBB = BrCond->getOperand(1).getMBB(); 1463 Observer.changingInstr(MI); 1464 MI.getOperand(0).setMBB(FallthroughBB); 1465 Observer.changedInstr(MI); 1466 1467 // Change the conditional branch to use the inverted condition and 1468 // new target block. 1469 Observer.changingInstr(*BrCond); 1470 BrCond->getOperand(0).setReg(Xor.getReg(0)); 1471 BrCond->getOperand(1).setMBB(BrTarget); 1472 Observer.changedInstr(*BrCond); 1473 } 1474 1475 1476 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) { 1477 MachineIRBuilder HelperBuilder(MI); 1478 GISelObserverWrapper DummyObserver; 1479 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder); 1480 return Helper.lowerMemcpyInline(MI) == 1481 LegalizerHelper::LegalizeResult::Legalized; 1482 } 1483 1484 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { 1485 MachineIRBuilder HelperBuilder(MI); 1486 GISelObserverWrapper DummyObserver; 1487 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder); 1488 return Helper.lowerMemCpyFamily(MI, MaxLen) == 1489 LegalizerHelper::LegalizeResult::Legalized; 1490 } 1491 1492 static APFloat constantFoldFpUnary(const MachineInstr &MI, 1493 const MachineRegisterInfo &MRI, 1494 const APFloat &Val) { 1495 APFloat Result(Val); 1496 switch (MI.getOpcode()) { 1497 default: 1498 llvm_unreachable("Unexpected opcode!"); 1499 case TargetOpcode::G_FNEG: { 1500 Result.changeSign(); 1501 return Result; 1502 } 1503 case TargetOpcode::G_FABS: { 1504 Result.clearSign(); 1505 return Result; 1506 } 1507 case TargetOpcode::G_FPTRUNC: { 1508 bool Unused; 1509 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 1510 Result.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, 1511 &Unused); 1512 return Result; 1513 } 1514 case TargetOpcode::G_FSQRT: { 1515 bool Unused; 1516 Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, 1517 &Unused); 1518 Result = APFloat(sqrt(Result.convertToDouble())); 1519 break; 1520 } 1521 case TargetOpcode::G_FLOG2: { 1522 bool Unused; 1523 Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, 1524 &Unused); 1525 Result = APFloat(log2(Result.convertToDouble())); 1526 break; 1527 } 1528 } 1529 // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise, 1530 // `buildFConstant` will assert on size mismatch. Only `G_FSQRT`, and 1531 // `G_FLOG2` reach here. 
1532 bool Unused; 1533 Result.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &Unused); 1534 return Result; 1535 } 1536 1537 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI, 1538 const ConstantFP *Cst) { 1539 Builder.setInstrAndDebugLoc(MI); 1540 APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue()); 1541 const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded); 1542 Builder.buildFConstant(MI.getOperand(0), *NewCst); 1543 MI.eraseFromParent(); 1544 } 1545 1546 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI, 1547 PtrAddChain &MatchInfo) { 1548 // We're trying to match the following pattern: 1549 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1 1550 // %root = G_PTR_ADD %t1, G_CONSTANT imm2 1551 // --> 1552 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2) 1553 1554 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD) 1555 return false; 1556 1557 Register Add2 = MI.getOperand(1).getReg(); 1558 Register Imm1 = MI.getOperand(2).getReg(); 1559 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI); 1560 if (!MaybeImmVal) 1561 return false; 1562 1563 MachineInstr *Add2Def = MRI.getVRegDef(Add2); 1564 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD) 1565 return false; 1566 1567 Register Base = Add2Def->getOperand(1).getReg(); 1568 Register Imm2 = Add2Def->getOperand(2).getReg(); 1569 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI); 1570 if (!MaybeImm2Val) 1571 return false; 1572 1573 // Check if the new combined immediate forms an illegal addressing mode. 1574 // Do not combine if it was legal before but would get illegal. 1575 // To do so, we need to find a load/store user of the pointer to get 1576 // the access type. 1577 Type *AccessTy = nullptr; 1578 auto &MF = *MI.getMF(); 1579 for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) { 1580 if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) { 1581 AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)), 1582 MF.getFunction().getContext()); 1583 break; 1584 } 1585 } 1586 TargetLoweringBase::AddrMode AMNew; 1587 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value; 1588 AMNew.BaseOffs = CombinedImm.getSExtValue(); 1589 if (AccessTy) { 1590 AMNew.HasBaseReg = true; 1591 TargetLoweringBase::AddrMode AMOld; 1592 AMOld.BaseOffs = MaybeImmVal->Value.getSExtValue(); 1593 AMOld.HasBaseReg = true; 1594 unsigned AS = MRI.getType(Add2).getAddressSpace(); 1595 const auto &TLI = *MF.getSubtarget().getTargetLowering(); 1596 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) && 1597 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS)) 1598 return false; 1599 } 1600 1601 // Pass the combined immediate to the apply function. 
1602 MatchInfo.Imm = AMNew.BaseOffs; 1603 MatchInfo.Base = Base; 1604 MatchInfo.Bank = getRegBank(Imm2); 1605 return true; 1606 } 1607 1608 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI, 1609 PtrAddChain &MatchInfo) { 1610 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD"); 1611 MachineIRBuilder MIB(MI); 1612 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg()); 1613 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm); 1614 setRegBank(NewOffset.getReg(0), MatchInfo.Bank); 1615 Observer.changingInstr(MI); 1616 MI.getOperand(1).setReg(MatchInfo.Base); 1617 MI.getOperand(2).setReg(NewOffset.getReg(0)); 1618 Observer.changedInstr(MI); 1619 } 1620 1621 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI, 1622 RegisterImmPair &MatchInfo) { 1623 // We're trying to match the following pattern with any of 1624 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions: 1625 // %t1 = SHIFT %base, G_CONSTANT imm1 1626 // %root = SHIFT %t1, G_CONSTANT imm2 1627 // --> 1628 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2) 1629 1630 unsigned Opcode = MI.getOpcode(); 1631 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR || 1632 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT || 1633 Opcode == TargetOpcode::G_USHLSAT) && 1634 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT"); 1635 1636 Register Shl2 = MI.getOperand(1).getReg(); 1637 Register Imm1 = MI.getOperand(2).getReg(); 1638 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI); 1639 if (!MaybeImmVal) 1640 return false; 1641 1642 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2); 1643 if (Shl2Def->getOpcode() != Opcode) 1644 return false; 1645 1646 Register Base = Shl2Def->getOperand(1).getReg(); 1647 Register Imm2 = Shl2Def->getOperand(2).getReg(); 1648 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI); 1649 if (!MaybeImm2Val) 1650 return false; 1651 1652 // Pass the combined immediate to the apply function. 1653 MatchInfo.Imm = 1654 (MaybeImmVal->Value.getZExtValue() + MaybeImm2Val->Value).getZExtValue(); 1655 MatchInfo.Reg = Base; 1656 1657 // There is no simple replacement for a saturating unsigned left shift that 1658 // exceeds the scalar size. 1659 if (Opcode == TargetOpcode::G_USHLSAT && 1660 MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits()) 1661 return false; 1662 1663 return true; 1664 } 1665 1666 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI, 1667 RegisterImmPair &MatchInfo) { 1668 unsigned Opcode = MI.getOpcode(); 1669 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR || 1670 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT || 1671 Opcode == TargetOpcode::G_USHLSAT) && 1672 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT"); 1673 1674 Builder.setInstrAndDebugLoc(MI); 1675 LLT Ty = MRI.getType(MI.getOperand(1).getReg()); 1676 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits(); 1677 auto Imm = MatchInfo.Imm; 1678 1679 if (Imm >= ScalarSizeInBits) { 1680 // Any logical shift that exceeds scalar size will produce zero. 1681 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) { 1682 Builder.buildConstant(MI.getOperand(0), 0); 1683 MI.eraseFromParent(); 1684 return; 1685 } 1686 // Arithmetic shift and saturating signed left shift have no effect beyond 1687 // scalar size. 
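// For example, on s32 a matched chain like
//   %t = G_ASHR %x, 20 ; %root = G_ASHR %t, 20
// asks for a combined shift of 40; clamping the amount to 31 yields the same
// result, since every remaining bit is already a copy of the sign bit.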
1688 Imm = ScalarSizeInBits - 1; 1689 } 1690 1691 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg()); 1692 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0); 1693 Observer.changingInstr(MI); 1694 MI.getOperand(1).setReg(MatchInfo.Reg); 1695 MI.getOperand(2).setReg(NewImm); 1696 Observer.changedInstr(MI); 1697 } 1698 1699 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI, 1700 ShiftOfShiftedLogic &MatchInfo) { 1701 // We're trying to match the following pattern with any of 1702 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination 1703 // with any of G_AND/G_OR/G_XOR logic instructions. 1704 // %t1 = SHIFT %X, G_CONSTANT C0 1705 // %t2 = LOGIC %t1, %Y 1706 // %root = SHIFT %t2, G_CONSTANT C1 1707 // --> 1708 // %t3 = SHIFT %X, G_CONSTANT (C0+C1) 1709 // %t4 = SHIFT %Y, G_CONSTANT C1 1710 // %root = LOGIC %t3, %t4 1711 unsigned ShiftOpcode = MI.getOpcode(); 1712 assert((ShiftOpcode == TargetOpcode::G_SHL || 1713 ShiftOpcode == TargetOpcode::G_ASHR || 1714 ShiftOpcode == TargetOpcode::G_LSHR || 1715 ShiftOpcode == TargetOpcode::G_USHLSAT || 1716 ShiftOpcode == TargetOpcode::G_SSHLSAT) && 1717 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT"); 1718 1719 // Match a one-use bitwise logic op. 1720 Register LogicDest = MI.getOperand(1).getReg(); 1721 if (!MRI.hasOneNonDBGUse(LogicDest)) 1722 return false; 1723 1724 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest); 1725 unsigned LogicOpcode = LogicMI->getOpcode(); 1726 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR && 1727 LogicOpcode != TargetOpcode::G_XOR) 1728 return false; 1729 1730 // Find a matching one-use shift by constant. 1731 const Register C1 = MI.getOperand(2).getReg(); 1732 auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI); 1733 if (!MaybeImmVal || MaybeImmVal->Value == 0) 1734 return false; 1735 1736 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue(); 1737 1738 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) { 1739 // Shift should match previous one and should be a one-use. 1740 if (MI->getOpcode() != ShiftOpcode || 1741 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg())) 1742 return false; 1743 1744 // Must be a constant. 1745 auto MaybeImmVal = 1746 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); 1747 if (!MaybeImmVal) 1748 return false; 1749 1750 ShiftVal = MaybeImmVal->Value.getSExtValue(); 1751 return true; 1752 }; 1753 1754 // Logic ops are commutative, so check each operand for a match. 1755 Register LogicMIReg1 = LogicMI->getOperand(1).getReg(); 1756 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1); 1757 Register LogicMIReg2 = LogicMI->getOperand(2).getReg(); 1758 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2); 1759 uint64_t C0Val; 1760 1761 if (matchFirstShift(LogicMIOp1, C0Val)) { 1762 MatchInfo.LogicNonShiftReg = LogicMIReg2; 1763 MatchInfo.Shift2 = LogicMIOp1; 1764 } else if (matchFirstShift(LogicMIOp2, C0Val)) { 1765 MatchInfo.LogicNonShiftReg = LogicMIReg1; 1766 MatchInfo.Shift2 = LogicMIOp2; 1767 } else 1768 return false; 1769 1770 MatchInfo.ValSum = C0Val + C1Val; 1771 1772 // The fold is not valid if the sum of the shift values exceeds bitwidth. 
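// E.g. for s32, C0 = 20 and C1 = 15 would give a combined shift of 35, which
// is not a valid shift amount for the type, so the fold is rejected.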
1773 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1774 return false;
1775
1776 MatchInfo.Logic = LogicMI;
1777 return true;
1778 }
1779
1780 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1781 ShiftOfShiftedLogic &MatchInfo) {
1782 unsigned Opcode = MI.getOpcode();
1783 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1784 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1785 Opcode == TargetOpcode::G_SSHLSAT) &&
1786 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1787
1788 LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1789 LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1790 Builder.setInstrAndDebugLoc(MI);
1791
1792 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1793
1794 Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1795 Register Shift1 =
1796 Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1797
1798 // If LogicNonShiftReg is the same as Shift1Base, and the shift1 constant is
1799 // the same as the MatchInfo.Shift2 constant, CSEMIRBuilder will reuse the old
1800 // shift1 when building shift2. Erasing MatchInfo.Shift2 at the end would then
1801 // actually remove the old shift1 and cause a crash later, so erase it here,
1802 // before shift2 is built.
1803 MatchInfo.Shift2->eraseFromParent();
1804
1805 Register Shift2Const = MI.getOperand(2).getReg();
1806 Register Shift2 = Builder
1807 .buildInstr(Opcode, {DestType},
1808 {MatchInfo.LogicNonShiftReg, Shift2Const})
1809 .getReg(0);
1810
1811 Register Dest = MI.getOperand(0).getReg();
1812 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1813
1814 // This was one use so it's safe to remove it.
1815 MatchInfo.Logic->eraseFromParent();
1816
1817 MI.eraseFromParent();
1818 }
1819
1820 bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
1821 assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
1822 // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
1823 // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
1824 auto &Shl = cast<GenericMachineInstr>(MI);
1825 Register DstReg = Shl.getReg(0);
1826 Register SrcReg = Shl.getReg(1);
1827 Register ShiftReg = Shl.getReg(2);
1828 Register X, C1;
1829
1830 if (!getTargetLowering().isDesirableToCommuteWithShift(MI, !isPreLegalize()))
1831 return false;
1832
1833 if (!mi_match(SrcReg, MRI,
1834 m_OneNonDBGUse(m_any_of(m_GAdd(m_Reg(X), m_Reg(C1)),
1835 m_GOr(m_Reg(X), m_Reg(C1))))))
1836 return false;
1837
1838 APInt C1Val, C2Val;
1839 if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||
1840 !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))
1841 return false;
1842
1843 auto *SrcDef = MRI.getVRegDef(SrcReg);
1844 assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
1845 SrcDef->getOpcode() == TargetOpcode::G_OR) && "Unexpected op");
1846 LLT SrcTy = MRI.getType(SrcReg);
1847 MatchInfo = [=](MachineIRBuilder &B) {
1848 auto S1 = B.buildShl(SrcTy, X, ShiftReg);
1849 auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
1850 B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
1851 };
1852 return true;
1853 }
1854
1855 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1856 unsigned &ShiftVal) {
1857 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1858 auto MaybeImmVal =
1859 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1860 if (!MaybeImmVal)
1861 return false;
1862
1863 ShiftVal =
MaybeImmVal->Value.exactLogBase2();
1864 return (static_cast<int32_t>(ShiftVal) != -1);
1865 }
1866
1867 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1868 unsigned &ShiftVal) {
1869 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1870 MachineIRBuilder MIB(MI);
1871 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1872 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1873 Observer.changingInstr(MI);
1874 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1875 MI.getOperand(2).setReg(ShiftCst.getReg(0));
1876 Observer.changedInstr(MI);
1877 }
1878
1879 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1880 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1881 RegisterImmPair &MatchData) {
1882 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1883 if (!getTargetLowering().isDesirableToPullExtFromShl(MI))
1884 return false;
1885
1886 Register LHS = MI.getOperand(1).getReg();
1887
1888 Register ExtSrc;
1889 if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1890 !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1891 !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1892 return false;
1893
1894 Register RHS = MI.getOperand(2).getReg();
1895 MachineInstr *MIShiftAmt = MRI.getVRegDef(RHS);
1896 auto MaybeShiftAmtVal = isConstantOrConstantSplatVector(*MIShiftAmt, MRI);
1897 if (!MaybeShiftAmtVal)
1898 return false;
1899
1900 if (LI) {
1901 LLT SrcTy = MRI.getType(ExtSrc);
1902
1903 // We only really care about legality for the type of the shifted value. We
1904 // can pick any type for the constant shift amount, so ask the target what to
1905 // use. Otherwise we would have to guess and hope it is reported as legal.
1906 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1907 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1908 return false;
1909 }
1910
1911 int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
1912 MatchData.Reg = ExtSrc;
1913 MatchData.Imm = ShiftAmt;
1914
1915 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
1916 unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
1917 return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
1918 }
1919
1920 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1921 const RegisterImmPair &MatchData) {
1922 Register ExtSrcReg = MatchData.Reg;
1923 int64_t ShiftAmtVal = MatchData.Imm;
1924
1925 LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1926 Builder.setInstrAndDebugLoc(MI);
1927 auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1928 auto NarrowShift =
1929 Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1930 Builder.buildZExt(MI.getOperand(0), NarrowShift);
1931 MI.eraseFromParent();
1932 }
1933
1934 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
1935 Register &MatchInfo) {
1936 GMerge &Merge = cast<GMerge>(MI);
1937 SmallVector<Register, 16> MergedValues;
1938 for (unsigned I = 0; I < Merge.getNumSources(); ++I)
1939 MergedValues.emplace_back(Merge.getSourceReg(I));
1940
1941 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
1942 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
1943 return false;
1944
1945 for (unsigned I = 0; I < MergedValues.size(); ++I)
1946 if (MergedValues[I] != Unmerge->getReg(I))
1947 return false;
1948
1949 MatchInfo = Unmerge->getSourceReg();
1950 return true;
1951 }
1952
1953 static Register peekThroughBitcast(Register Reg,
1954 const MachineRegisterInfo &MRI) {
1955 while (mi_match(Reg, MRI,
m_GBitcast(m_Reg(Reg)))) 1956 ; 1957 1958 return Reg; 1959 } 1960 1961 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues( 1962 MachineInstr &MI, SmallVectorImpl<Register> &Operands) { 1963 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1964 "Expected an unmerge"); 1965 auto &Unmerge = cast<GUnmerge>(MI); 1966 Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI); 1967 1968 auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI); 1969 if (!SrcInstr) 1970 return false; 1971 1972 // Check the source type of the merge. 1973 LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0)); 1974 LLT Dst0Ty = MRI.getType(Unmerge.getReg(0)); 1975 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits(); 1976 if (SrcMergeTy != Dst0Ty && !SameSize) 1977 return false; 1978 // They are the same now (modulo a bitcast). 1979 // We can collect all the src registers. 1980 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx) 1981 Operands.push_back(SrcInstr->getSourceReg(Idx)); 1982 return true; 1983 } 1984 1985 void CombinerHelper::applyCombineUnmergeMergeToPlainValues( 1986 MachineInstr &MI, SmallVectorImpl<Register> &Operands) { 1987 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1988 "Expected an unmerge"); 1989 assert((MI.getNumOperands() - 1 == Operands.size()) && 1990 "Not enough operands to replace all defs"); 1991 unsigned NumElems = MI.getNumOperands() - 1; 1992 1993 LLT SrcTy = MRI.getType(Operands[0]); 1994 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 1995 bool CanReuseInputDirectly = DstTy == SrcTy; 1996 Builder.setInstrAndDebugLoc(MI); 1997 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 1998 Register DstReg = MI.getOperand(Idx).getReg(); 1999 Register SrcReg = Operands[Idx]; 2000 2001 // This combine may run after RegBankSelect, so we need to be aware of 2002 // register banks. 2003 const auto &DstCB = MRI.getRegClassOrRegBank(DstReg); 2004 if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) { 2005 SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0); 2006 MRI.setRegClassOrRegBank(SrcReg, DstCB); 2007 } 2008 2009 if (CanReuseInputDirectly) 2010 replaceRegWith(MRI, DstReg, SrcReg); 2011 else 2012 Builder.buildCast(DstReg, SrcReg); 2013 } 2014 MI.eraseFromParent(); 2015 } 2016 2017 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI, 2018 SmallVectorImpl<APInt> &Csts) { 2019 unsigned SrcIdx = MI.getNumOperands() - 1; 2020 Register SrcReg = MI.getOperand(SrcIdx).getReg(); 2021 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg); 2022 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT && 2023 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT) 2024 return false; 2025 // Break down the big constant in smaller ones. 2026 const MachineOperand &CstVal = SrcInstr->getOperand(1); 2027 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT 2028 ? CstVal.getCImm()->getValue() 2029 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt(); 2030 2031 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg()); 2032 unsigned ShiftAmt = Dst0Ty.getSizeInBits(); 2033 // Unmerge a constant. 
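// E.g. unmerging the 32-bit constant 0xAABBCCDD into two s16 pieces yields
// 0xCCDD for the first definition and 0xAABB for the second.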
2034 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) { 2035 Csts.emplace_back(Val.trunc(ShiftAmt)); 2036 Val = Val.lshr(ShiftAmt); 2037 } 2038 2039 return true; 2040 } 2041 2042 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI, 2043 SmallVectorImpl<APInt> &Csts) { 2044 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 2045 "Expected an unmerge"); 2046 assert((MI.getNumOperands() - 1 == Csts.size()) && 2047 "Not enough operands to replace all defs"); 2048 unsigned NumElems = MI.getNumOperands() - 1; 2049 Builder.setInstrAndDebugLoc(MI); 2050 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 2051 Register DstReg = MI.getOperand(Idx).getReg(); 2052 Builder.buildConstant(DstReg, Csts[Idx]); 2053 } 2054 2055 MI.eraseFromParent(); 2056 } 2057 2058 bool CombinerHelper::matchCombineUnmergeUndef( 2059 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 2060 unsigned SrcIdx = MI.getNumOperands() - 1; 2061 Register SrcReg = MI.getOperand(SrcIdx).getReg(); 2062 MatchInfo = [&MI](MachineIRBuilder &B) { 2063 unsigned NumElems = MI.getNumOperands() - 1; 2064 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 2065 Register DstReg = MI.getOperand(Idx).getReg(); 2066 B.buildUndef(DstReg); 2067 } 2068 }; 2069 return isa<GImplicitDef>(MRI.getVRegDef(SrcReg)); 2070 } 2071 2072 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) { 2073 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 2074 "Expected an unmerge"); 2075 // Check that all the lanes are dead except the first one. 2076 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) { 2077 if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg())) 2078 return false; 2079 } 2080 return true; 2081 } 2082 2083 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) { 2084 Builder.setInstrAndDebugLoc(MI); 2085 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg(); 2086 // Truncating a vector is going to truncate every single lane, 2087 // whereas we want the full lowbits. 2088 // Do the operation on a scalar instead. 2089 LLT SrcTy = MRI.getType(SrcReg); 2090 if (SrcTy.isVector()) 2091 SrcReg = 2092 Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0); 2093 2094 Register Dst0Reg = MI.getOperand(0).getReg(); 2095 LLT Dst0Ty = MRI.getType(Dst0Reg); 2096 if (Dst0Ty.isVector()) { 2097 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg); 2098 Builder.buildCast(Dst0Reg, MIB); 2099 } else 2100 Builder.buildTrunc(Dst0Reg, SrcReg); 2101 MI.eraseFromParent(); 2102 } 2103 2104 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) { 2105 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 2106 "Expected an unmerge"); 2107 Register Dst0Reg = MI.getOperand(0).getReg(); 2108 LLT Dst0Ty = MRI.getType(Dst0Reg); 2109 // G_ZEXT on vector applies to each lane, so it will 2110 // affect all destinations. Therefore we won't be able 2111 // to simplify the unmerge to just the first definition. 2112 if (Dst0Ty.isVector()) 2113 return false; 2114 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg(); 2115 LLT SrcTy = MRI.getType(SrcReg); 2116 if (SrcTy.isVector()) 2117 return false; 2118 2119 Register ZExtSrcReg; 2120 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) 2121 return false; 2122 2123 // Finally we can replace the first definition with 2124 // a zext of the source if the definition is big enough to hold 2125 // all of ZExtSrc bits. 
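// E.g. for %src:_(s64) = G_ZEXT %x:_(s16) unmerged into four s16 pieces, the
// first piece can simply become %x and the remaining pieces become zero (see
// the apply below).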
2126 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg); 2127 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits(); 2128 } 2129 2130 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) { 2131 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 2132 "Expected an unmerge"); 2133 2134 Register Dst0Reg = MI.getOperand(0).getReg(); 2135 2136 MachineInstr *ZExtInstr = 2137 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg()); 2138 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT && 2139 "Expecting a G_ZEXT"); 2140 2141 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg(); 2142 LLT Dst0Ty = MRI.getType(Dst0Reg); 2143 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg); 2144 2145 Builder.setInstrAndDebugLoc(MI); 2146 2147 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) { 2148 Builder.buildZExt(Dst0Reg, ZExtSrcReg); 2149 } else { 2150 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() && 2151 "ZExt src doesn't fit in destination"); 2152 replaceRegWith(MRI, Dst0Reg, ZExtSrcReg); 2153 } 2154 2155 Register ZeroReg; 2156 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) { 2157 if (!ZeroReg) 2158 ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0); 2159 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg); 2160 } 2161 MI.eraseFromParent(); 2162 } 2163 2164 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI, 2165 unsigned TargetShiftSize, 2166 unsigned &ShiftVal) { 2167 assert((MI.getOpcode() == TargetOpcode::G_SHL || 2168 MI.getOpcode() == TargetOpcode::G_LSHR || 2169 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift"); 2170 2171 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 2172 if (Ty.isVector()) // TODO: 2173 return false; 2174 2175 // Don't narrow further than the requested size. 
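// For illustration: an s64 shift by a constant in [32, 64) depends on only
// one 32-bit half of the source, so it can be rewritten over an unmerge into
// s32 halves; see applyCombineShiftToUnmerge below.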
2176 unsigned Size = Ty.getSizeInBits(); 2177 if (Size <= TargetShiftSize) 2178 return false; 2179 2180 auto MaybeImmVal = 2181 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 2182 if (!MaybeImmVal) 2183 return false; 2184 2185 ShiftVal = MaybeImmVal->Value.getSExtValue(); 2186 return ShiftVal >= Size / 2 && ShiftVal < Size; 2187 } 2188 2189 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI, 2190 const unsigned &ShiftVal) { 2191 Register DstReg = MI.getOperand(0).getReg(); 2192 Register SrcReg = MI.getOperand(1).getReg(); 2193 LLT Ty = MRI.getType(SrcReg); 2194 unsigned Size = Ty.getSizeInBits(); 2195 unsigned HalfSize = Size / 2; 2196 assert(ShiftVal >= HalfSize); 2197 2198 LLT HalfTy = LLT::scalar(HalfSize); 2199 2200 Builder.setInstr(MI); 2201 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg); 2202 unsigned NarrowShiftAmt = ShiftVal - HalfSize; 2203 2204 if (MI.getOpcode() == TargetOpcode::G_LSHR) { 2205 Register Narrowed = Unmerge.getReg(1); 2206 2207 // dst = G_LSHR s64:x, C for C >= 32 2208 // => 2209 // lo, hi = G_UNMERGE_VALUES x 2210 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0 2211 2212 if (NarrowShiftAmt != 0) { 2213 Narrowed = Builder.buildLShr(HalfTy, Narrowed, 2214 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); 2215 } 2216 2217 auto Zero = Builder.buildConstant(HalfTy, 0); 2218 Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero}); 2219 } else if (MI.getOpcode() == TargetOpcode::G_SHL) { 2220 Register Narrowed = Unmerge.getReg(0); 2221 // dst = G_SHL s64:x, C for C >= 32 2222 // => 2223 // lo, hi = G_UNMERGE_VALUES x 2224 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32) 2225 if (NarrowShiftAmt != 0) { 2226 Narrowed = Builder.buildShl(HalfTy, Narrowed, 2227 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); 2228 } 2229 2230 auto Zero = Builder.buildConstant(HalfTy, 0); 2231 Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed}); 2232 } else { 2233 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 2234 auto Hi = Builder.buildAShr( 2235 HalfTy, Unmerge.getReg(1), 2236 Builder.buildConstant(HalfTy, HalfSize - 1)); 2237 2238 if (ShiftVal == HalfSize) { 2239 // (G_ASHR i64:x, 32) -> 2240 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31) 2241 Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi}); 2242 } else if (ShiftVal == Size - 1) { 2243 // Don't need a second shift. 
2244 // (G_ASHR i64:x, 63) -> 2245 // %narrowed = (G_ASHR hi_32(x), 31) 2246 // G_MERGE_VALUES %narrowed, %narrowed 2247 Builder.buildMergeLikeInstr(DstReg, {Hi, Hi}); 2248 } else { 2249 auto Lo = Builder.buildAShr( 2250 HalfTy, Unmerge.getReg(1), 2251 Builder.buildConstant(HalfTy, ShiftVal - HalfSize)); 2252 2253 // (G_ASHR i64:x, C) ->, for C >= 32 2254 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31) 2255 Builder.buildMergeLikeInstr(DstReg, {Lo, Hi}); 2256 } 2257 } 2258 2259 MI.eraseFromParent(); 2260 } 2261 2262 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI, 2263 unsigned TargetShiftAmount) { 2264 unsigned ShiftAmt; 2265 if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) { 2266 applyCombineShiftToUnmerge(MI, ShiftAmt); 2267 return true; 2268 } 2269 2270 return false; 2271 } 2272 2273 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) { 2274 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR"); 2275 Register DstReg = MI.getOperand(0).getReg(); 2276 LLT DstTy = MRI.getType(DstReg); 2277 Register SrcReg = MI.getOperand(1).getReg(); 2278 return mi_match(SrcReg, MRI, 2279 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg)))); 2280 } 2281 2282 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) { 2283 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR"); 2284 Register DstReg = MI.getOperand(0).getReg(); 2285 Builder.setInstr(MI); 2286 Builder.buildCopy(DstReg, Reg); 2287 MI.eraseFromParent(); 2288 } 2289 2290 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) { 2291 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT"); 2292 Register DstReg = MI.getOperand(0).getReg(); 2293 Builder.setInstr(MI); 2294 Builder.buildZExtOrTrunc(DstReg, Reg); 2295 MI.eraseFromParent(); 2296 } 2297 2298 bool CombinerHelper::matchCombineAddP2IToPtrAdd( 2299 MachineInstr &MI, std::pair<Register, bool> &PtrReg) { 2300 assert(MI.getOpcode() == TargetOpcode::G_ADD); 2301 Register LHS = MI.getOperand(1).getReg(); 2302 Register RHS = MI.getOperand(2).getReg(); 2303 LLT IntTy = MRI.getType(LHS); 2304 2305 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the 2306 // instruction. 2307 PtrReg.second = false; 2308 for (Register SrcReg : {LHS, RHS}) { 2309 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) { 2310 // Don't handle cases where the integer is implicitly converted to the 2311 // pointer width. 
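// E.g. if %p is a 64-bit pointer, skip %i:_(s32) = G_PTRTOINT %p feeding
// an s32 G_ADD, since the conversion implicitly truncates to 32 bits.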
2312 LLT PtrTy = MRI.getType(PtrReg.first); 2313 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits()) 2314 return true; 2315 } 2316 2317 PtrReg.second = true; 2318 } 2319 2320 return false; 2321 } 2322 2323 void CombinerHelper::applyCombineAddP2IToPtrAdd( 2324 MachineInstr &MI, std::pair<Register, bool> &PtrReg) { 2325 Register Dst = MI.getOperand(0).getReg(); 2326 Register LHS = MI.getOperand(1).getReg(); 2327 Register RHS = MI.getOperand(2).getReg(); 2328 2329 const bool DoCommute = PtrReg.second; 2330 if (DoCommute) 2331 std::swap(LHS, RHS); 2332 LHS = PtrReg.first; 2333 2334 LLT PtrTy = MRI.getType(LHS); 2335 2336 Builder.setInstrAndDebugLoc(MI); 2337 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS); 2338 Builder.buildPtrToInt(Dst, PtrAdd); 2339 MI.eraseFromParent(); 2340 } 2341 2342 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI, 2343 APInt &NewCst) { 2344 auto &PtrAdd = cast<GPtrAdd>(MI); 2345 Register LHS = PtrAdd.getBaseReg(); 2346 Register RHS = PtrAdd.getOffsetReg(); 2347 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo(); 2348 2349 if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) { 2350 APInt Cst; 2351 if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) { 2352 auto DstTy = MRI.getType(PtrAdd.getReg(0)); 2353 // G_INTTOPTR uses zero-extension 2354 NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits()); 2355 NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits()); 2356 return true; 2357 } 2358 } 2359 2360 return false; 2361 } 2362 2363 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI, 2364 APInt &NewCst) { 2365 auto &PtrAdd = cast<GPtrAdd>(MI); 2366 Register Dst = PtrAdd.getReg(0); 2367 2368 Builder.setInstrAndDebugLoc(MI); 2369 Builder.buildConstant(Dst, NewCst); 2370 PtrAdd.eraseFromParent(); 2371 } 2372 2373 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) { 2374 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT"); 2375 Register DstReg = MI.getOperand(0).getReg(); 2376 Register SrcReg = MI.getOperand(1).getReg(); 2377 LLT DstTy = MRI.getType(DstReg); 2378 return mi_match(SrcReg, MRI, 2379 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy)))); 2380 } 2381 2382 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) { 2383 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT"); 2384 Register DstReg = MI.getOperand(0).getReg(); 2385 Register SrcReg = MI.getOperand(1).getReg(); 2386 LLT DstTy = MRI.getType(DstReg); 2387 if (mi_match(SrcReg, MRI, 2388 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) { 2389 unsigned DstSize = DstTy.getScalarSizeInBits(); 2390 unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits(); 2391 return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize; 2392 } 2393 return false; 2394 } 2395 2396 bool CombinerHelper::matchCombineExtOfExt( 2397 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 2398 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT || 2399 MI.getOpcode() == TargetOpcode::G_SEXT || 2400 MI.getOpcode() == TargetOpcode::G_ZEXT) && 2401 "Expected a G_[ASZ]EXT"); 2402 Register SrcReg = MI.getOperand(1).getReg(); 2403 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg); 2404 // Match exts with the same opcode, anyext([sz]ext) and sext(zext). 
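// E.g. (sext (zext x)) can be folded to (zext x): the inner zero-extension
// already clears the high bits, so the outer sign bit is known to be zero.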
2405 unsigned Opc = MI.getOpcode(); 2406 unsigned SrcOpc = SrcMI->getOpcode(); 2407 if (Opc == SrcOpc || 2408 (Opc == TargetOpcode::G_ANYEXT && 2409 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) || 2410 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) { 2411 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc); 2412 return true; 2413 } 2414 return false; 2415 } 2416 2417 void CombinerHelper::applyCombineExtOfExt( 2418 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 2419 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT || 2420 MI.getOpcode() == TargetOpcode::G_SEXT || 2421 MI.getOpcode() == TargetOpcode::G_ZEXT) && 2422 "Expected a G_[ASZ]EXT"); 2423 2424 Register Reg = std::get<0>(MatchInfo); 2425 unsigned SrcExtOp = std::get<1>(MatchInfo); 2426 2427 // Combine exts with the same opcode. 2428 if (MI.getOpcode() == SrcExtOp) { 2429 Observer.changingInstr(MI); 2430 MI.getOperand(1).setReg(Reg); 2431 Observer.changedInstr(MI); 2432 return; 2433 } 2434 2435 // Combine: 2436 // - anyext([sz]ext x) to [sz]ext x 2437 // - sext(zext x) to zext x 2438 if (MI.getOpcode() == TargetOpcode::G_ANYEXT || 2439 (MI.getOpcode() == TargetOpcode::G_SEXT && 2440 SrcExtOp == TargetOpcode::G_ZEXT)) { 2441 Register DstReg = MI.getOperand(0).getReg(); 2442 Builder.setInstrAndDebugLoc(MI); 2443 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg}); 2444 MI.eraseFromParent(); 2445 } 2446 } 2447 2448 bool CombinerHelper::matchCombineTruncOfExt( 2449 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) { 2450 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2451 Register SrcReg = MI.getOperand(1).getReg(); 2452 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg); 2453 unsigned SrcOpc = SrcMI->getOpcode(); 2454 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT || 2455 SrcOpc == TargetOpcode::G_ZEXT) { 2456 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc); 2457 return true; 2458 } 2459 return false; 2460 } 2461 2462 void CombinerHelper::applyCombineTruncOfExt( 2463 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) { 2464 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2465 Register SrcReg = MatchInfo.first; 2466 unsigned SrcExtOp = MatchInfo.second; 2467 Register DstReg = MI.getOperand(0).getReg(); 2468 LLT SrcTy = MRI.getType(SrcReg); 2469 LLT DstTy = MRI.getType(DstReg); 2470 if (SrcTy == DstTy) { 2471 MI.eraseFromParent(); 2472 replaceRegWith(MRI, DstReg, SrcReg); 2473 return; 2474 } 2475 Builder.setInstrAndDebugLoc(MI); 2476 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits()) 2477 Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg}); 2478 else 2479 Builder.buildTrunc(DstReg, SrcReg); 2480 MI.eraseFromParent(); 2481 } 2482 2483 static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) { 2484 const unsigned ShiftSize = ShiftTy.getScalarSizeInBits(); 2485 const unsigned TruncSize = TruncTy.getScalarSizeInBits(); 2486 2487 // ShiftTy > 32 > TruncTy -> 32 2488 if (ShiftSize > 32 && TruncSize < 32) 2489 return ShiftTy.changeElementSize(32); 2490 2491 // TODO: We could also reduce to 16 bits, but that's more target-dependent. 2492 // Some targets like it, some don't, some only like it under certain 2493 // conditions/processor versions, etc. 2494 // A TL hook might be needed for this. 
2495 2496 // Don't combine 2497 return ShiftTy; 2498 } 2499 2500 bool CombinerHelper::matchCombineTruncOfShift( 2501 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) { 2502 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2503 Register DstReg = MI.getOperand(0).getReg(); 2504 Register SrcReg = MI.getOperand(1).getReg(); 2505 2506 if (!MRI.hasOneNonDBGUse(SrcReg)) 2507 return false; 2508 2509 LLT SrcTy = MRI.getType(SrcReg); 2510 LLT DstTy = MRI.getType(DstReg); 2511 2512 MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI); 2513 const auto &TL = getTargetLowering(); 2514 2515 LLT NewShiftTy; 2516 switch (SrcMI->getOpcode()) { 2517 default: 2518 return false; 2519 case TargetOpcode::G_SHL: { 2520 NewShiftTy = DstTy; 2521 2522 // Make sure new shift amount is legal. 2523 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg()); 2524 if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits())) 2525 return false; 2526 break; 2527 } 2528 case TargetOpcode::G_LSHR: 2529 case TargetOpcode::G_ASHR: { 2530 // For right shifts, we conservatively do not do the transform if the TRUNC 2531 // has any STORE users. The reason is that if we change the type of the 2532 // shift, we may break the truncstore combine. 2533 // 2534 // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)). 2535 for (auto &User : MRI.use_instructions(DstReg)) 2536 if (User.getOpcode() == TargetOpcode::G_STORE) 2537 return false; 2538 2539 NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy); 2540 if (NewShiftTy == SrcTy) 2541 return false; 2542 2543 // Make sure we won't lose information by truncating the high bits. 2544 KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg()); 2545 if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() - 2546 DstTy.getScalarSizeInBits())) 2547 return false; 2548 break; 2549 } 2550 } 2551 2552 if (!isLegalOrBeforeLegalizer( 2553 {SrcMI->getOpcode(), 2554 {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}})) 2555 return false; 2556 2557 MatchInfo = std::make_pair(SrcMI, NewShiftTy); 2558 return true; 2559 } 2560 2561 void CombinerHelper::applyCombineTruncOfShift( 2562 MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) { 2563 Builder.setInstrAndDebugLoc(MI); 2564 2565 MachineInstr *ShiftMI = MatchInfo.first; 2566 LLT NewShiftTy = MatchInfo.second; 2567 2568 Register Dst = MI.getOperand(0).getReg(); 2569 LLT DstTy = MRI.getType(Dst); 2570 2571 Register ShiftAmt = ShiftMI->getOperand(2).getReg(); 2572 Register ShiftSrc = ShiftMI->getOperand(1).getReg(); 2573 ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0); 2574 2575 Register NewShift = 2576 Builder 2577 .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt}) 2578 .getReg(0); 2579 2580 if (NewShiftTy == DstTy) 2581 replaceRegWith(MRI, Dst, NewShift); 2582 else 2583 Builder.buildTrunc(Dst, NewShift); 2584 2585 eraseInst(MI); 2586 } 2587 2588 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) { 2589 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) { 2590 return MO.isReg() && 2591 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2592 }); 2593 } 2594 2595 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) { 2596 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) { 2597 return !MO.isReg() || 2598 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2599 }); 2600 } 2601 2602 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) { 
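// A negative (sentinel) mask element means the lane is undefined, so a mask
// made up entirely of negative elements makes the whole shuffle result undef.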
2603 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR); 2604 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); 2605 return all_of(Mask, [](int Elt) { return Elt < 0; }); 2606 } 2607 2608 bool CombinerHelper::matchUndefStore(MachineInstr &MI) { 2609 assert(MI.getOpcode() == TargetOpcode::G_STORE); 2610 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(), 2611 MRI); 2612 } 2613 2614 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) { 2615 assert(MI.getOpcode() == TargetOpcode::G_SELECT); 2616 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(), 2617 MRI); 2618 } 2619 2620 bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) { 2621 assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT || 2622 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) && 2623 "Expected an insert/extract element op"); 2624 LLT VecTy = MRI.getType(MI.getOperand(1).getReg()); 2625 unsigned IdxIdx = 2626 MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3; 2627 auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI); 2628 if (!Idx) 2629 return false; 2630 return Idx->getZExtValue() >= VecTy.getNumElements(); 2631 } 2632 2633 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) { 2634 GSelect &SelMI = cast<GSelect>(MI); 2635 auto Cst = 2636 isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI); 2637 if (!Cst) 2638 return false; 2639 OpIdx = Cst->isZero() ? 3 : 2; 2640 return true; 2641 } 2642 2643 void CombinerHelper::eraseInst(MachineInstr &MI) { MI.eraseFromParent(); } 2644 2645 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1, 2646 const MachineOperand &MOP2) { 2647 if (!MOP1.isReg() || !MOP2.isReg()) 2648 return false; 2649 auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI); 2650 if (!InstAndDef1) 2651 return false; 2652 auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI); 2653 if (!InstAndDef2) 2654 return false; 2655 MachineInstr *I1 = InstAndDef1->MI; 2656 MachineInstr *I2 = InstAndDef2->MI; 2657 2658 // Handle a case like this: 2659 // 2660 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>) 2661 // 2662 // Even though %0 and %1 are produced by the same instruction they are not 2663 // the same values. 2664 if (I1 == I2) 2665 return MOP1.getReg() == MOP2.getReg(); 2666 2667 // If we have an instruction which loads or stores, we can't guarantee that 2668 // it is identical. 2669 // 2670 // For example, we may have 2671 // 2672 // %x1 = G_LOAD %addr (load N from @somewhere) 2673 // ... 2674 // call @foo 2675 // ... 2676 // %x2 = G_LOAD %addr (load N from @somewhere) 2677 // ... 2678 // %or = G_OR %x1, %x2 2679 // 2680 // It's possible that @foo will modify whatever lives at the address we're 2681 // loading from. To be safe, let's just assume that all loads and stores 2682 // are different (unless we have something which is guaranteed to not 2683 // change.) 2684 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad()) 2685 return false; 2686 2687 // If both instructions are loads or stores, they are equal only if both 2688 // are dereferenceable invariant loads with the same number of bits. 
2689 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
2690 GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
2691 GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
2692 if (!LS1 || !LS2)
2693 return false;
2694
2695 if (!I2->isDereferenceableInvariantLoad() ||
2696 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
2697 return false;
2698 }
2699
2700 // Check for physical registers on the instructions first to avoid cases
2701 // like this:
2702 //
2703 // %a = COPY $physreg
2704 // ...
2705 // SOMETHING implicit-def $physreg
2706 // ...
2707 // %b = COPY $physreg
2708 //
2709 // These copies are not equivalent.
2710 if (any_of(I1->uses(), [](const MachineOperand &MO) {
2711 return MO.isReg() && MO.getReg().isPhysical();
2712 })) {
2713 // Check if we have a case like this:
2714 //
2715 // %a = COPY $physreg
2716 // %b = COPY %a
2717 //
2718 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2719 // From that, we know that they must have the same value, since they must
2720 // have come from the same COPY.
2721 return I1->isIdenticalTo(*I2);
2722 }
2723
2724 // We don't have any physical registers, so we don't necessarily need the
2725 // same vreg defs.
2726 //
2727 // On the off-chance that there's some target instruction feeding into the
2728 // instruction, let's use produceSameValue instead of isIdenticalTo.
2729 if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
2730 // Handle instructions with multiple defs that produce the same values. The
2731 // values are the same for operands with the same index.
2732 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2733 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
2734 // I1 and I2 are different instructions but produce the same values;
2735 // %1 and %6 are the same, while %1 and %7 are not.
2736 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) == 2737 I2->findRegisterDefOperandIdx(InstAndDef2->Reg); 2738 } 2739 return false; 2740 } 2741 2742 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) { 2743 if (!MOP.isReg()) 2744 return false; 2745 auto *MI = MRI.getVRegDef(MOP.getReg()); 2746 auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI); 2747 return MaybeCst && MaybeCst->getBitWidth() <= 64 && 2748 MaybeCst->getSExtValue() == C; 2749 } 2750 2751 bool CombinerHelper::matchConstantFPOp(const MachineOperand &MOP, double C) { 2752 if (!MOP.isReg()) 2753 return false; 2754 std::optional<FPValueAndVReg> MaybeCst; 2755 if (!mi_match(MOP.getReg(), MRI, m_GFCstOrSplat(MaybeCst))) 2756 return false; 2757 2758 return MaybeCst->Value.isExactlyValue(C); 2759 } 2760 2761 void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI, 2762 unsigned OpIdx) { 2763 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); 2764 Register OldReg = MI.getOperand(0).getReg(); 2765 Register Replacement = MI.getOperand(OpIdx).getReg(); 2766 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); 2767 MI.eraseFromParent(); 2768 replaceRegWith(MRI, OldReg, Replacement); 2769 } 2770 2771 void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI, 2772 Register Replacement) { 2773 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); 2774 Register OldReg = MI.getOperand(0).getReg(); 2775 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); 2776 MI.eraseFromParent(); 2777 replaceRegWith(MRI, OldReg, Replacement); 2778 } 2779 2780 bool CombinerHelper::matchConstantLargerBitWidth(MachineInstr &MI, 2781 unsigned ConstIdx) { 2782 Register ConstReg = MI.getOperand(ConstIdx).getReg(); 2783 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 2784 2785 // Get the shift amount 2786 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstReg, MRI); 2787 if (!VRegAndVal) 2788 return false; 2789 2790 // Return true of shift amount >= Bitwidth 2791 return (VRegAndVal->Value.uge(DstTy.getSizeInBits())); 2792 } 2793 2794 void CombinerHelper::applyFunnelShiftConstantModulo(MachineInstr &MI) { 2795 assert((MI.getOpcode() == TargetOpcode::G_FSHL || 2796 MI.getOpcode() == TargetOpcode::G_FSHR) && 2797 "This is not a funnel shift operation"); 2798 2799 Register ConstReg = MI.getOperand(3).getReg(); 2800 LLT ConstTy = MRI.getType(ConstReg); 2801 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 2802 2803 auto VRegAndVal = getIConstantVRegValWithLookThrough(ConstReg, MRI); 2804 assert((VRegAndVal) && "Value is not a constant"); 2805 2806 // Calculate the new Shift Amount = Old Shift Amount % BitWidth 2807 APInt NewConst = VRegAndVal->Value.urem( 2808 APInt(ConstTy.getSizeInBits(), DstTy.getScalarSizeInBits())); 2809 2810 Builder.setInstrAndDebugLoc(MI); 2811 auto NewConstInstr = Builder.buildConstant(ConstTy, NewConst.getZExtValue()); 2812 Builder.buildInstr( 2813 MI.getOpcode(), {MI.getOperand(0)}, 2814 {MI.getOperand(1), MI.getOperand(2), NewConstInstr.getReg(0)}); 2815 2816 MI.eraseFromParent(); 2817 } 2818 2819 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) { 2820 assert(MI.getOpcode() == TargetOpcode::G_SELECT); 2821 // Match (cond ? 
x : x) 2822 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) && 2823 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(), 2824 MRI); 2825 } 2826 2827 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) { 2828 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) && 2829 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), 2830 MRI); 2831 } 2832 2833 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) { 2834 return matchConstantOp(MI.getOperand(OpIdx), 0) && 2835 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(), 2836 MRI); 2837 } 2838 2839 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) { 2840 MachineOperand &MO = MI.getOperand(OpIdx); 2841 return MO.isReg() && 2842 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2843 } 2844 2845 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, 2846 unsigned OpIdx) { 2847 MachineOperand &MO = MI.getOperand(OpIdx); 2848 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB); 2849 } 2850 2851 void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) { 2852 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2853 Builder.setInstr(MI); 2854 Builder.buildFConstant(MI.getOperand(0), C); 2855 MI.eraseFromParent(); 2856 } 2857 2858 void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) { 2859 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2860 Builder.setInstr(MI); 2861 Builder.buildConstant(MI.getOperand(0), C); 2862 MI.eraseFromParent(); 2863 } 2864 2865 void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) { 2866 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2867 Builder.setInstr(MI); 2868 Builder.buildConstant(MI.getOperand(0), C); 2869 MI.eraseFromParent(); 2870 } 2871 2872 void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, ConstantFP *CFP) { 2873 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2874 Builder.setInstr(MI); 2875 Builder.buildFConstant(MI.getOperand(0), CFP->getValueAPF()); 2876 MI.eraseFromParent(); 2877 } 2878 2879 void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) { 2880 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2881 Builder.setInstr(MI); 2882 Builder.buildUndef(MI.getOperand(0)); 2883 MI.eraseFromParent(); 2884 } 2885 2886 bool CombinerHelper::matchSimplifyAddToSub( 2887 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { 2888 Register LHS = MI.getOperand(1).getReg(); 2889 Register RHS = MI.getOperand(2).getReg(); 2890 Register &NewLHS = std::get<0>(MatchInfo); 2891 Register &NewRHS = std::get<1>(MatchInfo); 2892 2893 // Helper lambda to check for opportunities for 2894 // ((0-A) + B) -> B - A 2895 // (A + (0-B)) -> A - B 2896 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) { 2897 if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS)))) 2898 return false; 2899 NewLHS = MaybeNewLHS; 2900 return true; 2901 }; 2902 2903 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); 2904 } 2905 2906 bool CombinerHelper::matchCombineInsertVecElts( 2907 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) { 2908 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT && 2909 "Invalid opcode"); 2910 Register DstReg = MI.getOperand(0).getReg(); 2911 LLT DstTy = MRI.getType(DstReg); 2912 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?"); 2913 unsigned NumElts = DstTy.getNumElements(); 2914 // If this MI is part of a sequence of insert_vec_elts, 
then 2915 // don't do the combine in the middle of the sequence. 2916 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() == 2917 TargetOpcode::G_INSERT_VECTOR_ELT) 2918 return false; 2919 MachineInstr *CurrInst = &MI; 2920 MachineInstr *TmpInst; 2921 int64_t IntImm; 2922 Register TmpReg; 2923 MatchInfo.resize(NumElts); 2924 while (mi_match( 2925 CurrInst->getOperand(0).getReg(), MRI, 2926 m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) { 2927 if (IntImm >= NumElts || IntImm < 0) 2928 return false; 2929 if (!MatchInfo[IntImm]) 2930 MatchInfo[IntImm] = TmpReg; 2931 CurrInst = TmpInst; 2932 } 2933 // Variable index. 2934 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT) 2935 return false; 2936 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) { 2937 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) { 2938 if (!MatchInfo[I - 1].isValid()) 2939 MatchInfo[I - 1] = TmpInst->getOperand(I).getReg(); 2940 } 2941 return true; 2942 } 2943 // If we didn't end in a G_IMPLICIT_DEF, bail out. 2944 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF; 2945 } 2946 2947 void CombinerHelper::applyCombineInsertVecElts( 2948 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) { 2949 Builder.setInstr(MI); 2950 Register UndefReg; 2951 auto GetUndef = [&]() { 2952 if (UndefReg) 2953 return UndefReg; 2954 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 2955 UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0); 2956 return UndefReg; 2957 }; 2958 for (unsigned I = 0; I < MatchInfo.size(); ++I) { 2959 if (!MatchInfo[I]) 2960 MatchInfo[I] = GetUndef(); 2961 } 2962 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo); 2963 MI.eraseFromParent(); 2964 } 2965 2966 void CombinerHelper::applySimplifyAddToSub( 2967 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { 2968 Builder.setInstr(MI); 2969 Register SubLHS, SubRHS; 2970 std::tie(SubLHS, SubRHS) = MatchInfo; 2971 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS); 2972 MI.eraseFromParent(); 2973 } 2974 2975 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands( 2976 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { 2977 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ... 2978 // 2979 // Creates the new hand + logic instruction (but does not insert them.) 2980 // 2981 // On success, MatchInfo is populated with the new instructions. These are 2982 // inserted in applyHoistLogicOpWithSameOpcodeHands. 2983 unsigned LogicOpcode = MI.getOpcode(); 2984 assert(LogicOpcode == TargetOpcode::G_AND || 2985 LogicOpcode == TargetOpcode::G_OR || 2986 LogicOpcode == TargetOpcode::G_XOR); 2987 MachineIRBuilder MIB(MI); 2988 Register Dst = MI.getOperand(0).getReg(); 2989 Register LHSReg = MI.getOperand(1).getReg(); 2990 Register RHSReg = MI.getOperand(2).getReg(); 2991 2992 // Don't recompute anything. 2993 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg)) 2994 return false; 2995 2996 // Make sure we have (hand x, ...), (hand y, ...) 
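// E.g. for (and (lshr %x, %z), (lshr %y, %z)) both hands are single-use
// G_LSHRs by the same amount, so the logic op can be hoisted to give
// (lshr (and %x, %y), %z).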
2997 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI); 2998 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI); 2999 if (!LeftHandInst || !RightHandInst) 3000 return false; 3001 unsigned HandOpcode = LeftHandInst->getOpcode(); 3002 if (HandOpcode != RightHandInst->getOpcode()) 3003 return false; 3004 if (!LeftHandInst->getOperand(1).isReg() || 3005 !RightHandInst->getOperand(1).isReg()) 3006 return false; 3007 3008 // Make sure the types match up, and if we're doing this post-legalization, 3009 // we end up with legal types. 3010 Register X = LeftHandInst->getOperand(1).getReg(); 3011 Register Y = RightHandInst->getOperand(1).getReg(); 3012 LLT XTy = MRI.getType(X); 3013 LLT YTy = MRI.getType(Y); 3014 if (!XTy.isValid() || XTy != YTy) 3015 return false; 3016 3017 // Optional extra source register. 3018 Register ExtraHandOpSrcReg; 3019 switch (HandOpcode) { 3020 default: 3021 return false; 3022 case TargetOpcode::G_ANYEXT: 3023 case TargetOpcode::G_SEXT: 3024 case TargetOpcode::G_ZEXT: { 3025 // Match: logic (ext X), (ext Y) --> ext (logic X, Y) 3026 break; 3027 } 3028 case TargetOpcode::G_AND: 3029 case TargetOpcode::G_ASHR: 3030 case TargetOpcode::G_LSHR: 3031 case TargetOpcode::G_SHL: { 3032 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z 3033 MachineOperand &ZOp = LeftHandInst->getOperand(2); 3034 if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2))) 3035 return false; 3036 ExtraHandOpSrcReg = ZOp.getReg(); 3037 break; 3038 } 3039 } 3040 3041 if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}})) 3042 return false; 3043 3044 // Record the steps to build the new instructions. 3045 // 3046 // Steps to build (logic x, y) 3047 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy); 3048 OperandBuildSteps LogicBuildSteps = { 3049 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); }, 3050 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); }, 3051 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }}; 3052 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps); 3053 3054 // Steps to build hand (logic x, y), ...z 3055 OperandBuildSteps HandBuildSteps = { 3056 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); }, 3057 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }}; 3058 if (ExtraHandOpSrcReg.isValid()) 3059 HandBuildSteps.push_back( 3060 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); }); 3061 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps); 3062 3063 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps}); 3064 return true; 3065 } 3066 3067 void CombinerHelper::applyBuildInstructionSteps( 3068 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { 3069 assert(MatchInfo.InstrsToBuild.size() && 3070 "Expected at least one instr to build?"); 3071 Builder.setInstr(MI); 3072 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) { 3073 assert(InstrToBuild.Opcode && "Expected a valid opcode?"); 3074 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?"); 3075 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode); 3076 for (auto &OperandFn : InstrToBuild.OperandFns) 3077 OperandFn(Instr); 3078 } 3079 MI.eraseFromParent(); 3080 } 3081 3082 bool CombinerHelper::matchAshrShlToSextInreg( 3083 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { 3084 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 3085 int64_t ShlCst, AshrCst; 3086 Register Src; 3087 if (!mi_match(MI.getOperand(0).getReg(), MRI, 3088 m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)), 3089 
m_ICstOrSplat(AshrCst)))) 3090 return false; 3091 if (ShlCst != AshrCst) 3092 return false; 3093 if (!isLegalOrBeforeLegalizer( 3094 {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}})) 3095 return false; 3096 MatchInfo = std::make_tuple(Src, ShlCst); 3097 return true; 3098 } 3099 3100 void CombinerHelper::applyAshShlToSextInreg( 3101 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { 3102 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 3103 Register Src; 3104 int64_t ShiftAmt; 3105 std::tie(Src, ShiftAmt) = MatchInfo; 3106 unsigned Size = MRI.getType(Src).getScalarSizeInBits(); 3107 Builder.setInstrAndDebugLoc(MI); 3108 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt); 3109 MI.eraseFromParent(); 3110 } 3111 3112 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0 3113 bool CombinerHelper::matchOverlappingAnd( 3114 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 3115 assert(MI.getOpcode() == TargetOpcode::G_AND); 3116 3117 Register Dst = MI.getOperand(0).getReg(); 3118 LLT Ty = MRI.getType(Dst); 3119 3120 Register R; 3121 int64_t C1; 3122 int64_t C2; 3123 if (!mi_match( 3124 Dst, MRI, 3125 m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2)))) 3126 return false; 3127 3128 MatchInfo = [=](MachineIRBuilder &B) { 3129 if (C1 & C2) { 3130 B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2)); 3131 return; 3132 } 3133 auto Zero = B.buildConstant(Ty, 0); 3134 replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg()); 3135 }; 3136 return true; 3137 } 3138 3139 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI, 3140 Register &Replacement) { 3141 // Given 3142 // 3143 // %y:_(sN) = G_SOMETHING 3144 // %x:_(sN) = G_SOMETHING 3145 // %res:_(sN) = G_AND %x, %y 3146 // 3147 // Eliminate the G_AND when it is known that x & y == x or x & y == y. 3148 // 3149 // Patterns like this can appear as a result of legalization. E.g. 3150 // 3151 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y 3152 // %one:_(s32) = G_CONSTANT i32 1 3153 // %and:_(s32) = G_AND %cmp, %one 3154 // 3155 // In this case, G_ICMP only produces a single bit, so x & 1 == x. 3156 assert(MI.getOpcode() == TargetOpcode::G_AND); 3157 if (!KB) 3158 return false; 3159 3160 Register AndDst = MI.getOperand(0).getReg(); 3161 Register LHS = MI.getOperand(1).getReg(); 3162 Register RHS = MI.getOperand(2).getReg(); 3163 KnownBits LHSBits = KB->getKnownBits(LHS); 3164 KnownBits RHSBits = KB->getKnownBits(RHS); 3165 3166 // Check that x & Mask == x. 3167 // x & 1 == x, always 3168 // x & 0 == x, only if x is also 0 3169 // Meaning Mask has no effect if every bit is either one in Mask or zero in x. 3170 // 3171 // Check if we can replace AndDst with the LHS of the G_AND 3172 if (canReplaceReg(AndDst, LHS, MRI) && 3173 (LHSBits.Zero | RHSBits.One).isAllOnes()) { 3174 Replacement = LHS; 3175 return true; 3176 } 3177 3178 // Check if we can replace AndDst with the RHS of the G_AND 3179 if (canReplaceReg(AndDst, RHS, MRI) && 3180 (LHSBits.One | RHSBits.Zero).isAllOnes()) { 3181 Replacement = RHS; 3182 return true; 3183 } 3184 3185 return false; 3186 } 3187 3188 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) { 3189 // Given 3190 // 3191 // %y:_(sN) = G_SOMETHING 3192 // %x:_(sN) = G_SOMETHING 3193 // %res:_(sN) = G_OR %x, %y 3194 // 3195 // Eliminate the G_OR when it is known that x | y == x or x | y == y. 
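// E.g. if every bit of %y is known to be zero wherever %x is not already
// known to be one, then %x | %y == %x and the G_OR can simply forward %x.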
3196   assert(MI.getOpcode() == TargetOpcode::G_OR);
3197   if (!KB)
3198     return false;
3199
3200   Register OrDst = MI.getOperand(0).getReg();
3201   Register LHS = MI.getOperand(1).getReg();
3202   Register RHS = MI.getOperand(2).getReg();
3203   KnownBits LHSBits = KB->getKnownBits(LHS);
3204   KnownBits RHSBits = KB->getKnownBits(RHS);
3205
3206   // Check that x | Mask == x.
3207   // x | 0 == x, always
3208   // x | 1 == x, only if x is also 1
3209   // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
3210   //
3211   // Check if we can replace OrDst with the LHS of the G_OR
3212   if (canReplaceReg(OrDst, LHS, MRI) &&
3213       (LHSBits.One | RHSBits.Zero).isAllOnes()) {
3214     Replacement = LHS;
3215     return true;
3216   }
3217
3218   // Check if we can replace OrDst with the RHS of the G_OR
3219   if (canReplaceReg(OrDst, RHS, MRI) &&
3220       (LHSBits.Zero | RHSBits.One).isAllOnes()) {
3221     Replacement = RHS;
3222     return true;
3223   }
3224
3225   return false;
3226 }
3227
3228 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
3229   // If the input is already sign extended, just drop the extension.
3230   Register Src = MI.getOperand(1).getReg();
3231   unsigned ExtBits = MI.getOperand(2).getImm();
3232   unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3233   return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3234 }
3235
3236 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3237                              int64_t Cst, bool IsVector, bool IsFP) {
3238   // For i1, Cst will always be -1 regardless of boolean contents.
3239   return (ScalarSizeBits == 1 && Cst == -1) ||
3240          isConstTrueVal(TLI, Cst, IsVector, IsFP);
3241 }
3242
3243 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
3244                                  SmallVectorImpl<Register> &RegsToNegate) {
3245   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3246   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3247   const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3248   Register XorSrc;
3249   Register CstReg;
3250   // We match xor(src, true) here.
3251   if (!mi_match(MI.getOperand(0).getReg(), MRI,
3252                 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3253     return false;
3254
3255   if (!MRI.hasOneNonDBGUse(XorSrc))
3256     return false;
3257
3258   // Check that XorSrc is the root of a tree of comparisons combined with ANDs
3259   // and ORs. The suffix of RegsToNegate starting from index I is used as a
3260   // work list of tree nodes to visit.
3261   RegsToNegate.push_back(XorSrc);
3262   // Remember whether the comparisons are all integer or all floating point.
3263   bool IsInt = false;
3264   bool IsFP = false;
3265   for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3266     Register Reg = RegsToNegate[I];
3267     if (!MRI.hasOneNonDBGUse(Reg))
3268       return false;
3269     MachineInstr *Def = MRI.getVRegDef(Reg);
3270     switch (Def->getOpcode()) {
3271     default:
3272       // Don't match if the tree contains anything other than ANDs, ORs and
3273       // comparisons.
3274       return false;
3275     case TargetOpcode::G_ICMP:
3276       if (IsFP)
3277         return false;
3278       IsInt = true;
3279       // When we apply the combine we will invert the predicate.
3280       break;
3281     case TargetOpcode::G_FCMP:
3282       if (IsInt)
3283         return false;
3284       IsFP = true;
3285       // When we apply the combine we will invert the predicate.
3286       break;
3287     case TargetOpcode::G_AND:
3288     case TargetOpcode::G_OR:
3289       // Implement De Morgan's laws:
3290       // ~(x & y) -> ~x | ~y
3291       // ~(x | y) -> ~x & ~y
3292       // When we apply the combine we will change the opcode and recursively
3293       // negate the operands.
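      //
      // For example (hypothetical MIR; register names are made up): given
      //   %c1:_(s1) = G_ICMP intpred(eq), %a, %b
      //   %c2:_(s1) = G_ICMP intpred(slt), %a, %b
      //   %t:_(s1) = G_AND %c1, %c2
      // negating %t turns the G_AND into a G_OR and queues %c1 and %c2 so
      // their predicates get inverted (eq -> ne, slt -> sge).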
3294       RegsToNegate.push_back(Def->getOperand(1).getReg());
3295       RegsToNegate.push_back(Def->getOperand(2).getReg());
3296       break;
3297     }
3298   }
3299
3300   // Now we know whether the comparisons are integer or floating point, check
3301   // the constant in the xor.
3302   int64_t Cst;
3303   if (Ty.isVector()) {
3304     MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3305     auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
3306     if (!MaybeCst)
3307       return false;
3308     if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3309       return false;
3310   } else {
3311     if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3312       return false;
3313     if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3314       return false;
3315   }
3316
3317   return true;
3318 }
3319
3320 void CombinerHelper::applyNotCmp(MachineInstr &MI,
3321                                  SmallVectorImpl<Register> &RegsToNegate) {
3322   for (Register Reg : RegsToNegate) {
3323     MachineInstr *Def = MRI.getVRegDef(Reg);
3324     Observer.changingInstr(*Def);
3325     // For each comparison, invert the predicate. For each AND and OR, swap
3326     // the opcode (G_AND <-> G_OR).
3327     switch (Def->getOpcode()) {
3328     default:
3329       llvm_unreachable("Unexpected opcode");
3330     case TargetOpcode::G_ICMP:
3331     case TargetOpcode::G_FCMP: {
3332       MachineOperand &PredOp = Def->getOperand(1);
3333       CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3334           (CmpInst::Predicate)PredOp.getPredicate());
3335       PredOp.setPredicate(NewP);
3336       break;
3337     }
3338     case TargetOpcode::G_AND:
3339       Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3340       break;
3341     case TargetOpcode::G_OR:
3342       Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3343       break;
3344     }
3345     Observer.changedInstr(*Def);
3346   }
3347
3348   replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3349   MI.eraseFromParent();
3350 }
3351
3352 bool CombinerHelper::matchXorOfAndWithSameReg(
3353     MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3354   // Match (xor (and x, y), y) (or any of its commuted cases)
3355   assert(MI.getOpcode() == TargetOpcode::G_XOR);
3356   Register &X = MatchInfo.first;
3357   Register &Y = MatchInfo.second;
3358   Register AndReg = MI.getOperand(1).getReg();
3359   Register SharedReg = MI.getOperand(2).getReg();
3360
3361   // Find a G_AND on either side of the G_XOR.
3362   // Look for one of
3363   //
3364   // (xor (and x, y), SharedReg)
3365   // (xor SharedReg, (and x, y))
3366   if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3367     std::swap(AndReg, SharedReg);
3368     if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3369       return false;
3370   }
3371
3372   // Only do this if we'll eliminate the G_AND.
3373   if (!MRI.hasOneNonDBGUse(AndReg))
3374     return false;
3375
3376   // We can combine if SharedReg is the same as either the LHS or RHS of the
3377   // G_AND.
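  //
  // For illustration (hypothetical MIR; register names are made up):
  //
  //   %and:_(s32) = G_AND %x, %y
  //   %xor:_(s32) = G_XOR %and, %y
  //
  // Here SharedReg is %y, which matches the G_AND's second operand, so the
  // combine applies with X = %x and Y = %y.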
3378 if (Y != SharedReg) 3379 std::swap(X, Y); 3380 return Y == SharedReg; 3381 } 3382 3383 void CombinerHelper::applyXorOfAndWithSameReg( 3384 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { 3385 // Fold (xor (and x, y), y) -> (and (not x), y) 3386 Builder.setInstrAndDebugLoc(MI); 3387 Register X, Y; 3388 std::tie(X, Y) = MatchInfo; 3389 auto Not = Builder.buildNot(MRI.getType(X), X); 3390 Observer.changingInstr(MI); 3391 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND)); 3392 MI.getOperand(1).setReg(Not->getOperand(0).getReg()); 3393 MI.getOperand(2).setReg(Y); 3394 Observer.changedInstr(MI); 3395 } 3396 3397 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) { 3398 auto &PtrAdd = cast<GPtrAdd>(MI); 3399 Register DstReg = PtrAdd.getReg(0); 3400 LLT Ty = MRI.getType(DstReg); 3401 const DataLayout &DL = Builder.getMF().getDataLayout(); 3402 3403 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace())) 3404 return false; 3405 3406 if (Ty.isPointer()) { 3407 auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI); 3408 return ConstVal && *ConstVal == 0; 3409 } 3410 3411 assert(Ty.isVector() && "Expecting a vector type"); 3412 const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg()); 3413 return isBuildVectorAllZeros(*VecMI, MRI); 3414 } 3415 3416 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) { 3417 auto &PtrAdd = cast<GPtrAdd>(MI); 3418 Builder.setInstrAndDebugLoc(PtrAdd); 3419 Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg()); 3420 PtrAdd.eraseFromParent(); 3421 } 3422 3423 /// The second source operand is known to be a power of 2. 3424 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) { 3425 Register DstReg = MI.getOperand(0).getReg(); 3426 Register Src0 = MI.getOperand(1).getReg(); 3427 Register Pow2Src1 = MI.getOperand(2).getReg(); 3428 LLT Ty = MRI.getType(DstReg); 3429 Builder.setInstrAndDebugLoc(MI); 3430 3431 // Fold (urem x, pow2) -> (and x, pow2-1) 3432 auto NegOne = Builder.buildConstant(Ty, -1); 3433 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne); 3434 Builder.buildAnd(DstReg, Src0, Add); 3435 MI.eraseFromParent(); 3436 } 3437 3438 bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI, 3439 unsigned &SelectOpNo) { 3440 Register LHS = MI.getOperand(1).getReg(); 3441 Register RHS = MI.getOperand(2).getReg(); 3442 3443 Register OtherOperandReg = RHS; 3444 SelectOpNo = 1; 3445 MachineInstr *Select = MRI.getVRegDef(LHS); 3446 3447 // Don't do this unless the old select is going away. We want to eliminate the 3448 // binary operator, not replace a binop with a select. 3449 if (Select->getOpcode() != TargetOpcode::G_SELECT || 3450 !MRI.hasOneNonDBGUse(LHS)) { 3451 OtherOperandReg = LHS; 3452 SelectOpNo = 2; 3453 Select = MRI.getVRegDef(RHS); 3454 if (Select->getOpcode() != TargetOpcode::G_SELECT || 3455 !MRI.hasOneNonDBGUse(RHS)) 3456 return false; 3457 } 3458 3459 MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg()); 3460 MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg()); 3461 3462 if (!isConstantOrConstantVector(*SelectLHS, MRI, 3463 /*AllowFP*/ true, 3464 /*AllowOpaqueConstants*/ false)) 3465 return false; 3466 if (!isConstantOrConstantVector(*SelectRHS, MRI, 3467 /*AllowFP*/ true, 3468 /*AllowOpaqueConstants*/ false)) 3469 return false; 3470 3471 unsigned BinOpcode = MI.getOpcode(); 3472 3473 // We know that one of the operands is a select of constants. 
Now verify that 3474 // the other binary operator operand is either a constant, or we can handle a 3475 // variable. 3476 bool CanFoldNonConst = 3477 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) && 3478 (isNullOrNullSplat(*SelectLHS, MRI) || 3479 isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) && 3480 (isNullOrNullSplat(*SelectRHS, MRI) || 3481 isAllOnesOrAllOnesSplat(*SelectRHS, MRI)); 3482 if (CanFoldNonConst) 3483 return true; 3484 3485 return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI, 3486 /*AllowFP*/ true, 3487 /*AllowOpaqueConstants*/ false); 3488 } 3489 3490 /// \p SelectOperand is the operand in binary operator \p MI that is the select 3491 /// to fold. 3492 void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI, 3493 const unsigned &SelectOperand) { 3494 Builder.setInstrAndDebugLoc(MI); 3495 3496 Register Dst = MI.getOperand(0).getReg(); 3497 Register LHS = MI.getOperand(1).getReg(); 3498 Register RHS = MI.getOperand(2).getReg(); 3499 MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg()); 3500 3501 Register SelectCond = Select->getOperand(1).getReg(); 3502 Register SelectTrue = Select->getOperand(2).getReg(); 3503 Register SelectFalse = Select->getOperand(3).getReg(); 3504 3505 LLT Ty = MRI.getType(Dst); 3506 unsigned BinOpcode = MI.getOpcode(); 3507 3508 Register FoldTrue, FoldFalse; 3509 3510 // We have a select-of-constants followed by a binary operator with a 3511 // constant. Eliminate the binop by pulling the constant math into the select. 3512 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO 3513 if (SelectOperand == 1) { 3514 // TODO: SelectionDAG verifies this actually constant folds before 3515 // committing to the combine. 3516 3517 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0); 3518 FoldFalse = 3519 Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0); 3520 } else { 3521 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0); 3522 FoldFalse = 3523 Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0); 3524 } 3525 3526 Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags()); 3527 MI.eraseFromParent(); 3528 } 3529 3530 std::optional<SmallVector<Register, 8>> 3531 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { 3532 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!"); 3533 // We want to detect if Root is part of a tree which represents a bunch 3534 // of loads being merged into a larger load. We'll try to recognize patterns 3535 // like, for example: 3536 // 3537 // Reg Reg 3538 // \ / 3539 // OR_1 Reg 3540 // \ / 3541 // OR_2 3542 // \ Reg 3543 // .. / 3544 // Root 3545 // 3546 // Reg Reg Reg Reg 3547 // \ / \ / 3548 // OR_1 OR_2 3549 // \ / 3550 // \ / 3551 // ... 3552 // Root 3553 // 3554 // Each "Reg" may have been produced by a load + some arithmetic. This 3555 // function will save each of them. 3556 SmallVector<Register, 8> RegsToVisit; 3557 SmallVector<const MachineInstr *, 7> Ors = {Root}; 3558 3559 // In the "worst" case, we're dealing with a load for each byte. So, there 3560 // are at most #bytes - 1 ORs. 
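  //
  // E.g. for a 4-byte (s32) result built from four 1-byte loads, the tree
  // contains at most 3 G_ORs, hence the #bytes - 1 bound used below.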
3561   const unsigned MaxIter =
3562       MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3563   for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3564     if (Ors.empty())
3565       break;
3566     const MachineInstr *Curr = Ors.pop_back_val();
3567     Register OrLHS = Curr->getOperand(1).getReg();
3568     Register OrRHS = Curr->getOperand(2).getReg();
3569
3570     // In the combine, we want to eliminate the entire tree.
3571     if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3572       return std::nullopt;
3573
3574     // If it's a G_OR, save it and continue to walk. If it's not, then it's
3575     // something that may be a load + arithmetic.
3576     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3577       Ors.push_back(Or);
3578     else
3579       RegsToVisit.push_back(OrLHS);
3580     if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3581       Ors.push_back(Or);
3582     else
3583       RegsToVisit.push_back(OrRHS);
3584   }
3585
3586   // We're going to try and merge each register into a wider power-of-2 type,
3587   // so we ought to have an even number of registers.
3588   if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3589     return std::nullopt;
3590   return RegsToVisit;
3591 }
3592
3593 /// Helper function for findLoadOffsetsForLoadOrCombine.
3594 ///
3595 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3596 /// and then moving that value into a specific byte offset.
3597 ///
3598 /// e.g. x[i] << 24
3599 ///
3600 /// \returns The load instruction and the byte offset it is moved into.
3601 static std::optional<std::pair<GZExtLoad *, int64_t>>
3602 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3603                          const MachineRegisterInfo &MRI) {
3604   assert(MRI.hasOneNonDBGUse(Reg) &&
3605          "Expected Reg to only have one non-debug use?");
3606   Register MaybeLoad;
3607   int64_t Shift;
3608   if (!mi_match(Reg, MRI,
3609                 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3610     Shift = 0;
3611     MaybeLoad = Reg;
3612   }
3613
3614   if (Shift % MemSizeInBits != 0)
3615     return std::nullopt;
3616
3617   // TODO: Handle other types of loads.
3618   auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
3619   if (!Load)
3620     return std::nullopt;
3621
3622   if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
3623     return std::nullopt;
3624
3625   return std::make_pair(Load, Shift / MemSizeInBits);
3626 }
3627
3628 std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
3629 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3630     SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3631     const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3632
3633   // Each load found for the pattern. There should be one for each RegsToVisit.
3634   SmallSetVector<const MachineInstr *, 8> Loads;
3635
3636   // The lowest index used in any load. (The lowest "i" for each x[i].)
3637   int64_t LowestIdx = INT64_MAX;
3638
3639   // The load which uses the lowest index.
3640   GZExtLoad *LowestIdxLoad = nullptr;
3641
3642   // Keeps track of the load indices we see. We shouldn't see any indices twice.
3643   SmallSet<int64_t, 8> SeenIdx;
3644
3645   // Ensure each load is in the same MBB.
3646   // TODO: Support multiple MachineBasicBlocks.
3647   MachineBasicBlock *MBB = nullptr;
3648   const MachineMemOperand *MMO = nullptr;
3649
3650   // Earliest instruction-order load in the pattern.
3651   GZExtLoad *EarliestLoad = nullptr;
3652
3653   // Latest instruction-order load in the pattern.
3654 GZExtLoad *LatestLoad = nullptr; 3655 3656 // Base pointer which every load should share. 3657 Register BasePtr; 3658 3659 // We want to find a load for each register. Each load should have some 3660 // appropriate bit twiddling arithmetic. During this loop, we will also keep 3661 // track of the load which uses the lowest index. Later, we will check if we 3662 // can use its pointer in the final, combined load. 3663 for (auto Reg : RegsToVisit) { 3664 // Find the load, and find the position that it will end up in (e.g. a 3665 // shifted) value. 3666 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI); 3667 if (!LoadAndPos) 3668 return std::nullopt; 3669 GZExtLoad *Load; 3670 int64_t DstPos; 3671 std::tie(Load, DstPos) = *LoadAndPos; 3672 3673 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because 3674 // it is difficult to check for stores/calls/etc between loads. 3675 MachineBasicBlock *LoadMBB = Load->getParent(); 3676 if (!MBB) 3677 MBB = LoadMBB; 3678 if (LoadMBB != MBB) 3679 return std::nullopt; 3680 3681 // Make sure that the MachineMemOperands of every seen load are compatible. 3682 auto &LoadMMO = Load->getMMO(); 3683 if (!MMO) 3684 MMO = &LoadMMO; 3685 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace()) 3686 return std::nullopt; 3687 3688 // Find out what the base pointer and index for the load is. 3689 Register LoadPtr; 3690 int64_t Idx; 3691 if (!mi_match(Load->getOperand(1).getReg(), MRI, 3692 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) { 3693 LoadPtr = Load->getOperand(1).getReg(); 3694 Idx = 0; 3695 } 3696 3697 // Don't combine things like a[i], a[i] -> a bigger load. 3698 if (!SeenIdx.insert(Idx).second) 3699 return std::nullopt; 3700 3701 // Every load must share the same base pointer; don't combine things like: 3702 // 3703 // a[i], b[i + 1] -> a bigger load. 3704 if (!BasePtr.isValid()) 3705 BasePtr = LoadPtr; 3706 if (BasePtr != LoadPtr) 3707 return std::nullopt; 3708 3709 if (Idx < LowestIdx) { 3710 LowestIdx = Idx; 3711 LowestIdxLoad = Load; 3712 } 3713 3714 // Keep track of the byte offset that this load ends up at. If we have seen 3715 // the byte offset, then stop here. We do not want to combine: 3716 // 3717 // a[i] << 16, a[i + k] << 16 -> a bigger load. 3718 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second) 3719 return std::nullopt; 3720 Loads.insert(Load); 3721 3722 // Keep track of the position of the earliest/latest loads in the pattern. 3723 // We will check that there are no load fold barriers between them later 3724 // on. 3725 // 3726 // FIXME: Is there a better way to check for load fold barriers? 3727 if (!EarliestLoad || dominates(*Load, *EarliestLoad)) 3728 EarliestLoad = Load; 3729 if (!LatestLoad || dominates(*LatestLoad, *Load)) 3730 LatestLoad = Load; 3731 } 3732 3733 // We found a load for each register. Let's check if each load satisfies the 3734 // pattern. 3735 assert(Loads.size() == RegsToVisit.size() && 3736 "Expected to find a load for each register?"); 3737 assert(EarliestLoad != LatestLoad && EarliestLoad && 3738 LatestLoad && "Expected at least two loads?"); 3739 3740 // Check if there are any stores, calls, etc. between any of the loads. If 3741 // there are, then we can't safely perform the combine. 3742 // 3743 // MaxIter is chosen based off the (worst case) number of iterations it 3744 // typically takes to succeed in the LLVM test suite plus some padding. 3745 // 3746 // FIXME: Is there a better way to check for load fold barriers? 
3747 const unsigned MaxIter = 20; 3748 unsigned Iter = 0; 3749 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(), 3750 LatestLoad->getIterator())) { 3751 if (Loads.count(&MI)) 3752 continue; 3753 if (MI.isLoadFoldBarrier()) 3754 return std::nullopt; 3755 if (Iter++ == MaxIter) 3756 return std::nullopt; 3757 } 3758 3759 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad); 3760 } 3761 3762 bool CombinerHelper::matchLoadOrCombine( 3763 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 3764 assert(MI.getOpcode() == TargetOpcode::G_OR); 3765 MachineFunction &MF = *MI.getMF(); 3766 // Assuming a little-endian target, transform: 3767 // s8 *a = ... 3768 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24) 3769 // => 3770 // s32 val = *((i32)a) 3771 // 3772 // s8 *a = ... 3773 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3] 3774 // => 3775 // s32 val = BSWAP(*((s32)a)) 3776 Register Dst = MI.getOperand(0).getReg(); 3777 LLT Ty = MRI.getType(Dst); 3778 if (Ty.isVector()) 3779 return false; 3780 3781 // We need to combine at least two loads into this type. Since the smallest 3782 // possible load is into a byte, we need at least a 16-bit wide type. 3783 const unsigned WideMemSizeInBits = Ty.getSizeInBits(); 3784 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0) 3785 return false; 3786 3787 // Match a collection of non-OR instructions in the pattern. 3788 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI); 3789 if (!RegsToVisit) 3790 return false; 3791 3792 // We have a collection of non-OR instructions. Figure out how wide each of 3793 // the small loads should be based off of the number of potential loads we 3794 // found. 3795 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size(); 3796 if (NarrowMemSizeInBits % 8 != 0) 3797 return false; 3798 3799 // Check if each register feeding into each OR is a load from the same 3800 // base pointer + some arithmetic. 3801 // 3802 // e.g. a[0], a[1] << 8, a[2] << 16, etc. 3803 // 3804 // Also verify that each of these ends up putting a[i] into the same memory 3805 // offset as a load into a wide type would. 3806 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx; 3807 GZExtLoad *LowestIdxLoad, *LatestLoad; 3808 int64_t LowestIdx; 3809 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine( 3810 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits); 3811 if (!MaybeLoadInfo) 3812 return false; 3813 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo; 3814 3815 // We have a bunch of loads being OR'd together. Using the addresses + offsets 3816 // we found before, check if this corresponds to a big or little endian byte 3817 // pattern. If it does, then we can represent it using a load + possibly a 3818 // BSWAP. 3819 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian(); 3820 std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); 3821 if (!IsBigEndian) 3822 return false; 3823 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian; 3824 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}})) 3825 return false; 3826 3827 // Make sure that the load from the lowest index produces offset 0 in the 3828 // final value. 
3829   //
3830   // This ensures that we won't combine something like this:
3831   //
3832   // load x[i] -> byte 2
3833   // load x[i+1] -> byte 0 ---> wide_load x[i]
3834   // load x[i+2] -> byte 1
3835   const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3836   const unsigned ZeroByteOffset =
3837       *IsBigEndian
3838           ? bigEndianByteAt(NumLoadsInTy, 0)
3839           : littleEndianByteAt(NumLoadsInTy, 0);
3840   auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3841   if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3842       ZeroOffsetIdx->second != LowestIdx)
3843     return false;
3844
3845   // We will reuse the pointer from the load which ends up at byte offset 0. It
3846   // may not use index 0.
3847   Register Ptr = LowestIdxLoad->getPointerReg();
3848   const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
3849   LegalityQuery::MemDesc MMDesc(MMO);
3850   MMDesc.MemoryTy = Ty;
3851   if (!isLegalOrBeforeLegalizer(
3852           {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3853     return false;
3854   auto PtrInfo = MMO.getPointerInfo();
3855   auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3856
3857   // Load must be allowed and fast on the target.
3858   LLVMContext &C = MF.getFunction().getContext();
3859   auto &DL = MF.getDataLayout();
3860   unsigned Fast = 0;
3861   if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3862       !Fast)
3863     return false;
3864
3865   MatchInfo = [=](MachineIRBuilder &MIB) {
3866     MIB.setInstrAndDebugLoc(*LatestLoad);
3867     Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3868     MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3869     if (NeedsBSwap)
3870       MIB.buildBSwap(Dst, LoadDst);
3871   };
3872   return true;
3873 }
3874
3875 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
3876                                             MachineInstr *&ExtMI) {
3877   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3878
3879   Register DstReg = MI.getOperand(0).getReg();
3880
3881   // TODO: Extending a vector may be expensive, don't do this until heuristics
3882   // are better.
3883   if (MRI.getType(DstReg).isVector())
3884     return false;
3885
3886   // Try to match a phi whose only use is an extend.
3887   if (!MRI.hasOneNonDBGUse(DstReg))
3888     return false;
3889   ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
3890   switch (ExtMI->getOpcode()) {
3891   case TargetOpcode::G_ANYEXT:
3892     return true; // G_ANYEXT is usually free.
3893   case TargetOpcode::G_ZEXT:
3894   case TargetOpcode::G_SEXT:
3895     break;
3896   default:
3897     return false;
3898   }
3899
3900   // If the target is likely to fold this extend away, don't propagate.
3901   if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
3902     return false;
3903
3904   // We don't want to propagate the extends unless there's a good chance that
3905   // they'll be optimized in some way.
3906   // Collect the unique incoming values.
3907   SmallPtrSet<MachineInstr *, 4> InSrcs;
3908   for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
3909     auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
3910     switch (DefMI->getOpcode()) {
3911     case TargetOpcode::G_LOAD:
3912     case TargetOpcode::G_TRUNC:
3913     case TargetOpcode::G_SEXT:
3914     case TargetOpcode::G_ZEXT:
3915     case TargetOpcode::G_ANYEXT:
3916     case TargetOpcode::G_CONSTANT:
3917       InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
3918       // Don't try to propagate if there are too many places to create new
3919       // extends, chances are it'll increase code size.
3920       if (InSrcs.size() > 2)
3921         return false;
3922       break;
3923     default:
3924       return false;
3925     }
3926   }
3927   return true;
3928 }
3929
3930 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
3931                                             MachineInstr *&ExtMI) {
3932   assert(MI.getOpcode() == TargetOpcode::G_PHI);
3933   Register DstReg = ExtMI->getOperand(0).getReg();
3934   LLT ExtTy = MRI.getType(DstReg);
3935
3936   // Propagate the extension into each incoming register's defining block.
3937   // Use a SetVector here because PHIs can have duplicate edges, and we want
3938   // deterministic iteration order.
3939   SmallSetVector<MachineInstr *, 8> SrcMIs;
3940   SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
3941   for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
3942     auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
3943     if (!SrcMIs.insert(SrcMI))
3944       continue;
3945
3946     // Build an extend after each src inst.
3947     auto *MBB = SrcMI->getParent();
3948     MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
3949     if (InsertPt != MBB->end() && InsertPt->isPHI())
3950       InsertPt = MBB->getFirstNonPHI();
3951
3952     Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
3953     Builder.setDebugLoc(MI.getDebugLoc());
3954     auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
3955                                           SrcMI->getOperand(0).getReg());
3956     OldToNewSrcMap[SrcMI] = NewExt;
3957   }
3958
3959   // Create a new phi with the extended inputs.
3960   Builder.setInstrAndDebugLoc(MI);
3961   auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
3962   NewPhi.addDef(DstReg);
3963   for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
3964     if (!MO.isReg()) {
3965       NewPhi.addMBB(MO.getMBB());
3966       continue;
3967     }
3968     auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
3969     NewPhi.addUse(NewSrc->getOperand(0).getReg());
3970   }
3971   Builder.insertInstr(NewPhi);
3972   ExtMI->eraseFromParent();
3973 }
3974
3975 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
3976                                                 Register &Reg) {
3977   assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
3978   // If we have a constant index, look for a G_BUILD_VECTOR source
3979   // and find the source register that the index maps to.
3980   Register SrcVec = MI.getOperand(1).getReg();
3981   LLT SrcTy = MRI.getType(SrcVec);
3982
3983   auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
3984   if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
3985     return false;
3986
3987   unsigned VecIdx = Cst->Value.getZExtValue();
3988
3989   // Check if we have a build_vector or build_vector_trunc with an optional
3990   // trunc in front.
3991   MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec);
3992   if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
3993     SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg());
3994   }
3995
3996   if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
3997       SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
3998     return false;
3999
4000   EVT Ty(getMVTForLLT(SrcTy));
4001   if (!MRI.hasOneNonDBGUse(SrcVec) &&
4002       !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
4003     return false;
4004
4005   Reg = SrcVecMI->getOperand(VecIdx + 1).getReg();
4006   return true;
4007 }
4008
4009 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
4010                                                 Register &Reg) {
4011   // Check the type of the register, since it may have come from a
4012   // G_BUILD_VECTOR_TRUNC.
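  //
  // For illustration (hypothetical MIR; register names are made up):
  //
  //   %v:_(<4 x s16>) = G_BUILD_VECTOR_TRUNC %a:_(s32), %b:_(s32), ...
  //   %elt:_(s16) = G_EXTRACT_VECTOR_ELT %v, 1
  //
  // Here Reg is %b with type s32 while the destination is s16, so a G_TRUNC
  // is needed rather than a plain register replacement.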
4013   LLT ScalarTy = MRI.getType(Reg);
4014   Register DstReg = MI.getOperand(0).getReg();
4015   LLT DstTy = MRI.getType(DstReg);
4016
4017   Builder.setInstrAndDebugLoc(MI);
4018   if (ScalarTy != DstTy) {
4019     assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
4020     Builder.buildTrunc(DstReg, Reg);
4021     MI.eraseFromParent();
4022     return;
4023   }
4024   replaceSingleDefInstWithReg(MI, Reg);
4025 }
4026
4027 bool CombinerHelper::matchExtractAllEltsFromBuildVector(
4028     MachineInstr &MI,
4029     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4030   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4031   // This combine tries to find build_vectors which have every source element
4032   // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
4033   // masked load scalarization are run late in the pipeline. There's already
4034   // a combine for a similar pattern starting from the extract, but it
4035   // doesn't attempt the transform if the build_vector has multiple uses,
4036   // which is the case here. Starting the combine from the build_vector
4037   // feels more natural than trying to find sibling nodes of extracts.
4038   // E.g.
4039   // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
4040   // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
4041   // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
4042   // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
4043   // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
4044   // ==>
4045   // replace ext{1,2,3,4} with %s{1,2,3,4}
4046
4047   Register DstReg = MI.getOperand(0).getReg();
4048   LLT DstTy = MRI.getType(DstReg);
4049   unsigned NumElts = DstTy.getNumElements();
4050
4051   SmallBitVector ExtractedElts(NumElts);
4052   for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
4053     if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
4054       return false;
4055     auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
4056     if (!Cst)
4057       return false;
4058     unsigned Idx = Cst->getZExtValue();
4059     if (Idx >= NumElts)
4060       return false; // Out of range.
4061     ExtractedElts.set(Idx);
4062     SrcDstPairs.emplace_back(
4063         std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
4064   }
4065   // Match if every element was extracted.
4066   return ExtractedElts.all();
4067 }
4068
4069 void CombinerHelper::applyExtractAllEltsFromBuildVector(
4070     MachineInstr &MI,
4071     SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
4072   assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
4073   for (auto &Pair : SrcDstPairs) {
4074     auto *ExtMI = Pair.second;
4075     replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
4076     ExtMI->eraseFromParent();
4077   }
4078   MI.eraseFromParent();
4079 }
4080
4081 void CombinerHelper::applyBuildFn(
4082     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4083   Builder.setInstrAndDebugLoc(MI);
4084   MatchInfo(Builder);
4085   MI.eraseFromParent();
4086 }
4087
4088 void CombinerHelper::applyBuildFnNoErase(
4089     MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
4090   Builder.setInstrAndDebugLoc(MI);
4091   MatchInfo(Builder);
4092 }
4093
4094 bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
4095                                                BuildFnTy &MatchInfo) {
4096   assert(MI.getOpcode() == TargetOpcode::G_OR);
4097
4098   Register Dst = MI.getOperand(0).getReg();
4099   LLT Ty = MRI.getType(Dst);
4100   unsigned BitWidth = Ty.getScalarSizeInBits();
4101
4102   Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
4103   unsigned FshOpc = 0;
4104
4105   // Match (or (shl ...), (lshr ...)).
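  //
  // For illustration, assuming a 32-bit scalar type:
  //
  //   (or (shl x, 8), (lshr y, 24)) -> (fshr x, y, 24)           ; 8 + 24 == 32
  //   (or (shl x, amt), (lshr y, (sub 32, amt))) -> (fshl x, y, amt)
  //
  // These correspond to the constant and variable shift-amount cases handled
  // below.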
4106 if (!mi_match(Dst, MRI, 4107 // m_GOr() handles the commuted version as well. 4108 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)), 4109 m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt))))) 4110 return false; 4111 4112 // Given constants C0 and C1 such that C0 + C1 is bit-width: 4113 // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1) 4114 int64_t CstShlAmt, CstLShrAmt; 4115 if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) && 4116 mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) && 4117 CstShlAmt + CstLShrAmt == BitWidth) { 4118 FshOpc = TargetOpcode::G_FSHR; 4119 Amt = LShrAmt; 4120 4121 } else if (mi_match(LShrAmt, MRI, 4122 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && 4123 ShlAmt == Amt) { 4124 // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt) 4125 FshOpc = TargetOpcode::G_FSHL; 4126 4127 } else if (mi_match(ShlAmt, MRI, 4128 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && 4129 LShrAmt == Amt) { 4130 // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt) 4131 FshOpc = TargetOpcode::G_FSHR; 4132 4133 } else { 4134 return false; 4135 } 4136 4137 LLT AmtTy = MRI.getType(Amt); 4138 if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}})) 4139 return false; 4140 4141 MatchInfo = [=](MachineIRBuilder &B) { 4142 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt}); 4143 }; 4144 return true; 4145 } 4146 4147 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate. 4148 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) { 4149 unsigned Opc = MI.getOpcode(); 4150 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); 4151 Register X = MI.getOperand(1).getReg(); 4152 Register Y = MI.getOperand(2).getReg(); 4153 if (X != Y) 4154 return false; 4155 unsigned RotateOpc = 4156 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR; 4157 return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}}); 4158 } 4159 4160 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) { 4161 unsigned Opc = MI.getOpcode(); 4162 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); 4163 bool IsFSHL = Opc == TargetOpcode::G_FSHL; 4164 Observer.changingInstr(MI); 4165 MI.setDesc(Builder.getTII().get(IsFSHL ? 
TargetOpcode::G_ROTL 4166 : TargetOpcode::G_ROTR)); 4167 MI.removeOperand(2); 4168 Observer.changedInstr(MI); 4169 } 4170 4171 // Fold (rot x, c) -> (rot x, c % BitSize) 4172 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) { 4173 assert(MI.getOpcode() == TargetOpcode::G_ROTL || 4174 MI.getOpcode() == TargetOpcode::G_ROTR); 4175 unsigned Bitsize = 4176 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); 4177 Register AmtReg = MI.getOperand(2).getReg(); 4178 bool OutOfRange = false; 4179 auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) { 4180 if (auto *CI = dyn_cast<ConstantInt>(C)) 4181 OutOfRange |= CI->getValue().uge(Bitsize); 4182 return true; 4183 }; 4184 return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange; 4185 } 4186 4187 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) { 4188 assert(MI.getOpcode() == TargetOpcode::G_ROTL || 4189 MI.getOpcode() == TargetOpcode::G_ROTR); 4190 unsigned Bitsize = 4191 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); 4192 Builder.setInstrAndDebugLoc(MI); 4193 Register Amt = MI.getOperand(2).getReg(); 4194 LLT AmtTy = MRI.getType(Amt); 4195 auto Bits = Builder.buildConstant(AmtTy, Bitsize); 4196 Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0); 4197 Observer.changingInstr(MI); 4198 MI.getOperand(2).setReg(Amt); 4199 Observer.changedInstr(MI); 4200 } 4201 4202 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI, 4203 int64_t &MatchInfo) { 4204 assert(MI.getOpcode() == TargetOpcode::G_ICMP); 4205 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 4206 auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg()); 4207 auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg()); 4208 std::optional<bool> KnownVal; 4209 switch (Pred) { 4210 default: 4211 llvm_unreachable("Unexpected G_ICMP predicate?"); 4212 case CmpInst::ICMP_EQ: 4213 KnownVal = KnownBits::eq(KnownLHS, KnownRHS); 4214 break; 4215 case CmpInst::ICMP_NE: 4216 KnownVal = KnownBits::ne(KnownLHS, KnownRHS); 4217 break; 4218 case CmpInst::ICMP_SGE: 4219 KnownVal = KnownBits::sge(KnownLHS, KnownRHS); 4220 break; 4221 case CmpInst::ICMP_SGT: 4222 KnownVal = KnownBits::sgt(KnownLHS, KnownRHS); 4223 break; 4224 case CmpInst::ICMP_SLE: 4225 KnownVal = KnownBits::sle(KnownLHS, KnownRHS); 4226 break; 4227 case CmpInst::ICMP_SLT: 4228 KnownVal = KnownBits::slt(KnownLHS, KnownRHS); 4229 break; 4230 case CmpInst::ICMP_UGE: 4231 KnownVal = KnownBits::uge(KnownLHS, KnownRHS); 4232 break; 4233 case CmpInst::ICMP_UGT: 4234 KnownVal = KnownBits::ugt(KnownLHS, KnownRHS); 4235 break; 4236 case CmpInst::ICMP_ULE: 4237 KnownVal = KnownBits::ule(KnownLHS, KnownRHS); 4238 break; 4239 case CmpInst::ICMP_ULT: 4240 KnownVal = KnownBits::ult(KnownLHS, KnownRHS); 4241 break; 4242 } 4243 if (!KnownVal) 4244 return false; 4245 MatchInfo = 4246 *KnownVal 4247 ? getICmpTrueVal(getTargetLowering(), 4248 /*IsVector = */ 4249 MRI.getType(MI.getOperand(0).getReg()).isVector(), 4250 /* IsFP = */ false) 4251 : 0; 4252 return true; 4253 } 4254 4255 bool CombinerHelper::matchICmpToLHSKnownBits( 4256 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4257 assert(MI.getOpcode() == TargetOpcode::G_ICMP); 4258 // Given: 4259 // 4260 // %x = G_WHATEVER (... x is known to be 0 or 1 ...) 4261 // %cmp = G_ICMP ne %x, 0 4262 // 4263 // Or: 4264 // 4265 // %x = G_WHATEVER (... x is known to be 0 or 1 ...) 
4266 // %cmp = G_ICMP eq %x, 1 4267 // 4268 // We can replace %cmp with %x assuming true is 1 on the target. 4269 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 4270 if (!CmpInst::isEquality(Pred)) 4271 return false; 4272 Register Dst = MI.getOperand(0).getReg(); 4273 LLT DstTy = MRI.getType(Dst); 4274 if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(), 4275 /* IsFP = */ false) != 1) 4276 return false; 4277 int64_t OneOrZero = Pred == CmpInst::ICMP_EQ; 4278 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero))) 4279 return false; 4280 Register LHS = MI.getOperand(2).getReg(); 4281 auto KnownLHS = KB->getKnownBits(LHS); 4282 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1) 4283 return false; 4284 // Make sure replacing Dst with the LHS is a legal operation. 4285 LLT LHSTy = MRI.getType(LHS); 4286 unsigned LHSSize = LHSTy.getSizeInBits(); 4287 unsigned DstSize = DstTy.getSizeInBits(); 4288 unsigned Op = TargetOpcode::COPY; 4289 if (DstSize != LHSSize) 4290 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT; 4291 if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}})) 4292 return false; 4293 MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); }; 4294 return true; 4295 } 4296 4297 // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0 4298 bool CombinerHelper::matchAndOrDisjointMask( 4299 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4300 assert(MI.getOpcode() == TargetOpcode::G_AND); 4301 4302 // Ignore vector types to simplify matching the two constants. 4303 // TODO: do this for vectors and scalars via a demanded bits analysis. 4304 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 4305 if (Ty.isVector()) 4306 return false; 4307 4308 Register Src; 4309 Register AndMaskReg; 4310 int64_t AndMaskBits; 4311 int64_t OrMaskBits; 4312 if (!mi_match(MI, MRI, 4313 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)), 4314 m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg))))) 4315 return false; 4316 4317 // Check if OrMask could turn on any bits in Src. 4318 if (AndMaskBits & OrMaskBits) 4319 return false; 4320 4321 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4322 Observer.changingInstr(MI); 4323 // Canonicalize the result to have the constant on the RHS. 4324 if (MI.getOperand(1).getReg() == AndMaskReg) 4325 MI.getOperand(2).setReg(AndMaskReg); 4326 MI.getOperand(1).setReg(Src); 4327 Observer.changedInstr(MI); 4328 }; 4329 return true; 4330 } 4331 4332 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift. 
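///
/// For illustration (hypothetical MIR; register names are made up), with a
/// 32-bit value:
///
///   %sh:_(s32) = G_LSHR %x, 4
///   %dst:_(s32) = G_SEXT_INREG %sh, 8
///
/// becomes G_SBFX %x, 4 (lsb), 8 (width), provided G_SBFX is legal or custom
/// for the target.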
4333 bool CombinerHelper::matchBitfieldExtractFromSExtInReg( 4334 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4335 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 4336 Register Dst = MI.getOperand(0).getReg(); 4337 Register Src = MI.getOperand(1).getReg(); 4338 LLT Ty = MRI.getType(Src); 4339 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4340 if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}})) 4341 return false; 4342 int64_t Width = MI.getOperand(2).getImm(); 4343 Register ShiftSrc; 4344 int64_t ShiftImm; 4345 if (!mi_match( 4346 Src, MRI, 4347 m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)), 4348 m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)))))) 4349 return false; 4350 if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits()) 4351 return false; 4352 4353 MatchInfo = [=](MachineIRBuilder &B) { 4354 auto Cst1 = B.buildConstant(ExtractTy, ShiftImm); 4355 auto Cst2 = B.buildConstant(ExtractTy, Width); 4356 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2); 4357 }; 4358 return true; 4359 } 4360 4361 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants. 4362 bool CombinerHelper::matchBitfieldExtractFromAnd( 4363 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4364 assert(MI.getOpcode() == TargetOpcode::G_AND); 4365 Register Dst = MI.getOperand(0).getReg(); 4366 LLT Ty = MRI.getType(Dst); 4367 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4368 if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}})) 4369 return false; 4370 4371 int64_t AndImm, LSBImm; 4372 Register ShiftSrc; 4373 const unsigned Size = Ty.getScalarSizeInBits(); 4374 if (!mi_match(MI.getOperand(0).getReg(), MRI, 4375 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))), 4376 m_ICst(AndImm)))) 4377 return false; 4378 4379 // The mask is a mask of the low bits iff imm & (imm+1) == 0. 4380 auto MaybeMask = static_cast<uint64_t>(AndImm); 4381 if (MaybeMask & (MaybeMask + 1)) 4382 return false; 4383 4384 // LSB must fit within the register. 4385 if (static_cast<uint64_t>(LSBImm) >= Size) 4386 return false; 4387 4388 uint64_t Width = APInt(Size, AndImm).countr_one(); 4389 MatchInfo = [=](MachineIRBuilder &B) { 4390 auto WidthCst = B.buildConstant(ExtractTy, Width); 4391 auto LSBCst = B.buildConstant(ExtractTy, LSBImm); 4392 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst}); 4393 }; 4394 return true; 4395 } 4396 4397 bool CombinerHelper::matchBitfieldExtractFromShr( 4398 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4399 const unsigned Opcode = MI.getOpcode(); 4400 assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR); 4401 4402 const Register Dst = MI.getOperand(0).getReg(); 4403 4404 const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR 4405 ? 
TargetOpcode::G_SBFX 4406 : TargetOpcode::G_UBFX; 4407 4408 // Check if the type we would use for the extract is legal 4409 LLT Ty = MRI.getType(Dst); 4410 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4411 if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}})) 4412 return false; 4413 4414 Register ShlSrc; 4415 int64_t ShrAmt; 4416 int64_t ShlAmt; 4417 const unsigned Size = Ty.getScalarSizeInBits(); 4418 4419 // Try to match shr (shl x, c1), c2 4420 if (!mi_match(Dst, MRI, 4421 m_BinOp(Opcode, 4422 m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))), 4423 m_ICst(ShrAmt)))) 4424 return false; 4425 4426 // Make sure that the shift sizes can fit a bitfield extract 4427 if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size) 4428 return false; 4429 4430 // Skip this combine if the G_SEXT_INREG combine could handle it 4431 if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt) 4432 return false; 4433 4434 // Calculate start position and width of the extract 4435 const int64_t Pos = ShrAmt - ShlAmt; 4436 const int64_t Width = Size - ShrAmt; 4437 4438 MatchInfo = [=](MachineIRBuilder &B) { 4439 auto WidthCst = B.buildConstant(ExtractTy, Width); 4440 auto PosCst = B.buildConstant(ExtractTy, Pos); 4441 B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst}); 4442 }; 4443 return true; 4444 } 4445 4446 bool CombinerHelper::matchBitfieldExtractFromShrAnd( 4447 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4448 const unsigned Opcode = MI.getOpcode(); 4449 assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR); 4450 4451 const Register Dst = MI.getOperand(0).getReg(); 4452 LLT Ty = MRI.getType(Dst); 4453 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4454 if (LI && !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}})) 4455 return false; 4456 4457 // Try to match shr (and x, c1), c2 4458 Register AndSrc; 4459 int64_t ShrAmt; 4460 int64_t SMask; 4461 if (!mi_match(Dst, MRI, 4462 m_BinOp(Opcode, 4463 m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))), 4464 m_ICst(ShrAmt)))) 4465 return false; 4466 4467 const unsigned Size = Ty.getScalarSizeInBits(); 4468 if (ShrAmt < 0 || ShrAmt >= Size) 4469 return false; 4470 4471 // If the shift subsumes the mask, emit the 0 directly. 4472 if (0 == (SMask >> ShrAmt)) { 4473 MatchInfo = [=](MachineIRBuilder &B) { 4474 B.buildConstant(Dst, 0); 4475 }; 4476 return true; 4477 } 4478 4479 // Check that ubfx can do the extraction, with no holes in the mask. 4480 uint64_t UMask = SMask; 4481 UMask |= maskTrailingOnes<uint64_t>(ShrAmt); 4482 UMask &= maskTrailingOnes<uint64_t>(Size); 4483 if (!isMask_64(UMask)) 4484 return false; 4485 4486 // Calculate start position and width of the extract. 4487 const int64_t Pos = ShrAmt; 4488 const int64_t Width = llvm::countr_one(UMask) - ShrAmt; 4489 4490 // It's preferable to keep the shift, rather than form G_SBFX. 4491 // TODO: remove the G_AND via demanded bits analysis. 
4492 if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size) 4493 return false; 4494 4495 MatchInfo = [=](MachineIRBuilder &B) { 4496 auto WidthCst = B.buildConstant(ExtractTy, Width); 4497 auto PosCst = B.buildConstant(ExtractTy, Pos); 4498 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst}); 4499 }; 4500 return true; 4501 } 4502 4503 bool CombinerHelper::reassociationCanBreakAddressingModePattern( 4504 MachineInstr &MI) { 4505 auto &PtrAdd = cast<GPtrAdd>(MI); 4506 4507 Register Src1Reg = PtrAdd.getBaseReg(); 4508 auto *Src1Def = getOpcodeDef<GPtrAdd>(Src1Reg, MRI); 4509 if (!Src1Def) 4510 return false; 4511 4512 Register Src2Reg = PtrAdd.getOffsetReg(); 4513 4514 if (MRI.hasOneNonDBGUse(Src1Reg)) 4515 return false; 4516 4517 auto C1 = getIConstantVRegVal(Src1Def->getOffsetReg(), MRI); 4518 if (!C1) 4519 return false; 4520 auto C2 = getIConstantVRegVal(Src2Reg, MRI); 4521 if (!C2) 4522 return false; 4523 4524 const APInt &C1APIntVal = *C1; 4525 const APInt &C2APIntVal = *C2; 4526 const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue(); 4527 4528 for (auto &UseMI : MRI.use_nodbg_instructions(PtrAdd.getReg(0))) { 4529 // This combine may end up running before ptrtoint/inttoptr combines 4530 // manage to eliminate redundant conversions, so try to look through them. 4531 MachineInstr *ConvUseMI = &UseMI; 4532 unsigned ConvUseOpc = ConvUseMI->getOpcode(); 4533 while (ConvUseOpc == TargetOpcode::G_INTTOPTR || 4534 ConvUseOpc == TargetOpcode::G_PTRTOINT) { 4535 Register DefReg = ConvUseMI->getOperand(0).getReg(); 4536 if (!MRI.hasOneNonDBGUse(DefReg)) 4537 break; 4538 ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg); 4539 ConvUseOpc = ConvUseMI->getOpcode(); 4540 } 4541 auto *LdStMI = dyn_cast<GLoadStore>(ConvUseMI); 4542 if (!LdStMI) 4543 continue; 4544 // Is x[offset2] already not a legal addressing mode? If so then 4545 // reassociating the constants breaks nothing (we test offset2 because 4546 // that's the one we hope to fold into the load or store). 4547 TargetLoweringBase::AddrMode AM; 4548 AM.HasBaseReg = true; 4549 AM.BaseOffs = C2APIntVal.getSExtValue(); 4550 unsigned AS = MRI.getType(LdStMI->getPointerReg()).getAddressSpace(); 4551 Type *AccessTy = getTypeForLLT(LdStMI->getMMO().getMemoryType(), 4552 PtrAdd.getMF()->getFunction().getContext()); 4553 const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering(); 4554 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM, 4555 AccessTy, AS)) 4556 continue; 4557 4558 // Would x[offset1+offset2] still be a legal addressing mode? 
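    // For example, on a hypothetical target where only immediate offsets in
    // [-256, 255] fold into a load, offset1 = 200 and offset2 = 100 are each
    // fine on their own, but the combined offset of 300 would no longer fold,
    // so reassociation is reported as potentially harmful.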
4559     AM.BaseOffs = CombinedValue;
4560     if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM,
4561                                    AccessTy, AS))
4562       return true;
4563   }
4564
4565   return false;
4566 }
4567
4568 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI,
4569                                                   MachineInstr *RHS,
4570                                                   BuildFnTy &MatchInfo) {
4571   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4572   Register Src1Reg = MI.getOperand(1).getReg();
4573   if (RHS->getOpcode() != TargetOpcode::G_ADD)
4574     return false;
4575   auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI);
4576   if (!C2)
4577     return false;
4578
4579   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4580     LLT PtrTy = MRI.getType(MI.getOperand(0).getReg());
4581
4582     auto NewBase =
4583         Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg());
4584     Observer.changingInstr(MI);
4585     MI.getOperand(1).setReg(NewBase.getReg(0));
4586     MI.getOperand(2).setReg(RHS->getOperand(2).getReg());
4587     Observer.changedInstr(MI);
4588   };
4589   return !reassociationCanBreakAddressingModePattern(MI);
4590 }
4591
4592 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI,
4593                                                   MachineInstr *LHS,
4594                                                   MachineInstr *RHS,
4595                                                   BuildFnTy &MatchInfo) {
4596   // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4597   // if and only if (G_PTR_ADD X, C) has one use.
4598   Register LHSBase;
4599   std::optional<ValueAndVReg> LHSCstOff;
4600   if (!mi_match(MI.getBaseReg(), MRI,
4601                 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff)))))
4602     return false;
4603
4604   auto *LHSPtrAdd = cast<GPtrAdd>(LHS);
4605   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4606     // When we change LHSPtrAdd's offset register we might cause it to use a reg
4607     // before its def. Sink the instruction right before the outer PTR_ADD to
4608     // ensure this doesn't happen.
4609     LHSPtrAdd->moveBefore(&MI);
4610     Register RHSReg = MI.getOffsetReg();
4611     // Reusing the matched constant's VReg could cause a type mismatch if it comes from an extend/trunc, so build a fresh constant.
4612     auto NewCst = B.buildConstant(MRI.getType(RHSReg), LHSCstOff->Value);
4613     Observer.changingInstr(MI);
4614     MI.getOperand(2).setReg(NewCst.getReg(0));
4615     Observer.changedInstr(MI);
4616     Observer.changingInstr(*LHSPtrAdd);
4617     LHSPtrAdd->getOperand(2).setReg(RHSReg);
4618     Observer.changedInstr(*LHSPtrAdd);
4619   };
4620   return !reassociationCanBreakAddressingModePattern(MI);
4621 }
4622
4623 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI,
4624                                                         MachineInstr *LHS,
4625                                                         MachineInstr *RHS,
4626                                                         BuildFnTy &MatchInfo) {
4627   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4628   auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS);
4629   if (!LHSPtrAdd)
4630     return false;
4631
4632   Register Src2Reg = MI.getOperand(2).getReg();
4633   Register LHSSrc1 = LHSPtrAdd->getBaseReg();
4634   Register LHSSrc2 = LHSPtrAdd->getOffsetReg();
4635   auto C1 = getIConstantVRegVal(LHSSrc2, MRI);
4636   if (!C1)
4637     return false;
4638   auto C2 = getIConstantVRegVal(Src2Reg, MRI);
4639   if (!C2)
4640     return false;
4641
4642   MatchInfo = [=, &MI](MachineIRBuilder &B) {
4643     auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2);
4644     Observer.changingInstr(MI);
4645     MI.getOperand(1).setReg(LHSSrc1);
4646     MI.getOperand(2).setReg(NewCst.getReg(0));
4647     Observer.changedInstr(MI);
4648   };
4649   return !reassociationCanBreakAddressingModePattern(MI);
4650 }
4651
4652 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI,
4653                                         BuildFnTy &MatchInfo) {
4654   auto &PtrAdd = cast<GPtrAdd>(MI);
4655   // We're trying to match a few pointer computation patterns here for
4656   // re-association opportunities.
4657   // 1) Isolating a constant operand to be on the RHS, e.g.:
4658   // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4659   //
4660   // 2) Folding two constants in each sub-tree as long as such folding
4661   // doesn't break a legal addressing mode.
4662   // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4663   //
4664   // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4665   // (G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD X, Y), C)
4666   // iff (G_PTR_ADD X, C) has one use.
4667   MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg());
4668   MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg());
4669
4670   // Try to match example 2.
4671   if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo))
4672     return true;
4673
4674   // Try to match example 3.
4675   if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo))
4676     return true;
4677
4678   // Try to match example 1.
4679   if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo))
4680     return true;
4681
4682   return false;
4683 }
4684 bool CombinerHelper::tryReassocBinOp(unsigned Opc, Register DstReg,
4685                                      Register OpLHS, Register OpRHS,
4686                                      BuildFnTy &MatchInfo) {
4687   LLT OpRHSTy = MRI.getType(OpRHS);
4688   MachineInstr *OpLHSDef = MRI.getVRegDef(OpLHS);
4689
4690   if (OpLHSDef->getOpcode() != Opc)
4691     return false;
4692
4693   MachineInstr *OpRHSDef = MRI.getVRegDef(OpRHS);
4694   Register OpLHSLHS = OpLHSDef->getOperand(1).getReg();
4695   Register OpLHSRHS = OpLHSDef->getOperand(2).getReg();
4696
4697   // If the inner op is (X op C), pull the constant out so it can be folded with
4698   // other constants in the expression tree. Folding is not guaranteed so we
4699   // might have (C1 op C2).
In that case do not pull a constant out because it 4700 // won't help and can lead to infinite loops. 4701 if (isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSRHS), MRI) && 4702 !isConstantOrConstantSplatVector(*MRI.getVRegDef(OpLHSLHS), MRI)) { 4703 if (isConstantOrConstantSplatVector(*OpRHSDef, MRI)) { 4704 // (Opc (Opc X, C1), C2) -> (Opc X, (Opc C1, C2)) 4705 MatchInfo = [=](MachineIRBuilder &B) { 4706 auto NewCst = B.buildInstr(Opc, {OpRHSTy}, {OpLHSRHS, OpRHS}); 4707 B.buildInstr(Opc, {DstReg}, {OpLHSLHS, NewCst}); 4708 }; 4709 return true; 4710 } 4711 if (getTargetLowering().isReassocProfitable(MRI, OpLHS, OpRHS)) { 4712 // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1) 4713 // iff (op x, c1) has one use 4714 MatchInfo = [=](MachineIRBuilder &B) { 4715 auto NewLHSLHS = B.buildInstr(Opc, {OpRHSTy}, {OpLHSLHS, OpRHS}); 4716 B.buildInstr(Opc, {DstReg}, {NewLHSLHS, OpLHSRHS}); 4717 }; 4718 return true; 4719 } 4720 } 4721 4722 return false; 4723 } 4724 4725 bool CombinerHelper::matchReassocCommBinOp(MachineInstr &MI, 4726 BuildFnTy &MatchInfo) { 4727 // We don't check if the reassociation will break a legal addressing mode 4728 // here since pointer arithmetic is handled by G_PTR_ADD. 4729 unsigned Opc = MI.getOpcode(); 4730 Register DstReg = MI.getOperand(0).getReg(); 4731 Register LHSReg = MI.getOperand(1).getReg(); 4732 Register RHSReg = MI.getOperand(2).getReg(); 4733 4734 if (tryReassocBinOp(Opc, DstReg, LHSReg, RHSReg, MatchInfo)) 4735 return true; 4736 if (tryReassocBinOp(Opc, DstReg, RHSReg, LHSReg, MatchInfo)) 4737 return true; 4738 return false; 4739 } 4740 4741 bool CombinerHelper::matchConstantFoldCastOp(MachineInstr &MI, APInt &MatchInfo) { 4742 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 4743 Register SrcOp = MI.getOperand(1).getReg(); 4744 4745 if (auto MaybeCst = ConstantFoldCastOp(MI.getOpcode(), DstTy, SrcOp, MRI)) { 4746 MatchInfo = *MaybeCst; 4747 return true; 4748 } 4749 4750 return false; 4751 } 4752 4753 bool CombinerHelper::matchConstantFoldBinOp(MachineInstr &MI, APInt &MatchInfo) { 4754 Register Op1 = MI.getOperand(1).getReg(); 4755 Register Op2 = MI.getOperand(2).getReg(); 4756 auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI); 4757 if (!MaybeCst) 4758 return false; 4759 MatchInfo = *MaybeCst; 4760 return true; 4761 } 4762 4763 bool CombinerHelper::matchConstantFoldFPBinOp(MachineInstr &MI, ConstantFP* &MatchInfo) { 4764 Register Op1 = MI.getOperand(1).getReg(); 4765 Register Op2 = MI.getOperand(2).getReg(); 4766 auto MaybeCst = ConstantFoldFPBinOp(MI.getOpcode(), Op1, Op2, MRI); 4767 if (!MaybeCst) 4768 return false; 4769 MatchInfo = 4770 ConstantFP::get(MI.getMF()->getFunction().getContext(), *MaybeCst); 4771 return true; 4772 } 4773 4774 bool CombinerHelper::matchConstantFoldFMA(MachineInstr &MI, 4775 ConstantFP *&MatchInfo) { 4776 assert(MI.getOpcode() == TargetOpcode::G_FMA || 4777 MI.getOpcode() == TargetOpcode::G_FMAD); 4778 auto [_, Op1, Op2, Op3] = MI.getFirst4Regs(); 4779 4780 const ConstantFP *Op3Cst = getConstantFPVRegVal(Op3, MRI); 4781 if (!Op3Cst) 4782 return false; 4783 4784 const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI); 4785 if (!Op2Cst) 4786 return false; 4787 4788 const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI); 4789 if (!Op1Cst) 4790 return false; 4791 4792 APFloat Op1F = Op1Cst->getValueAPF(); 4793 Op1F.fusedMultiplyAdd(Op2Cst->getValueAPF(), Op3Cst->getValueAPF(), 4794 APFloat::rmNearestTiesToEven); 4795 MatchInfo = ConstantFP::get(MI.getMF()->getFunction().getContext(), Op1F); 4796 
return true; 4797 } 4798 4799 bool CombinerHelper::matchNarrowBinopFeedingAnd( 4800 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4801 // Look for a binop feeding into an AND with a mask: 4802 // 4803 // %add = G_ADD %lhs, %rhs 4804 // %and = G_AND %add, 000...11111111 4805 // 4806 // Check if it's possible to perform the binop at a narrower width and zext 4807 // back to the original width like so: 4808 // 4809 // %narrow_lhs = G_TRUNC %lhs 4810 // %narrow_rhs = G_TRUNC %rhs 4811 // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs 4812 // %new_add = G_ZEXT %narrow_add 4813 // %and = G_AND %new_add, 000...11111111 4814 // 4815 // This can allow later combines to eliminate the G_AND if it turns out 4816 // that the mask is irrelevant. 4817 assert(MI.getOpcode() == TargetOpcode::G_AND); 4818 Register Dst = MI.getOperand(0).getReg(); 4819 Register AndLHS = MI.getOperand(1).getReg(); 4820 Register AndRHS = MI.getOperand(2).getReg(); 4821 LLT WideTy = MRI.getType(Dst); 4822 4823 // If the potential binop has more than one use, then it's possible that one 4824 // of those uses will need its full width. 4825 if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS)) 4826 return false; 4827 4828 // Check if the LHS feeding the AND is impacted by the high bits that we're 4829 // masking out. 4830 // 4831 // e.g. for 64-bit x, y: 4832 // 4833 // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535 4834 MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI); 4835 if (!LHSInst) 4836 return false; 4837 unsigned LHSOpc = LHSInst->getOpcode(); 4838 switch (LHSOpc) { 4839 default: 4840 return false; 4841 case TargetOpcode::G_ADD: 4842 case TargetOpcode::G_SUB: 4843 case TargetOpcode::G_MUL: 4844 case TargetOpcode::G_AND: 4845 case TargetOpcode::G_OR: 4846 case TargetOpcode::G_XOR: 4847 break; 4848 } 4849 4850 // Find the mask on the RHS. 4851 auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI); 4852 if (!Cst) 4853 return false; 4854 auto Mask = Cst->Value; 4855 if (!Mask.isMask()) 4856 return false; 4857 4858 // No point in combining if there's nothing to truncate. 4859 unsigned NarrowWidth = Mask.countr_one(); 4860 if (NarrowWidth == WideTy.getSizeInBits()) 4861 return false; 4862 LLT NarrowTy = LLT::scalar(NarrowWidth); 4863 4864 // Check if adding the zext + truncates could be harmful. 
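  // Concretely, the narrowing only pays off if the target considers the
  // wide->narrow truncate and narrow->wide zero-extend free and both G_TRUNC
  // and G_ZEXT are legal (or we are still pre-legalizer); e.g. masking a
  // 64-bit add with 0xFFFF is only worth rewriting as a 16-bit add if
  // trunc s64->s16 and zext s16->s64 cost nothing.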
4865 auto &MF = *MI.getMF(); 4866 const auto &TLI = getTargetLowering(); 4867 LLVMContext &Ctx = MF.getFunction().getContext(); 4868 auto &DL = MF.getDataLayout(); 4869 if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) || 4870 !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx)) 4871 return false; 4872 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) || 4873 !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}})) 4874 return false; 4875 Register BinOpLHS = LHSInst->getOperand(1).getReg(); 4876 Register BinOpRHS = LHSInst->getOperand(2).getReg(); 4877 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4878 auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS); 4879 auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS); 4880 auto NarrowBinOp = 4881 Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS}); 4882 auto Ext = Builder.buildZExt(WideTy, NarrowBinOp); 4883 Observer.changingInstr(MI); 4884 MI.getOperand(1).setReg(Ext.getReg(0)); 4885 Observer.changedInstr(MI); 4886 }; 4887 return true; 4888 } 4889 4890 bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) { 4891 unsigned Opc = MI.getOpcode(); 4892 assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO); 4893 4894 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2))) 4895 return false; 4896 4897 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4898 Observer.changingInstr(MI); 4899 unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO 4900 : TargetOpcode::G_SADDO; 4901 MI.setDesc(Builder.getTII().get(NewOpc)); 4902 MI.getOperand(3).setReg(MI.getOperand(2).getReg()); 4903 Observer.changedInstr(MI); 4904 }; 4905 return true; 4906 } 4907 4908 bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { 4909 // (G_*MULO x, 0) -> 0 + no carry out 4910 assert(MI.getOpcode() == TargetOpcode::G_UMULO || 4911 MI.getOpcode() == TargetOpcode::G_SMULO); 4912 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) 4913 return false; 4914 Register Dst = MI.getOperand(0).getReg(); 4915 Register Carry = MI.getOperand(1).getReg(); 4916 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) || 4917 !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) 4918 return false; 4919 MatchInfo = [=](MachineIRBuilder &B) { 4920 B.buildConstant(Dst, 0); 4921 B.buildConstant(Carry, 0); 4922 }; 4923 return true; 4924 } 4925 4926 bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { 4927 // (G_*ADDO x, 0) -> x + no carry out 4928 assert(MI.getOpcode() == TargetOpcode::G_UADDO || 4929 MI.getOpcode() == TargetOpcode::G_SADDO); 4930 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) 4931 return false; 4932 Register Carry = MI.getOperand(1).getReg(); 4933 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) 4934 return false; 4935 Register Dst = MI.getOperand(0).getReg(); 4936 Register LHS = MI.getOperand(2).getReg(); 4937 MatchInfo = [=](MachineIRBuilder &B) { 4938 B.buildCopy(Dst, LHS); 4939 B.buildConstant(Carry, 0); 4940 }; 4941 return true; 4942 } 4943 4944 bool CombinerHelper::matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo) { 4945 // (G_*ADDE x, y, 0) -> (G_*ADDO x, y) 4946 // (G_*SUBE x, y, 0) -> (G_*SUBO x, y) 4947 assert(MI.getOpcode() == TargetOpcode::G_UADDE || 4948 MI.getOpcode() == TargetOpcode::G_SADDE || 4949 MI.getOpcode() == TargetOpcode::G_USUBE || 4950 MI.getOpcode() == TargetOpcode::G_SSUBE); 4951 if (!mi_match(MI.getOperand(4).getReg(), MRI, 
m_SpecificICstOrSplat(0))) 4952 return false; 4953 MatchInfo = [&](MachineIRBuilder &B) { 4954 unsigned NewOpcode; 4955 switch (MI.getOpcode()) { 4956 case TargetOpcode::G_UADDE: 4957 NewOpcode = TargetOpcode::G_UADDO; 4958 break; 4959 case TargetOpcode::G_SADDE: 4960 NewOpcode = TargetOpcode::G_SADDO; 4961 break; 4962 case TargetOpcode::G_USUBE: 4963 NewOpcode = TargetOpcode::G_USUBO; 4964 break; 4965 case TargetOpcode::G_SSUBE: 4966 NewOpcode = TargetOpcode::G_SSUBO; 4967 break; 4968 } 4969 Observer.changingInstr(MI); 4970 MI.setDesc(B.getTII().get(NewOpcode)); 4971 MI.removeOperand(4); 4972 Observer.changedInstr(MI); 4973 }; 4974 return true; 4975 } 4976 4977 bool CombinerHelper::matchSubAddSameReg(MachineInstr &MI, 4978 BuildFnTy &MatchInfo) { 4979 assert(MI.getOpcode() == TargetOpcode::G_SUB); 4980 Register Dst = MI.getOperand(0).getReg(); 4981 // (x + y) - z -> x (if y == z) 4982 // (x + y) - z -> y (if x == z) 4983 Register X, Y, Z; 4984 if (mi_match(Dst, MRI, m_GSub(m_GAdd(m_Reg(X), m_Reg(Y)), m_Reg(Z)))) { 4985 Register ReplaceReg; 4986 int64_t CstX, CstY; 4987 if (Y == Z || (mi_match(Y, MRI, m_ICstOrSplat(CstY)) && 4988 mi_match(Z, MRI, m_SpecificICstOrSplat(CstY)))) 4989 ReplaceReg = X; 4990 else if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && 4991 mi_match(Z, MRI, m_SpecificICstOrSplat(CstX)))) 4992 ReplaceReg = Y; 4993 if (ReplaceReg) { 4994 MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, ReplaceReg); }; 4995 return true; 4996 } 4997 } 4998 4999 // x - (y + z) -> 0 - y (if x == z) 5000 // x - (y + z) -> 0 - z (if x == y) 5001 if (mi_match(Dst, MRI, m_GSub(m_Reg(X), m_GAdd(m_Reg(Y), m_Reg(Z))))) { 5002 Register ReplaceReg; 5003 int64_t CstX; 5004 if (X == Z || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && 5005 mi_match(Z, MRI, m_SpecificICstOrSplat(CstX)))) 5006 ReplaceReg = Y; 5007 else if (X == Y || (mi_match(X, MRI, m_ICstOrSplat(CstX)) && 5008 mi_match(Y, MRI, m_SpecificICstOrSplat(CstX)))) 5009 ReplaceReg = Z; 5010 if (ReplaceReg) { 5011 MatchInfo = [=](MachineIRBuilder &B) { 5012 auto Zero = B.buildConstant(MRI.getType(Dst), 0); 5013 B.buildSub(Dst, Zero, ReplaceReg); 5014 }; 5015 return true; 5016 } 5017 } 5018 return false; 5019 } 5020 5021 MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) { 5022 assert(MI.getOpcode() == TargetOpcode::G_UDIV); 5023 auto &UDiv = cast<GenericMachineInstr>(MI); 5024 Register Dst = UDiv.getReg(0); 5025 Register LHS = UDiv.getReg(1); 5026 Register RHS = UDiv.getReg(2); 5027 LLT Ty = MRI.getType(Dst); 5028 LLT ScalarTy = Ty.getScalarType(); 5029 const unsigned EltBits = ScalarTy.getScalarSizeInBits(); 5030 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 5031 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType(); 5032 auto &MIB = Builder; 5033 MIB.setInstrAndDebugLoc(MI); 5034 5035 bool UseNPQ = false; 5036 SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5037 5038 auto BuildUDIVPattern = [&](const Constant *C) { 5039 auto *CI = cast<ConstantInt>(C); 5040 const APInt &Divisor = CI->getValue(); 5041 5042 bool SelNPQ = false; 5043 APInt Magic(Divisor.getBitWidth(), 0); 5044 unsigned PreShift = 0, PostShift = 0; 5045 5046 // Magic algorithm doesn't work for division by 1. We need to emit a select 5047 // at the end. 5048 // TODO: Use undef values for divisor of 1. 
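    // For other divisors the classic magic-number expansion applies. As an
    // illustrative example, a 32-bit unsigned divide by 5 uses
    // Magic = 0xCCCCCCCD, PreShift = 0, PostShift = 2 and no NPQ fixup, so
    //   x udiv 5  ==>  (umulh x, 0xCCCCCCCD) lshr 2.
    // Divisors whose magic multiplier needs an extra bit set IsAdd and take
    // the NPQ path built below instead.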
5049 if (!Divisor.isOne()) { 5050 UnsignedDivisionByConstantInfo magics = 5051 UnsignedDivisionByConstantInfo::get(Divisor); 5052 5053 Magic = std::move(magics.Magic); 5054 5055 assert(magics.PreShift < Divisor.getBitWidth() && 5056 "We shouldn't generate an undefined shift!"); 5057 assert(magics.PostShift < Divisor.getBitWidth() && 5058 "We shouldn't generate an undefined shift!"); 5059 assert((!magics.IsAdd || magics.PreShift == 0) && "Unexpected pre-shift"); 5060 PreShift = magics.PreShift; 5061 PostShift = magics.PostShift; 5062 SelNPQ = magics.IsAdd; 5063 } 5064 5065 PreShifts.push_back( 5066 MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0)); 5067 MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0)); 5068 NPQFactors.push_back( 5069 MIB.buildConstant(ScalarTy, 5070 SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5071 : APInt::getZero(EltBits)) 5072 .getReg(0)); 5073 PostShifts.push_back( 5074 MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0)); 5075 UseNPQ |= SelNPQ; 5076 return true; 5077 }; 5078 5079 // Collect the shifts/magic values from each element. 5080 bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern); 5081 (void)Matched; 5082 assert(Matched && "Expected unary predicate match to succeed"); 5083 5084 Register PreShift, PostShift, MagicFactor, NPQFactor; 5085 auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI); 5086 if (RHSDef) { 5087 PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0); 5088 MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0); 5089 NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0); 5090 PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0); 5091 } else { 5092 assert(MRI.getType(RHS).isScalar() && 5093 "Non-build_vector operation should have been a scalar"); 5094 PreShift = PreShifts[0]; 5095 MagicFactor = MagicFactors[0]; 5096 PostShift = PostShifts[0]; 5097 } 5098 5099 Register Q = LHS; 5100 Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0); 5101 5102 // Multiply the numerator (operand 0) by the magic value. 5103 Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0); 5104 5105 if (UseNPQ) { 5106 Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0); 5107 5108 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5109 // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero. 5110 if (Ty.isVector()) 5111 NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0); 5112 else 5113 NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0); 5114 5115 Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0); 5116 } 5117 5118 Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0); 5119 auto One = MIB.buildConstant(Ty, 1); 5120 auto IsOne = MIB.buildICmp( 5121 CmpInst::Predicate::ICMP_EQ, 5122 Ty.isScalar() ? 
LLT::scalar(1) : Ty.changeElementSize(1), RHS, One); 5123 return MIB.buildSelect(Ty, IsOne, LHS, Q); 5124 } 5125 5126 bool CombinerHelper::matchUDivByConst(MachineInstr &MI) { 5127 assert(MI.getOpcode() == TargetOpcode::G_UDIV); 5128 Register Dst = MI.getOperand(0).getReg(); 5129 Register RHS = MI.getOperand(2).getReg(); 5130 LLT DstTy = MRI.getType(Dst); 5131 auto *RHSDef = MRI.getVRegDef(RHS); 5132 if (!isConstantOrConstantVector(*RHSDef, MRI)) 5133 return false; 5134 5135 auto &MF = *MI.getMF(); 5136 AttributeList Attr = MF.getFunction().getAttributes(); 5137 const auto &TLI = getTargetLowering(); 5138 LLVMContext &Ctx = MF.getFunction().getContext(); 5139 auto &DL = MF.getDataLayout(); 5140 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr)) 5141 return false; 5142 5143 // Don't do this for minsize because the instruction sequence is usually 5144 // larger. 5145 if (MF.getFunction().hasMinSize()) 5146 return false; 5147 5148 // Don't do this if the types are not going to be legal. 5149 if (LI) { 5150 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}})) 5151 return false; 5152 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}})) 5153 return false; 5154 if (!isLegalOrBeforeLegalizer( 5155 {TargetOpcode::G_ICMP, 5156 {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1), 5157 DstTy}})) 5158 return false; 5159 } 5160 5161 auto CheckEltValue = [&](const Constant *C) { 5162 if (auto *CI = dyn_cast_or_null<ConstantInt>(C)) 5163 return !CI->isZero(); 5164 return false; 5165 }; 5166 return matchUnaryPredicate(MRI, RHS, CheckEltValue); 5167 } 5168 5169 void CombinerHelper::applyUDivByConst(MachineInstr &MI) { 5170 auto *NewMI = buildUDivUsingMul(MI); 5171 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg()); 5172 } 5173 5174 bool CombinerHelper::matchSDivByConst(MachineInstr &MI) { 5175 assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV"); 5176 Register Dst = MI.getOperand(0).getReg(); 5177 Register RHS = MI.getOperand(2).getReg(); 5178 LLT DstTy = MRI.getType(Dst); 5179 5180 auto &MF = *MI.getMF(); 5181 AttributeList Attr = MF.getFunction().getAttributes(); 5182 const auto &TLI = getTargetLowering(); 5183 LLVMContext &Ctx = MF.getFunction().getContext(); 5184 auto &DL = MF.getDataLayout(); 5185 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr)) 5186 return false; 5187 5188 // Don't do this for minsize because the instruction sequence is usually 5189 // larger. 5190 if (MF.getFunction().hasMinSize()) 5191 return false; 5192 5193 // If the sdiv has an 'exact' flag we can use a simpler lowering. 5194 if (MI.getFlag(MachineInstr::MIFlag::IsExact)) { 5195 return matchUnaryPredicate( 5196 MRI, RHS, [](const Constant *C) { return C && !C->isZeroValue(); }); 5197 } 5198 5199 // Don't support the general case for now. 
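  // (With the IsExact flag the division is known to have no remainder, so
  // buildSDivUsingMul can lower it to a shift by the divisor's trailing zeros
  // followed by a multiply with the odd factor's multiplicative inverse
  // mod 2^W; the general SDIV-by-constant expansion is not implemented here.)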
5200 return false; 5201 } 5202 5203 void CombinerHelper::applySDivByConst(MachineInstr &MI) { 5204 auto *NewMI = buildSDivUsingMul(MI); 5205 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg()); 5206 } 5207 5208 MachineInstr *CombinerHelper::buildSDivUsingMul(MachineInstr &MI) { 5209 assert(MI.getOpcode() == TargetOpcode::G_SDIV && "Expected SDIV"); 5210 auto &SDiv = cast<GenericMachineInstr>(MI); 5211 Register Dst = SDiv.getReg(0); 5212 Register LHS = SDiv.getReg(1); 5213 Register RHS = SDiv.getReg(2); 5214 LLT Ty = MRI.getType(Dst); 5215 LLT ScalarTy = Ty.getScalarType(); 5216 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 5217 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType(); 5218 auto &MIB = Builder; 5219 MIB.setInstrAndDebugLoc(MI); 5220 5221 bool UseSRA = false; 5222 SmallVector<Register, 16> Shifts, Factors; 5223 5224 auto *RHSDef = cast<GenericMachineInstr>(getDefIgnoringCopies(RHS, MRI)); 5225 bool IsSplat = getIConstantSplatVal(*RHSDef, MRI).has_value(); 5226 5227 auto BuildSDIVPattern = [&](const Constant *C) { 5228 // Don't recompute inverses for each splat element. 5229 if (IsSplat && !Factors.empty()) { 5230 Shifts.push_back(Shifts[0]); 5231 Factors.push_back(Factors[0]); 5232 return true; 5233 } 5234 5235 auto *CI = cast<ConstantInt>(C); 5236 APInt Divisor = CI->getValue(); 5237 unsigned Shift = Divisor.countr_zero(); 5238 if (Shift) { 5239 Divisor.ashrInPlace(Shift); 5240 UseSRA = true; 5241 } 5242 5243 // Calculate the multiplicative inverse modulo BW. 5244 // 2^W requires W + 1 bits, so we have to extend and then truncate. 5245 unsigned W = Divisor.getBitWidth(); 5246 APInt Factor = Divisor.zext(W + 1) 5247 .multiplicativeInverse(APInt::getSignedMinValue(W + 1)) 5248 .trunc(W); 5249 Shifts.push_back(MIB.buildConstant(ScalarShiftAmtTy, Shift).getReg(0)); 5250 Factors.push_back(MIB.buildConstant(ScalarTy, Factor).getReg(0)); 5251 return true; 5252 }; 5253 5254 // Collect all magic values from the build vector. 
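  // Illustrative example of what BuildSDIVPattern records: for
  // (sdiv exact x:s32, 6) it emits Shift = 1 (6 = 2 * 3) and
  // Factor = 0xAAAAAAAB, the multiplicative inverse of 3 mod 2^32, so the
  // final sequence is mul (ashr exact x, 1), 0xAAAAAAAB.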
5255 bool Matched = matchUnaryPredicate(MRI, RHS, BuildSDIVPattern); 5256 (void)Matched; 5257 assert(Matched && "Expected unary predicate match to succeed"); 5258 5259 Register Shift, Factor; 5260 if (Ty.isVector()) { 5261 Shift = MIB.buildBuildVector(ShiftAmtTy, Shifts).getReg(0); 5262 Factor = MIB.buildBuildVector(Ty, Factors).getReg(0); 5263 } else { 5264 Shift = Shifts[0]; 5265 Factor = Factors[0]; 5266 } 5267 5268 Register Res = LHS; 5269 5270 if (UseSRA) 5271 Res = MIB.buildAShr(Ty, Res, Shift, MachineInstr::IsExact).getReg(0); 5272 5273 return MIB.buildMul(Ty, Res, Factor); 5274 } 5275 5276 bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) { 5277 assert(MI.getOpcode() == TargetOpcode::G_UMULH); 5278 Register RHS = MI.getOperand(2).getReg(); 5279 Register Dst = MI.getOperand(0).getReg(); 5280 LLT Ty = MRI.getType(Dst); 5281 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 5282 auto MatchPow2ExceptOne = [&](const Constant *C) { 5283 if (auto *CI = dyn_cast<ConstantInt>(C)) 5284 return CI->getValue().isPowerOf2() && !CI->getValue().isOne(); 5285 return false; 5286 }; 5287 if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false)) 5288 return false; 5289 return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}}); 5290 } 5291 5292 void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) { 5293 Register LHS = MI.getOperand(1).getReg(); 5294 Register RHS = MI.getOperand(2).getReg(); 5295 Register Dst = MI.getOperand(0).getReg(); 5296 LLT Ty = MRI.getType(Dst); 5297 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 5298 unsigned NumEltBits = Ty.getScalarSizeInBits(); 5299 5300 Builder.setInstrAndDebugLoc(MI); 5301 auto LogBase2 = buildLogBase2(RHS, Builder); 5302 auto ShiftAmt = 5303 Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2); 5304 auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt); 5305 Builder.buildLShr(Dst, LHS, Trunc); 5306 MI.eraseFromParent(); 5307 } 5308 5309 bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI, 5310 BuildFnTy &MatchInfo) { 5311 unsigned Opc = MI.getOpcode(); 5312 assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB || 5313 Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV || 5314 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA); 5315 5316 Register Dst = MI.getOperand(0).getReg(); 5317 Register X = MI.getOperand(1).getReg(); 5318 Register Y = MI.getOperand(2).getReg(); 5319 LLT Type = MRI.getType(Dst); 5320 5321 // fold (fadd x, fneg(y)) -> (fsub x, y) 5322 // fold (fadd fneg(y), x) -> (fsub x, y) 5323 // G_ADD is commutative so both cases are checked by m_GFAdd 5324 if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) && 5325 isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) { 5326 Opc = TargetOpcode::G_FSUB; 5327 } 5328 /// fold (fsub x, fneg(y)) -> (fadd x, y) 5329 else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) && 5330 isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) { 5331 Opc = TargetOpcode::G_FADD; 5332 } 5333 // fold (fmul fneg(x), fneg(y)) -> (fmul x, y) 5334 // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y) 5335 // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z) 5336 // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z) 5337 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV || 5338 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) && 5339 mi_match(X, MRI, m_GFNeg(m_Reg(X))) && 5340 mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) { 5341 // no opcode change 5342 } 
else 5343 return false; 5344 5345 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5346 Observer.changingInstr(MI); 5347 MI.setDesc(B.getTII().get(Opc)); 5348 MI.getOperand(1).setReg(X); 5349 MI.getOperand(2).setReg(Y); 5350 Observer.changedInstr(MI); 5351 }; 5352 return true; 5353 } 5354 5355 bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) { 5356 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5357 5358 Register LHS = MI.getOperand(1).getReg(); 5359 MatchInfo = MI.getOperand(2).getReg(); 5360 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 5361 5362 const auto LHSCst = Ty.isVector() 5363 ? getFConstantSplat(LHS, MRI, /* allowUndef */ true) 5364 : getFConstantVRegValWithLookThrough(LHS, MRI); 5365 if (!LHSCst) 5366 return false; 5367 5368 // -0.0 is always allowed 5369 if (LHSCst->Value.isNegZero()) 5370 return true; 5371 5372 // +0.0 is only allowed if nsz is set. 5373 if (LHSCst->Value.isPosZero()) 5374 return MI.getFlag(MachineInstr::FmNsz); 5375 5376 return false; 5377 } 5378 5379 void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) { 5380 Builder.setInstrAndDebugLoc(MI); 5381 Register Dst = MI.getOperand(0).getReg(); 5382 Builder.buildFNeg( 5383 Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0)); 5384 eraseInst(MI); 5385 } 5386 5387 /// Checks if \p MI is TargetOpcode::G_FMUL and contractable either 5388 /// due to global flags or MachineInstr flags. 5389 static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) { 5390 if (MI.getOpcode() != TargetOpcode::G_FMUL) 5391 return false; 5392 return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract); 5393 } 5394 5395 static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1, 5396 const MachineRegisterInfo &MRI) { 5397 return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()), 5398 MRI.use_instr_nodbg_end()) > 5399 std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()), 5400 MRI.use_instr_nodbg_end()); 5401 } 5402 5403 bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI, 5404 bool &AllowFusionGlobally, 5405 bool &HasFMAD, bool &Aggressive, 5406 bool CanReassociate) { 5407 5408 auto *MF = MI.getMF(); 5409 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 5410 const TargetOptions &Options = MF->getTarget().Options; 5411 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5412 5413 if (CanReassociate && 5414 !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc))) 5415 return false; 5416 5417 // Floating-point multiply-add with intermediate rounding. 5418 HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, DstType)); 5419 // Floating-point multiply-add without intermediate rounding. 5420 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) && 5421 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}}); 5422 // No valid opcode, do not combine. 5423 if (!HasFMAD && !HasFMA) 5424 return false; 5425 5426 AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast || 5427 Options.UnsafeFPMath || HasFMAD; 5428 // If the addition is not contractable, do not combine. 
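  // (An addition counts as contractable if fusion is allowed globally or the
  // instruction itself carries the contract fast-math flag. HasFMAD alone is
  // enough to allow it globally because G_FMAD keeps the intermediate
  // rounding step, so folding fmul+fadd into it does not change the result.)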
5429 if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract)) 5430 return false; 5431 5432 Aggressive = TLI.enableAggressiveFMAFusion(DstType); 5433 return true; 5434 } 5435 5436 bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA( 5437 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5438 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5439 5440 bool AllowFusionGlobally, HasFMAD, Aggressive; 5441 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5442 return false; 5443 5444 Register Op1 = MI.getOperand(1).getReg(); 5445 Register Op2 = MI.getOperand(2).getReg(); 5446 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5447 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5448 unsigned PreferredFusedOpcode = 5449 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5450 5451 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5452 // prefer to fold the multiply with fewer uses. 5453 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5454 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5455 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5456 std::swap(LHS, RHS); 5457 } 5458 5459 // fold (fadd (fmul x, y), z) -> (fma x, y, z) 5460 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5461 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) { 5462 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5463 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5464 {LHS.MI->getOperand(1).getReg(), 5465 LHS.MI->getOperand(2).getReg(), RHS.Reg}); 5466 }; 5467 return true; 5468 } 5469 5470 // fold (fadd x, (fmul y, z)) -> (fma y, z, x) 5471 if (isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5472 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) { 5473 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5474 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5475 {RHS.MI->getOperand(1).getReg(), 5476 RHS.MI->getOperand(2).getReg(), LHS.Reg}); 5477 }; 5478 return true; 5479 } 5480 5481 return false; 5482 } 5483 5484 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA( 5485 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5486 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5487 5488 bool AllowFusionGlobally, HasFMAD, Aggressive; 5489 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5490 return false; 5491 5492 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5493 Register Op1 = MI.getOperand(1).getReg(); 5494 Register Op2 = MI.getOperand(2).getReg(); 5495 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5496 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5497 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5498 5499 unsigned PreferredFusedOpcode = 5500 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5501 5502 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5503 // prefer to fold the multiply with fewer uses. 
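  // (The multiply with more uses has to stay alive for its other users
  // anyway, so fusing the one with fewer uses is more likely to let that
  // fmul become dead after the combine.)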
5504 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5505 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5506 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5507 std::swap(LHS, RHS); 5508 } 5509 5510 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) 5511 MachineInstr *FpExtSrc; 5512 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) && 5513 isContractableFMul(*FpExtSrc, AllowFusionGlobally) && 5514 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5515 MRI.getType(FpExtSrc->getOperand(1).getReg()))) { 5516 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5517 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg()); 5518 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg()); 5519 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5520 {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg}); 5521 }; 5522 return true; 5523 } 5524 5525 // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z) 5526 // Note: Commutes FADD operands. 5527 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) && 5528 isContractableFMul(*FpExtSrc, AllowFusionGlobally) && 5529 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5530 MRI.getType(FpExtSrc->getOperand(1).getReg()))) { 5531 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5532 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg()); 5533 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg()); 5534 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5535 {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg}); 5536 }; 5537 return true; 5538 } 5539 5540 return false; 5541 } 5542 5543 bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA( 5544 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5545 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5546 5547 bool AllowFusionGlobally, HasFMAD, Aggressive; 5548 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true)) 5549 return false; 5550 5551 Register Op1 = MI.getOperand(1).getReg(); 5552 Register Op2 = MI.getOperand(2).getReg(); 5553 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5554 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5555 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5556 5557 unsigned PreferredFusedOpcode = 5558 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5559 5560 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5561 // prefer to fold the multiply with fewer uses. 
5562 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5563 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5564 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5565 std::swap(LHS, RHS); 5566 } 5567 5568 MachineInstr *FMA = nullptr; 5569 Register Z; 5570 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) 5571 if (LHS.MI->getOpcode() == PreferredFusedOpcode && 5572 (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() == 5573 TargetOpcode::G_FMUL) && 5574 MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) && 5575 MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) { 5576 FMA = LHS.MI; 5577 Z = RHS.Reg; 5578 } 5579 // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z)) 5580 else if (RHS.MI->getOpcode() == PreferredFusedOpcode && 5581 (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() == 5582 TargetOpcode::G_FMUL) && 5583 MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) && 5584 MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) { 5585 Z = LHS.Reg; 5586 FMA = RHS.MI; 5587 } 5588 5589 if (FMA) { 5590 MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg()); 5591 Register X = FMA->getOperand(1).getReg(); 5592 Register Y = FMA->getOperand(2).getReg(); 5593 Register U = FMulMI->getOperand(1).getReg(); 5594 Register V = FMulMI->getOperand(2).getReg(); 5595 5596 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5597 Register InnerFMA = MRI.createGenericVirtualRegister(DstTy); 5598 B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z}); 5599 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5600 {X, Y, InnerFMA}); 5601 }; 5602 return true; 5603 } 5604 5605 return false; 5606 } 5607 5608 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive( 5609 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5610 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5611 5612 bool AllowFusionGlobally, HasFMAD, Aggressive; 5613 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5614 return false; 5615 5616 if (!Aggressive) 5617 return false; 5618 5619 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5620 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5621 Register Op1 = MI.getOperand(1).getReg(); 5622 Register Op2 = MI.getOperand(2).getReg(); 5623 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5624 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5625 5626 unsigned PreferredFusedOpcode = 5627 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5628 5629 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5630 // prefer to fold the multiply with fewer uses. 
5631 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5632 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5633 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5634 std::swap(LHS, RHS); 5635 } 5636 5637 // Builds: (fma x, y, (fma (fpext u), (fpext v), z)) 5638 auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X, 5639 Register Y, MachineIRBuilder &B) { 5640 Register FpExtU = B.buildFPExt(DstType, U).getReg(0); 5641 Register FpExtV = B.buildFPExt(DstType, V).getReg(0); 5642 Register InnerFMA = 5643 B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z}) 5644 .getReg(0); 5645 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5646 {X, Y, InnerFMA}); 5647 }; 5648 5649 MachineInstr *FMulMI, *FMAMI; 5650 // fold (fadd (fma x, y, (fpext (fmul u, v))), z) 5651 // -> (fma x, y, (fma (fpext u), (fpext v), z)) 5652 if (LHS.MI->getOpcode() == PreferredFusedOpcode && 5653 mi_match(LHS.MI->getOperand(3).getReg(), MRI, 5654 m_GFPExt(m_MInstr(FMulMI))) && 5655 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5656 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5657 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5658 MatchInfo = [=](MachineIRBuilder &B) { 5659 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5660 FMulMI->getOperand(2).getReg(), RHS.Reg, 5661 LHS.MI->getOperand(1).getReg(), 5662 LHS.MI->getOperand(2).getReg(), B); 5663 }; 5664 return true; 5665 } 5666 5667 // fold (fadd (fpext (fma x, y, (fmul u, v))), z) 5668 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z)) 5669 // FIXME: This turns two single-precision and one double-precision 5670 // operation into two double-precision operations, which might not be 5671 // interesting for all targets, especially GPUs. 5672 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) && 5673 FMAMI->getOpcode() == PreferredFusedOpcode) { 5674 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg()); 5675 if (isContractableFMul(*FMulMI, AllowFusionGlobally) && 5676 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5677 MRI.getType(FMAMI->getOperand(0).getReg()))) { 5678 MatchInfo = [=](MachineIRBuilder &B) { 5679 Register X = FMAMI->getOperand(1).getReg(); 5680 Register Y = FMAMI->getOperand(2).getReg(); 5681 X = B.buildFPExt(DstType, X).getReg(0); 5682 Y = B.buildFPExt(DstType, Y).getReg(0); 5683 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5684 FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B); 5685 }; 5686 5687 return true; 5688 } 5689 } 5690 5691 // fold (fadd z, (fma x, y, (fpext (fmul u, v))) 5692 // -> (fma x, y, (fma (fpext u), (fpext v), z)) 5693 if (RHS.MI->getOpcode() == PreferredFusedOpcode && 5694 mi_match(RHS.MI->getOperand(3).getReg(), MRI, 5695 m_GFPExt(m_MInstr(FMulMI))) && 5696 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5697 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5698 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5699 MatchInfo = [=](MachineIRBuilder &B) { 5700 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5701 FMulMI->getOperand(2).getReg(), LHS.Reg, 5702 RHS.MI->getOperand(1).getReg(), 5703 RHS.MI->getOperand(2).getReg(), B); 5704 }; 5705 return true; 5706 } 5707 5708 // fold (fadd z, (fpext (fma x, y, (fmul u, v))) 5709 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z)) 5710 // FIXME: This turns two single-precision and one double-precision 5711 // operation into two double-precision operations, which might not be 5712 // interesting for all targets, especially GPUs. 
5713 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) && 5714 FMAMI->getOpcode() == PreferredFusedOpcode) { 5715 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg()); 5716 if (isContractableFMul(*FMulMI, AllowFusionGlobally) && 5717 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5718 MRI.getType(FMAMI->getOperand(0).getReg()))) { 5719 MatchInfo = [=](MachineIRBuilder &B) { 5720 Register X = FMAMI->getOperand(1).getReg(); 5721 Register Y = FMAMI->getOperand(2).getReg(); 5722 X = B.buildFPExt(DstType, X).getReg(0); 5723 Y = B.buildFPExt(DstType, Y).getReg(0); 5724 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5725 FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B); 5726 }; 5727 return true; 5728 } 5729 } 5730 5731 return false; 5732 } 5733 5734 bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA( 5735 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5736 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5737 5738 bool AllowFusionGlobally, HasFMAD, Aggressive; 5739 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5740 return false; 5741 5742 Register Op1 = MI.getOperand(1).getReg(); 5743 Register Op2 = MI.getOperand(2).getReg(); 5744 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5745 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5746 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5747 5748 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5749 // prefer to fold the multiply with fewer uses. 5750 int FirstMulHasFewerUses = true; 5751 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5752 isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5753 hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5754 FirstMulHasFewerUses = false; 5755 5756 unsigned PreferredFusedOpcode = 5757 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5758 5759 // fold (fsub (fmul x, y), z) -> (fma x, y, -z) 5760 if (FirstMulHasFewerUses && 5761 (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5762 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) { 5763 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5764 Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0); 5765 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5766 {LHS.MI->getOperand(1).getReg(), 5767 LHS.MI->getOperand(2).getReg(), NegZ}); 5768 }; 5769 return true; 5770 } 5771 // fold (fsub x, (fmul y, z)) -> (fma -y, z, x) 5772 else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5773 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) { 5774 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5775 Register NegY = 5776 B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0); 5777 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5778 {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg}); 5779 }; 5780 return true; 5781 } 5782 5783 return false; 5784 } 5785 5786 bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA( 5787 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5788 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5789 5790 bool AllowFusionGlobally, HasFMAD, Aggressive; 5791 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5792 return false; 5793 5794 Register LHSReg = MI.getOperand(1).getReg(); 5795 Register RHSReg = MI.getOperand(2).getReg(); 5796 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5797 5798 unsigned PreferredFusedOpcode = 5799 HasFMAD ? 
                                TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegX =
          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }

  // fold (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FMulMI->getOperand(1).getReg(),
                    FMulMI->getOperand(2).getReg(), LHSReg});
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ?
TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5847 5848 MachineInstr *FMulMI; 5849 // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) 5850 if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) && 5851 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5852 (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) { 5853 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5854 Register FpExtX = 5855 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0); 5856 Register FpExtY = 5857 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0); 5858 Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0); 5859 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5860 {FpExtX, FpExtY, NegZ}); 5861 }; 5862 return true; 5863 } 5864 5865 // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x) 5866 if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) && 5867 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5868 (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) { 5869 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5870 Register FpExtY = 5871 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0); 5872 Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0); 5873 Register FpExtZ = 5874 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0); 5875 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5876 {NegY, FpExtZ, LHSReg}); 5877 }; 5878 return true; 5879 } 5880 5881 return false; 5882 } 5883 5884 bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA( 5885 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5886 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5887 5888 bool AllowFusionGlobally, HasFMAD, Aggressive; 5889 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5890 return false; 5891 5892 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5893 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5894 Register LHSReg = MI.getOperand(1).getReg(); 5895 Register RHSReg = MI.getOperand(2).getReg(); 5896 5897 unsigned PreferredFusedOpcode = 5898 HasFMAD ? 
TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5899 5900 auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z, 5901 MachineIRBuilder &B) { 5902 Register FpExtX = B.buildFPExt(DstTy, X).getReg(0); 5903 Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0); 5904 B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z}); 5905 }; 5906 5907 MachineInstr *FMulMI; 5908 // fold (fsub (fpext (fneg (fmul x, y))), z) -> 5909 // (fneg (fma (fpext x), (fpext y), z)) 5910 // fold (fsub (fneg (fpext (fmul x, y))), z) -> 5911 // (fneg (fma (fpext x), (fpext y), z)) 5912 if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) || 5913 mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) && 5914 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5915 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy, 5916 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5917 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5918 Register FMAReg = MRI.createGenericVirtualRegister(DstTy); 5919 buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(), 5920 FMulMI->getOperand(2).getReg(), RHSReg, B); 5921 B.buildFNeg(MI.getOperand(0).getReg(), FMAReg); 5922 }; 5923 return true; 5924 } 5925 5926 // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x) 5927 // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x) 5928 if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) || 5929 mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) && 5930 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5931 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy, 5932 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5933 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5934 buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(), 5935 FMulMI->getOperand(2).getReg(), LHSReg, B); 5936 }; 5937 return true; 5938 } 5939 5940 return false; 5941 } 5942 5943 bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI, 5944 unsigned &IdxToPropagate) { 5945 bool PropagateNaN; 5946 switch (MI.getOpcode()) { 5947 default: 5948 return false; 5949 case TargetOpcode::G_FMINNUM: 5950 case TargetOpcode::G_FMAXNUM: 5951 PropagateNaN = false; 5952 break; 5953 case TargetOpcode::G_FMINIMUM: 5954 case TargetOpcode::G_FMAXIMUM: 5955 PropagateNaN = true; 5956 break; 5957 } 5958 5959 auto MatchNaN = [&](unsigned Idx) { 5960 Register MaybeNaNReg = MI.getOperand(Idx).getReg(); 5961 const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI); 5962 if (!MaybeCst || !MaybeCst->getValueAPF().isNaN()) 5963 return false; 5964 IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 
2 : 1); 5965 return true; 5966 }; 5967 5968 return MatchNaN(1) || MatchNaN(2); 5969 } 5970 5971 bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) { 5972 assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD"); 5973 Register LHS = MI.getOperand(1).getReg(); 5974 Register RHS = MI.getOperand(2).getReg(); 5975 5976 // Helper lambda to check for opportunities for 5977 // A + (B - A) -> B 5978 // (B - A) + A -> B 5979 auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) { 5980 Register Reg; 5981 return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) && 5982 Reg == MaybeSameReg; 5983 }; 5984 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); 5985 } 5986 5987 bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI, 5988 Register &MatchInfo) { 5989 // This combine folds the following patterns: 5990 // 5991 // G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k)) 5992 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), G_TRUNC(G_LSHR(G_BITCAST(x), k))) 5993 // into 5994 // x 5995 // if 5996 // k == sizeof(VecEltTy)/2 5997 // type(x) == type(dst) 5998 // 5999 // G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), undef) 6000 // into 6001 // x 6002 // if 6003 // type(x) == type(dst) 6004 6005 LLT DstVecTy = MRI.getType(MI.getOperand(0).getReg()); 6006 LLT DstEltTy = DstVecTy.getElementType(); 6007 6008 Register Lo, Hi; 6009 6010 if (mi_match( 6011 MI, MRI, 6012 m_GBuildVector(m_GTrunc(m_GBitcast(m_Reg(Lo))), m_GImplicitDef()))) { 6013 MatchInfo = Lo; 6014 return MRI.getType(MatchInfo) == DstVecTy; 6015 } 6016 6017 std::optional<ValueAndVReg> ShiftAmount; 6018 const auto LoPattern = m_GBitcast(m_Reg(Lo)); 6019 const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount)); 6020 if (mi_match( 6021 MI, MRI, 6022 m_any_of(m_GBuildVectorTrunc(LoPattern, HiPattern), 6023 m_GBuildVector(m_GTrunc(LoPattern), m_GTrunc(HiPattern))))) { 6024 if (Lo == Hi && ShiftAmount->Value == DstEltTy.getSizeInBits()) { 6025 MatchInfo = Lo; 6026 return MRI.getType(MatchInfo) == DstVecTy; 6027 } 6028 } 6029 6030 return false; 6031 } 6032 6033 bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI, 6034 Register &MatchInfo) { 6035 // Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y)) with just x 6036 // if type(x) == type(G_TRUNC) 6037 if (!mi_match(MI.getOperand(1).getReg(), MRI, 6038 m_GBitcast(m_GBuildVector(m_Reg(MatchInfo), m_Reg())))) 6039 return false; 6040 6041 return MRI.getType(MatchInfo) == MRI.getType(MI.getOperand(0).getReg()); 6042 } 6043 6044 bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI, 6045 Register &MatchInfo) { 6046 // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with 6047 // y if K == size of vector element type 6048 std::optional<ValueAndVReg> ShiftAmt; 6049 if (!mi_match(MI.getOperand(1).getReg(), MRI, 6050 m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))), 6051 m_GCst(ShiftAmt)))) 6052 return false; 6053 6054 LLT MatchTy = MRI.getType(MatchInfo); 6055 return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() && 6056 MatchTy == MRI.getType(MI.getOperand(0).getReg()); 6057 } 6058 6059 unsigned CombinerHelper::getFPMinMaxOpcForSelect( 6060 CmpInst::Predicate Pred, LLT DstTy, 6061 SelectPatternNaNBehaviour VsNaNRetVal) const { 6062 assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE && 6063 "Expected a NaN behaviour?"); 6064 // Choose an opcode based off of legality or the behaviour when one of the 6065 // LHS/RHS may be NaN. 
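  // NaN semantics assumed here: G_FMAXNUM/G_FMINNUM follow IEEE-754
  // maxNum/minNum and return the non-NaN operand when exactly one input is
  // NaN, while G_FMAXIMUM/G_FMINIMUM propagate the NaN. That is why
  // RETURNS_OTHER maps to the *NUM opcodes and RETURNS_NAN to the *IMUM ones.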
6066 switch (Pred) { 6067 default: 6068 return 0; 6069 case CmpInst::FCMP_UGT: 6070 case CmpInst::FCMP_UGE: 6071 case CmpInst::FCMP_OGT: 6072 case CmpInst::FCMP_OGE: 6073 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER) 6074 return TargetOpcode::G_FMAXNUM; 6075 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN) 6076 return TargetOpcode::G_FMAXIMUM; 6077 if (isLegal({TargetOpcode::G_FMAXNUM, {DstTy}})) 6078 return TargetOpcode::G_FMAXNUM; 6079 if (isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}})) 6080 return TargetOpcode::G_FMAXIMUM; 6081 return 0; 6082 case CmpInst::FCMP_ULT: 6083 case CmpInst::FCMP_ULE: 6084 case CmpInst::FCMP_OLT: 6085 case CmpInst::FCMP_OLE: 6086 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER) 6087 return TargetOpcode::G_FMINNUM; 6088 if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN) 6089 return TargetOpcode::G_FMINIMUM; 6090 if (isLegal({TargetOpcode::G_FMINNUM, {DstTy}})) 6091 return TargetOpcode::G_FMINNUM; 6092 if (!isLegal({TargetOpcode::G_FMINIMUM, {DstTy}})) 6093 return 0; 6094 return TargetOpcode::G_FMINIMUM; 6095 } 6096 } 6097 6098 CombinerHelper::SelectPatternNaNBehaviour 6099 CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS, 6100 bool IsOrderedComparison) const { 6101 bool LHSSafe = isKnownNeverNaN(LHS, MRI); 6102 bool RHSSafe = isKnownNeverNaN(RHS, MRI); 6103 // Completely unsafe. 6104 if (!LHSSafe && !RHSSafe) 6105 return SelectPatternNaNBehaviour::NOT_APPLICABLE; 6106 if (LHSSafe && RHSSafe) 6107 return SelectPatternNaNBehaviour::RETURNS_ANY; 6108 // An ordered comparison will return false when given a NaN, so it 6109 // returns the RHS. 6110 if (IsOrderedComparison) 6111 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN 6112 : SelectPatternNaNBehaviour::RETURNS_OTHER; 6113 // An unordered comparison will return true when given a NaN, so it 6114 // returns the LHS. 6115 return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER 6116 : SelectPatternNaNBehaviour::RETURNS_NAN; 6117 } 6118 6119 bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond, 6120 Register TrueVal, Register FalseVal, 6121 BuildFnTy &MatchInfo) { 6122 // Match: select (fcmp cond x, y) x, y 6123 // select (fcmp cond x, y) y, x 6124 // And turn it into fminnum/fmaxnum or fmin/fmax based off of the condition. 6125 LLT DstTy = MRI.getType(Dst); 6126 // Bail out early on pointers, since we'll never want to fold to a min/max. 6127 if (DstTy.isPointer()) 6128 return false; 6129 // Match a floating point compare with a less-than/greater-than predicate. 6130 // TODO: Allow multiple users of the compare if they are all selects. 
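  // For example, (select (fcmp ogt x, y), x, y) becomes G_FMAXNUM x, y when x
  // is the only operand that might be NaN (the ordered compare fails on NaN
  // and picks y, matching maxnum), and G_FMAXIMUM x, y when y is the only
  // possibly-NaN operand (the select would then return the NaN).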
6131 CmpInst::Predicate Pred; 6132 Register CmpLHS, CmpRHS; 6133 if (!mi_match(Cond, MRI, 6134 m_OneNonDBGUse( 6135 m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) || 6136 CmpInst::isEquality(Pred)) 6137 return false; 6138 SelectPatternNaNBehaviour ResWithKnownNaNInfo = 6139 computeRetValAgainstNaN(CmpLHS, CmpRHS, CmpInst::isOrdered(Pred)); 6140 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE) 6141 return false; 6142 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 6143 std::swap(CmpLHS, CmpRHS); 6144 Pred = CmpInst::getSwappedPredicate(Pred); 6145 if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN) 6146 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER; 6147 else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER) 6148 ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN; 6149 } 6150 if (TrueVal != CmpLHS || FalseVal != CmpRHS) 6151 return false; 6152 // Decide what type of max/min this should be based off of the predicate. 6153 unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo); 6154 if (!Opc || !isLegal({Opc, {DstTy}})) 6155 return false; 6156 // Comparisons between signed zero and zero may have different results... 6157 // unless we have fmaximum/fminimum. In that case, we know -0 < 0. 6158 if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) { 6159 // We don't know if a comparison between two 0s will give us a consistent 6160 // result. Be conservative and only proceed if at least one side is 6161 // non-zero. 6162 auto KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpLHS, MRI); 6163 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) { 6164 KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpRHS, MRI); 6165 if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) 6166 return false; 6167 } 6168 } 6169 MatchInfo = [=](MachineIRBuilder &B) { 6170 B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS}); 6171 }; 6172 return true; 6173 } 6174 6175 bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI, 6176 BuildFnTy &MatchInfo) { 6177 // TODO: Handle integer cases. 6178 assert(MI.getOpcode() == TargetOpcode::G_SELECT); 6179 // Condition may be fed by a truncated compare. 6180 Register Cond = MI.getOperand(1).getReg(); 6181 Register MaybeTrunc; 6182 if (mi_match(Cond, MRI, m_OneNonDBGUse(m_GTrunc(m_Reg(MaybeTrunc))))) 6183 Cond = MaybeTrunc; 6184 Register Dst = MI.getOperand(0).getReg(); 6185 Register TrueVal = MI.getOperand(2).getReg(); 6186 Register FalseVal = MI.getOperand(3).getReg(); 6187 return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo); 6188 } 6189 6190 bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI, 6191 BuildFnTy &MatchInfo) { 6192 assert(MI.getOpcode() == TargetOpcode::G_ICMP); 6193 // (X + Y) == X --> Y == 0 6194 // (X + Y) != X --> Y != 0 6195 // (X - Y) == X --> Y == 0 6196 // (X - Y) != X --> Y != 0 6197 // (X ^ Y) == X --> Y == 0 6198 // (X ^ Y) != X --> Y != 0 6199 Register Dst = MI.getOperand(0).getReg(); 6200 CmpInst::Predicate Pred; 6201 Register X, Y, OpLHS, OpRHS; 6202 bool MatchedSub = mi_match( 6203 Dst, MRI, 6204 m_c_GICmp(m_Pred(Pred), m_Reg(X), m_GSub(m_Reg(OpLHS), m_Reg(Y)))); 6205 if (MatchedSub && X != OpLHS) 6206 return false; 6207 if (!MatchedSub) { 6208 if (!mi_match(Dst, MRI, 6209 m_c_GICmp(m_Pred(Pred), m_Reg(X), 6210 m_any_of(m_GAdd(m_Reg(OpLHS), m_Reg(OpRHS)), 6211 m_GXor(m_Reg(OpLHS), m_Reg(OpRHS)))))) 6212 return false; 6213 Y = X == OpLHS ? OpRHS : X == OpRHS ? 
OpLHS : Register(); 6214 } 6215 MatchInfo = [=](MachineIRBuilder &B) { 6216 auto Zero = B.buildConstant(MRI.getType(Y), 0); 6217 B.buildICmp(Pred, Dst, Y, Zero); 6218 }; 6219 return CmpInst::isEquality(Pred) && Y.isValid(); 6220 } 6221 6222 bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) { 6223 Register ShiftReg = MI.getOperand(2).getReg(); 6224 LLT ResTy = MRI.getType(MI.getOperand(0).getReg()); 6225 auto IsShiftTooBig = [&](const Constant *C) { 6226 auto *CI = dyn_cast<ConstantInt>(C); 6227 return CI && CI->uge(ResTy.getScalarSizeInBits()); 6228 }; 6229 return matchUnaryPredicate(MRI, ShiftReg, IsShiftTooBig); 6230 } 6231 6232 bool CombinerHelper::matchCommuteConstantToRHS(MachineInstr &MI) { 6233 Register LHS = MI.getOperand(1).getReg(); 6234 Register RHS = MI.getOperand(2).getReg(); 6235 auto *LHSDef = MRI.getVRegDef(LHS); 6236 if (getIConstantVRegVal(LHS, MRI).has_value()) 6237 return true; 6238 6239 // LHS may be a G_CONSTANT_FOLD_BARRIER. If so we commute 6240 // as long as we don't already have a constant on the RHS. 6241 if (LHSDef->getOpcode() != TargetOpcode::G_CONSTANT_FOLD_BARRIER) 6242 return false; 6243 return MRI.getVRegDef(RHS)->getOpcode() != 6244 TargetOpcode::G_CONSTANT_FOLD_BARRIER && 6245 !getIConstantVRegVal(RHS, MRI); 6246 } 6247 6248 bool CombinerHelper::matchCommuteFPConstantToRHS(MachineInstr &MI) { 6249 Register LHS = MI.getOperand(1).getReg(); 6250 Register RHS = MI.getOperand(2).getReg(); 6251 std::optional<FPValueAndVReg> ValAndVReg; 6252 if (!mi_match(LHS, MRI, m_GFCstOrSplat(ValAndVReg))) 6253 return false; 6254 return !mi_match(RHS, MRI, m_GFCstOrSplat(ValAndVReg)); 6255 } 6256 6257 void CombinerHelper::applyCommuteBinOpOperands(MachineInstr &MI) { 6258 Observer.changingInstr(MI); 6259 Register LHSReg = MI.getOperand(1).getReg(); 6260 Register RHSReg = MI.getOperand(2).getReg(); 6261 MI.getOperand(1).setReg(RHSReg); 6262 MI.getOperand(2).setReg(LHSReg); 6263 Observer.changedInstr(MI); 6264 } 6265 6266 bool CombinerHelper::isOneOrOneSplat(Register Src, bool AllowUndefs) { 6267 LLT SrcTy = MRI.getType(Src); 6268 if (SrcTy.isFixedVector()) 6269 return isConstantSplatVector(Src, 1, AllowUndefs); 6270 if (SrcTy.isScalar()) { 6271 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Src, MRI) != nullptr) 6272 return true; 6273 auto IConstant = getIConstantVRegValWithLookThrough(Src, MRI); 6274 return IConstant && IConstant->Value == 1; 6275 } 6276 return false; // scalable vector 6277 } 6278 6279 bool CombinerHelper::isZeroOrZeroSplat(Register Src, bool AllowUndefs) { 6280 LLT SrcTy = MRI.getType(Src); 6281 if (SrcTy.isFixedVector()) 6282 return isConstantSplatVector(Src, 0, AllowUndefs); 6283 if (SrcTy.isScalar()) { 6284 if (AllowUndefs && getOpcodeDef<GImplicitDef>(Src, MRI) != nullptr) 6285 return true; 6286 auto IConstant = getIConstantVRegValWithLookThrough(Src, MRI); 6287 return IConstant && IConstant->Value == 0; 6288 } 6289 return false; // scalable vector 6290 } 6291 6292 // Ignores COPYs during conformance checks. 6293 // FIXME scalable vectors. 
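// For example, with AllowUndefs set, a G_BUILD_VECTOR of (1, undef, 1, 1) is
// accepted as a splat of 1: undef sources are skipped and every remaining
// source must be an integer constant equal to SplatValue.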
bool CombinerHelper::isConstantSplatVector(Register Src, int64_t SplatValue,
                                           bool AllowUndefs) {
  GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Src, MRI);
  if (!BuildVector)
    return false;
  unsigned NumSources = BuildVector->getNumSources();

  for (unsigned I = 0; I < NumSources; ++I) {
    GImplicitDef *ImplicitDef =
        getOpcodeDef<GImplicitDef>(BuildVector->getSourceReg(I), MRI);
    if (ImplicitDef && AllowUndefs)
      continue;
    if (ImplicitDef && !AllowUndefs)
      return false;
    std::optional<ValueAndVReg> IConstant =
        getIConstantVRegValWithLookThrough(BuildVector->getSourceReg(I), MRI);
    if (IConstant && IConstant->Value == SplatValue)
      continue;
    return false;
  }
  return true;
}

// Ignores COPYs during lookups.
// FIXME scalable vectors
std::optional<APInt>
CombinerHelper::getConstantOrConstantSplatVector(Register Src) {
  auto IConstant = getIConstantVRegValWithLookThrough(Src, MRI);
  if (IConstant)
    return IConstant->Value;

  GBuildVector *BuildVector = getOpcodeDef<GBuildVector>(Src, MRI);
  if (!BuildVector)
    return std::nullopt;
  unsigned NumSources = BuildVector->getNumSources();

  std::optional<APInt> Value = std::nullopt;
  for (unsigned I = 0; I < NumSources; ++I) {
    std::optional<ValueAndVReg> IConstant =
        getIConstantVRegValWithLookThrough(BuildVector->getSourceReg(I), MRI);
    if (!IConstant)
      return std::nullopt;
    if (!Value)
      Value = IConstant->Value;
    else if (*Value != IConstant->Value)
      return std::nullopt;
  }
  return Value;
}

// TODO: use knownbits to determine zeros
bool CombinerHelper::tryFoldSelectOfConstants(GSelect *Select,
                                              BuildFnTy &MatchInfo) {
  uint32_t Flags = Select->getFlags();
  Register Dest = Select->getReg(0);
  Register Cond = Select->getCondReg();
  Register True = Select->getTrueReg();
  Register False = Select->getFalseReg();
  LLT CondTy = MRI.getType(Select->getCondReg());
  LLT TrueTy = MRI.getType(Select->getTrueReg());

  // We only do this combine for scalar boolean conditions.
  if (CondTy != LLT::scalar(1))
    return false;

  // Both are scalars.
  std::optional<ValueAndVReg> TrueOpt =
      getIConstantVRegValWithLookThrough(True, MRI);
  std::optional<ValueAndVReg> FalseOpt =
      getIConstantVRegValWithLookThrough(False, MRI);

  if (!TrueOpt || !FalseOpt)
    return false;

  APInt TrueValue = TrueOpt->Value;
  APInt FalseValue = FalseOpt->Value;

  // select Cond, 1, 0 --> zext (Cond)
  if (TrueValue.isOne() && FalseValue.isZero()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      B.buildZExtOrTrunc(Dest, Cond);
    };
    return true;
  }

  // select Cond, -1, 0 --> sext (Cond)
  if (TrueValue.isAllOnes() && FalseValue.isZero()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      B.buildSExtOrTrunc(Dest, Cond);
    };
    return true;
  }

  // select Cond, 0, 1 --> zext (!Cond)
  if (TrueValue.isZero() && FalseValue.isOne()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      B.buildZExtOrTrunc(Dest, Inner);
    };
    return true;
  }

  // select Cond, 0, -1 --> sext (!Cond)
  if (TrueValue.isZero() && FalseValue.isAllOnes()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      B.buildSExtOrTrunc(Dest, Inner);
    };
    return true;
  }

  // select Cond, C1, C1-1 --> add (zext Cond), C1-1
  if (TrueValue - 1 == FalseValue) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Inner, Cond);
      B.buildAdd(Dest, Inner, False);
    };
    return true;
  }

  // select Cond, C1, C1+1 --> add (sext Cond), C1+1
  if (TrueValue + 1 == FalseValue) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Cond);
      B.buildAdd(Dest, Inner, False);
    };
    return true;
  }

  // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2)
  if (TrueValue.isPowerOf2() && FalseValue.isZero()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Inner, Cond);
      // The shift amount must be scalar.
      LLT ShiftTy = TrueTy.isVector() ? TrueTy.getElementType() : TrueTy;
      auto ShAmtC = B.buildConstant(ShiftTy, TrueValue.exactLogBase2());
      B.buildShl(Dest, Inner, ShAmtC, Flags);
    };
    return true;
  }
  // select Cond, -1, C --> or (sext Cond), C
  if (TrueValue.isAllOnes()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Cond);
      B.buildOr(Dest, Inner, False, Flags);
    };
    return true;
  }

  // select Cond, C, -1 --> or (sext (not Cond)), C
  if (FalseValue.isAllOnes()) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Not = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Not, Cond);
      Register Inner = MRI.createGenericVirtualRegister(TrueTy);
      B.buildSExtOrTrunc(Inner, Not);
      B.buildOr(Dest, Inner, True, Flags);
    };
    return true;
  }

  return false;
}

// TODO: use knownbits to determine zeros
bool CombinerHelper::tryFoldBoolSelectToLogic(GSelect *Select,
                                              BuildFnTy &MatchInfo) {
  uint32_t Flags = Select->getFlags();
  Register DstReg = Select->getReg(0);
  Register Cond = Select->getCondReg();
  Register True = Select->getTrueReg();
  Register False = Select->getFalseReg();
  LLT CondTy = MRI.getType(Select->getCondReg());
  LLT TrueTy = MRI.getType(Select->getTrueReg());

  // Boolean or fixed vector of booleans.
  if (CondTy.isScalableVector() ||
      (CondTy.isFixedVector() &&
       CondTy.getElementType().getScalarSizeInBits() != 1) ||
      CondTy.getScalarSizeInBits() != 1)
    return false;

  if (CondTy != TrueTy)
    return false;

  // select Cond, Cond, F --> or Cond, F
  // select Cond, 1, F --> or Cond, F
  if ((Cond == True) || isOneOrOneSplat(True, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Ext = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Ext, Cond);
      B.buildOr(DstReg, Ext, False, Flags);
    };
    return true;
  }

  // select Cond, T, Cond --> and Cond, T
  // select Cond, T, 0 --> and Cond, T
  if ((Cond == False) || isZeroOrZeroSplat(False, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      Register Ext = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Ext, Cond);
      B.buildAnd(DstReg, Ext, True);
    };
    return true;
  }

  // select Cond, T, 1 --> or (not Cond), T
  if (isOneOrOneSplat(False, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // First the not.
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      // Then an ext to match the destination register.
      Register Ext = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Ext, Inner);
      B.buildOr(DstReg, Ext, True, Flags);
    };
    return true;
  }

  // select Cond, 0, F --> and (not Cond), F
  if (isZeroOrZeroSplat(True, /* AllowUndefs */ true)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.setInstrAndDebugLoc(*Select);
      // First the not.
      Register Inner = MRI.createGenericVirtualRegister(CondTy);
      B.buildNot(Inner, Cond);
      // Then an ext to match the destination register.
      Register Ext = MRI.createGenericVirtualRegister(TrueTy);
      B.buildZExtOrTrunc(Ext, Inner);
      B.buildAnd(DstReg, Ext, False);
    };
    return true;
  }

  return false;
}

bool CombinerHelper::matchSelect(MachineInstr &MI, BuildFnTy &MatchInfo) {
  GSelect *Select = cast<GSelect>(&MI);

  if (tryFoldSelectOfConstants(Select, MatchInfo))
    return true;

  if (tryFoldBoolSelectToLogic(Select, MatchInfo))
    return true;

  return false;
}