//===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file "describes" induction and recurrence variables.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

#include <set>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "iv-descriptors"

bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
                                        SmallPtrSetImpl<Instruction *> &Set) {
  for (const Use &Use : I->operands())
    if (!Set.count(dyn_cast<Instruction>(Use)))
      return false;
  return true;
}

bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
  switch (Kind) {
  default:
    break;
  case RecurKind::Add:
  case RecurKind::Mul:
  case RecurKind::Or:
  case RecurKind::And:
  case RecurKind::Xor:
  case RecurKind::SMax:
  case RecurKind::SMin:
  case RecurKind::UMax:
  case RecurKind::UMin:
  case RecurKind::SelectICmp:
  case RecurKind::SelectFCmp:
    return true;
  }
  return false;
}

bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
  return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
}

/// Determines if Phi may have been type-promoted. If Phi has a single user
/// that ANDs the Phi with a type mask, return the user. RT is updated to
/// account for the narrower bit width represented by the mask, and the AND
/// instruction is added to CI.
static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
                                   SmallPtrSetImpl<Instruction *> &Visited,
                                   SmallPtrSetImpl<Instruction *> &CI) {
  if (!Phi->hasOneUse())
    return Phi;

  const APInt *M = nullptr;
  Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());

  // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
  // with a new integer type of the corresponding bit width.
  if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
    int32_t Bits = (*M + 1).exactLogBase2();
    if (Bits > 0) {
      RT = IntegerType::get(Phi->getContext(), Bits);
      Visited.insert(Phi);
      CI.insert(J);
      return J;
    }
  }
  return Phi;
}

/// Compute the minimal bit width needed to represent a reduction whose exit
/// instruction is given by Exit.
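/// An illustrative (hypothetical) case: if the exit value is an i32 whose only
/// demanded bits are the low 8 (e.g. its sole external use is a
/// 'trunc i32 %sum to i8'), the recurrence can be evaluated in i8 and
/// zero-extended back to i32 after the loop.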
static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
                                                     DemandedBits *DB,
                                                     AssumptionCache *AC,
                                                     DominatorTree *DT) {
  bool IsSigned = false;
  const DataLayout &DL = Exit->getModule()->getDataLayout();
  uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());

  if (DB) {
    // Use the demanded bits analysis to determine the bits that are live out
    // of the exit instruction, rounding up to the nearest power of two. If the
    // use of demanded bits results in a smaller bit width, we know the value
    // must be positive (i.e., IsSigned = false), because if this were not the
    // case, the sign bit would have been demanded.
    auto Mask = DB->getDemandedBits(Exit);
    MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero();
  }

  if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
    // If demanded bits wasn't able to limit the bit width, we can try to use
    // value tracking instead. This can be the case, for example, if the value
    // may be negative.
    auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
    auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
    MaxBitWidth = NumTypeBits - NumSignBits;
    KnownBits Bits = computeKnownBits(Exit, DL);
    if (!Bits.isNonNegative()) {
      // If the value is not known to be non-negative, we set IsSigned to true,
      // meaning that we will use sext instructions instead of zext
      // instructions to restore the original type.
      IsSigned = true;
      // Make sure at least one sign bit is included in the result, so it
      // will get properly sign-extended.
      ++MaxBitWidth;
    }
  }
  MaxBitWidth = llvm::bit_ceil(MaxBitWidth);

  return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
                        IsSigned);
}

/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
/// reduction can be represented. Also search casts to the recurrence type
/// to find the minimum width used by the recurrence.
static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
                              Type *RecurrenceType,
                              SmallPtrSetImpl<Instruction *> &Casts,
                              unsigned &MinWidthCastToRecurTy) {

  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(Exit);
  MinWidthCastToRecurTy = -1U;

  while (!Worklist.empty()) {
    Instruction *Val = Worklist.pop_back_val();
    Visited.insert(Val);
    if (auto *Cast = dyn_cast<CastInst>(Val)) {
      if (Cast->getSrcTy() == RecurrenceType) {
        // If the source type of a cast instruction is equal to the recurrence
        // type, it will be eliminated, and should be ignored in the vectorizer
        // cost model.
        Casts.insert(Cast);
        continue;
      }
      if (Cast->getDestTy() == RecurrenceType) {
        // The minimum width used by the recurrence is found by checking for
        // casts on its operands. The minimum width is used by the vectorizer
        // when finding the widest type for in-loop reductions without any
        // loads/stores.
        MinWidthCastToRecurTy = std::min<unsigned>(
            MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
        continue;
      }
    }
    // Add all operands to the work list if they are loop-varying values that
    // we haven't yet visited.
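    // (Values defined outside the loop are loop-invariant and cannot be part
    // of the recurrence's cast chain, so they are intentionally skipped by
    // the TheLoop->contains(I) check below.)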
    for (Value *O : cast<User>(Val)->operands())
      if (auto *I = dyn_cast<Instruction>(O))
        if (TheLoop->contains(I) && !Visited.count(I))
          Worklist.push_back(I);
  }
}

// Check if a given Phi node can be recognized as an ordered reduction for
// vectorizing floating point operations without unsafe math.
static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
                                  Instruction *Exit, PHINode *Phi) {
  // Currently only FAdd and FMulAdd are supported.
  if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
    return false;

  if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
    return false;

  if (Kind == RecurKind::FMulAdd &&
      !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
    return false;

  // Ensure the exit instruction has only one user other than the reduction
  // PHI.
  if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
    return false;

  // The only pattern accepted is the one in which the reduction PHI is used
  // as one of the operands of the exit instruction.
  auto *Op0 = Exit->getOperand(0);
  auto *Op1 = Exit->getOperand(1);
  if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
    return false;
  if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
    return false;

  LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
                    << ", ExitInst: " << *Exit << "\n");

  return true;
}

bool RecurrenceDescriptor::AddReductionVar(
    PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
    RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
    DominatorTree *DT, ScalarEvolution *SE) {
  if (Phi->getNumIncomingValues() != 2)
    return false;

  // Reduction variables are only found in the loop header block.
  if (Phi->getParent() != TheLoop->getHeader())
    return false;

  // Obtain the reduction start value from the value that comes from the loop
  // preheader.
  Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

  // ExitInstruction is the single value which is used outside the loop.
  // We only allow a single reduction value to be used outside the loop. This
  // includes users of the reduction and the variables that form a cycle
  // ending in the phi node.
  Instruction *ExitInstruction = nullptr;

  // Variable to keep the last visited store instruction. By the end of the
  // algorithm this variable will either be empty or hold the intermediate
  // reduction value stored to an invariant address.
  StoreInst *IntermediateStore = nullptr;

  // Indicates that we found a reduction operation in our scan.
  bool FoundReduxOp = false;

  // We start with the PHI node and scan for all of the users of this
  // instruction. All users must be instructions that can be used as reduction
  // variables (such as ADD). We must have a single out-of-block user. The
  // cycle must include the original PHI.
  bool FoundStartPHI = false;

  // To recognize min/max patterns formed by an icmp/select sequence, we store
  // the number of instructions we saw from the recognized min/max pattern,
  // to make sure we see exactly those two instructions.
  unsigned NumCmpSelectPatternInst = 0;
  InstDesc ReduxDesc(false, nullptr);

  // Data used for determining if the recurrence has been type-promoted.
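  // (For example, an i8 reduction that InstCombine has widened to i32 and
  // masked with 'and %x, 255'; lookThroughAnd above recognizes that mask and
  // narrows RecurrenceType accordingly.)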
  Type *RecurrenceType = Phi->getType();
  SmallPtrSet<Instruction *, 4> CastInsts;
  unsigned MinWidthCastToRecurrenceType;
  Instruction *Start = Phi;
  bool IsSigned = false;

  SmallPtrSet<Instruction *, 8> VisitedInsts;
  SmallVector<Instruction *, 8> Worklist;

  // Return early if the recurrence kind does not match the type of Phi. If the
  // recurrence kind is arithmetic, we attempt to look through AND operations
  // resulting from the type promotion performed by InstCombine. Vector
  // operations are not limited to the legal integer widths, so we may be able
  // to evaluate the reduction in the narrower width.
  if (RecurrenceType->isFloatingPointTy()) {
    if (!isFloatingPointRecurrenceKind(Kind))
      return false;
  } else if (RecurrenceType->isIntegerTy()) {
    if (!isIntegerRecurrenceKind(Kind))
      return false;
    if (!isMinMaxRecurrenceKind(Kind))
      Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
  } else {
    // Pointer min/max may exist, but it is not supported as a reduction op.
    return false;
  }

  Worklist.push_back(Start);
  VisitedInsts.insert(Start);

  // Start with all flags set because we will intersect this with the reduction
  // flags from all the reduction operations.
  FastMathFlags FMF = FastMathFlags::getFast();

  // The first instruction in the use-def chain of the Phi node that requires
  // exact floating point operations.
  Instruction *ExactFPMathInst = nullptr;

  // A value in the reduction can be used:
  //  - By the reduction:
  //      - Reduction operation:
  //          - One use of the reduction value (safe).
  //          - Multiple uses of the reduction value (not safe).
  //      - PHI:
  //          - All uses of the PHI must be the reduction (safe).
  //          - Otherwise, not safe.
  //  - By instructions outside of the loop (safe).
  //      * One value may have several outside users, but all outside
  //        uses must be of the same value.
  //  - By store instructions with a loop-invariant address (safe with
  //    the following restrictions):
  //      * If there are several stores, all must have the same address.
  //      * The final value should be stored in that loop-invariant address.
  //  - By an instruction that is not part of the reduction (not safe).
  //    This is either:
  //      * An instruction type other than PHI or the reduction operation.
  //      * A PHI in the header other than the initial PHI.
  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();

    // Store instructions are allowed only if they store the reduction value
    // to the same loop-invariant memory location.
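    // (An illustrative source pattern, assuming a loop-invariant pointer 'p':
    //   for (...) { sum += a[i]; *p = sum; }
    // where the running reduction value is also kept in memory each
    // iteration.)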
    if (auto *SI = dyn_cast<StoreInst>(Cur)) {
      if (!SE) {
        LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
                          << "Scalar Evolution Analysis\n");
        return false;
      }

      const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
      // Check that it is the same address as previous stores.
      if (IntermediateStore) {
        const SCEV *OtherScev =
            SE->getSCEV(IntermediateStore->getPointerOperand());

        if (OtherScev != PtrScev) {
          LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
                            << "inside the loop: " << *SI->getPointerOperand()
                            << " and "
                            << *IntermediateStore->getPointerOperand() << '\n');
          return false;
        }
      }

      // Check that the pointer is loop invariant.
      if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
        LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
                          << "inside the loop: " << *SI->getPointerOperand()
                          << '\n');
        return false;
      }

      // IntermediateStore is always the last store in the loop.
      IntermediateStore = SI;
      continue;
    }

    // No users.
    // If the instruction has no users then this is a broken chain and can't be
    // a reduction variable.
    if (Cur->use_empty())
      return false;

    bool IsAPhi = isa<PHINode>(Cur);

    // A header PHI use other than the original PHI.
    if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
      return false;

    // Reductions of instructions such as Div and Sub are only possible if the
    // LHS is the reduction variable.
    if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
        !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
        !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
      return false;

    // Any reduction instruction must be of one of the allowed kinds. We ignore
    // the starting value (the Phi or an AND instruction if the Phi has been
    // type-promoted).
    if (Cur != Start) {
      ReduxDesc =
          isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
      ExactFPMathInst = ExactFPMathInst == nullptr
                            ? ReduxDesc.getExactFPMathInst()
                            : ExactFPMathInst;
      if (!ReduxDesc.isRecurrence())
        return false;
      // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
      if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
        FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
        if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
          // Accept FMF on either the fcmp or the select of a min/max idiom.
          // TODO: This is a hack to work around the fact that FMF may not be
          //       assigned/propagated correctly. If that problem is fixed or
          //       we standardize on fmin/fmax via intrinsics, this can be
          //       removed.
          if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
            CurFMF |= FCmp->getFastMathFlags();
        }
        FMF &= CurFMF;
      }
      // Update this reduction kind if we matched a new instruction.
      // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
      //       state accurate while processing the worklist?
      if (ReduxDesc.getRecKind() != RecurKind::None)
        Kind = ReduxDesc.getRecKind();
    }

    bool IsASelect = isa<SelectInst>(Cur);

    // A conditional reduction operation must only have two or fewer uses in
    // VisitedInsts.
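    // (Typically these operands are the reduction phi and the arithmetic
    // update feeding the select; see isConditionalRdxPattern below.)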
    if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
        hasMultipleUsesOf(Cur, VisitedInsts, 2))
      return false;

    // A reduction operation must only have one use of the reduction value.
    if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
        !isSelectCmpRecurrenceKind(Kind) &&
        hasMultipleUsesOf(Cur, VisitedInsts, 1))
      return false;

    // All inputs to a PHI node must be a reduction value.
    if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
      return false;

    if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectICmp) &&
        (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;
    if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::SelectFCmp) &&
        (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;

    // Check whether we found a reduction operator.
    FoundReduxOp |= !IsAPhi && Cur != Start;

    // Process users of the current instruction. Push non-PHI nodes after PHI
    // nodes onto the stack. This way we are going to have seen all inputs to
    // PHI nodes once we get to them.
    SmallVector<Instruction *, 8> NonPHIs;
    SmallVector<Instruction *, 8> PHIs;
    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);

      // If the user is a call to llvm.fmuladd then the instruction can only be
      // the final operand.
      if (isFMulAddIntrinsic(UI))
        if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
          return false;

      // Check if we found the exit user.
      BasicBlock *Parent = UI->getParent();
      if (!TheLoop->contains(Parent)) {
        // If we already know this instruction is used externally, move on to
        // the next user.
        if (ExitInstruction == Cur)
          continue;

        // Exit if we find multiple values used outside or if the header phi
        // node is being used. In this case the user uses the value of the
        // previous iteration, and we would lose "VF-1" iterations of the
        // reduction operation if we vectorize.
        if (ExitInstruction != nullptr || Cur == Phi)
          return false;

        // The instruction used by an outside user must be the last instruction
        // before we feed back to the reduction phi. Otherwise, we lose VF-1
        // operations on the value.
        if (!is_contained(Phi->operands(), Cur))
          return false;

        ExitInstruction = Cur;
        continue;
      }

      // Process instructions only once (termination). Each reduction cycle
      // value must only be used once, except by phi nodes and min/max
      // reductions which are represented as a cmp followed by a select.
      InstDesc IgnoredVal(false, nullptr);
      if (VisitedInsts.insert(UI).second) {
        if (isa<PHINode>(UI)) {
          PHIs.push_back(UI);
        } else {
          StoreInst *SI = dyn_cast<StoreInst>(UI);
          if (SI && SI->getPointerOperand() == Cur) {
            // The reduction variable chain can only be stored somewhere; it
            // can't be used as an address.
            return false;
          }
          NonPHIs.push_back(UI);
        }
      } else if (!isa<PHINode>(UI) &&
                 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
                   !isa<SelectInst>(UI)) ||
                  (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
                   !isSelectCmpPattern(TheLoop, Phi, UI, IgnoredVal)
                        .isRecurrence() &&
                   !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
        return false;

      // Remember that we completed the cycle.
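      // (The user is the original header phi, i.e. the use-def chain has led
      // back to where the scan started.)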
      if (UI == Phi)
        FoundStartPHI = true;
    }
    Worklist.append(PHIs.begin(), PHIs.end());
    Worklist.append(NonPHIs.begin(), NonPHIs.end());
  }

  // This means we have seen one but not the other instruction of the pattern,
  // or more than just a select and cmp. Zero implies that we saw an
  // llvm.min/max intrinsic, which is always OK.
  if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
      NumCmpSelectPatternInst != 0)
    return false;

  if (isSelectCmpRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
    return false;

  if (IntermediateStore) {
    // Check that the stored value goes to the phi node again. This way we make
    // sure that the value stored in IntermediateStore is indeed the final
    // reduction value.
    if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
      LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If there is an exit instruction, its value should be stored in
    // IntermediateStore.
    if (ExitInstruction &&
        IntermediateStore->getValueOperand() != ExitInstruction) {
      LLVM_DEBUG(dbgs() << "Last store instruction of reduction value does not "
                           "store the last calculated value of the reduction: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If all uses are inside the loop (intermediate stores), then the
    // reduction value after the loop will be the one used in the last store.
    if (!ExitInstruction)
      ExitInstruction = cast<Instruction>(IntermediateStore->getValueOperand());
  }

  if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
    return false;

  const bool IsOrdered =
      checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);

  if (Start != Phi) {
    // If the starting value is not the same as the phi node, we speculatively
    // looked through an 'and' instruction when evaluating a potential
    // arithmetic reduction to determine if it may have been type-promoted.
    //
    // We now compute the minimal bit width that is required to represent the
    // reduction. If this is the same width that was indicated by the 'and', we
    // can represent the reduction in the smaller type. The 'and' instruction
    // will be eliminated since it will essentially be a cast instruction that
    // can be ignored in the cost model. If we compute a different type than we
    // did when evaluating the 'and', the 'and' will not be eliminated, and we
    // will end up with different kinds of operations in the recurrence
    // expression (e.g., IntegerAND, IntegerADD). We give up if this is
    // the case.
    //
    // The vectorizer relies on InstCombine to perform the actual
    // type-shrinking. It does this by inserting instructions to truncate the
    // exit value of the reduction to the width indicated by RecurrenceType and
    // then extend this value back to the original width. If IsSigned is false,
    // a 'zext' instruction will be generated; otherwise, a 'sext' will be
    // used.
    //
    // TODO: We should not rely on InstCombine to rewrite the reduction in the
    //       smaller type. We should just generate a correctly typed expression
    //       to begin with.
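    // Illustrative shape after shrinking (assuming an i8-wide value promoted
    // to i32): the loop computes in i8, and the exit value is zero- or
    // sign-extended (per IsSigned) back to i32 after the loop.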
    Type *ComputedType;
    std::tie(ComputedType, IsSigned) =
        computeRecurrenceType(ExitInstruction, DB, AC, DT);
    if (ComputedType != RecurrenceType)
      return false;
  }

  // Collect cast instructions and the minimum width used by the recurrence.
  // If the starting value is not the same as the phi node and the computed
  // recurrence type is equal to the recurrence type, the recurrence expression
  // will be represented in a narrower or wider type. If there are any cast
  // instructions that will be unnecessary, collect them in CastsFromRecurTy.
  // Note that the 'and' instruction was already included in this list.
  //
  // TODO: A better way to represent this may be to tag in some way all the
  //       instructions that are a part of the reduction. The vectorizer cost
  //       model could then apply the recurrence type to these instructions,
  //       without needing a white list of instructions to ignore.
  //       This may also be useful for the in-loop reductions, if it can be
  //       kept simple enough.
  collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
                    MinWidthCastToRecurrenceType);

  // We found a reduction variable if we have reached the original phi node and
  // we only have a single instruction with out-of-loop users.

  // The ExitInstruction (the instruction which is allowed to have out-of-loop
  // users) is saved as part of the RecurrenceDescriptor.

  // Save the description of this reduction variable.
  RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
                          FMF, ExactFPMathInst, RecurrenceType, IsSigned,
                          IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
  RedDes = RD;

  return true;
}

// We are looking for loops that do something like this:
//   int r = 0;
//   for (int i = 0; i < n; i++) {
//     if (src[i] > 3)
//       r = 3;
//   }
// where the reduction value (r) only has two states, in this example 0 or 3.
// The generated LLVM IR for this type of loop will be like this:
//   for.body:
//     %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
//     ...
//     %cmp = icmp sgt i32 %5, 3
//     %spec.select = select i1 %cmp, i32 3, i32 %r
//     ...
// In general we can support vectorization of loops where 'r' flips between
// any two non-constants, provided they are loop invariant. The only thing
// we actually care about at the end of the loop is whether or not any lane
// in the selected vector is different from the start value. The final
// across-vector reduction after the loop simply involves choosing the start
// value if nothing changed (0 in the example above) or the other selected
// value (3 in the example above).
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
                                         Instruction *I, InstDesc &Prev) {
  // We must handle the select(cmp(),x,y) as a single instruction. Advance to
  // the select.
  CmpInst::Predicate Pred;
  if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
    if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
      return InstDesc(Select, Prev.getRecKind());
  }

  // Only match a select with a single-use cmp condition.
  if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
                         m_Value())))
    return InstDesc(false, I);

  SelectInst *SI = cast<SelectInst>(I);
  Value *NonPhi = nullptr;

  if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
    NonPhi = SI->getFalseValue();
  else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
    NonPhi = SI->getTrueValue();
  else
    return InstDesc(false, I);

  // We are looking for selects of the form:
  //   select(cmp(), phi, loop_invariant) or
  //   select(cmp(), loop_invariant, phi)
  if (!Loop->isLoopInvariant(NonPhi))
    return InstDesc(false, I);

  return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::SelectICmp
                                                     : RecurKind::SelectFCmp);
}

RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
                                      const InstDesc &Prev) {
  assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
         "Expected a cmp or select or call instruction");
  if (!isMinMaxRecurrenceKind(Kind))
    return InstDesc(false, I);

  // We must handle the select(cmp()) as a single instruction. Advance to the
  // select.
  CmpInst::Predicate Pred;
  if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
    if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
      return InstDesc(Select, Prev.getRecKind());
  }

  // Only match a select with a single-use cmp condition, or a min/max
  // intrinsic.
  if (!isa<IntrinsicInst>(I) &&
      !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
                         m_Value())))
    return InstDesc(false, I);

  // Look for a min/max pattern.
  if (match(I, m_UMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMin, I);
  if (match(I, m_UMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMax, I);
  if (match(I, m_SMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMax, I);
  if (match(I, m_SMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMin, I);
  if (match(I, m_OrdFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_OrdFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_UnordFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_UnordFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMinimum, I);
  if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMaximum, I);

  return InstDesc(false, I);
}

/// Returns true if the select instruction has users in the compare-and-add
/// reduction pattern below. The select instruction argument is the last one
/// in the sequence.
///
///   %sum.1 = phi ...
///   ...
///   %cmp = fcmp pred %0, %CFP
///   %add = fadd %0, %sum.1
///   %sum.2 = select %cmp, %add, %sum.1
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
  SelectInst *SI = dyn_cast<SelectInst>(I);
  if (!SI)
    return InstDesc(false, I);

  CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
  // Only handle single use cases for now.
  if (!CI || !CI->hasOneUse())
    return InstDesc(false, I);

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  // For now, handle only the case where exactly one of the select's operands
  // is a PHI node.
  if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
      (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
    return InstDesc(false, I);

  Instruction *I1 =
      isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
                             : dyn_cast<Instruction>(TrueVal);
  if (!I1 || !I1->isBinaryOp())
    return InstDesc(false, I);

  Value *Op1, *Op2;
  if (!(((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
         I1->isFast()) ||
        (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast())) ||
        ((m_Add(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_Sub(m_Value(Op1), m_Value(Op2)).match(I1))) ||
        (m_Mul(m_Value(Op1), m_Value(Op2)).match(I1))))
    return InstDesc(false, I);

  Instruction *IPhi = isa<PHINode>(*Op1) ? dyn_cast<Instruction>(Op1)
                                         : dyn_cast<Instruction>(Op2);
  if (!IPhi || IPhi != FalseVal)
    return InstDesc(false, I);

  return InstDesc(true, SI);
}

RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
                                        Instruction *I, RecurKind Kind,
                                        InstDesc &Prev, FastMathFlags FuncFMF) {
  assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
  switch (I->getOpcode()) {
  default:
    return InstDesc(false, I);
  case Instruction::PHI:
    return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
  case Instruction::Sub:
  case Instruction::Add:
    return InstDesc(Kind == RecurKind::Add, I);
  case Instruction::Mul:
    return InstDesc(Kind == RecurKind::Mul, I);
  case Instruction::And:
    return InstDesc(Kind == RecurKind::And, I);
  case Instruction::Or:
    return InstDesc(Kind == RecurKind::Or, I);
  case Instruction::Xor:
    return InstDesc(Kind == RecurKind::Xor, I);
  case Instruction::FDiv:
  case Instruction::FMul:
    return InstDesc(Kind == RecurKind::FMul, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::FSub:
  case Instruction::FAdd:
    return InstDesc(Kind == RecurKind::FAdd, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::Select:
    if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
        Kind == RecurKind::Add || Kind == RecurKind::Mul)
      return isConditionalRdxPattern(Kind, I);
    [[fallthrough]];
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Call:
    if (isSelectCmpRecurrenceKind(Kind))
      return isSelectCmpPattern(L, OrigPhi, I, Prev);
    auto HasRequiredFMF = [&]() {
      if (FuncFMF.noNaNs() && FuncFMF.noSignedZeros())
        return true;
      if (isa<FPMathOperator>(I) && I->hasNoNaNs() && I->hasNoSignedZeros())
        return true;
      // minimum and maximum intrinsics do not require nsz and nnan flags since
      // NaN and signed zeroes are propagated in the intrinsic implementation.
      return match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())) ||
             match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()));
    };
    if (isIntMinMaxRecurrenceKind(Kind) ||
        (HasRequiredFMF() && isFPMinMaxRecurrenceKind(Kind)))
      return isMinMaxPattern(I, Kind, Prev);
    else if (isFMulAddIntrinsic(I))
      return InstDesc(Kind == RecurKind::FMulAdd, I,
                      I->hasAllowReassoc() ? nullptr : I);
    return InstDesc(false, I);
  }
}

bool RecurrenceDescriptor::hasMultipleUsesOf(
    Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
    unsigned MaxNumUses) {
  unsigned NumUses = 0;
  for (const Use &U : I->operands()) {
    if (Insts.count(dyn_cast<Instruction>(U)))
      ++NumUses;
    if (NumUses > MaxNumUses)
      return true;
  }

  return false;
}

bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                          RecurrenceDescriptor &RedDes,
                                          DemandedBits *DB, AssumptionCache *AC,
                                          DominatorTree *DT,
                                          ScalarEvolution *SE) {
  BasicBlock *Header = TheLoop->getHeader();
  Function &F = *Header->getParent();
  FastMathFlags FMF;
  FMF.setNoNaNs(
      F.getFnAttribute("no-nans-fp-math").getValueAsBool());
  FMF.setNoSignedZeros(
      F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());

  if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SelectICmp, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
                      << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SelectFCmp, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
                      << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMaximum, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MAXIMUM reduction PHI." << *Phi
                      << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMinimum, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MINIMUM reduction PHI." << *Phi
                      << "\n");
    return true;
  }
  // Not a reduction of known type.
  return false;
}

bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
                                                  DominatorTree *DT) {

  // Ensure the phi node is in the loop header and has two incoming values.
  if (Phi->getParent() != TheLoop->getHeader() ||
      Phi->getNumIncomingValues() != 2)
    return false;

  // Ensure the loop has a preheader and a single latch block. The loop
  // vectorizer will need the latch to set up the next iteration of the loop.
  auto *Preheader = TheLoop->getLoopPreheader();
  auto *Latch = TheLoop->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  // Ensure the phi node's incoming blocks are the loop preheader and latch.
  if (Phi->getBasicBlockIndex(Preheader) < 0 ||
      Phi->getBasicBlockIndex(Latch) < 0)
    return false;

  // Get the previous value. The previous value comes from the latch edge while
  // the initial value comes from the preheader edge.
  auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));

  // If Previous is a phi in the header, go through incoming values from the
  // latch until we find a non-phi value. Use this as the new Previous; all
  // uses in the header will be dominated by the original phi, but need to be
  // moved after the non-phi previous value.
  SmallPtrSet<PHINode *, 4> SeenPhis;
  while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
    if (PrevPhi->getParent() != Phi->getParent())
      return false;
    if (!SeenPhis.insert(PrevPhi).second)
      return false;
    Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
  }

  if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
    return false;

  // Ensure every user of the phi node (recursively) is dominated by the
  // previous value. The dominance requirement ensures the loop vectorizer will
  // not need to vectorize the initial value prior to the first iteration of
  // the loop.
  // TODO: Consider extending this sinking to handle memory instructions.
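  //
  // An illustrative fixed-order recurrence (hypothetical source):
  //   for (...) { use(t); t = a[i]; }
  // The header phi for 't' yields the value computed in the previous
  // iteration, and every user of the phi must be sinkable below that value.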

  SmallPtrSet<Value *, 8> Seen;
  BasicBlock *PhiBB = Phi->getParent();
  SmallVector<Instruction *, 8> WorkList;
  auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
    // Cyclic dependence.
    if (Previous == SinkCandidate)
      return false;

    if (!Seen.insert(SinkCandidate).second)
      return true;
    if (DT->dominates(Previous,
                      SinkCandidate)) // We already are good w/o sinking.
      return true;

    if (SinkCandidate->getParent() != PhiBB ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
      return false;

    // If we reach a PHI node that is not dominated by Previous, we reached a
    // header PHI. No need for sinking.
    if (isa<PHINode>(SinkCandidate))
      return true;

    // Sink User tentatively and check its users.
    WorkList.push_back(SinkCandidate);
    return true;
  };

  WorkList.push_back(Phi);
  // Try to recursively sink instructions and their users after Previous.
  while (!WorkList.empty()) {
    Instruction *Current = WorkList.pop_back_val();
    for (User *User : Current->users()) {
      if (!TryToPushSinkCandidate(cast<Instruction>(User)))
        return false;
    }
  }

  return true;
}

/// This function returns the identity element (or neutral element) for
/// the operation K.
Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
                                                   FastMathFlags FMF) const {
  switch (K) {
  case RecurKind::Xor:
  case RecurKind::Add:
  case RecurKind::Or:
    // Adding, Xoring, Oring zero to a number does not change it.
    return ConstantInt::get(Tp, 0);
  case RecurKind::Mul:
    // Multiplying a number by 1 does not change it.
    return ConstantInt::get(Tp, 1);
  case RecurKind::And:
    // AND-ing a number with an all-1 value does not change it.
    return ConstantInt::get(Tp, -1, true);
  case RecurKind::FMul:
    // Multiplying a number by 1 does not change it.
    return ConstantFP::get(Tp, 1.0L);
  case RecurKind::FMulAdd:
  case RecurKind::FAdd:
    // Adding zero to a number does not change it.
    // FIXME: Ideally we should not need to check FMF for FAdd and should
    // always use -0.0. However, this will currently result in mixed vectors
    // of 0.0/-0.0. Instead, we should ensure that 1) the FMF from FAdd are
    // propagated to the PHI nodes where possible, and 2) PHIs with the nsz
    // flag + -0.0 use 0.0. This would mean we can then remove the check for
    // noSignedZeros() below (see D98963).
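    // (Without nsz the identity must be -0.0: in IEEE-754, -0.0 + x == x for
    // every x, including x == +0.0, whereas +0.0 + -0.0 == +0.0.)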
    if (FMF.noSignedZeros())
      return ConstantFP::get(Tp, 0.0L);
    return ConstantFP::get(Tp, -0.0L);
  case RecurKind::UMin:
    return ConstantInt::get(Tp, -1, true);
  case RecurKind::UMax:
    return ConstantInt::get(Tp, 0);
  case RecurKind::SMin:
    return ConstantInt::get(Tp,
                            APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
  case RecurKind::SMax:
    return ConstantInt::get(Tp,
                            APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
  case RecurKind::FMin:
    assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
           "nnan, nsz is expected to be set for FP min reduction.");
    return ConstantFP::getInfinity(Tp, false /*Negative*/);
  case RecurKind::FMax:
    assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
           "nnan, nsz is expected to be set for FP max reduction.");
    return ConstantFP::getInfinity(Tp, true /*Negative*/);
  case RecurKind::FMinimum:
    return ConstantFP::getInfinity(Tp, false /*Negative*/);
  case RecurKind::FMaximum:
    return ConstantFP::getInfinity(Tp, true /*Negative*/);
  case RecurKind::SelectICmp:
  case RecurKind::SelectFCmp:
    return getRecurrenceStartValue();
  default:
    llvm_unreachable("Unknown recurrence kind");
  }
}

unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
  switch (Kind) {
  case RecurKind::Add:
    return Instruction::Add;
  case RecurKind::Mul:
    return Instruction::Mul;
  case RecurKind::Or:
    return Instruction::Or;
  case RecurKind::And:
    return Instruction::And;
  case RecurKind::Xor:
    return Instruction::Xor;
  case RecurKind::FMul:
    return Instruction::FMul;
  case RecurKind::FMulAdd:
  case RecurKind::FAdd:
    return Instruction::FAdd;
  case RecurKind::SMax:
  case RecurKind::SMin:
  case RecurKind::UMax:
  case RecurKind::UMin:
  case RecurKind::SelectICmp:
    return Instruction::ICmp;
  case RecurKind::FMax:
  case RecurKind::FMin:
  case RecurKind::FMaximum:
  case RecurKind::FMinimum:
  case RecurKind::SelectFCmp:
    return Instruction::FCmp;
  default:
    llvm_unreachable("Unknown recurrence operation");
  }
}

SmallVector<Instruction *, 4>
RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
  SmallVector<Instruction *, 4> ReductionOperations;
  unsigned RedOp = getOpcode(Kind);

  // Search down from the Phi to the LoopExitInstr, looking for instructions
  // with a single user of the correct type for the reduction.

  // Note that we check that the type of the operand is correct for each item
  // in the chain, including the last (the loop exit value). This can come up
  // from sub, which would otherwise be treated as an add reduction. MinMax
  // also needs to check for a pair of icmp/select, for which we use the
  // getNextInstruction and isCorrectOpcode functions to step the right number
  // of instructions and check the icmp/select pair.
  // FIXME: We also do not attempt to look through Selects yet, which might be
  // part of the reduction chain, or attempt to look through Ands to find a
  // smaller bitwidth. Subs are also currently not allowed (they are usually
  // treated as part of an add reduction) as they are expected to generally be
  // more expensive than out-of-loop reductions, and need to be costed more
  // carefully.
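  //
  // An illustrative chain for an integer add reduction (hypothetical IR):
  //   %phi  = phi i32 [ 0, %ph ], [ %add2, %loop ]
  //   %add1 = add i32 %phi, %x
  //   %add2 = add i32 %add1, %y
  // Each link has a single in-loop use; for min/max kinds each link is an
  // icmp/select pair, so two uses are expected per value.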
  unsigned ExpectedUses = 1;
  if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
    ExpectedUses = 2;

  auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
    for (auto *User : Cur->users()) {
      Instruction *UI = cast<Instruction>(User);
      if (isa<PHINode>(UI))
        continue;
      if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
        // We are expecting an icmp/select pair, so advance to the next select
        // instruction if we can. We already know that Cur has 2 uses.
        if (isa<SelectInst>(UI))
          return UI;
        continue;
      }
      return UI;
    }
    return nullptr;
  };
  auto isCorrectOpcode = [&](Instruction *Cur) {
    if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
      Value *LHS, *RHS;
      return SelectPatternResult::isMinOrMax(
          matchSelectPattern(Cur, LHS, RHS).Flavor);
    }
    // Recognize a call to the llvm.fmuladd intrinsic.
    if (isFMulAddIntrinsic(Cur))
      return true;

    return Cur->getOpcode() == RedOp;
  };

  // Attempt to look through Phis which are part of the reduction chain.
  unsigned ExtraPhiUses = 0;
  Instruction *RdxInstr = LoopExitInstr;
  if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
    if (ExitPhi->getNumIncomingValues() != 2)
      return {};

    Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
    Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));

    Instruction *Chain = nullptr;
    if (Inc0 == Phi)
      Chain = Inc1;
    else if (Inc1 == Phi)
      Chain = Inc0;
    else
      return {};

    RdxInstr = Chain;
    ExtraPhiUses = 1;
  }

  // We check the loop exit instruction first (as a quick test) but add it
  // last. We check that the opcode is correct (and don't allow it to be a Sub)
  // and that it has the expected number of uses. It will have one use from
  // the phi and one from an LCSSA value, no matter the type.
  if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
    return {};

  // Check that the Phi has one (or two for min/max) uses, plus an extra use
  // for conditional reductions.
  if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
    return {};

  Instruction *Cur = getNextInstruction(Phi);

  // Each other instruction in the chain should have the expected number of
  // uses and be the correct opcode.
  while (Cur != RdxInstr) {
    if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
      return {};

    ReductionOperations.push_back(Cur);
    Cur = getNextInstruction(Cur);
  }

  ReductionOperations.push_back(Cur);
  return ReductionOperations;
}

InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
                                         const SCEV *Step, BinaryOperator *BOp,
                                         SmallVectorImpl<Instruction *> *Casts)
    : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
  assert(IK != IK_NoInduction && "Not an induction");

  // Start value type should match the induction kind and the value
  // itself should not be null.
  assert(StartValue && "StartValue is null");
  assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
         "StartValue is not a pointer for pointer induction");
  assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
         "StartValue is not an integer for integer induction");

  // Check the Step Value. It should be a non-zero integer value.
  assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
         "Step value is zero");

  assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
         "StepValue is not an integer");

  assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
         "StepValue is not FP for FpInduction");
  assert((IK != IK_FpInduction ||
          (InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub))) &&
         "Binary opcode should be specified for FP induction");

  if (Casts) {
    for (auto &Inst : *Casts) {
      RedundantCasts.push_back(Inst);
    }
  }
}

ConstantInt *InductionDescriptor::getConstIntStepValue() const {
  if (isa<SCEVConstant>(Step))
    return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
  return nullptr;
}

bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                           ScalarEvolution *SE,
                                           InductionDescriptor &D) {

  // Here we only handle FP induction variables.
  assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");

  if (TheLoop->getHeader() != Phi->getParent())
    return false;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi if it has a unique entry value and a unique backedge value.
  if (Phi->getNumIncomingValues() != 2)
    return false;
  Value *BEValue = nullptr, *StartValue = nullptr;
  if (TheLoop->contains(Phi->getIncomingBlock(0))) {
    BEValue = Phi->getIncomingValue(0);
    StartValue = Phi->getIncomingValue(1);
  } else {
    assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
           "Unexpected Phi node in the loop");
    BEValue = Phi->getIncomingValue(1);
    StartValue = Phi->getIncomingValue(0);
  }

  BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
  if (!BOp)
    return false;

  Value *Addend = nullptr;
  if (BOp->getOpcode() == Instruction::FAdd) {
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);
    else if (BOp->getOperand(1) == Phi)
      Addend = BOp->getOperand(0);
  } else if (BOp->getOpcode() == Instruction::FSub)
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);

  if (!Addend)
    return false;

  // The addend should be loop invariant.
  if (auto *I = dyn_cast<Instruction>(Addend))
    if (TheLoop->contains(I))
      return false;

  // The FP step has an unknown SCEV.
  const SCEV *Step = SE->getUnknown(Addend);
  D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
  return true;
}

/// This function is called when we suspect that the update-chain of a phi node
/// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts
/// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
/// predicate P under which the SCEV expression for the phi can be the
/// AddRecurrence \p AR; see createAddRecFromPHIWithCast.) We want to find the
/// cast instructions that are involved in the update-chain of this induction.
/// A caller that adds the required runtime predicate is then free to drop
/// these cast instructions and compute the phi using \p AR (instead of some
/// SCEV expression with casts).
///
/// For example, without a predicate the SCEV expression can take the following
/// form:
///   (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
///
/// It corresponds to the following IR sequence:
///   %for.body:
///     %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
///     %casted_phi = "ExtTrunc i64 %x"
///     %add = add i64 %casted_phi, %step
///
/// where %x is given in \p PN,
/// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
/// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
/// several forms, for example:
///   ExtTrunc1: %casted_phi = and %x, 2^n-1
/// or:
///   ExtTrunc2: %t = shl %x, m
///              %casted_phi = ashr %t, m
///
/// If we are able to find such a sequence, we return the instructions
/// we found, namely %casted_phi and the instructions on its use-def chain up
/// to the phi (not including the phi).
static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
                                    const SCEVUnknown *PhiScev,
                                    const SCEVAddRecExpr *AR,
                                    SmallVectorImpl<Instruction *> &CastInsts) {

  assert(CastInsts.empty() && "CastInsts is expected to be empty.");
  auto *PN = cast<PHINode>(PhiScev->getValue());
  assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
  const Loop *L = AR->getLoop();

  // Find any cast instructions that participate in the def-use chain of
  // PhiScev in the loop.
  // FORNOW/TODO: We currently expect the def-use chain to include only
  // two-operand instructions, where one of the operands is an invariant.
  // createAddRecFromPHIWithCasts() currently does not support anything more
  // involved than that, so we keep the search simple. This can be
  // extended/generalized as needed.

  auto getDef = [&](const Value *Val) -> Value * {
    const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
    if (!BinOp)
      return nullptr;
    Value *Op0 = BinOp->getOperand(0);
    Value *Op1 = BinOp->getOperand(1);
    Value *Def = nullptr;
    if (L->isLoopInvariant(Op0))
      Def = Op1;
    else if (L->isLoopInvariant(Op1))
      Def = Op0;
    return Def;
  };

  // Look for the instruction that defines the induction via the
  // loop backedge.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;
  Value *Val = PN->getIncomingValueForBlock(Latch);
  if (!Val)
    return false;

  // Follow the def-use chain until the induction phi is reached.
  // If on the way we encounter a Value that has the same SCEV Expr as the
  // phi node, we can consider the instructions we visit from that point
  // as part of the cast-sequence that can be ignored.
  bool InCastSequence = false;
  auto *Inst = dyn_cast<Instruction>(Val);
  while (Val != PN) {
    // If we encountered a phi node other than PN, or if we left the loop,
    // we bail out.
    if (!Inst || !L->contains(Inst)) {
      return false;
    }
    auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
    if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
      InCastSequence = true;
    if (InCastSequence) {
      // Only the last instruction in the cast sequence is expected to have
      // uses outside the induction def-use chain.
      if (!CastInsts.empty())
        if (!Inst->hasOneUse())
          return false;
      CastInsts.push_back(Inst);
    }
    Val = getDef(Val);
    if (!Val)
      return false;
    Inst = dyn_cast<Instruction>(Val);
  }

  return InCastSequence;
}

bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                         PredicatedScalarEvolution &PSE,
                                         InductionDescriptor &D, bool Assume) {
  Type *PhiTy = Phi->getType();

  // Handle integer and pointer induction variables. We also handle FP
  // induction, but without trying to make a recurrent expression from the
  // PHI node in place.

  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
      !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
    return false;

  if (PhiTy->isFloatingPointTy())
    return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);

  const SCEV *PhiScev = PSE.getSCEV(Phi);
  const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  // We need this expression to be an AddRecExpr.
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Phi);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  // Record any cast instructions that participate in the induction update.
  const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
  // If we started from an UnknownSCEV, and managed to build an AddRecurrence
  // only after enabling Assume with PSCEV, this means we may have encountered
  // cast instructions that required adding a runtime check in order to
  // guarantee the correctness of the AddRecurrence representation of the
  // induction.
  if (PhiScev != AR && SymbolicPhi) {
    SmallVector<Instruction *, 2> Casts;
    if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
      return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
  }

  return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
}

bool InductionDescriptor::isInductionPHI(
    PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
    InductionDescriptor &D, const SCEV *Expr,
    SmallVectorImpl<Instruction *> *CastsToIgnore) {
  Type *PhiTy = Phi->getType();
  // We only handle integer and pointer induction variables.
  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
    return false;

  // Check that the PHI is consecutive.
  const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  if (AR->getLoop() != TheLoop) {
    // FIXME: We should treat this as a uniform. Unfortunately, we
    // don't currently know how to handle uniform PHIs.
    LLVM_DEBUG(
        dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
    return false;
  }

  // This function assumes that it is called only on Phi nodes present inside
  // loop headers, so assert that the current Phi is in the loop header.
  assert(Phi->getParent() == AR->getLoop()->getHeader() &&
         "Invalid Phi node, not present in loop header");

  Value *StartValue =
      Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());

  BasicBlock *Latch = AR->getLoop()->getLoopLatch();
  if (!Latch)
    return false;

  const SCEV *Step = AR->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  // The stride may be a constant or a loop invariant integer value.
  const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
  if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
    return false;

  if (PhiTy->isIntegerTy()) {
    BinaryOperator *BOp =
        dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
    D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
                            CastsToIgnore);
    return true;
  }

  assert(PhiTy->isPointerTy() && "The PHI must be a pointer");

  // This allows induction variables w/non-constant steps.
  D = InductionDescriptor(StartValue, IK_PtrInduction, Step);
  return true;
}