//===- llvm/Analysis/IVDescriptors.cpp - IndVar Descriptors -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file "describes" induction and recurrence variables.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "iv-descriptors"

bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
                                        SmallPtrSetImpl<Instruction *> &Set) {
  for (const Use &Use : I->operands())
    if (!Set.count(dyn_cast<Instruction>(Use)))
      return false;
  return true;
}

bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurKind Kind) {
  switch (Kind) {
  default:
    break;
  case RecurKind::Add:
  case RecurKind::Mul:
  case RecurKind::Or:
  case RecurKind::And:
  case RecurKind::Xor:
  case RecurKind::SMax:
  case RecurKind::SMin:
  case RecurKind::UMax:
  case RecurKind::UMin:
  case RecurKind::IAnyOf:
  case RecurKind::FAnyOf:
    return true;
  }
  return false;
}

bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurKind Kind) {
  return (Kind != RecurKind::None) && !isIntegerRecurrenceKind(Kind);
}

/// Determines if Phi may have been type-promoted. If Phi has a single user
/// that ANDs the Phi with a type mask, return the user. RT is updated to
/// account for the narrower bit width represented by the mask, and the AND
/// instruction is added to CI.
static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
                                   SmallPtrSetImpl<Instruction *> &Visited,
                                   SmallPtrSetImpl<Instruction *> &CI) {
  if (!Phi->hasOneUse())
    return Phi;

  const APInt *M = nullptr;
  Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());

  // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
  // with a new integer type of the corresponding bit width.
  if (match(J, m_c_And(m_Instruction(I), m_APInt(M)))) {
    int32_t Bits = (*M + 1).exactLogBase2();
    if (Bits > 0) {
      RT = IntegerType::get(Phi->getContext(), Bits);
      Visited.insert(Phi);
      CI.insert(J);
      return J;
    }
  }
  return Phi;
}

/// Compute the minimal bit width needed to represent a reduction whose exit
/// instruction is given by Exit.
static std::pair<Type *, bool> computeRecurrenceType(Instruction *Exit,
                                                     DemandedBits *DB,
                                                     AssumptionCache *AC,
                                                     DominatorTree *DT) {
  bool IsSigned = false;
  const DataLayout &DL = Exit->getModule()->getDataLayout();
  uint64_t MaxBitWidth = DL.getTypeSizeInBits(Exit->getType());

  if (DB) {
    // Use the demanded bits analysis to determine the bits that are live out
    // of the exit instruction, rounding up to the nearest power of two.
    // If the use of demanded bits results in a smaller bit width, we know the
    // value must be positive (i.e., IsSigned = false), because if this were
    // not the case, the sign bit would have been demanded.
    auto Mask = DB->getDemandedBits(Exit);
    MaxBitWidth = Mask.getBitWidth() - Mask.countl_zero();
  }

  if (MaxBitWidth == DL.getTypeSizeInBits(Exit->getType()) && AC && DT) {
    // If demanded bits wasn't able to limit the bit width, we can try to use
    // value tracking instead. This can be the case, for example, if the value
    // may be negative.
    auto NumSignBits = ComputeNumSignBits(Exit, DL, 0, AC, nullptr, DT);
    auto NumTypeBits = DL.getTypeSizeInBits(Exit->getType());
    MaxBitWidth = NumTypeBits - NumSignBits;
    KnownBits Bits = computeKnownBits(Exit, DL);
    if (!Bits.isNonNegative()) {
      // If the value is not known to be non-negative, we set IsSigned to
      // true, meaning that we will use sext instructions instead of zext
      // instructions to restore the original type.
      IsSigned = true;
      // Make sure at least one sign bit is included in the result, so it
      // will get properly sign-extended.
      ++MaxBitWidth;
    }
  }
  MaxBitWidth = llvm::bit_ceil(MaxBitWidth);

  return std::make_pair(Type::getIntNTy(Exit->getContext(), MaxBitWidth),
                        IsSigned);
}

/// Collect cast instructions that can be ignored in the vectorizer's cost
/// model, given a reduction exit value and the minimal type in which the
/// reduction can be represented. Also search casts to the recurrence type
/// to find the minimum width used by the recurrence.
static void collectCastInstrs(Loop *TheLoop, Instruction *Exit,
                              Type *RecurrenceType,
                              SmallPtrSetImpl<Instruction *> &Casts,
                              unsigned &MinWidthCastToRecurTy) {

  SmallVector<Instruction *, 8> Worklist;
  SmallPtrSet<Instruction *, 8> Visited;
  Worklist.push_back(Exit);
  MinWidthCastToRecurTy = -1U;

  while (!Worklist.empty()) {
    Instruction *Val = Worklist.pop_back_val();
    Visited.insert(Val);
    if (auto *Cast = dyn_cast<CastInst>(Val)) {
      if (Cast->getSrcTy() == RecurrenceType) {
        // If the source type of a cast instruction is equal to the recurrence
        // type, it will be eliminated, and should be ignored in the vectorizer
        // cost model.
        Casts.insert(Cast);
        continue;
      }
      if (Cast->getDestTy() == RecurrenceType) {
        // The minimum width used by the recurrence is found by checking for
        // casts on its operands. The minimum width is used by the vectorizer
        // when finding the widest type for in-loop reductions without any
        // loads/stores.
        MinWidthCastToRecurTy = std::min<unsigned>(
            MinWidthCastToRecurTy, Cast->getSrcTy()->getScalarSizeInBits());
        continue;
      }
    }
    // Add all operands to the work list if they are loop-varying values that
    // we haven't yet visited.
    for (Value *O : cast<User>(Val)->operands())
      if (auto *I = dyn_cast<Instruction>(O))
        if (TheLoop->contains(I) && !Visited.count(I))
          Worklist.push_back(I);
  }
}

// Check if a given Phi node can be recognized as an ordered reduction for
// vectorizing floating point operations without unsafe math.
static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
                                  Instruction *Exit, PHINode *Phi) {
  // Currently only FAdd and FMulAdd are supported.
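  // As an illustration (the value names below are only examples), the
  // accepted exit-instruction shapes look like:
  //   %sum.next = fadd float %x, %sum.phi
  // or
  //   %sum.next = call float @llvm.fmuladd.f32(float %a, float %b,
  //                                            float %sum.phi)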
  if (Kind != RecurKind::FAdd && Kind != RecurKind::FMulAdd)
    return false;

  if (Kind == RecurKind::FAdd && Exit->getOpcode() != Instruction::FAdd)
    return false;

  if (Kind == RecurKind::FMulAdd &&
      !RecurrenceDescriptor::isFMulAddIntrinsic(Exit))
    return false;

  // Ensure the exit instruction has only one user other than the reduction
  // PHI.
  if (Exit != ExactFPMathInst || Exit->hasNUsesOrMore(3))
    return false;

  // The only pattern accepted is the one in which the reduction PHI is used
  // as one of the operands of the exit instruction.
  auto *Op0 = Exit->getOperand(0);
  auto *Op1 = Exit->getOperand(1);
  if (Kind == RecurKind::FAdd && Op0 != Phi && Op1 != Phi)
    return false;
  if (Kind == RecurKind::FMulAdd && Exit->getOperand(2) != Phi)
    return false;

  LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
                    << ", ExitInst: " << *Exit << "\n");

  return true;
}

bool RecurrenceDescriptor::AddReductionVar(
    PHINode *Phi, RecurKind Kind, Loop *TheLoop, FastMathFlags FuncFMF,
    RecurrenceDescriptor &RedDes, DemandedBits *DB, AssumptionCache *AC,
    DominatorTree *DT, ScalarEvolution *SE) {
  if (Phi->getNumIncomingValues() != 2)
    return false;

  // Reduction variables are only found in the loop header block.
  if (Phi->getParent() != TheLoop->getHeader())
    return false;

  // Obtain the reduction start value from the value that comes from the loop
  // preheader.
  Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

  // ExitInstruction is the single value which is used outside the loop.
  // We only allow for a single reduction value to be used outside the loop.
  // This includes users of the reduction and the variables that form a cycle
  // ending in the phi node.
  Instruction *ExitInstruction = nullptr;

  // Variable to keep track of the last visited store instruction. By the end
  // of the algorithm it will either be null or hold the intermediate
  // reduction value stored to a loop-invariant address.
  StoreInst *IntermediateStore = nullptr;

  // Indicates that we found a reduction operation in our scan.
  bool FoundReduxOp = false;

  // We start with the PHI node and scan for all of the users of this
  // instruction. All users must be instructions that can be used as reduction
  // variables (such as ADD). We must have a single out-of-block user. The
  // cycle must include the original PHI.
  bool FoundStartPHI = false;

  // To recognize min/max patterns formed by an icmp/select sequence, we store
  // the number of instructions we saw from the recognized min/max pattern,
  // to make sure we see exactly the two instructions and nothing else.
  unsigned NumCmpSelectPatternInst = 0;
  InstDesc ReduxDesc(false, nullptr);

  // Data used for determining if the recurrence has been type-promoted.
  Type *RecurrenceType = Phi->getType();
  SmallPtrSet<Instruction *, 4> CastInsts;
  unsigned MinWidthCastToRecurrenceType;
  Instruction *Start = Phi;
  bool IsSigned = false;

  SmallPtrSet<Instruction *, 8> VisitedInsts;
  SmallVector<Instruction *, 8> Worklist;

  // Return early if the recurrence kind does not match the type of Phi. If the
  // recurrence kind is arithmetic, we attempt to look through AND operations
  // resulting from the type promotion performed by InstCombine.
  // Vector operations are not limited to the legal integer widths, so we may
  // be able to evaluate the reduction in the narrower width.
  if (RecurrenceType->isFloatingPointTy()) {
    if (!isFloatingPointRecurrenceKind(Kind))
      return false;
  } else if (RecurrenceType->isIntegerTy()) {
    if (!isIntegerRecurrenceKind(Kind))
      return false;
    if (!isMinMaxRecurrenceKind(Kind))
      Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
  } else {
    // Pointer min/max may exist, but it is not supported as a reduction op.
    return false;
  }

  Worklist.push_back(Start);
  VisitedInsts.insert(Start);

  // Start with all flags set because we will intersect this with the reduction
  // flags from all the reduction operations.
  FastMathFlags FMF = FastMathFlags::getFast();

  // The first instruction in the use-def chain of the Phi node that requires
  // exact floating point operations.
  Instruction *ExactFPMathInst = nullptr;

  // A value in the reduction can be used:
  //  - By the reduction:
  //      - Reduction operation:
  //        - One use of the reduction value (safe).
  //        - Multiple uses of the reduction value (not safe).
  //      - PHI:
  //        - All uses of the PHI must be the reduction (safe).
  //        - Otherwise, not safe.
  //  - By instructions outside of the loop (safe).
  //      * One value may have several outside users, but all outside
  //        uses must be of the same value.
  //  - By store instructions with a loop-invariant address (safe with
  //    the following restrictions):
  //      * If there are several stores, all must have the same address.
  //      * The final value should be stored to that loop-invariant address.
  //  - By an instruction that is not part of the reduction (not safe).
  //    This is either:
  //      * An instruction type other than PHI or the reduction operation.
  //      * A PHI in the header other than the initial PHI.
  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.pop_back_val();

    // Store instructions are allowed iff they store the reduction value to
    // the same loop-invariant memory location.
    if (auto *SI = dyn_cast<StoreInst>(Cur)) {
      if (!SE) {
        LLVM_DEBUG(dbgs() << "Store instructions are not processed without "
                          << "Scalar Evolution Analysis\n");
        return false;
      }

      const SCEV *PtrScev = SE->getSCEV(SI->getPointerOperand());
      // Check that it is the same address as the previous stores.
      if (IntermediateStore) {
        const SCEV *OtherScev =
            SE->getSCEV(IntermediateStore->getPointerOperand());

        if (OtherScev != PtrScev) {
          LLVM_DEBUG(dbgs() << "Storing reduction value to different addresses "
                            << "inside the loop: " << *SI->getPointerOperand()
                            << " and "
                            << *IntermediateStore->getPointerOperand() << '\n');
          return false;
        }
      }

      // Check that the pointer is loop invariant.
      if (!SE->isLoopInvariant(PtrScev, TheLoop)) {
        LLVM_DEBUG(dbgs() << "Storing reduction value to non-uniform address "
                          << "inside the loop: " << *SI->getPointerOperand()
                          << '\n');
        return false;
      }

      // IntermediateStore is always the last store in the loop.
      IntermediateStore = SI;
      continue;
    }

    // No users.
    // If the instruction has no users then this is a broken chain and can't be
    // a reduction variable.
    if (Cur->use_empty())
      return false;

    bool IsAPhi = isa<PHINode>(Cur);

    // A header PHI use other than the original PHI.
    if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
      return false;

    // Reductions of instructions such as Div and Sub are only possible if the
    // LHS is the reduction variable.
    if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
        !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
        !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
      return false;

    // Any reduction instruction must be of one of the allowed kinds. We ignore
    // the starting value (the Phi or an AND instruction if the Phi has been
    // type-promoted).
    if (Cur != Start) {
      ReduxDesc =
          isRecurrenceInstr(TheLoop, Phi, Cur, Kind, ReduxDesc, FuncFMF);
      ExactFPMathInst = ExactFPMathInst == nullptr
                            ? ReduxDesc.getExactFPMathInst()
                            : ExactFPMathInst;
      if (!ReduxDesc.isRecurrence())
        return false;
      // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
      if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
        FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
        if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
          // Accept FMF on either the fcmp or the select of a min/max idiom.
          // TODO: This is a hack to work around the fact that FMF may not be
          //       assigned/propagated correctly. If that problem is fixed or
          //       we standardize on fmin/fmax via intrinsics, this can be
          //       removed.
          if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
            CurFMF |= FCmp->getFastMathFlags();
        }
        FMF &= CurFMF;
      }
      // Update this reduction kind if we matched a new instruction.
      // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
      //       state accurate while processing the worklist?
      if (ReduxDesc.getRecKind() != RecurKind::None)
        Kind = ReduxDesc.getRecKind();
    }

    bool IsASelect = isa<SelectInst>(Cur);

    // A conditional reduction operation must only have two or fewer uses in
    // VisitedInsts.
    if (IsASelect && (Kind == RecurKind::FAdd || Kind == RecurKind::FMul) &&
        hasMultipleUsesOf(Cur, VisitedInsts, 2))
      return false;

    // A reduction operation must only have one use of the reduction value.
    if (!IsAPhi && !IsASelect && !isMinMaxRecurrenceKind(Kind) &&
        !isAnyOfRecurrenceKind(Kind) && hasMultipleUsesOf(Cur, VisitedInsts, 1))
      return false;

    // All inputs to a PHI node must be a reduction value.
    if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
      return false;

    if ((isIntMinMaxRecurrenceKind(Kind) || Kind == RecurKind::IAnyOf) &&
        (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;
    if ((isFPMinMaxRecurrenceKind(Kind) || Kind == RecurKind::FAnyOf) &&
        (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;

    // Check whether we found a reduction operator.
    FoundReduxOp |= !IsAPhi && Cur != Start;

    // Process users of the current instruction. Push non-PHI nodes after PHI
    // nodes onto the stack. This way we are going to have seen all inputs to
    // PHI nodes once we get to them.
    SmallVector<Instruction *, 8> NonPHIs;
    SmallVector<Instruction *, 8> PHIs;
    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);

      // If the user is a call to llvm.fmuladd then the instruction can only be
      // the final operand.
      if (isFMulAddIntrinsic(UI))
        if (Cur == UI->getOperand(0) || Cur == UI->getOperand(1))
          return false;

      // Check if we found the exit user.
      BasicBlock *Parent = UI->getParent();
      if (!TheLoop->contains(Parent)) {
        // If we already know this instruction is used externally, move on to
        // the next user.
        if (ExitInstruction == Cur)
          continue;

        // Exit if we find multiple values used outside or if the header phi
        // node is being used. In that case the user uses the value of the
        // previous iteration, and we would lose "VF-1" iterations of the
        // reduction operation if we vectorize.
        if (ExitInstruction != nullptr || Cur == Phi)
          return false;

        // The instruction used by an outside user must be the last instruction
        // before we feed back to the reduction phi. Otherwise, we lose VF-1
        // operations on the value.
        if (!is_contained(Phi->operands(), Cur))
          return false;

        ExitInstruction = Cur;
        continue;
      }

      // Process instructions only once (termination). Each reduction cycle
      // value must only be used once, except by phi nodes and min/max
      // reductions which are represented as a cmp followed by a select.
      InstDesc IgnoredVal(false, nullptr);
      if (VisitedInsts.insert(UI).second) {
        if (isa<PHINode>(UI)) {
          PHIs.push_back(UI);
        } else {
          StoreInst *SI = dyn_cast<StoreInst>(UI);
          if (SI && SI->getPointerOperand() == Cur) {
            // The reduction variable chain can only be stored somewhere; it
            // can't be used as an address.
            return false;
          }
          NonPHIs.push_back(UI);
        }
      } else if (!isa<PHINode>(UI) &&
                 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
                   !isa<SelectInst>(UI)) ||
                  (!isConditionalRdxPattern(Kind, UI).isRecurrence() &&
                   !isAnyOfPattern(TheLoop, Phi, UI, IgnoredVal)
                        .isRecurrence() &&
                   !isMinMaxPattern(UI, Kind, IgnoredVal).isRecurrence())))
        return false;

      // Remember that we completed the cycle.
      if (UI == Phi)
        FoundStartPHI = true;
    }
    Worklist.append(PHIs.begin(), PHIs.end());
    Worklist.append(NonPHIs.begin(), NonPHIs.end());
  }

  // This means we have seen one but not the other instruction of the
  // pattern or more than just a select and cmp. Zero implies that we saw a
  // llvm.min/max intrinsic, which is always OK.
  if (isMinMaxRecurrenceKind(Kind) && NumCmpSelectPatternInst != 2 &&
      NumCmpSelectPatternInst != 0)
    return false;

  if (isAnyOfRecurrenceKind(Kind) && NumCmpSelectPatternInst != 1)
    return false;

  if (IntermediateStore) {
    // Check that the stored value goes to the phi node again. This way we make
    // sure that the value stored in IntermediateStore is indeed the final
    // reduction value.
    if (!is_contained(Phi->operands(), IntermediateStore->getValueOperand())) {
      LLVM_DEBUG(dbgs() << "Not a final reduction value stored: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If there is an exit instruction, its value should be stored in
    // IntermediateStore.
    if (ExitInstruction &&
        IntermediateStore->getValueOperand() != ExitInstruction) {
      LLVM_DEBUG(dbgs() << "Last store Instruction of reduction value does not "
                           "store last calculated value of the reduction: "
                        << *IntermediateStore << '\n');
      return false;
    }

    // If all uses are inside the loop (intermediate stores), then the
    // reduction value after the loop will be the one used in the last store.
    if (!ExitInstruction)
      ExitInstruction =
          cast<Instruction>(IntermediateStore->getValueOperand());
  }

  if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
    return false;

  const bool IsOrdered =
      checkOrderedReduction(Kind, ExactFPMathInst, ExitInstruction, Phi);

  if (Start != Phi) {
    // If the starting value is not the same as the phi node, we speculatively
    // looked through an 'and' instruction when evaluating a potential
    // arithmetic reduction to determine if it may have been type-promoted.
    //
    // We now compute the minimal bit width that is required to represent the
    // reduction. If this is the same width that was indicated by the 'and', we
    // can represent the reduction in the smaller type. The 'and' instruction
    // will be eliminated since it will essentially be a cast instruction that
    // can be ignored in the cost model. If we compute a different type than we
    // did when evaluating the 'and', the 'and' will not be eliminated, and we
    // will end up with different kinds of operations in the recurrence
    // expression (e.g., IntegerAND, IntegerADD). We give up if this is
    // the case.
    //
    // The vectorizer relies on InstCombine to perform the actual
    // type-shrinking. It does this by inserting instructions to truncate the
    // exit value of the reduction to the width indicated by RecurrenceType and
    // then extend this value back to the original width. If IsSigned is false,
    // a 'zext' instruction will be generated; otherwise, a 'sext' will be
    // used.
    //
    // TODO: We should not rely on InstCombine to rewrite the reduction in the
    //       smaller type. We should just generate a correctly typed expression
    //       to begin with.
    Type *ComputedType;
    std::tie(ComputedType, IsSigned) =
        computeRecurrenceType(ExitInstruction, DB, AC, DT);
    if (ComputedType != RecurrenceType)
      return false;
  }

  // Collect cast instructions and the minimum width used by the recurrence.
  // If the starting value is not the same as the phi node and the computed
  // recurrence type is equal to the recurrence type, the recurrence expression
  // will be represented in a narrower or wider type. If there are any cast
  // instructions that will be unnecessary, collect them in CastsFromRecurTy.
  // Note that the 'and' instruction was already included in this list.
  //
  // TODO: A better way to represent this may be to tag in some way all the
  //       instructions that are a part of the reduction. The vectorizer cost
  //       model could then apply the recurrence type to these instructions,
  //       without needing a white list of instructions to ignore.
  //       This may also be useful for the inloop reductions, if it can be
  //       kept simple enough.
  collectCastInstrs(TheLoop, ExitInstruction, RecurrenceType, CastInsts,
                    MinWidthCastToRecurrenceType);

  // We found a reduction var if we have reached the original phi node and we
  // only have a single instruction with out-of-loop users.

  // The ExitInstruction (the instruction which is allowed to have out-of-loop
  // users) is saved as part of the RecurrenceDescriptor.

  // Save the description of this reduction variable.
  RecurrenceDescriptor RD(RdxStart, ExitInstruction, IntermediateStore, Kind,
                          FMF, ExactFPMathInst, RecurrenceType, IsSigned,
                          IsOrdered, CastInsts, MinWidthCastToRecurrenceType);
  RedDes = RD;

  return true;
}

// We are looking for loops that do something like this:
//   int r = 0;
//   for (int i = 0; i < n; i++) {
//     if (src[i] > 3)
//       r = 3;
//   }
// where the reduction value (r) only has two states, in this example 0 or 3.
// The generated LLVM IR for this type of loop will be like this:
//   for.body:
//     %r = phi i32 [ %spec.select, %for.body ], [ 0, %entry ]
//     ...
//     %cmp = icmp sgt i32 %5, 3
//     %spec.select = select i1 %cmp, i32 3, i32 %r
//     ...
// In general we can support vectorization of loops where 'r' flips between
// any two non-constants, provided they are loop invariant. The only thing
// we actually care about at the end of the loop is whether or not any lane
// in the selected vector is different from the start value. The final
// across-vector reduction after the loop simply involves choosing the start
// value if nothing changed (0 in the example above) or the other selected
// value (3 in the example above).
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isAnyOfPattern(Loop *Loop, PHINode *OrigPhi,
                                     Instruction *I, InstDesc &Prev) {
  // We must handle the select(cmp(),x,y) as a single instruction. Advance to
  // the select.
  CmpInst::Predicate Pred;
  if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
    if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
      return InstDesc(Select, Prev.getRecKind());
  }

  // Only match select with single use cmp condition.
  if (!match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
                         m_Value())))
    return InstDesc(false, I);

  SelectInst *SI = cast<SelectInst>(I);
  Value *NonPhi = nullptr;

  if (OrigPhi == dyn_cast<PHINode>(SI->getTrueValue()))
    NonPhi = SI->getFalseValue();
  else if (OrigPhi == dyn_cast<PHINode>(SI->getFalseValue()))
    NonPhi = SI->getTrueValue();
  else
    return InstDesc(false, I);

  // We are looking for selects of the form:
  //   select(cmp(), phi, loop_invariant) or
  //   select(cmp(), loop_invariant, phi)
  if (!Loop->isLoopInvariant(NonPhi))
    return InstDesc(false, I);

  return InstDesc(I, isa<ICmpInst>(I->getOperand(0)) ? RecurKind::IAnyOf
                                                     : RecurKind::FAnyOf);
}

RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isMinMaxPattern(Instruction *I, RecurKind Kind,
                                      const InstDesc &Prev) {
  assert((isa<CmpInst>(I) || isa<SelectInst>(I) || isa<CallInst>(I)) &&
         "Expected a cmp or select or call instruction");
  if (!isMinMaxRecurrenceKind(Kind))
    return InstDesc(false, I);

  // We must handle the select(cmp()) as a single instruction. Advance to the
  // select.
  CmpInst::Predicate Pred;
  if (match(I, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) {
    if (auto *Select = dyn_cast<SelectInst>(*I->user_begin()))
      return InstDesc(Select, Prev.getRecKind());
  }

  // Only match select with single use cmp condition, or a min/max intrinsic.
  if (!isa<IntrinsicInst>(I) &&
      !match(I, m_Select(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), m_Value(),
                         m_Value())))
    return InstDesc(false, I);

  // Look for a min/max pattern.
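  // For illustration (value names are only examples), an integer smin idiom
  // typically looks like:
  //   %c = icmp slt i32 %a, %b
  //   %s = select i1 %c, i32 %a, i32 %b
  // while the FP forms appear either as an fcmp/select pair or as calls to
  // the llvm.minnum/llvm.maxnum/llvm.minimum/llvm.maximum intrinsics.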
  if (match(I, m_UMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMin, I);
  if (match(I, m_UMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::UMax, I);
  if (match(I, m_SMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMax, I);
  if (match(I, m_SMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::SMin, I);
  if (match(I, m_OrdFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_OrdFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_UnordFMin(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_UnordFMax(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMin, I);
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMax, I);
  if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMinimum, I);
  if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
    return InstDesc(Kind == RecurKind::FMaximum, I);

  return InstDesc(false, I);
}

/// Returns true if the select instruction has users in the compare-and-add
/// reduction pattern below. The select instruction argument is the last one
/// in the sequence.
///
///   %sum.1 = phi ...
///   ...
///   %cmp = fcmp pred %0, %CFP
///   %add = fadd %0, %sum.1
///   %sum.2 = select %cmp, %add, %sum.1
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isConditionalRdxPattern(RecurKind Kind, Instruction *I) {
  SelectInst *SI = dyn_cast<SelectInst>(I);
  if (!SI)
    return InstDesc(false, I);

  CmpInst *CI = dyn_cast<CmpInst>(SI->getCondition());
  // Only handle single use cases for now.
  if (!CI || !CI->hasOneUse())
    return InstDesc(false, I);

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  // For now, handle only the cases where exactly one of the operands of the
  // select instruction is a PHI node.
  if ((isa<PHINode>(*TrueVal) && isa<PHINode>(*FalseVal)) ||
      (!isa<PHINode>(*TrueVal) && !isa<PHINode>(*FalseVal)))
    return InstDesc(false, I);

  Instruction *I1 =
      isa<PHINode>(*TrueVal) ? dyn_cast<Instruction>(FalseVal)
                             : dyn_cast<Instruction>(TrueVal);
  if (!I1 || !I1->isBinaryOp())
    return InstDesc(false, I);

  Value *Op1, *Op2;
  if (!(((m_FAdd(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_FSub(m_Value(Op1), m_Value(Op2)).match(I1)) &&
         I1->isFast()) ||
        (m_FMul(m_Value(Op1), m_Value(Op2)).match(I1) && (I1->isFast())) ||
        ((m_Add(m_Value(Op1), m_Value(Op2)).match(I1) ||
          m_Sub(m_Value(Op1), m_Value(Op2)).match(I1))) ||
        (m_Mul(m_Value(Op1), m_Value(Op2)).match(I1))))
    return InstDesc(false, I);

  Instruction *IPhi = isa<PHINode>(*Op1) ?
      dyn_cast<Instruction>(Op1) : dyn_cast<Instruction>(Op2);
  if (!IPhi || IPhi != FalseVal)
    return InstDesc(false, I);

  return InstDesc(true, SI);
}

RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isRecurrenceInstr(Loop *L, PHINode *OrigPhi,
                                        Instruction *I, RecurKind Kind,
                                        InstDesc &Prev, FastMathFlags FuncFMF) {
  assert(Prev.getRecKind() == RecurKind::None || Prev.getRecKind() == Kind);
  switch (I->getOpcode()) {
  default:
    return InstDesc(false, I);
  case Instruction::PHI:
    return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
  case Instruction::Sub:
  case Instruction::Add:
    return InstDesc(Kind == RecurKind::Add, I);
  case Instruction::Mul:
    return InstDesc(Kind == RecurKind::Mul, I);
  case Instruction::And:
    return InstDesc(Kind == RecurKind::And, I);
  case Instruction::Or:
    return InstDesc(Kind == RecurKind::Or, I);
  case Instruction::Xor:
    return InstDesc(Kind == RecurKind::Xor, I);
  case Instruction::FDiv:
  case Instruction::FMul:
    return InstDesc(Kind == RecurKind::FMul, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::FSub:
  case Instruction::FAdd:
    return InstDesc(Kind == RecurKind::FAdd, I,
                    I->hasAllowReassoc() ? nullptr : I);
  case Instruction::Select:
    if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul ||
        Kind == RecurKind::Add || Kind == RecurKind::Mul)
      return isConditionalRdxPattern(Kind, I);
    [[fallthrough]];
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Call:
    if (isAnyOfRecurrenceKind(Kind))
      return isAnyOfPattern(L, OrigPhi, I, Prev);
    auto HasRequiredFMF = [&]() {
      if (FuncFMF.noNaNs() && FuncFMF.noSignedZeros())
        return true;
      if (isa<FPMathOperator>(I) && I->hasNoNaNs() && I->hasNoSignedZeros())
        return true;
      // minimum and maximum intrinsics do not require nsz and nnan flags since
      // NaN and signed zeroes are propagated in the intrinsic implementation.
      return match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())) ||
             match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()));
    };
    if (isIntMinMaxRecurrenceKind(Kind) ||
        (HasRequiredFMF() && isFPMinMaxRecurrenceKind(Kind)))
      return isMinMaxPattern(I, Kind, Prev);
    else if (isFMulAddIntrinsic(I))
      return InstDesc(Kind == RecurKind::FMulAdd, I,
                      I->hasAllowReassoc() ? nullptr : I);
    return InstDesc(false, I);
  }
}

bool RecurrenceDescriptor::hasMultipleUsesOf(
    Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
    unsigned MaxNumUses) {
  unsigned NumUses = 0;
  for (const Use &U : I->operands()) {
    if (Insts.count(dyn_cast<Instruction>(U)))
      ++NumUses;
    if (NumUses > MaxNumUses)
      return true;
  }

  return false;
}

bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                          RecurrenceDescriptor &RedDes,
                                          DemandedBits *DB, AssumptionCache *AC,
                                          DominatorTree *DT,
                                          ScalarEvolution *SE) {
  BasicBlock *Header = TheLoop->getHeader();
  Function &F = *Header->getParent();
  FastMathFlags FMF;
  FMF.setNoNaNs(
      F.getFnAttribute("no-nans-fp-math").getValueAsBool());
  FMF.setNoSignedZeros(
      F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());

  if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI."
                      << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::IAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an integer conditional select reduction PHI."
                      << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FAnyOf, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found a float conditional select reduction PHI."
                      << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMulAdd, TheLoop, FMF, RedDes, DB, AC, DT,
                      SE)) {
    LLVM_DEBUG(dbgs() << "Found an FMulAdd reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMaximum, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MAXIMUM reduction PHI." << *Phi
                      << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RecurKind::FMinimum, TheLoop, FMF, RedDes, DB, AC,
                      DT, SE)) {
    LLVM_DEBUG(dbgs() << "Found a float MINIMUM reduction PHI." << *Phi
                      << "\n");
    return true;
  }
  // Not a reduction of known type.
  return false;
}

bool RecurrenceDescriptor::isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
                                                  DominatorTree *DT) {

  // Ensure the phi node is in the loop header and has two incoming values.
  if (Phi->getParent() != TheLoop->getHeader() ||
      Phi->getNumIncomingValues() != 2)
    return false;

  // Ensure the loop has a preheader and a single latch block. The loop
  // vectorizer will need the latch to set up the next iteration of the loop.
  auto *Preheader = TheLoop->getLoopPreheader();
  auto *Latch = TheLoop->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  // Ensure the phi node's incoming blocks are the loop preheader and latch.
  if (Phi->getBasicBlockIndex(Preheader) < 0 ||
      Phi->getBasicBlockIndex(Latch) < 0)
    return false;

  // Get the previous value. The previous value comes from the latch edge while
  // the initial value comes from the preheader edge.
  auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));

  // If Previous is a phi in the header, go through incoming values from the
  // latch until we find a non-phi value. Use this as the new Previous; all
  // uses in the header will be dominated by the original phi, but need to be
  // moved after the non-phi previous value.
  SmallPtrSet<PHINode *, 4> SeenPhis;
  while (auto *PrevPhi = dyn_cast_or_null<PHINode>(Previous)) {
    if (PrevPhi->getParent() != Phi->getParent())
      return false;
    if (!SeenPhis.insert(PrevPhi).second)
      return false;
    Previous = dyn_cast<Instruction>(PrevPhi->getIncomingValueForBlock(Latch));
  }

  if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
    return false;

  // Ensure every user of the phi node (recursively) is dominated by the
  // previous value. The dominance requirement ensures the loop vectorizer will
  // not need to vectorize the initial value prior to the first iteration of
  // the loop.
  // TODO: Consider extending this sinking to handle memory instructions.

  SmallPtrSet<Value *, 8> Seen;
  BasicBlock *PhiBB = Phi->getParent();
  SmallVector<Instruction *, 8> WorkList;
  auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
    // Cyclic dependence.
    if (Previous == SinkCandidate)
      return false;

    if (!Seen.insert(SinkCandidate).second)
      return true;
    if (DT->dominates(Previous,
                      SinkCandidate)) // We already are good w/o sinking.
      return true;

    if (SinkCandidate->getParent() != PhiBB ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
      return false;

    // If we reach a PHI node that is not dominated by Previous, we reached a
    // header PHI. No need for sinking.
    if (isa<PHINode>(SinkCandidate))
      return true;

    // Sink User tentatively and check its users.
    WorkList.push_back(SinkCandidate);
    return true;
  };

  WorkList.push_back(Phi);
  // Try to recursively sink instructions and their users after Previous.
  while (!WorkList.empty()) {
    Instruction *Current = WorkList.pop_back_val();
    for (User *User : Current->users()) {
      if (!TryToPushSinkCandidate(cast<Instruction>(User)))
        return false;
    }
  }

  return true;
}

/// This function returns the identity element (or neutral element) for
/// the operation K.
Value *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
                                                   FastMathFlags FMF) const {
  switch (K) {
  case RecurKind::Xor:
  case RecurKind::Add:
  case RecurKind::Or:
    // Adding, Xoring, Oring zero to a number does not change it.
    return ConstantInt::get(Tp, 0);
  case RecurKind::Mul:
    // Multiplying a number by 1 does not change it.
    return ConstantInt::get(Tp, 1);
  case RecurKind::And:
    // AND-ing a number with an all-1 value does not change it.
    return ConstantInt::get(Tp, -1, true);
  case RecurKind::FMul:
    // Multiplying a number by 1 does not change it.
    return ConstantFP::get(Tp, 1.0L);
  case RecurKind::FMulAdd:
  case RecurKind::FAdd:
    // Adding zero to a number does not change it.
    // FIXME: Ideally we should not need to check FMF for FAdd and should
    // always use -0.0. However, this will currently result in mixed vectors
    // of 0.0/-0.0. Instead, we should ensure that 1) the FMF from FAdd are
    // propagated to the PHI nodes where possible, and 2) PHIs with the nsz
    // flag + -0.0 use 0.0. This would mean we can then remove the check for
    // noSignedZeros() below (see D98963).
    if (FMF.noSignedZeros())
      return ConstantFP::get(Tp, 0.0L);
    return ConstantFP::get(Tp, -0.0L);
  case RecurKind::UMin:
    return ConstantInt::get(Tp, -1, true);
  case RecurKind::UMax:
    return ConstantInt::get(Tp, 0);
  case RecurKind::SMin:
    return ConstantInt::get(Tp,
                            APInt::getSignedMaxValue(Tp->getIntegerBitWidth()));
  case RecurKind::SMax:
    return ConstantInt::get(Tp,
                            APInt::getSignedMinValue(Tp->getIntegerBitWidth()));
  case RecurKind::FMin:
    assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
           "nnan, nsz is expected to be set for FP min reduction.");
    return ConstantFP::getInfinity(Tp, false /*Negative*/);
  case RecurKind::FMax:
    assert((FMF.noNaNs() && FMF.noSignedZeros()) &&
           "nnan, nsz is expected to be set for FP max reduction.");
    return ConstantFP::getInfinity(Tp, true /*Negative*/);
  case RecurKind::FMinimum:
    return ConstantFP::getInfinity(Tp, false /*Negative*/);
  case RecurKind::FMaximum:
    return ConstantFP::getInfinity(Tp, true /*Negative*/);
  case RecurKind::IAnyOf:
  case RecurKind::FAnyOf:
    return getRecurrenceStartValue();
  default:
    llvm_unreachable("Unknown recurrence kind");
  }
}

unsigned RecurrenceDescriptor::getOpcode(RecurKind Kind) {
  switch (Kind) {
  case RecurKind::Add:
    return Instruction::Add;
  case RecurKind::Mul:
    return Instruction::Mul;
  case RecurKind::Or:
    return Instruction::Or;
  case RecurKind::And:
    return Instruction::And;
  case RecurKind::Xor:
    return Instruction::Xor;
  case RecurKind::FMul:
    return Instruction::FMul;
  case RecurKind::FMulAdd:
  case RecurKind::FAdd:
    return Instruction::FAdd;
  case RecurKind::SMax:
  case RecurKind::SMin:
  case RecurKind::UMax:
  case RecurKind::UMin:
  case RecurKind::IAnyOf:
    return Instruction::ICmp;
  case RecurKind::FMax:
  case RecurKind::FMin:
  case RecurKind::FMaximum:
  case RecurKind::FMinimum:
  case RecurKind::FAnyOf:
    return Instruction::FCmp;
  default:
    llvm_unreachable("Unknown recurrence operation");
  }
}

SmallVector<Instruction *, 4>
RecurrenceDescriptor::getReductionOpChain(PHINode *Phi, Loop *L) const {
  SmallVector<Instruction *, 4> ReductionOperations;
  unsigned RedOp = getOpcode(Kind);

  // Search down from the Phi to the LoopExitInstr, looking for instructions
  // with a single user of the correct type for the reduction.

  // Note that we check that the type of the operand is correct for each item
  // in the chain, including the last (the loop exit value). This can come up
  // from sub, which would otherwise be treated as an add reduction. MinMax
  // also needs to check for a pair of icmp/select, for which we use the
  // getNextInstruction and isCorrectOpcode lambdas to step the right number
  // of instructions and check the icmp/select pair.
  // FIXME: We also do not attempt to look through Selects yet, which might
  // be part of the reduction chain, or attempt to look through Ands to find a
  // smaller bitwidth. Subs are also currently not allowed (they are usually
  // treated as part of an add reduction) as they are expected to generally be
  // more expensive than out-of-loop reductions, and need to be costed more
  // carefully.
  unsigned ExpectedUses = 1;
  if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp)
    ExpectedUses = 2;

  auto getNextInstruction = [&](Instruction *Cur) -> Instruction * {
    for (auto *User : Cur->users()) {
      Instruction *UI = cast<Instruction>(User);
      if (isa<PHINode>(UI))
        continue;
      if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
        // We are expecting an icmp/select pair, so go to the next select
        // instruction if we can. We already know that Cur has 2 uses.
        if (isa<SelectInst>(UI))
          return UI;
        continue;
      }
      return UI;
    }
    return nullptr;
  };
  auto isCorrectOpcode = [&](Instruction *Cur) {
    if (RedOp == Instruction::ICmp || RedOp == Instruction::FCmp) {
      Value *LHS, *RHS;
      return SelectPatternResult::isMinOrMax(
          matchSelectPattern(Cur, LHS, RHS).Flavor);
    }
    // Recognize a call to the llvm.fmuladd intrinsic.
    if (isFMulAddIntrinsic(Cur))
      return true;

    return Cur->getOpcode() == RedOp;
  };

  // Attempt to look through Phis which are part of the reduction chain.
  unsigned ExtraPhiUses = 0;
  Instruction *RdxInstr = LoopExitInstr;
  if (auto ExitPhi = dyn_cast<PHINode>(LoopExitInstr)) {
    if (ExitPhi->getNumIncomingValues() != 2)
      return {};

    Instruction *Inc0 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(0));
    Instruction *Inc1 = dyn_cast<Instruction>(ExitPhi->getIncomingValue(1));

    Instruction *Chain = nullptr;
    if (Inc0 == Phi)
      Chain = Inc1;
    else if (Inc1 == Phi)
      Chain = Inc0;
    else
      return {};

    RdxInstr = Chain;
    ExtraPhiUses = 1;
  }

  // The loop exit instruction is checked first (as a quick test) but added
  // last. We check that its opcode is correct (and don't allow it to be a
  // Sub) and that it has the expected number of uses. It will have one use
  // from the phi and one from a LCSSA value, no matter the type.
  if (!isCorrectOpcode(RdxInstr) || !LoopExitInstr->hasNUses(2))
    return {};

  // Check that the Phi has one (or two for min/max) uses, plus an extra use
  // for conditional reductions.
  if (!Phi->hasNUses(ExpectedUses + ExtraPhiUses))
    return {};

  Instruction *Cur = getNextInstruction(Phi);

  // Each other instruction in the chain should have the expected number of
  // uses and be the correct opcode.
  while (Cur != RdxInstr) {
    if (!Cur || !isCorrectOpcode(Cur) || !Cur->hasNUses(ExpectedUses))
      return {};

    ReductionOperations.push_back(Cur);
    Cur = getNextInstruction(Cur);
  }

  ReductionOperations.push_back(Cur);
  return ReductionOperations;
}

InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
                                         const SCEV *Step, BinaryOperator *BOp,
                                         SmallVectorImpl<Instruction *> *Casts)
    : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
  assert(IK != IK_NoInduction && "Not an induction");

  // The start value type should match the induction kind and the value
  // itself should not be null.
  assert(StartValue && "StartValue is null");
  assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
         "StartValue is not a pointer for pointer induction");
  assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
         "StartValue is not an integer for integer induction");

  // Check the step value. It should be a non-zero integer value.
  assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
         "Step value is zero");

  assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
         "StepValue is not an integer");

  assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
         "StepValue is not FP for FpInduction");
  assert((IK != IK_FpInduction ||
          (InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub))) &&
         "Binary opcode should be specified for FP induction");

  if (Casts) {
    for (auto &Inst : *Casts) {
      RedundantCasts.push_back(Inst);
    }
  }
}

ConstantInt *InductionDescriptor::getConstIntStepValue() const {
  if (isa<SCEVConstant>(Step))
    return dyn_cast<ConstantInt>(cast<SCEVConstant>(Step)->getValue());
  return nullptr;
}

bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                           ScalarEvolution *SE,
                                           InductionDescriptor &D) {

  // Here we only handle FP induction variables.
  assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");

  if (TheLoop->getHeader() != Phi->getParent())
    return false;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi if it has a unique entry value and a unique backedge value.
  if (Phi->getNumIncomingValues() != 2)
    return false;
  Value *BEValue = nullptr, *StartValue = nullptr;
  if (TheLoop->contains(Phi->getIncomingBlock(0))) {
    BEValue = Phi->getIncomingValue(0);
    StartValue = Phi->getIncomingValue(1);
  } else {
    assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
           "Unexpected Phi node in the loop");
    BEValue = Phi->getIncomingValue(1);
    StartValue = Phi->getIncomingValue(0);
  }

  BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
  if (!BOp)
    return false;

  Value *Addend = nullptr;
  if (BOp->getOpcode() == Instruction::FAdd) {
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);
    else if (BOp->getOperand(1) == Phi)
      Addend = BOp->getOperand(0);
  } else if (BOp->getOpcode() == Instruction::FSub)
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);

  if (!Addend)
    return false;

  // The addend should be loop invariant.
  if (auto *I = dyn_cast<Instruction>(Addend))
    if (TheLoop->contains(I))
      return false;

  // The FP step has an unknown SCEV.
  const SCEV *Step = SE->getUnknown(Addend);
  D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
  return true;
}

/// This function is called when we suspect that the update-chain of a phi node
/// (whose symbolic SCEV expression is in \p PhiScev) contains redundant casts
/// that can be ignored. (This can happen when the PSCEV rewriter adds a runtime
/// predicate P under which the SCEV expression for the phi can be the
/// AddRecurrence \p AR; See createAddRecFromPHIWithCast). We want to find the
/// cast instructions that are involved in the update-chain of this induction.
/// A caller that adds the required runtime predicate can be free to drop these
/// cast instructions, and compute the phi using \p AR (instead of some scev
/// expression with casts).
///
/// For example, without a predicate the scev expression can take the following
/// form:
///   (Ext ix (Trunc iy ( Start + i*Step ) to ix) to iy)
///
/// It corresponds to the following IR sequence:
///   %for.body:
///     %x = phi i64 [ 0, %ph ], [ %add, %for.body ]
///     %casted_phi = "ExtTrunc i64 %x"
///     %add = add i64 %casted_phi, %step
///
/// where %x is given in \p PN,
/// PSE.getSCEV(%x) is equal to PSE.getSCEV(%casted_phi) under a predicate,
/// and the IR sequence that "ExtTrunc i64 %x" represents can take one of
/// several forms, for example:
///   ExtTrunc1: %casted_phi = and %x, 2^n-1
/// or:
///   ExtTrunc2: %t = shl %x, m
///              %casted_phi = ashr %t, m
///
/// If we are able to find such a sequence, we return the instructions
/// we found, namely %casted_phi and the instructions on its use-def chain up
/// to the phi (not including the phi).
static bool getCastsForInductionPHI(PredicatedScalarEvolution &PSE,
                                    const SCEVUnknown *PhiScev,
                                    const SCEVAddRecExpr *AR,
                                    SmallVectorImpl<Instruction *> &CastInsts) {

  assert(CastInsts.empty() && "CastInsts is expected to be empty.");
  auto *PN = cast<PHINode>(PhiScev->getValue());
  assert(PSE.getSCEV(PN) == AR && "Unexpected phi node SCEV expression");
  const Loop *L = AR->getLoop();

  // Find any cast instructions that participate in the def-use chain of
  // PhiScev in the loop.
  // FORNOW/TODO: We currently expect the def-use chain to include only
  // two-operand instructions, where one of the operands is an invariant.
  // createAddRecFromPHIWithCasts() currently does not support anything more
  // involved than that, so we keep the search simple. This can be
  // extended/generalized as needed.

  auto getDef = [&](const Value *Val) -> Value * {
    const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Val);
    if (!BinOp)
      return nullptr;
    Value *Op0 = BinOp->getOperand(0);
    Value *Op1 = BinOp->getOperand(1);
    Value *Def = nullptr;
    if (L->isLoopInvariant(Op0))
      Def = Op1;
    else if (L->isLoopInvariant(Op1))
      Def = Op0;
    return Def;
  };

  // Look for the instruction that defines the induction via the
  // loop backedge.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;
  Value *Val = PN->getIncomingValueForBlock(Latch);
  if (!Val)
    return false;

  // Follow the def-use chain until the induction phi is reached.
  // If on the way we encounter a Value that has the same SCEV Expr as the
  // phi node, we can consider the instructions we visit from that point
  // as part of the cast-sequence that can be ignored.
  bool InCastSequence = false;
  auto *Inst = dyn_cast<Instruction>(Val);
  while (Val != PN) {
    // If we encountered a phi node other than PN, or if we left the loop,
    // we bail out.
    if (!Inst || !L->contains(Inst)) {
      return false;
    }
    auto *AddRec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(Val));
    if (AddRec && PSE.areAddRecsEqualWithPreds(AddRec, AR))
      InCastSequence = true;
    if (InCastSequence) {
      // Only the last instruction in the cast sequence is expected to have
      // uses outside the induction def-use chain.
      if (!CastInsts.empty())
        if (!Inst->hasOneUse())
          return false;
      CastInsts.push_back(Inst);
    }
    Val = getDef(Val);
    if (!Val)
      return false;
    Inst = dyn_cast<Instruction>(Val);
  }

  return InCastSequence;
}

bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                         PredicatedScalarEvolution &PSE,
                                         InductionDescriptor &D, bool Assume) {
  Type *PhiTy = Phi->getType();

  // Handle integer and pointer induction variables. FP induction is now also
  // handled, but without trying to make a recurrent expression from the PHI
  // node in-place.

  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() && !PhiTy->isFloatTy() &&
      !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
    return false;

  if (PhiTy->isFloatingPointTy())
    return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);

  const SCEV *PhiScev = PSE.getSCEV(Phi);
  const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  // We need this expression to be an AddRecExpr.
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Phi);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  // Record any cast instructions that participate in the induction update.
  const auto *SymbolicPhi = dyn_cast<SCEVUnknown>(PhiScev);
  // If we started from an UnknownSCEV, and managed to build an AddRecurrence
  // only after enabling Assume with PSCEV, this means we may have encountered
  // cast instructions that required adding a runtime check in order to
  // guarantee the correctness of the AddRecurrence representation of the
  // induction.
  if (PhiScev != AR && SymbolicPhi) {
    SmallVector<Instruction *, 2> Casts;
    if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
      return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
  }

  return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
}

bool InductionDescriptor::isInductionPHI(
    PHINode *Phi, const Loop *TheLoop, ScalarEvolution *SE,
    InductionDescriptor &D, const SCEV *Expr,
    SmallVectorImpl<Instruction *> *CastsToIgnore) {
  Type *PhiTy = Phi->getType();
  // We only handle integer and pointer induction variables.
  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
    return false;

  // Check that the PHI is consecutive.
  const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  if (AR->getLoop() != TheLoop) {
    // FIXME: We should treat this as a uniform. Unfortunately, we
    // don't currently know how to handle uniform PHIs.
    LLVM_DEBUG(
        dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
    return false;
  }

  // This function assumes that it is only called on Phi nodes that are
  // present inside loop headers; assert that the current Phi is in the loop
  // header.
  assert(Phi->getParent() == AR->getLoop()->getHeader() &&
         "Invalid Phi node, not present in loop header");

  Value *StartValue =
      Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());

  BasicBlock *Latch = AR->getLoop()->getLoopLatch();
  if (!Latch)
    return false;

  const SCEV *Step = AR->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  // The stride may be a constant or a loop-invariant integer value.
  const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
  if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
    return false;

  if (PhiTy->isIntegerTy()) {
    BinaryOperator *BOp =
        dyn_cast<BinaryOperator>(Phi->getIncomingValueForBlock(Latch));
    D = InductionDescriptor(StartValue, IK_IntInduction, Step, BOp,
                            CastsToIgnore);
    return true;
  }

  assert(PhiTy->isPointerTy() && "The PHI must be a pointer");

  // This allows induction variables with non-constant steps.
  D = InductionDescriptor(StartValue, IK_PtrInduction, Step);
  return true;
}