//===- InstCombineNegator.cpp -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements sinking of negation into expression trees,
// as long as that can be done without increasing instruction count.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>

namespace llvm {
class AssumptionCache;
class DataLayout;
class DominatorTree;
class LLVMContext;
} // namespace llvm

using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NegatorTotalNegationsAttempted,
          "Negator: Number of negations attempted to be sunk");
STATISTIC(NegatorNumTreesNegated,
          "Negator: Number of negations successfully sunk");
STATISTIC(NegatorMaxDepthVisited, "Negator: Maximal traversal depth ever "
                                  "reached while attempting to sink negation");
STATISTIC(NegatorTimesDepthLimitReached,
          "Negator: How many times the traversal depth limit was reached "
          "during sinking");
STATISTIC(
    NegatorNumValuesVisited,
    "Negator: Total number of values visited during attempts to sink negation");
STATISTIC(NegatorNumNegationsFoundInCache,
          "Negator: How many negations did we retrieve/reuse from cache");
STATISTIC(NegatorMaxTotalValuesVisited,
          "Negator: Maximal number of values ever visited while attempting to "
          "sink negation");
STATISTIC(NegatorNumInstructionsCreatedTotal,
          "Negator: Number of new negated instructions created, total");
STATISTIC(NegatorMaxInstructionsCreated,
          "Negator: Maximal number of new instructions created during negation "
          "attempt");
STATISTIC(NegatorNumInstructionsNegatedSuccess,
          "Negator: Number of new negated instructions created in successful "
          "negation sinking attempts");

DEBUG_COUNTER(NegatorCounter, "instcombine-negator",
              "Controls Negator transformations in InstCombine pass");
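
// Usage note (illustrative, not normative): since the knobs declared below are
// global cl::opt flags, negation sinking can be disabled or depth-limited from
// the command line when running InstCombine, e.g. with something like
//   opt -passes=instcombine -instcombine-negator-enabled=false ...
//   opt -passes=instcombine -instcombine-negator-max-depth=3 ...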

static cl::opt<bool>
    NegatorEnabled("instcombine-negator-enabled", cl::init(true),
                   cl::desc("Should we attempt to sink negations?"));

static cl::opt<unsigned>
    NegatorMaxDepth("instcombine-negator-max-depth",
                    cl::init(NegatorDefaultMaxDepth),
                    cl::desc("The maximal lookup depth when checking the "
                             "viability of negation sinking."));

Negator::Negator(LLVMContext &C, const DataLayout &DL_, AssumptionCache &AC_,
                 const DominatorTree &DT_, bool IsTrulyNegation_)
    : Builder(C, TargetFolder(DL_),
              IRBuilderCallbackInserter([&](Instruction *I) {
                ++NegatorNumInstructionsCreatedTotal;
                NewInstructions.push_back(I);
              })),
      DL(DL_), AC(AC_), DT(DT_), IsTrulyNegation(IsTrulyNegation_) {}

#if LLVM_ENABLE_STATS
Negator::~Negator() {
  NegatorMaxTotalValuesVisited.updateMax(NumValuesVisitedInThisNegator);
}
#endif

// Due to InstCombine's worklist management, there are no guarantees that
// each instruction we'll encounter has been visited by InstCombine already.
// In particular, most importantly for us, that means we have to canonicalize
// constants to RHS ourselves, since that is helpful sometimes.
std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
  assert(I->getNumOperands() == 2 && "Only for binops!");
  std::array<Value *, 2> Ops{I->getOperand(0), I->getOperand(1)};
  if (I->isCommutative() && InstCombiner::getComplexity(I->getOperand(0)) <
                                InstCombiner::getComplexity(I->getOperand(1)))
    std::swap(Ops[0], Ops[1]);
  return Ops;
}

// FIXME: can this be reworked into a worklist-based algorithm while preserving
// the depth-first, early bailout traversal?
LLVM_NODISCARD Value *Negator::visitImpl(Value *V, unsigned Depth) {
  // -(undef) -> undef.
  if (match(V, m_Undef()))
    return V;

  // In i1, negation can simply be ignored.
  if (V->getType()->isIntOrIntVectorTy(1))
    return V;

  Value *X;

  // -(-(X)) -> X.
  if (match(V, m_Neg(m_Value(X))))
    return X;

  // Integral constants can be freely negated.
  if (match(V, m_AnyIntegralConstant()))
    return ConstantExpr::getNeg(cast<Constant>(V), /*HasNUW=*/false,
                                /*HasNSW=*/false);

  // If we have a non-instruction, then give up.
  if (!isa<Instruction>(V))
    return nullptr;

  // If we have started with a true negation (i.e. `sub 0, %y`), then if we've
  // got an instruction that does not require recursive reasoning, we can still
  // negate it even if it has other uses, without increasing instruction count.
  if (!V->hasOneUse() && !IsTrulyNegation)
    return nullptr;

  auto *I = cast<Instruction>(V);
  unsigned BitWidth = I->getType()->getScalarSizeInBits();

  // We must preserve the insertion point and debug info that is set in the
  // builder at the time this function is called.
  InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
  // And since we are trying to negate instruction I, that tells us about the
  // insertion point and the debug info that we need to keep.
  Builder.SetInsertPoint(I);

  // In some cases we can give the answer without further recursion.
  switch (I->getOpcode()) {
  case Instruction::Add: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `inc` is always negatible.
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    break;
  }
  case Instruction::Xor:
    // `not` is always negatible.
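    // E.g. (illustrative, the IR names are hypothetical): for
    // `%not = xor i8 %x, -1`, the negation is `add i8 %x, 1`, because in
    // two's complement -(~x) == x + 1.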
    if (match(I, m_Not(m_Value(X))))
      return Builder.CreateAdd(X, ConstantInt::get(X->getType(), 1),
                               I->getName() + ".neg");
    break;
  case Instruction::AShr:
  case Instruction::LShr: {
    // Right-shift sign bit smear is negatible.
    const APInt *Op1Val;
    if (match(I->getOperand(1), m_APInt(Op1Val)) && *Op1Val == BitWidth - 1) {
      Value *BO = I->getOpcode() == Instruction::AShr
                      ? Builder.CreateLShr(I->getOperand(0), I->getOperand(1))
                      : Builder.CreateAShr(I->getOperand(0), I->getOperand(1));
      if (auto *NewInstr = dyn_cast<Instruction>(BO)) {
        NewInstr->copyIRFlags(I);
        NewInstr->setName(I->getName() + ".neg");
      }
      return BO;
    }
    // While we could negate exact arithmetic shift:
    //   ashr exact %x, C --> sdiv exact i8 %x, -1<<C
    // iff C != 0 and C u< bitwidth(%x), we don't want to,
    // because division is *THAT* much worse than a shift.
    break;
  }
  case Instruction::SExt:
  case Instruction::ZExt:
    // `*ext` of i1 is always negatible.
    if (I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
      return I->getOpcode() == Instruction::SExt
                 ? Builder.CreateZExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg")
                 : Builder.CreateSExt(I->getOperand(0), I->getType(),
                                      I->getName() + ".neg");
    break;
  case Instruction::Select: {
    // If both arms of the select are constants, we don't need to recurse.
    // Therefore, this transform is not limited by uses.
    auto *Sel = cast<SelectInst>(I);
    Constant *TrueC, *FalseC;
    if (match(Sel->getTrueValue(), m_ImmConstant(TrueC)) &&
        match(Sel->getFalseValue(), m_ImmConstant(FalseC))) {
      Constant *NegTrueC = ConstantExpr::getNeg(TrueC);
      Constant *NegFalseC = ConstantExpr::getNeg(FalseC);
      return Builder.CreateSelect(Sel->getCondition(), NegTrueC, NegFalseC,
                                  I->getName() + ".neg", /*MDFrom=*/I);
    }
    break;
  }
  default:
    break; // Other instructions require recursive reasoning.
  }

  if (I->getOpcode() == Instruction::Sub &&
      (I->hasOneUse() || match(I->getOperand(0), m_ImmConstant()))) {
    // `sub` is always negatible.
    // However, only do this either if the old `sub` doesn't stick around, or
    // it was subtracting from a constant. Otherwise, this isn't profitable.
    return Builder.CreateSub(I->getOperand(1), I->getOperand(0),
                             I->getName() + ".neg");
  }

  // Some other cases, while they still don't require recursion,
  // are restricted to the one-use case.
  if (!V->hasOneUse())
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::And: {
    Constant *ShAmt;
    // sub(y,and(lshr(x,C),1)) --> add(ashr(shl(x,(BW-1)-C),BW-1),y)
    if (match(I, m_c_And(m_OneUse(m_TruncOrSelf(
                             m_LShr(m_Value(X), m_ImmConstant(ShAmt)))),
                         m_One()))) {
      unsigned BW = X->getType()->getScalarSizeInBits();
      Constant *BWMinusOne = ConstantInt::get(X->getType(), BW - 1);
      Value *R = Builder.CreateShl(X, Builder.CreateSub(BWMinusOne, ShAmt));
      R = Builder.CreateAShr(R, BWMinusOne);
      return Builder.CreateTruncOrBitCast(R, I->getType());
    }
    break;
  }
  case Instruction::SDiv:
    // `sdiv` is negatible if the divisor is not undef/INT_MIN/1.
    // While this is normally not behind a use-check,
    // let's consider division to be special since it's costly.
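    // E.g. (illustrative, the IR names are hypothetical): negating
    // `%d = sdiv i8 %x, 42` simply yields `sdiv i8 %x, -42`, since only the
    // constant divisor needs to be negated.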
    if (auto *Op1C = dyn_cast<Constant>(I->getOperand(1))) {
      if (!Op1C->containsUndefOrPoisonElement() &&
          Op1C->isNotMinSignedValue() && Op1C->isNotOneValue()) {
        Value *BO =
            Builder.CreateSDiv(I->getOperand(0), ConstantExpr::getNeg(Op1C),
                               I->getName() + ".neg");
        if (auto *NewInstr = dyn_cast<Instruction>(BO))
          NewInstr->setIsExact(I->isExact());
        return BO;
      }
    }
    break;
  }

  // Rest of the logic is recursive, so if it's time to give up then it's time.
  if (Depth > NegatorMaxDepth) {
    LLVM_DEBUG(dbgs() << "Negator: reached maximal allowed traversal depth in "
                      << *V << ". Giving up.\n");
    ++NegatorTimesDepthLimitReached;
    return nullptr;
  }

  switch (I->getOpcode()) {
  case Instruction::Freeze: {
    // `freeze` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateFreeze(NegOp, I->getName() + ".neg");
  }
  case Instruction::PHI: {
    // `phi` is negatible if all the incoming values are negatible.
    auto *PHI = cast<PHINode>(I);
    SmallVector<Value *, 4> NegatedIncomingValues(PHI->getNumOperands());
    for (auto I : zip(PHI->incoming_values(), NegatedIncomingValues)) {
      if (!(std::get<1>(I) =
                negate(std::get<0>(I), Depth + 1))) // Early return.
        return nullptr;
    }
    // All incoming values are indeed negatible. Create negated PHI node.
    PHINode *NegatedPHI = Builder.CreatePHI(
        PHI->getType(), PHI->getNumOperands(), PHI->getName() + ".neg");
    for (auto I : zip(NegatedIncomingValues, PHI->blocks()))
      NegatedPHI->addIncoming(std::get<0>(I), std::get<1>(I));
    return NegatedPHI;
  }
  case Instruction::Select: {
    if (isKnownNegation(I->getOperand(1), I->getOperand(2))) {
      // If one hand of the select is known to be the negation of the other
      // hand, just swap the hands around.
      auto *NewSelect = cast<SelectInst>(I->clone());
      // Just swap the operands of the select.
      NewSelect->swapValues();
      // Don't swap prof metadata, we didn't change the branch behavior.
      NewSelect->setName(I->getName() + ".neg");
      Builder.Insert(NewSelect);
      return NewSelect;
    }
    // `select` is negatible if both hands of `select` are negatible.
    Value *NegOp1 = negate(I->getOperand(1), Depth + 1);
    if (!NegOp1) // Early return.
      return nullptr;
    Value *NegOp2 = negate(I->getOperand(2), Depth + 1);
    if (!NegOp2)
      return nullptr;
    // Do preserve the metadata!
    return Builder.CreateSelect(I->getOperand(0), NegOp1, NegOp2,
                                I->getName() + ".neg", /*MDFrom=*/I);
  }
  case Instruction::ShuffleVector: {
    // `shufflevector` is negatible if both operands are negatible.
    auto *Shuf = cast<ShuffleVectorInst>(I);
    Value *NegOp0 = negate(I->getOperand(0), Depth + 1);
    if (!NegOp0) // Early return.
      return nullptr;
    Value *NegOp1 = negate(I->getOperand(1), Depth + 1);
    if (!NegOp1)
      return nullptr;
    return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(),
                                       I->getName() + ".neg");
  }
  case Instruction::ExtractElement: {
    // `extractelement` is negatible if the source operand is negatible.
    auto *EEI = cast<ExtractElementInst>(I);
    Value *NegVector = negate(EEI->getVectorOperand(), Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(),
                                        I->getName() + ".neg");
  }
  case Instruction::InsertElement: {
    // `insertelement` is negatible if both the source vector and
    // element-to-be-inserted are negatible.
    auto *IEI = cast<InsertElementInst>(I);
    Value *NegVector = negate(IEI->getOperand(0), Depth + 1);
    if (!NegVector) // Early return.
      return nullptr;
    Value *NegNewElt = negate(IEI->getOperand(1), Depth + 1);
    if (!NegNewElt) // Early return.
      return nullptr;
    return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2),
                                       I->getName() + ".neg");
  }
  case Instruction::Trunc: {
    // `trunc` is negatible if its operand is negatible.
    Value *NegOp = negate(I->getOperand(0), Depth + 1);
    if (!NegOp) // Early return.
      return nullptr;
    return Builder.CreateTrunc(NegOp, I->getType(), I->getName() + ".neg");
  }
  case Instruction::Shl: {
    // `shl` is negatible if the first operand is negatible.
    if (Value *NegOp0 = negate(I->getOperand(0), Depth + 1))
      return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg");
    // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`.
    auto *Op1C = dyn_cast<Constant>(I->getOperand(1));
    if (!Op1C) // Early return.
      return nullptr;
    return Builder.CreateMul(
        I->getOperand(0),
        ConstantExpr::getShl(Constant::getAllOnesValue(Op1C->getType()), Op1C),
        I->getName() + ".neg");
  }
  case Instruction::Or: {
    if (!haveNoCommonBitsSet(I->getOperand(0), I->getOperand(1), DL, &AC, I,
                             &DT))
      return nullptr; // Don't know how to handle `or` in general.
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `or`/`add` are interchangeable when operands have no common bits set.
    // `inc` is always negatible.
    if (match(Ops[1], m_One()))
      return Builder.CreateNot(Ops[0], I->getName() + ".neg");
    // Else, just defer to Instruction::Add handling.
    LLVM_FALLTHROUGH;
  }
  case Instruction::Add: {
    // `add` is negatible if both of its operands are negatible.
    SmallVector<Value *, 2> NegatedOps, NonNegatedOps;
    for (Value *Op : I->operands()) {
      // Can we sink the negation into this operand?
      if (Value *NegOp = negate(Op, Depth + 1)) {
        NegatedOps.emplace_back(NegOp); // Successfully negated operand!
        continue;
      }
      // Failed to sink negation into this operand. If we started from a true
      // negation, and we manage to sink the negation into the other operand,
      // we can still do this.
      if (!IsTrulyNegation)
        return nullptr;
      NonNegatedOps.emplace_back(Op); // Just record which operand that was.
    }
    assert((NegatedOps.size() + NonNegatedOps.size()) == 2 &&
           "Internal consistency check failed.");
    // Did we manage to sink negation into both of the operands?
    if (NegatedOps.size() == 2) // Then we get to keep the `add`!
      return Builder.CreateAdd(NegatedOps[0], NegatedOps[1],
                               I->getName() + ".neg");
    assert(IsTrulyNegation && "We should have early-exited then.");
    // Completely failed to sink negation?
    if (NonNegatedOps.size() == 2)
      return nullptr;
    // 0-(a+b) --> (-a)-b
    return Builder.CreateSub(NegatedOps[0], NonNegatedOps[0],
                             I->getName() + ".neg");
  }
  case Instruction::Xor: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `xor` is negatible if one of its operands is invertible.
    // FIXME: InstCombineInverter? But how to connect Inverter and Negator?
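    // E.g. (illustrative, the names are hypothetical): `-(%x ^ C)` can be
    // rewritten as `(%x ^ ~C) + 1`, because -%v == ~%v + 1 and
    // ~(%x ^ C) == %x ^ ~C.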
    if (auto *C = dyn_cast<Constant>(Ops[1])) {
      Value *Xor = Builder.CreateXor(Ops[0], ConstantExpr::getNot(C));
      return Builder.CreateAdd(Xor, ConstantInt::get(Xor->getType(), 1),
                               I->getName() + ".neg");
    }
    return nullptr;
  }
  case Instruction::Mul: {
    std::array<Value *, 2> Ops = getSortedOperandsOfBinOp(I);
    // `mul` is negatible if one of its operands is negatible.
    Value *NegatedOp, *OtherOp;
    // First try the second operand: if it's a constant, it's best to just
    // negate it instead of sinking the `neg` deeper.
    if (Value *NegOp1 = negate(Ops[1], Depth + 1)) {
      NegatedOp = NegOp1;
      OtherOp = Ops[0];
    } else if (Value *NegOp0 = negate(Ops[0], Depth + 1)) {
      NegatedOp = NegOp0;
      OtherOp = Ops[1];
    } else
      // Can't negate either of them.
      return nullptr;
    return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg");
  }
  default:
    return nullptr; // Don't know, likely not negatible for free.
  }

  llvm_unreachable("Can't get here. We always return from switch.");
}

LLVM_NODISCARD Value *Negator::negate(Value *V, unsigned Depth) {
  NegatorMaxDepthVisited.updateMax(Depth);
  ++NegatorNumValuesVisited;

#if LLVM_ENABLE_STATS
  ++NumValuesVisitedInThisNegator;
#endif

#ifndef NDEBUG
  // We can't ever have a Value with such an address.
  Value *Placeholder = reinterpret_cast<Value *>(static_cast<uintptr_t>(-1));
#endif

  // Did we already try to negate this value?
  auto NegationsCacheIterator = NegationsCache.find(V);
  if (NegationsCacheIterator != NegationsCache.end()) {
    ++NegatorNumNegationsFoundInCache;
    Value *NegatedV = NegationsCacheIterator->second;
    assert(NegatedV != Placeholder && "Encountered a cycle during negation.");
    return NegatedV;
  }

#ifndef NDEBUG
  // We did not find a cached result for negation of V. While there,
  // let's temporarily cache a placeholder value, with the idea that if later
  // during negation we fetch it from cache, we'll know we're in a cycle.
  NegationsCache[V] = Placeholder;
#endif

  // No luck. Try negating it for real.
  Value *NegatedV = visitImpl(V, Depth);
  // And cache the (real) result for the future.
  NegationsCache[V] = NegatedV;

  return NegatedV;
}

LLVM_NODISCARD Optional<Negator::Result> Negator::run(Value *Root) {
  Value *Negated = negate(Root, /*Depth=*/0);
  if (!Negated) {
    // We must clean up newly-inserted instructions, to avoid any potential
    // endless combine looping.
    for (Instruction *I : llvm::reverse(NewInstructions))
      I->eraseFromParent();
    return llvm::None;
  }
  return std::make_pair(ArrayRef<Instruction *>(NewInstructions), Negated);
}
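
// Illustrative sketch of the overall effect (the IR below is hypothetical,
// not taken from a real test): when asked to negate the root of
//   %t = add i8 %x, 1
//   %r = sub i8 0, %t
// the negator emits `%t.neg = xor i8 %x, -1` (i.e. `not %x`, since
// -(x + 1) == ~x), and the caller can then replace %r with %t.neg.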

LLVM_NODISCARD Value *Negator::Negate(bool LHSIsZero, Value *Root,
                                      InstCombinerImpl &IC) {
  ++NegatorTotalNegationsAttempted;
  LLVM_DEBUG(dbgs() << "Negator: attempting to sink negation into " << *Root
                    << "\n");

  if (!NegatorEnabled || !DebugCounter::shouldExecute(NegatorCounter))
    return nullptr;

  Negator N(Root->getContext(), IC.getDataLayout(), IC.getAssumptionCache(),
            IC.getDominatorTree(), LHSIsZero);
  Optional<Result> Res = N.run(Root);
  if (!Res) { // Negation failed.
    LLVM_DEBUG(dbgs() << "Negator: failed to sink negation into " << *Root
                      << "\n");
    return nullptr;
  }

  LLVM_DEBUG(dbgs() << "Negator: successfully sunk negation into " << *Root
                    << "\n NEW: " << *Res->second << "\n");
  ++NegatorNumTreesNegated;

  // We must temporarily unset the 'current' insertion point and DebugLoc of
  // the InstCombine's IRBuilder so that it won't interfere with the ones we
  // have already specified when producing negated instructions.
  InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
  IC.Builder.ClearInsertionPoint();
  IC.Builder.SetCurrentDebugLocation(DebugLoc());

  // And finally, we must add newly-created instructions into the InstCombine's
  // worklist (in a proper order!) so it can attempt to combine them.
  LLVM_DEBUG(dbgs() << "Negator: Propagating " << Res->first.size()
                    << " instrs to InstCombine\n");
  NegatorMaxInstructionsCreated.updateMax(Res->first.size());
  NegatorNumInstructionsNegatedSuccess += Res->first.size();

  // They are in def-use order, so nothing fancy, just insert them in order.
  for (Instruction *I : Res->first)
    IC.Builder.Insert(I, I->getName());

  // And return the new root.
  return Res->second;
}