1 //===- InstCombinePHI.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitPHINode function.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueTracking.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include "llvm/Transforms/Utils/Local.h"
23 #include <optional>
24
25 using namespace llvm;
26 using namespace llvm::PatternMatch;
27
28 #define DEBUG_TYPE "instcombine"
29
30 static cl::opt<unsigned>
31 MaxNumPhis("instcombine-max-num-phis", cl::init(512),
32 cl::desc("Maximum number of phis to handle in intptr/ptrint folding"));
33
34 STATISTIC(NumPHIsOfInsertValues,
35 "Number of phi-of-insertvalue turned into insertvalue-of-phis");
36 STATISTIC(NumPHIsOfExtractValues,
37 "Number of phi-of-extractvalue turned into extractvalue-of-phi");
38 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
39
40 /// The PHI arguments will be folded into a single operation with a PHI node
41 /// as input. The debug location of the single operation will be the merged
42 /// locations of the original PHI node arguments.
43 void InstCombinerImpl::PHIArgMergedDebugLoc(Instruction *Inst, PHINode &PN) {
44 auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
45 Inst->setDebugLoc(FirstInst->getDebugLoc());
46 // We do not expect a CallInst here; otherwise, N-way merging of DebugLoc
47 // would be inefficient.
48 assert(!isa<CallInst>(Inst));
49
50 for (Value *V : drop_begin(PN.incoming_values())) {
51 auto *I = cast<Instruction>(V);
52 Inst->applyMergedLocation(Inst->getDebugLoc(), I->getDebugLoc());
53 }
54 }
55
56 // Replace Integer typed PHI PN if the PHI's value is used as a pointer value.
57 // If there is an existing pointer typed PHI that produces the same value as PN,
58 // replace PN and the IntToPtr operation with it. Otherwise, synthesize a new
59 // PHI node:
60 //
61 // Case-1:
62 // bb1:
63 // int_init = PtrToInt(ptr_init)
64 // br label %bb2
65 // bb2:
66 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
67 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
68 // ptr_val2 = IntToPtr(int_val)
69 // ...
70 // use(ptr_val2)
71 // ptr_val_inc = ...
72 // int_val_inc = PtrToInt(ptr_val_inc)
73 //
74 // ==>
75 // bb1:
76 // br label %bb2
77 // bb2:
78 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
79 // ...
80 // use(ptr_val)
81 // ptr_val_inc = ...
82 //
83 // Case-2:
84 // bb1:
85 // int_ptr = BitCast(ptr_ptr)
86 // int_init = Load(int_ptr)
87 // br label %bb2
88 // bb2:
89 // int_val = PHI([int_init, %bb1], [int_val_inc, %bb2])
90 // ptr_val2 = IntToPtr(int_val)
91 // ...
92 // use(ptr_val2)
93 // ptr_val_inc = ...
94 // int_val_inc = PtrToInt(ptr_val_inc)
95 // ==>
96 // bb1:
97 // ptr_init = Load(ptr_ptr)
98 // br label %bb2
99 // bb2:
100 // ptr_val = PHI([ptr_init, %bb1], [ptr_val_inc, %bb2])
101 // ...
102 // use(ptr_val)
103 // ptr_val_inc = ...
104 // ...
105 //
106 bool InstCombinerImpl::foldIntegerTypedPHI(PHINode &PN) {
107 if (!PN.getType()->isIntegerTy())
108 return false;
109 if (!PN.hasOneUse())
110 return false;
111
112 auto *IntToPtr = dyn_cast<IntToPtrInst>(PN.user_back());
113 if (!IntToPtr)
114 return false;
115
116 // Check if the pointer is actually used as a pointer:
117 auto HasPointerUse = [](Instruction *IIP) {
118 for (User *U : IIP->users()) {
119 Value *Ptr = nullptr;
120 if (LoadInst *LoadI = dyn_cast<LoadInst>(U)) {
121 Ptr = LoadI->getPointerOperand();
122 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
123 Ptr = SI->getPointerOperand();
124 } else if (GetElementPtrInst *GI = dyn_cast<GetElementPtrInst>(U)) {
125 Ptr = GI->getPointerOperand();
126 }
127
128 if (Ptr && Ptr == IIP)
129 return true;
130 }
131 return false;
132 };
133
134 if (!HasPointerUse(IntToPtr))
135 return false;
136
137 if (DL.getPointerSizeInBits(IntToPtr->getAddressSpace()) !=
138 DL.getTypeSizeInBits(IntToPtr->getOperand(0)->getType()))
139 return false;
140
141 SmallVector<Value *, 4> AvailablePtrVals;
142 for (auto Incoming : zip(PN.blocks(), PN.incoming_values())) {
143 BasicBlock *BB = std::get<0>(Incoming);
144 Value *Arg = std::get<1>(Incoming);
145
146 // First look backward:
147 if (auto *PI = dyn_cast<PtrToIntInst>(Arg)) {
148 AvailablePtrVals.emplace_back(PI->getOperand(0));
149 continue;
150 }
151
152 // Next look forward:
153 Value *ArgIntToPtr = nullptr;
154 for (User *U : Arg->users()) {
155 if (isa<IntToPtrInst>(U) && U->getType() == IntToPtr->getType() &&
156 (DT.dominates(cast<Instruction>(U), BB) ||
157 cast<Instruction>(U)->getParent() == BB)) {
158 ArgIntToPtr = U;
159 break;
160 }
161 }
162
163 if (ArgIntToPtr) {
164 AvailablePtrVals.emplace_back(ArgIntToPtr);
165 continue;
166 }
167
168 // If Arg is defined by a PHI, allow it. This will also create
169 // more opportunities iteratively.
170 if (isa<PHINode>(Arg)) {
171 AvailablePtrVals.emplace_back(Arg);
172 continue;
173 }
174
175 // For a single use integer load:
176 auto *LoadI = dyn_cast<LoadInst>(Arg);
177 if (!LoadI)
178 return false;
179
180 if (!LoadI->hasOneUse())
181 return false;
182
183 // Push the integer typed Load instruction into the available
184 // value set, and fix it up later when the pointer typed PHI
185 // is synthesized.
186 AvailablePtrVals.emplace_back(LoadI);
187 }
188
189 // Now search for a matching PHI
190 auto *BB = PN.getParent();
191 assert(AvailablePtrVals.size() == PN.getNumIncomingValues() &&
192 "Not enough available ptr typed incoming values");
193 PHINode *MatchingPtrPHI = nullptr;
194 unsigned NumPhis = 0;
195 for (PHINode &PtrPHI : BB->phis()) {
196 // FIXME: consider handling this in AggressiveInstCombine
197 if (NumPhis++ > MaxNumPhis)
198 return false;
199 if (&PtrPHI == &PN || PtrPHI.getType() != IntToPtr->getType())
200 continue;
201 if (any_of(zip(PN.blocks(), AvailablePtrVals),
202 [&](const auto &BlockAndValue) {
203 BasicBlock *BB = std::get<0>(BlockAndValue);
204 Value *V = std::get<1>(BlockAndValue);
205 return PtrPHI.getIncomingValueForBlock(BB) != V;
206 }))
207 continue;
208 MatchingPtrPHI = &PtrPHI;
209 break;
210 }
211
212 if (MatchingPtrPHI) {
213 assert(MatchingPtrPHI->getType() == IntToPtr->getType() &&
214 "Phi's Type does not match with IntToPtr");
215 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
216 // to make sure another transform can't undo it in the meantime.
217 replaceInstUsesWith(*IntToPtr, MatchingPtrPHI);
218 eraseInstFromFunction(*IntToPtr);
219 eraseInstFromFunction(PN);
220 return true;
221 }
222
223 // If it requires a conversion for every PHI operand, do not do it.
224 if (all_of(AvailablePtrVals, [&](Value *V) {
225 return (V->getType() != IntToPtr->getType()) || isa<IntToPtrInst>(V);
226 }))
227 return false;
228
229 // If any of the operands that require casting is a terminator
230 // instruction, do not do it. Similarly, do not do the transform if the value
231 // is a PHI in a block with no insertion point, for example a catchswitch
232 // block, since we will not be able to insert a cast after the PHI.
233 if (any_of(AvailablePtrVals, [&](Value *V) {
234 if (V->getType() == IntToPtr->getType())
235 return false;
236 auto *Inst = dyn_cast<Instruction>(V);
237 if (!Inst)
238 return false;
239 if (Inst->isTerminator())
240 return true;
241 auto *BB = Inst->getParent();
242 if (isa<PHINode>(Inst) && BB->getFirstInsertionPt() == BB->end())
243 return true;
244 return false;
245 }))
246 return false;
247
248 PHINode *NewPtrPHI = PHINode::Create(
249 IntToPtr->getType(), PN.getNumIncomingValues(), PN.getName() + ".ptr");
250
251 InsertNewInstBefore(NewPtrPHI, PN.getIterator());
252 SmallDenseMap<Value *, Instruction *> Casts;
253 for (auto Incoming : zip(PN.blocks(), AvailablePtrVals)) {
254 auto *IncomingBB = std::get<0>(Incoming);
255 auto *IncomingVal = std::get<1>(Incoming);
256
257 if (IncomingVal->getType() == IntToPtr->getType()) {
258 NewPtrPHI->addIncoming(IncomingVal, IncomingBB);
259 continue;
260 }
261
262 #ifndef NDEBUG
263 LoadInst *LoadI = dyn_cast<LoadInst>(IncomingVal);
264 assert((isa<PHINode>(IncomingVal) ||
265 IncomingVal->getType()->isPointerTy() ||
266 (LoadI && LoadI->hasOneUse())) &&
267 "Can not replace LoadInst with multiple uses");
268 #endif
269 // Need to insert a BitCast.
270 // For an integer Load instruction with a single use, the load + IntToPtr
271 // cast will be simplified into a pointer load:
272 // %v = load i64, i64* %a.ip, align 8
273 // %v.cast = inttoptr i64 %v to float **
274 // ==>
275 // %v.ptrp = bitcast i64 * %a.ip to float **
276 // %v.cast = load float *, float ** %v.ptrp, align 8
277 Instruction *&CI = Casts[IncomingVal];
278 if (!CI) {
279 CI = CastInst::CreateBitOrPointerCast(IncomingVal, IntToPtr->getType(),
280 IncomingVal->getName() + ".ptr");
281 if (auto *IncomingI = dyn_cast<Instruction>(IncomingVal)) {
282 BasicBlock::iterator InsertPos(IncomingI);
283 InsertPos++;
284 BasicBlock *BB = IncomingI->getParent();
285 if (isa<PHINode>(IncomingI))
286 InsertPos = BB->getFirstInsertionPt();
287 assert(InsertPos != BB->end() && "should have checked above");
288 InsertNewInstBefore(CI, InsertPos);
289 } else {
290 auto *InsertBB = &IncomingBB->getParent()->getEntryBlock();
291 InsertNewInstBefore(CI, InsertBB->getFirstInsertionPt());
292 }
293 }
294 NewPtrPHI->addIncoming(CI, IncomingBB);
295 }
296
297 // Explicitly replace the inttoptr (rather than inserting a ptrtoint) here,
298 // to make sure another transform can't undo it in the meantime.
299 replaceInstUsesWith(*IntToPtr, NewPtrPHI);
300 eraseInstFromFunction(*IntToPtr);
301 eraseInstFromFunction(PN);
302 return true;
303 }
304
305 // Remove RoundTrip IntToPtr/PtrToInt Cast on PHI-Operand and
306 // fold Phi-operand to bitcast.
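// A minimal IR sketch of the pattern (value names are illustrative, not taken
// from a specific test):
//   %i   = ptrtoint ptr %x to i64
//   %p   = inttoptr i64 %i to ptr        ; round-trips %x
//   %phi = phi ptr [ %p, %bb1 ], [ %q, %bb2 ]
//   %u   = ptrtoint ptr %phi to i64
// The %p operand of the phi is replaced directly with %x, removing the
// round-trip cast pair.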
307 Instruction *InstCombinerImpl::foldPHIArgIntToPtrToPHI(PHINode &PN) {
308 // convert ptr2int ( phi[ int2ptr(ptr2int(x))] ) --> ptr2int ( phi [ x ] )
309 // Make sure all uses of phi are ptr2int.
310 if (!all_of(PN.users(), [](User *U) { return isa<PtrToIntInst>(U); }))
311 return nullptr;
312
313 // Iterate over all operands, checking for round-trip casts that can be
314 // simplified away.
315 bool OperandWithRoundTripCast = false;
316 for (unsigned OpNum = 0; OpNum != PN.getNumIncomingValues(); ++OpNum) {
317 if (auto *NewOp =
318 simplifyIntToPtrRoundTripCast(PN.getIncomingValue(OpNum))) {
319 replaceOperand(PN, OpNum, NewOp);
320 OperandWithRoundTripCast = true;
321 }
322 }
323 if (!OperandWithRoundTripCast)
324 return nullptr;
325 return &PN;
326 }
327
328 /// If we have something like phi [insertvalue(a,b,0), insertvalue(c,d,0)],
329 /// turn this into a phi[a,c] and phi[b,d] and a single insertvalue.
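/// A minimal IR sketch of the fold (value and block names are illustrative):
///   %ivA = insertvalue { i32, i32 } %a, i32 %b, 0   ; in %bb1
///   %ivB = insertvalue { i32, i32 } %c, i32 %d, 0   ; in %bb2
///   %phi = phi { i32, i32 } [ %ivA, %bb1 ], [ %ivB, %bb2 ]
/// ==>
///   %a.pn = phi { i32, i32 } [ %a, %bb1 ], [ %c, %bb2 ]
///   %b.pn = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///   %phi  = insertvalue { i32, i32 } %a.pn, i32 %b.pn, 0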
330 Instruction *
331 InstCombinerImpl::foldPHIArgInsertValueInstructionIntoPHI(PHINode &PN) {
332 auto *FirstIVI = cast<InsertValueInst>(PN.getIncomingValue(0));
333
334 // Scan to see if all operands are `insertvalue`'s with the same indices,
335 // and all have a single use.
336 for (Value *V : drop_begin(PN.incoming_values())) {
337 auto *I = dyn_cast<InsertValueInst>(V);
338 if (!I || !I->hasOneUser() || I->getIndices() != FirstIVI->getIndices())
339 return nullptr;
340 }
341
342 // For each operand of an `insertvalue`
343 std::array<PHINode *, 2> NewOperands;
344 for (int OpIdx : {0, 1}) {
345 auto *&NewOperand = NewOperands[OpIdx];
346 // Create a new PHI node to receive the values the operand has in each
347 // incoming basic block.
348 NewOperand = PHINode::Create(
349 FirstIVI->getOperand(OpIdx)->getType(), PN.getNumIncomingValues(),
350 FirstIVI->getOperand(OpIdx)->getName() + ".pn");
351 // And populate each operand's PHI with said values.
352 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
353 NewOperand->addIncoming(
354 cast<InsertValueInst>(std::get<1>(Incoming))->getOperand(OpIdx),
355 std::get<0>(Incoming));
356 InsertNewInstBefore(NewOperand, PN.getIterator());
357 }
358
359 // And finally, create `insertvalue` over the newly-formed PHI nodes.
360 auto *NewIVI = InsertValueInst::Create(NewOperands[0], NewOperands[1],
361 FirstIVI->getIndices(), PN.getName());
362
363 PHIArgMergedDebugLoc(NewIVI, PN);
364 ++NumPHIsOfInsertValues;
365 return NewIVI;
366 }
367
368 /// If we have something like phi [extractvalue(a,0), extractvalue(b,0)],
369 /// turn this into a phi[a,b] and a single extractvalue.
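/// A minimal IR sketch of the fold (value and block names are illustrative):
///   %evA = extractvalue { i32, i32 } %a, 0          ; in %bb1
///   %evB = extractvalue { i32, i32 } %b, 0          ; in %bb2
///   %phi = phi i32 [ %evA, %bb1 ], [ %evB, %bb2 ]
/// ==>
///   %a.pn = phi { i32, i32 } [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi  = extractvalue { i32, i32 } %a.pn, 0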
370 Instruction *
371 InstCombinerImpl::foldPHIArgExtractValueInstructionIntoPHI(PHINode &PN) {
372 auto *FirstEVI = cast<ExtractValueInst>(PN.getIncomingValue(0));
373
374 // Scan to see if all operands are `extractvalue`'s with the same indices,
375 // and all have a single use.
376 for (Value *V : drop_begin(PN.incoming_values())) {
377 auto *I = dyn_cast<ExtractValueInst>(V);
378 if (!I || !I->hasOneUser() || I->getIndices() != FirstEVI->getIndices() ||
379 I->getAggregateOperand()->getType() !=
380 FirstEVI->getAggregateOperand()->getType())
381 return nullptr;
382 }
383
384 // Create a new PHI node to receive the values the aggregate operand has
385 // in each incoming basic block.
386 auto *NewAggregateOperand = PHINode::Create(
387 FirstEVI->getAggregateOperand()->getType(), PN.getNumIncomingValues(),
388 FirstEVI->getAggregateOperand()->getName() + ".pn");
389 // And populate the PHI with said values.
390 for (auto Incoming : zip(PN.blocks(), PN.incoming_values()))
391 NewAggregateOperand->addIncoming(
392 cast<ExtractValueInst>(std::get<1>(Incoming))->getAggregateOperand(),
393 std::get<0>(Incoming));
394 InsertNewInstBefore(NewAggregateOperand, PN.getIterator());
395
396 // And finally, create `extractvalue` over the newly-formed PHI nodes.
397 auto *NewEVI = ExtractValueInst::Create(NewAggregateOperand,
398 FirstEVI->getIndices(), PN.getName());
399
400 PHIArgMergedDebugLoc(NewEVI, PN);
401 ++NumPHIsOfExtractValues;
402 return NewEVI;
403 }
404
405 /// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
406 /// adds all have a single user, turn this into a phi and a single binop.
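/// A minimal IR sketch of the fold (value and block names are illustrative):
///   %addA = add i32 %a, %b                          ; in %bb1
///   %addB = add i32 %a, %c                          ; in %bb2
///   %phi  = phi i32 [ %addA, %bb1 ], [ %addB, %bb2 ]
/// ==>
///   %b.pn = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %phi  = add i32 %a, %b.pn
/// Only one operand position may differ across the incoming values;
/// otherwise two new phis would be needed (see the check below).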
407 Instruction *InstCombinerImpl::foldPHIArgBinOpIntoPHI(PHINode &PN) {
408 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
409 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
410 unsigned Opc = FirstInst->getOpcode();
411 Value *LHSVal = FirstInst->getOperand(0);
412 Value *RHSVal = FirstInst->getOperand(1);
413
414 Type *LHSType = LHSVal->getType();
415 Type *RHSType = RHSVal->getType();
416
417 // Scan to see if all operands are the same opcode, and all have one user.
418 for (Value *V : drop_begin(PN.incoming_values())) {
419 Instruction *I = dyn_cast<Instruction>(V);
420 if (!I || I->getOpcode() != Opc || !I->hasOneUser() ||
421 // Verify type of the LHS matches so we don't fold cmp's of different
422 // types.
423 I->getOperand(0)->getType() != LHSType ||
424 I->getOperand(1)->getType() != RHSType)
425 return nullptr;
426
427 // If they are CmpInst instructions, check their predicates
428 if (CmpInst *CI = dyn_cast<CmpInst>(I))
429 if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
430 return nullptr;
431
432 // Keep track of which operand needs a phi node.
433 if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
434 if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
435 }
436
437 // If both LHS and RHS would need a PHI, don't do this transformation,
438 // because it would increase the number of PHIs entering the block,
439 // which leads to higher register pressure. This is especially
440 // bad when the PHIs are in the header of a loop.
441 if (!LHSVal && !RHSVal)
442 return nullptr;
443
444 // Otherwise, this is safe to transform!
445
446 Value *InLHS = FirstInst->getOperand(0);
447 Value *InRHS = FirstInst->getOperand(1);
448 PHINode *NewLHS = nullptr, *NewRHS = nullptr;
449 if (!LHSVal) {
450 NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
451 FirstInst->getOperand(0)->getName() + ".pn");
452 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
453 InsertNewInstBefore(NewLHS, PN.getIterator());
454 LHSVal = NewLHS;
455 }
456
457 if (!RHSVal) {
458 NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
459 FirstInst->getOperand(1)->getName() + ".pn");
460 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
461 InsertNewInstBefore(NewRHS, PN.getIterator());
462 RHSVal = NewRHS;
463 }
464
465 // Add all operands to the new PHIs.
466 if (NewLHS || NewRHS) {
467 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
468 BasicBlock *InBB = std::get<0>(Incoming);
469 Value *InVal = std::get<1>(Incoming);
470 Instruction *InInst = cast<Instruction>(InVal);
471 if (NewLHS) {
472 Value *NewInLHS = InInst->getOperand(0);
473 NewLHS->addIncoming(NewInLHS, InBB);
474 }
475 if (NewRHS) {
476 Value *NewInRHS = InInst->getOperand(1);
477 NewRHS->addIncoming(NewInRHS, InBB);
478 }
479 }
480 }
481
482 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
483 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
484 LHSVal, RHSVal);
485 PHIArgMergedDebugLoc(NewCI, PN);
486 return NewCI;
487 }
488
489 BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
490 BinaryOperator *NewBinOp =
491 BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
492
493 NewBinOp->copyIRFlags(PN.getIncomingValue(0));
494
495 for (Value *V : drop_begin(PN.incoming_values()))
496 NewBinOp->andIRFlags(V);
497
498 PHIArgMergedDebugLoc(NewBinOp, PN);
499 return NewBinOp;
500 }
501
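/// If all incoming values of the PHI are GEPs with the same source element
/// type and operand count, try to merge them into a single GEP whose one
/// differing operand becomes a new PHI. A minimal IR sketch (value and block
/// names are illustrative):
///   %gepA = getelementptr i32, ptr %base, i64 %i    ; in %bb1
///   %gepB = getelementptr i32, ptr %base, i64 %j    ; in %bb2
///   %phi  = phi ptr [ %gepA, %bb1 ], [ %gepB, %bb2 ]
/// ==>
///   %i.pn = phi i64 [ %i, %bb1 ], [ %j, %bb2 ]
///   %phi  = getelementptr i32, ptr %base, i64 %i.pn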
502 Instruction *InstCombinerImpl::foldPHIArgGEPIntoPHI(PHINode &PN) {
503 GetElementPtrInst *FirstInst = cast<GetElementPtrInst>(PN.getIncomingValue(0));
504
505 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
506 FirstInst->op_end());
507 // This is true if all GEP bases are allocas and if all indices into them are
508 // constants.
509 bool AllBasePointersAreAllocas = true;
510
511 // We don't want to replace this phi if the replacement would require
512 // more than one phi, which leads to higher register pressure. This is
513 // especially bad when the PHIs are in the header of a loop.
514 bool NeededPhi = false;
515
516 GEPNoWrapFlags NW = GEPNoWrapFlags::all();
517
518 // Scan to see if all operands are the same opcode, and all have one user.
519 for (Value *V : drop_begin(PN.incoming_values())) {
520 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V);
521 if (!GEP || !GEP->hasOneUser() ||
522 GEP->getSourceElementType() != FirstInst->getSourceElementType() ||
523 GEP->getNumOperands() != FirstInst->getNumOperands())
524 return nullptr;
525
526 NW &= GEP->getNoWrapFlags();
527
528 // Keep track of whether or not all GEPs are of alloca pointers.
529 if (AllBasePointersAreAllocas &&
530 (!isa<AllocaInst>(GEP->getOperand(0)) ||
531 !GEP->hasAllConstantIndices()))
532 AllBasePointersAreAllocas = false;
533
534 // Compare the operand lists.
535 for (unsigned Op = 0, E = FirstInst->getNumOperands(); Op != E; ++Op) {
536 if (FirstInst->getOperand(Op) == GEP->getOperand(Op))
537 continue;
538
539 // Don't merge two GEPs when two operands differ (introducing phi nodes)
540 // if one of the GEPs has a constant for the index. The index may be
541 // substantially cheaper to compute for the constant, so making it a
542 // variable index could pessimize the path. This also handles the case
543 // for struct indices, which must always be constant.
544 if (isa<ConstantInt>(FirstInst->getOperand(Op)) ||
545 isa<ConstantInt>(GEP->getOperand(Op)))
546 return nullptr;
547
548 if (FirstInst->getOperand(Op)->getType() !=
549 GEP->getOperand(Op)->getType())
550 return nullptr;
551
552 // If we already needed a PHI for an earlier operand, and another operand
553 // also requires a PHI, we'd be introducing more PHIs than we're
554 // eliminating, which increases register pressure on entry to the PHI's
555 // block.
556 if (NeededPhi)
557 return nullptr;
558
559 FixedOperands[Op] = nullptr; // Needs a PHI.
560 NeededPhi = true;
561 }
562 }
563
564 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
565 // bother doing this transformation. At best, this will just save a bit of
566 // offset calculation, but all the predecessors will have to materialize the
567 // stack address into a register anyway. We'd actually rather *clone* the
568 // load up into the predecessors so that we have a load of a gep of an alloca,
569 // which can usually all be folded into the load.
570 if (AllBasePointersAreAllocas)
571 return nullptr;
572
573 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
574 // that is variable.
575 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
576
577 bool HasAnyPHIs = false;
578 for (unsigned I = 0, E = FixedOperands.size(); I != E; ++I) {
579 if (FixedOperands[I])
580 continue; // operand doesn't need a phi.
581 Value *FirstOp = FirstInst->getOperand(I);
582 PHINode *NewPN =
583 PHINode::Create(FirstOp->getType(), E, FirstOp->getName() + ".pn");
584 InsertNewInstBefore(NewPN, PN.getIterator());
585
586 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
587 OperandPhis[I] = NewPN;
588 FixedOperands[I] = NewPN;
589 HasAnyPHIs = true;
590 }
591
592 // Add all operands to the new PHIs.
593 if (HasAnyPHIs) {
594 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
595 BasicBlock *InBB = std::get<0>(Incoming);
596 Value *InVal = std::get<1>(Incoming);
597 GetElementPtrInst *InGEP = cast<GetElementPtrInst>(InVal);
598
599 for (unsigned Op = 0, E = OperandPhis.size(); Op != E; ++Op)
600 if (PHINode *OpPhi = OperandPhis[Op])
601 OpPhi->addIncoming(InGEP->getOperand(Op), InBB);
602 }
603 }
604
605 Value *Base = FixedOperands[0];
606 GetElementPtrInst *NewGEP =
607 GetElementPtrInst::Create(FirstInst->getSourceElementType(), Base,
608 ArrayRef(FixedOperands).slice(1), NW);
609 PHIArgMergedDebugLoc(NewGEP, PN);
610 return NewGEP;
611 }
612
613 /// Return true if we know that it is safe to sink the load out of the block
614 /// that defines it. This means that it must be obvious the value of the load is
615 /// not changed from the point of the load to the end of the block it is in.
616 ///
617 /// Finally, it is safe, but not profitable, to sink a load targeting a
618 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
619 /// to a register.
620 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
621 BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
622
623 for (++BBI; BBI != E; ++BBI)
624 if (BBI->mayWriteToMemory()) {
625 // Calls that only access inaccessible memory do not block sinking the
626 // load.
627 if (auto *CB = dyn_cast<CallBase>(BBI))
628 if (CB->onlyAccessesInaccessibleMemory())
629 continue;
630 return false;
631 }
632
633 // Check for non-address taken alloca. If not address-taken already, it isn't
634 // profitable to do this xform.
635 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
636 bool IsAddressTaken = false;
637 for (User *U : AI->users()) {
638 if (isa<LoadInst>(U)) continue;
639 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
640 // If storing TO the alloca, then the address isn't taken.
641 if (SI->getOperand(1) == AI) continue;
642 }
643 IsAddressTaken = true;
644 break;
645 }
646
647 if (!IsAddressTaken && AI->isStaticAlloca())
648 return false;
649 }
650
651 // If this load is a load from a GEP with a constant offset from an alloca,
652 // then we don't want to sink it. In its present form, it will be
653 // load [constant stack offset]. Sinking it will cause us to have to
654 // materialize the stack addresses in each predecessor in a register only to
655 // do a shared load from register in the successor.
656 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
657 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
658 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
659 return false;
660
661 return true;
662 }
663
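/// If every incoming value of the PHI is a load that is safe and profitable
/// to sink (same address space and volatility, single user, non-atomic), PHI
/// the loaded pointers together and emit a single load of that PHI. A minimal
/// IR sketch (value and block names are illustrative):
///   %ldA = load i32, ptr %pA                        ; in %bb1
///   %ldB = load i32, ptr %pB                        ; in %bb2
///   %phi = phi i32 [ %ldA, %bb1 ], [ %ldB, %bb2 ]
/// ==>
///   %phi.in = phi ptr [ %pA, %bb1 ], [ %pB, %bb2 ]
///   %phi    = load i32, ptr %phi.in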
664 Instruction *InstCombinerImpl::foldPHIArgLoadIntoPHI(PHINode &PN) {
665 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
666
667 // Can't forward swifterror through a phi.
668 if (FirstLI->getOperand(0)->isSwiftError())
669 return nullptr;
670
671 // FIXME: This is overconservative; this transform is allowed in some cases
672 // for atomic operations.
673 if (FirstLI->isAtomic())
674 return nullptr;
675
676 // When processing loads, we need to propagate two bits of information to the
677 // sunk load: whether it is volatile, and what its alignment is.
678 bool IsVolatile = FirstLI->isVolatile();
679 Align LoadAlignment = FirstLI->getAlign();
680 const unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
681
682 // We can't sink the load if the loaded value could be modified between the
683 // load and the PHI.
684 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
685 !isSafeAndProfitableToSinkLoad(FirstLI))
686 return nullptr;
687
688 // If the PHI is of volatile loads and the load block has multiple
689 // successors, sinking it would remove a load of the volatile value from
690 // the path through the other successor.
691 if (IsVolatile &&
692 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
693 return nullptr;
694
695 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
696 BasicBlock *InBB = std::get<0>(Incoming);
697 Value *InVal = std::get<1>(Incoming);
698 LoadInst *LI = dyn_cast<LoadInst>(InVal);
699 if (!LI || !LI->hasOneUser() || LI->isAtomic())
700 return nullptr;
701
702 // Make sure all arguments are the same type of operation.
703 if (LI->isVolatile() != IsVolatile ||
704 LI->getPointerAddressSpace() != LoadAddrSpace)
705 return nullptr;
706
707 // Can't forward swifterror through a phi.
708 if (LI->getOperand(0)->isSwiftError())
709 return nullptr;
710
711 // We can't sink the load if the loaded value could be modified between
712 // the load and the PHI.
713 if (LI->getParent() != InBB || !isSafeAndProfitableToSinkLoad(LI))
714 return nullptr;
715
716 LoadAlignment = std::min(LoadAlignment, LI->getAlign());
717
718 // If the PHI is of volatile loads and the load block has multiple
719 // successors, sinking it would remove a load of the volatile value from
720 // the path through the other successor.
721 if (IsVolatile && LI->getParent()->getTerminator()->getNumSuccessors() != 1)
722 return nullptr;
723 }
724
725 // Okay, they are all the same operation. Create a new PHI node of the
726 // correct type, and PHI together all of the LHS's of the instructions.
727 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
728 PN.getNumIncomingValues(),
729 PN.getName()+".in");
730
731 Value *InVal = FirstLI->getOperand(0);
732 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
733 LoadInst *NewLI =
734 new LoadInst(FirstLI->getType(), NewPN, "", IsVolatile, LoadAlignment);
735
736 unsigned KnownIDs[] = {
737 LLVMContext::MD_tbaa,
738 LLVMContext::MD_range,
739 LLVMContext::MD_invariant_load,
740 LLVMContext::MD_alias_scope,
741 LLVMContext::MD_noalias,
742 LLVMContext::MD_nonnull,
743 LLVMContext::MD_align,
744 LLVMContext::MD_dereferenceable,
745 LLVMContext::MD_dereferenceable_or_null,
746 LLVMContext::MD_access_group,
747 LLVMContext::MD_noundef,
748 };
749
750 for (unsigned ID : KnownIDs)
751 NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
752
753 // Add all operands to the new PHI and combine TBAA metadata.
754 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
755 BasicBlock *BB = std::get<0>(Incoming);
756 Value *V = std::get<1>(Incoming);
757 LoadInst *LI = cast<LoadInst>(V);
758 combineMetadata(NewLI, LI, KnownIDs, true);
759 Value *NewInVal = LI->getOperand(0);
760 if (NewInVal != InVal)
761 InVal = nullptr;
762 NewPN->addIncoming(NewInVal, BB);
763 }
764
765 if (InVal) {
766 // The new PHI unions all of the same values together. This is really
767 // common, so we handle it intelligently here for compile-time speed.
768 NewLI->setOperand(0, InVal);
769 delete NewPN;
770 } else {
771 InsertNewInstBefore(NewPN, PN.getIterator());
772 }
773
774 // If this was a volatile load that we are merging, make sure to loop through
775 // and mark all the input loads as non-volatile. If we don't do this, we will
776 // insert a new volatile load and the old ones will not be deletable.
777 if (IsVolatile)
778 for (Value *IncValue : PN.incoming_values())
779 cast<LoadInst>(IncValue)->setVolatile(false);
780
781 PHIArgMergedDebugLoc(NewLI, PN);
782 return NewLI;
783 }
784
785 /// TODO: This function could handle other cast types, but then it might
786 /// require special-casing a cast from the 'i1' type. See the comment in
787 /// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
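/// A minimal IR sketch of the fold, assuming at least three incoming values
/// of which two or more are zexts from the same narrow type and the rest are
/// losslessly truncatable constants (names are illustrative):
///   %x.z = zext i8 %x to i32                        ; in %bb1
///   %y.z = zext i8 %y to i32                        ; in %bb2
///   %phi = phi i32 [ %x.z, %bb1 ], [ %y.z, %bb2 ], [ 7, %bb3 ]
/// ==>
///   %phi.shrunk = phi i8 [ %x, %bb1 ], [ %y, %bb2 ], [ 7, %bb3 ]
///   %phi        = zext i8 %phi.shrunk to i32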
788 Instruction *InstCombinerImpl::foldPHIArgZextsIntoPHI(PHINode &Phi) {
789 // We cannot create a new instruction after the PHI if the terminator is an
790 // EHPad because there is no valid insertion point.
791 if (Instruction *TI = Phi.getParent()->getTerminator())
792 if (TI->isEHPad())
793 return nullptr;
794
795 // Early exit for the common case of a phi with two operands. These are
796 // handled elsewhere. See the comment below where we check the count of zexts
797 // and constants for more details.
798 unsigned NumIncomingValues = Phi.getNumIncomingValues();
799 if (NumIncomingValues < 3)
800 return nullptr;
801
802 // Find the narrower type specified by the first zext.
803 Type *NarrowType = nullptr;
804 for (Value *V : Phi.incoming_values()) {
805 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
806 NarrowType = Zext->getSrcTy();
807 break;
808 }
809 }
810 if (!NarrowType)
811 return nullptr;
812
813 // Walk the phi operands checking that we only have zexts or constants that
814 // we can shrink for free. Store the new operands for the new phi.
815 SmallVector<Value *, 4> NewIncoming;
816 unsigned NumZexts = 0;
817 unsigned NumConsts = 0;
818 for (Value *V : Phi.incoming_values()) {
819 if (auto *Zext = dyn_cast<ZExtInst>(V)) {
820 // All zexts must be identical and have one user.
821 if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUser())
822 return nullptr;
823 NewIncoming.push_back(Zext->getOperand(0));
824 NumZexts++;
825 } else if (auto *C = dyn_cast<Constant>(V)) {
826 // Make sure that constants can fit in the new type.
827 Constant *Trunc = getLosslessUnsignedTrunc(C, NarrowType);
828 if (!Trunc)
829 return nullptr;
830 NewIncoming.push_back(Trunc);
831 NumConsts++;
832 } else {
833 // If it's not a cast or a constant, bail out.
834 return nullptr;
835 }
836 }
837
838 // The more common cases of a phi with no constant operands or just one
839 // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
840 // respectively. foldOpIntoPhi() wants to do the opposite transform that is
841 // performed here. It tries to replicate a cast in the phi operand's basic
842 // block to expose other folding opportunities. Thus, InstCombine will
843 // infinite loop without this check.
844 if (NumConsts == 0 || NumZexts < 2)
845 return nullptr;
846
847 // All incoming values are zexts or constants that are safe to truncate.
848 // Create a new phi node of the narrow type, phi together all of the new
849 // operands, and zext the result back to the original type.
850 PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
851 Phi.getName() + ".shrunk");
852 for (unsigned I = 0; I != NumIncomingValues; ++I)
853 NewPhi->addIncoming(NewIncoming[I], Phi.getIncomingBlock(I));
854
855 InsertNewInstBefore(NewPhi, Phi.getIterator());
856 return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
857 }
858
859 /// If all operands to a PHI node are the same "unary" operator and they all are
860 /// only used by the PHI, PHI together their inputs, and do the operation once,
861 /// to the result of the PHI.
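/// A minimal IR sketch of the constant-RHS case (names are illustrative):
///   %addA = add i32 %a, 42                          ; in %bb1
///   %addB = add i32 %b, 42                          ; in %bb2
///   %phi  = phi i32 [ %addA, %bb1 ], [ %addB, %bb2 ]
/// ==>
///   %phi.in = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
///   %phi    = add i32 %phi.in, 42
/// Casts and compares against the same constant are handled analogously.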
862 Instruction *InstCombinerImpl::foldPHIArgOpIntoPHI(PHINode &PN) {
863 // We cannot create a new instruction after the PHI if the terminator is an
864 // EHPad because there is no valid insertion point.
865 if (Instruction *TI = PN.getParent()->getTerminator())
866 if (TI->isEHPad())
867 return nullptr;
868
869 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
870
871 if (isa<GetElementPtrInst>(FirstInst))
872 return foldPHIArgGEPIntoPHI(PN);
873 if (isa<LoadInst>(FirstInst))
874 return foldPHIArgLoadIntoPHI(PN);
875 if (isa<InsertValueInst>(FirstInst))
876 return foldPHIArgInsertValueInstructionIntoPHI(PN);
877 if (isa<ExtractValueInst>(FirstInst))
878 return foldPHIArgExtractValueInstructionIntoPHI(PN);
879
880 // Scan the instruction, looking for input operations that can be folded away.
881 // If all input operands to the phi are the same instruction (e.g. a cast from
882 // the same type or "+42") we can pull the operation through the PHI, reducing
883 // code size and simplifying code.
884 Constant *ConstantOp = nullptr;
885 Type *CastSrcTy = nullptr;
886
887 if (isa<CastInst>(FirstInst)) {
888 CastSrcTy = FirstInst->getOperand(0)->getType();
889
890 // Be careful about transforming integer PHIs. We don't want to pessimize
891 // the code by turning an i32 into an i1293.
892 if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
893 if (!shouldChangeType(PN.getType(), CastSrcTy))
894 return nullptr;
895 }
896 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
897 // Can fold binop, compare or shift here if the RHS is a constant,
898 // otherwise call FoldPHIArgBinOpIntoPHI.
899 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
900 if (!ConstantOp)
901 return foldPHIArgBinOpIntoPHI(PN);
902 } else {
903 return nullptr; // Cannot fold this operation.
904 }
905
906 // Check to see if all arguments are the same operation.
907 for (Value *V : drop_begin(PN.incoming_values())) {
908 Instruction *I = dyn_cast<Instruction>(V);
909 if (!I || !I->hasOneUser() || !I->isSameOperationAs(FirstInst))
910 return nullptr;
911 if (CastSrcTy) {
912 if (I->getOperand(0)->getType() != CastSrcTy)
913 return nullptr; // Cast operation must match.
914 } else if (I->getOperand(1) != ConstantOp) {
915 return nullptr;
916 }
917 }
918
919 // Okay, they are all the same operation. Create a new PHI node of the
920 // correct type, and PHI together all of the LHS's of the instructions.
921 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
922 PN.getNumIncomingValues(),
923 PN.getName()+".in");
924
925 Value *InVal = FirstInst->getOperand(0);
926 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
927
928 // Add all operands to the new PHI.
929 for (auto Incoming : drop_begin(zip(PN.blocks(), PN.incoming_values()))) {
930 BasicBlock *BB = std::get<0>(Incoming);
931 Value *V = std::get<1>(Incoming);
932 Value *NewInVal = cast<Instruction>(V)->getOperand(0);
933 if (NewInVal != InVal)
934 InVal = nullptr;
935 NewPN->addIncoming(NewInVal, BB);
936 }
937
938 Value *PhiVal;
939 if (InVal) {
940 // The new PHI unions all of the same values together. This is really
941 // common, so we handle it intelligently here for compile-time speed.
942 PhiVal = InVal;
943 delete NewPN;
944 } else {
945 InsertNewInstBefore(NewPN, PN.getIterator());
946 PhiVal = NewPN;
947 }
948
949 // Insert and return the new operation.
950 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
951 CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
952 PN.getType());
953 PHIArgMergedDebugLoc(NewCI, PN);
954 return NewCI;
955 }
956
957 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
958 BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
959 BinOp->copyIRFlags(PN.getIncomingValue(0));
960
961 for (Value *V : drop_begin(PN.incoming_values()))
962 BinOp->andIRFlags(V);
963
964 PHIArgMergedDebugLoc(BinOp, PN);
965 return BinOp;
966 }
967
968 CmpInst *CIOp = cast<CmpInst>(FirstInst);
969 CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
970 PhiVal, ConstantOp);
971 PHIArgMergedDebugLoc(NewCI, PN);
972 return NewCI;
973 }
974
975 /// Return true if this PHI node is only used by a PHI node cycle that is dead.
976 static bool isDeadPHICycle(PHINode *PN,
977 SmallPtrSetImpl<PHINode *> &PotentiallyDeadPHIs) {
978 if (PN->use_empty()) return true;
979 if (!PN->hasOneUse()) return false;
980
981 // Remember this node, and if we find the cycle, return.
982 if (!PotentiallyDeadPHIs.insert(PN).second)
983 return true;
984
985 // Don't scan crazily complex things.
986 if (PotentiallyDeadPHIs.size() == 16)
987 return false;
988
989 if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
990 return isDeadPHICycle(PU, PotentiallyDeadPHIs);
991
992 return false;
993 }
994
995 /// Return true if this phi node is always equal to NonPhiInVal.
996 /// This happens with mutually cyclic phi nodes like:
997 /// z = some value; x = phi (y, z); y = phi (x, z)
998 static bool PHIsEqualValue(PHINode *PN, Value *&NonPhiInVal,
999 SmallPtrSetImpl<PHINode *> &ValueEqualPHIs) {
1000 // See if we already saw this PHI node.
1001 if (!ValueEqualPHIs.insert(PN).second)
1002 return true;
1003
1004 // Don't scan crazily complex things.
1005 if (ValueEqualPHIs.size() == 16)
1006 return false;
1007
1008 // Scan the operands to see if they are either phi nodes or are equal to
1009 // the value.
1010 for (Value *Op : PN->incoming_values()) {
1011 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
1012 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs)) {
1013 if (NonPhiInVal)
1014 return false;
1015 NonPhiInVal = OpPN;
1016 }
1017 } else if (Op != NonPhiInVal)
1018 return false;
1019 }
1020
1021 return true;
1022 }
1023
1024 /// Return an existing non-zero constant if this phi node has one, otherwise
1025 /// return constant 1.
1026 static ConstantInt *getAnyNonZeroConstInt(PHINode &PN) {
1027 assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
1028 for (Value *V : PN.operands())
1029 if (auto *ConstVA = dyn_cast<ConstantInt>(V))
1030 if (!ConstVA->isZero())
1031 return ConstVA;
1032 return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
1033 }
1034
1035 namespace {
1036 struct PHIUsageRecord {
1037 unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
1038 unsigned Shift; // The amount shifted.
1039 Instruction *Inst; // The trunc instruction.
1040
1041 PHIUsageRecord(unsigned Pn, unsigned Sh, Instruction *User)
1042 : PHIId(Pn), Shift(Sh), Inst(User) {}
1043
1044 bool operator<(const PHIUsageRecord &RHS) const {
1045 if (PHIId < RHS.PHIId) return true;
1046 if (PHIId > RHS.PHIId) return false;
1047 if (Shift < RHS.Shift) return true;
1048 if (Shift > RHS.Shift) return false;
1049 return Inst->getType()->getPrimitiveSizeInBits() <
1050 RHS.Inst->getType()->getPrimitiveSizeInBits();
1051 }
1052 };
1053
1054 struct LoweredPHIRecord {
1055 PHINode *PN; // The PHI that was lowered.
1056 unsigned Shift; // The amount shifted.
1057 unsigned Width; // The width extracted.
1058
1059 LoweredPHIRecord(PHINode *Phi, unsigned Sh, Type *Ty)
1060 : PN(Phi), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
1061
1062 // Ctor form used by DenseMap.
1063 LoweredPHIRecord(PHINode *Phi, unsigned Sh) : PN(Phi), Shift(Sh), Width(0) {}
1064 };
1065 } // namespace
1066
1067 namespace llvm {
1068 template<>
1069 struct DenseMapInfo<LoweredPHIRecord> {
1070 static inline LoweredPHIRecord getEmptyKey() {
1071 return LoweredPHIRecord(nullptr, 0);
1072 }
1073 static inline LoweredPHIRecord getTombstoneKey() {
1074 return LoweredPHIRecord(nullptr, 1);
1075 }
1076 static unsigned getHashValue(const LoweredPHIRecord &Val) {
1077 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
1078 (Val.Width>>3);
1079 }
1080 static bool isEqual(const LoweredPHIRecord &LHS,
1081 const LoweredPHIRecord &RHS) {
1082 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
1083 LHS.Width == RHS.Width;
1084 }
1085 };
1086 } // namespace llvm
1087
1088
1089 /// This is an integer PHI and we know that it has an illegal type: see if it is
1090 /// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
1091 /// the various pieces being extracted. This sort of thing is introduced when
1092 /// SROA promotes an aggregate to large integer values.
1093 ///
1094 /// TODO: The user of the trunc may be a bitcast to float/double/vector or an
1095 /// inttoptr. We should produce new PHIs in the right type.
1096 ///
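/// A sketch of the slicing, assuming i64 is illegal on the target (names are
/// illustrative):
///   %phi = phi i64 [ %a, %bb1 ], [ %b, %bb2 ]
///   %lo  = trunc i64 %phi to i32
///   %hiS = lshr i64 %phi, 32
///   %hi  = trunc i64 %hiS to i32
/// becomes two i32 phis, %phi.off0 and %phi.off32, whose incoming values are
/// the corresponding trunc / lshr+trunc extracts emitted in %bb1 and %bb2.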
1097 Instruction *InstCombinerImpl::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
1098 // PHIUsers - Keep track of all of the truncated values extracted from a set
1099 // of PHIs, along with their offset. These are the things we want to rewrite.
1100 SmallVector<PHIUsageRecord, 16> PHIUsers;
1101
1102 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
1103 // nodes which are extracted from. PHIsInspected is a set we use to avoid
1104 // revisiting PHIs, and PHIsToSlice is an ordered list of PHIs that we need
1105 // to check the uses of (to ensure they are all extracts).
1106 SmallVector<PHINode*, 8> PHIsToSlice;
1107 SmallPtrSet<PHINode*, 8> PHIsInspected;
1108
1109 PHIsToSlice.push_back(&FirstPhi);
1110 PHIsInspected.insert(&FirstPhi);
1111
1112 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
1113 PHINode *PN = PHIsToSlice[PHIId];
1114
1115 // Scan the input list of the PHI. If any input is an invoke, and if the
1116 // input is defined in the predecessor, then we cannot split the critical
1117 // edge that would be required to insert a truncate. Because of this, we have
1118 // to bail out.
1119 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1120 BasicBlock *BB = std::get<0>(Incoming);
1121 Value *V = std::get<1>(Incoming);
1122 InvokeInst *II = dyn_cast<InvokeInst>(V);
1123 if (!II)
1124 continue;
1125 if (II->getParent() != BB)
1126 continue;
1127
1128 // If we have a phi, and if it's directly in the predecessor, then we have
1129 // a critical edge where we need to put the truncate. Since we can't
1130 // split the edge in instcombine, we have to bail out.
1131 return nullptr;
1132 }
1133
1134 // If the incoming value is a PHI node before a catchswitch, we cannot
1135 // extract the value within that BB because we cannot insert any non-PHI
1136 // instructions in the BB.
1137 for (auto *Pred : PN->blocks())
1138 if (Pred->getFirstInsertionPt() == Pred->end())
1139 return nullptr;
1140
1141 for (User *U : PN->users()) {
1142 Instruction *UserI = cast<Instruction>(U);
1143
1144 // If the user is a PHI, inspect its uses recursively.
1145 if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
1146 if (PHIsInspected.insert(UserPN).second)
1147 PHIsToSlice.push_back(UserPN);
1148 continue;
1149 }
1150
1151 // Truncates are always ok.
1152 if (isa<TruncInst>(UserI)) {
1153 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
1154 continue;
1155 }
1156
1157 // Otherwise it must be a lshr which can only be used by one trunc.
1158 if (UserI->getOpcode() != Instruction::LShr ||
1159 !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
1160 !isa<ConstantInt>(UserI->getOperand(1)))
1161 return nullptr;
1162
1163 // Bail on out of range shifts.
1164 unsigned SizeInBits = UserI->getType()->getScalarSizeInBits();
1165 if (cast<ConstantInt>(UserI->getOperand(1))->getValue().uge(SizeInBits))
1166 return nullptr;
1167
1168 unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
1169 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
1170 }
1171 }
1172
1173 // If we have no users, they must be all self uses, just nuke the PHI.
1174 if (PHIUsers.empty())
1175 return replaceInstUsesWith(FirstPhi, PoisonValue::get(FirstPhi.getType()));
1176
1177 // If this phi node is transformable, create new PHIs for all the pieces
1178 // extracted out of it. First, sort the users by their offset and size.
1179 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
1180
1181 LLVM_DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
1182 for (unsigned I = 1; I != PHIsToSlice.size(); ++I) dbgs()
1183 << "AND USER PHI #" << I << ": " << *PHIsToSlice[I] << '\n');
1184
1185 // PredValues - This is a temporary used when rewriting PHI nodes. It is
1186 // hoisted out here to avoid construction/destruction thrashing.
1187 DenseMap<BasicBlock*, Value*> PredValues;
1188
1189 // ExtractedVals - Each new PHI we introduce is saved here so we don't
1190 // introduce redundant PHIs.
1191 DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
1192
1193 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
1194 unsigned PHIId = PHIUsers[UserI].PHIId;
1195 PHINode *PN = PHIsToSlice[PHIId];
1196 unsigned Offset = PHIUsers[UserI].Shift;
1197 Type *Ty = PHIUsers[UserI].Inst->getType();
1198
1199 PHINode *EltPHI;
1200
1201 // If we've already lowered a user like this, reuse the previously lowered
1202 // value.
1203 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
1204
1205 // Otherwise, create the new PHI node for this user.
1206 EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
1207 PN->getName() + ".off" + Twine(Offset),
1208 PN->getIterator());
1209 assert(EltPHI->getType() != PN->getType() &&
1210 "Truncate didn't shrink phi?");
1211
1212 for (auto Incoming : zip(PN->blocks(), PN->incoming_values())) {
1213 BasicBlock *Pred = std::get<0>(Incoming);
1214 Value *InVal = std::get<1>(Incoming);
1215 Value *&PredVal = PredValues[Pred];
1216
1217 // If we already have a value for this predecessor, reuse it.
1218 if (PredVal) {
1219 EltPHI->addIncoming(PredVal, Pred);
1220 continue;
1221 }
1222
1223 // Handle the PHI self-reuse case.
1224 if (InVal == PN) {
1225 PredVal = EltPHI;
1226 EltPHI->addIncoming(PredVal, Pred);
1227 continue;
1228 }
1229
1230 if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
1231 // If the incoming value was a PHI, and if it was one of the PHIs we
1232 // already rewrote, just use the lowered value.
1233 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
1234 PredVal = Res;
1235 EltPHI->addIncoming(PredVal, Pred);
1236 continue;
1237 }
1238 }
1239
1240 // Otherwise, do an extract in the predecessor.
1241 Builder.SetInsertPoint(Pred->getTerminator());
1242 Value *Res = InVal;
1243 if (Offset)
1244 Res = Builder.CreateLShr(
1245 Res, ConstantInt::get(InVal->getType(), Offset), "extract");
1246 Res = Builder.CreateTrunc(Res, Ty, "extract.t");
1247 PredVal = Res;
1248 EltPHI->addIncoming(Res, Pred);
1249
1250 // If the incoming value was a PHI, and if it was one of the PHIs we are
1251 // rewriting, we will ultimately delete the code we inserted. This
1252 // means we need to revisit that PHI to make sure we extract out the
1253 // needed piece.
1254 if (PHINode *OldInVal = dyn_cast<PHINode>(InVal))
1255 if (PHIsInspected.count(OldInVal)) {
1256 unsigned RefPHIId =
1257 find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
1258 PHIUsers.push_back(
1259 PHIUsageRecord(RefPHIId, Offset, cast<Instruction>(Res)));
1260 ++UserE;
1261 }
1262 }
1263 PredValues.clear();
1264
1265 LLVM_DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
1266 << *EltPHI << '\n');
1267 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
1268 }
1269
1270 // Replace the use of this piece with the PHI node.
1271 replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
1272 }
1273
1274 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
1275 // with poison.
1276 Value *Poison = PoisonValue::get(FirstPhi.getType());
1277 for (PHINode *PHI : drop_begin(PHIsToSlice))
1278 replaceInstUsesWith(*PHI, Poison);
1279 return replaceInstUsesWith(FirstPhi, Poison);
1280 }
1281
1282 static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
1283 const DominatorTree &DT) {
1284 // Simplify the following patterns:
1285 // if (cond)
1286 // / \
1287 // ... ...
1288 // \ /
1289 // phi [true] [false]
1290 // and
1291 // switch (cond)
1292 // case v1: / \ case v2:
1293 // ... ...
1294 // \ /
1295 // phi [v1] [v2]
1296 // Make sure all inputs are constants.
1297 if (!all_of(PN.operands(), [](Value *V) { return isa<ConstantInt>(V); }))
1298 return nullptr;
1299
1300 BasicBlock *BB = PN.getParent();
1301 // Do not bother with unreachable instructions.
1302 if (!DT.isReachableFromEntry(BB))
1303 return nullptr;
1304
1305 // Determine which value the condition of the idom has for which successor.
1306 LLVMContext &Context = PN.getContext();
1307 auto *IDom = DT.getNode(BB)->getIDom()->getBlock();
1308 Value *Cond;
1309 SmallDenseMap<ConstantInt *, BasicBlock *, 8> SuccForValue;
1310 SmallDenseMap<BasicBlock *, unsigned, 8> SuccCount;
1311 auto AddSucc = [&](ConstantInt *C, BasicBlock *Succ) {
1312 SuccForValue[C] = Succ;
1313 ++SuccCount[Succ];
1314 };
1315 if (auto *BI = dyn_cast<BranchInst>(IDom->getTerminator())) {
1316 if (BI->isUnconditional())
1317 return nullptr;
1318
1319 Cond = BI->getCondition();
1320 AddSucc(ConstantInt::getTrue(Context), BI->getSuccessor(0));
1321 AddSucc(ConstantInt::getFalse(Context), BI->getSuccessor(1));
1322 } else if (auto *SI = dyn_cast<SwitchInst>(IDom->getTerminator())) {
1323 Cond = SI->getCondition();
1324 ++SuccCount[SI->getDefaultDest()];
1325 for (auto Case : SI->cases())
1326 AddSucc(Case.getCaseValue(), Case.getCaseSuccessor());
1327 } else {
1328 return nullptr;
1329 }
1330
1331 if (Cond->getType() != PN.getType())
1332 return nullptr;
1333
1334 // Check that edges outgoing from the idom's terminators dominate respective
1335 // inputs of the Phi.
1336 std::optional<bool> Invert;
1337 for (auto Pair : zip(PN.incoming_values(), PN.blocks())) {
1338 auto *Input = cast<ConstantInt>(std::get<0>(Pair));
1339 BasicBlock *Pred = std::get<1>(Pair);
1340 auto IsCorrectInput = [&](ConstantInt *Input) {
1341 // The input needs to be dominated by the corresponding edge of the idom.
1342 // This edge cannot be a multi-edge, as that would imply that multiple
1343 // different condition values follow the same edge.
1344 auto It = SuccForValue.find(Input);
1345 return It != SuccForValue.end() && SuccCount[It->second] == 1 &&
1346 DT.dominates(BasicBlockEdge(IDom, It->second),
1347 BasicBlockEdge(Pred, BB));
1348 };
1349
1350 // Depending on the constant, the condition may need to be inverted.
1351 bool NeedsInvert;
1352 if (IsCorrectInput(Input))
1353 NeedsInvert = false;
1354 else if (IsCorrectInput(cast<ConstantInt>(ConstantExpr::getNot(Input))))
1355 NeedsInvert = true;
1356 else
1357 return nullptr;
1358
1359 // Make sure the inversion requirement is always the same.
1360 if (Invert && *Invert != NeedsInvert)
1361 return nullptr;
1362
1363 Invert = NeedsInvert;
1364 }
1365
1366 if (!*Invert)
1367 return Cond;
1368
1369 // This Phi is actually the opposite of the branching condition of the IDom.
1370 // We invert the condition, which may open up some opportunities for
1371 // sinking.
1372 auto InsertPt = BB->getFirstInsertionPt();
1373 if (InsertPt != BB->end()) {
1374 Self.Builder.SetInsertPoint(&*BB, InsertPt);
1375 return Self.Builder.CreateNot(Cond);
1376 }
1377
1378 return nullptr;
1379 }
1380
1381 // Fold iv = phi(start, iv.next = iv2.next op start)
1382 // where iv2 = phi(iv2.start, iv2.next = iv2 + iv2.step)
1383 // and iv2.start op start = start
1384 // to iv = iv2 op start
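// A minimal IR sketch (names are illustrative), with
//   %iv2      = phi i64 [ 0, %preheader ], [ %iv2.next, %loop ]
//   %iv2.next = add i64 %iv2, %step
// in the same block:
//   %iv      = phi i64 [ %start, %preheader ], [ %iv.next, %loop ]
//   %iv.next = add i64 %iv2.next, %start
// ==>
//   %iv = add i64 %iv2, %start
// This is valid because %iv2's start value (0) is the identity of the add.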
1385 static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
1386 BasicBlock *BB = PN.getParent();
1387 if (PN.getNumIncomingValues() != 2)
1388 return nullptr;
1389
1390 Value *Start;
1391 Instruction *IvNext;
1392 BinaryOperator *Iv2Next;
1393 auto MatchOuterIV = [&](Value *V1, Value *V2) {
1394 if (match(V2, m_c_BinOp(m_Specific(V1), m_BinOp(Iv2Next))) ||
1395 match(V2, m_GEP(m_Specific(V1), m_BinOp(Iv2Next)))) {
1396 Start = V1;
1397 IvNext = cast<Instruction>(V2);
1398 return true;
1399 }
1400 return false;
1401 };
1402
1403 if (!MatchOuterIV(PN.getIncomingValue(0), PN.getIncomingValue(1)) &&
1404 !MatchOuterIV(PN.getIncomingValue(1), PN.getIncomingValue(0)))
1405 return nullptr;
1406
1407 PHINode *Iv2;
1408 Value *Iv2Start, *Iv2Step;
1409 if (!matchSimpleRecurrence(Iv2Next, Iv2, Iv2Start, Iv2Step) ||
1410 Iv2->getParent() != BB)
1411 return nullptr;
1412
1413 auto *BO = dyn_cast<BinaryOperator>(IvNext);
1414 Constant *Identity =
1415 BO ? ConstantExpr::getBinOpIdentity(BO->getOpcode(), Iv2Start->getType())
1416 : Constant::getNullValue(Iv2Start->getType());
1417 if (Iv2Start != Identity)
1418 return nullptr;
1419
1420 Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
1421 if (!BO) {
1422 auto *GEP = cast<GEPOperator>(IvNext);
1423 return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
1424 cast<GEPOperator>(IvNext)->getNoWrapFlags());
1425 }
1426
1427 assert(BO->isCommutative() && "Must be commutative");
1428 Value *Res = Builder.CreateBinOp(BO->getOpcode(), Iv2, Start);
1429 cast<Instruction>(Res)->copyIRFlags(BO);
1430 return Res;
1431 }
1432
1433 // PHINode simplification
1434 //
1435 Instruction *InstCombinerImpl::visitPHINode(PHINode &PN) {
1436 if (Value *V = simplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
1437 return replaceInstUsesWith(PN, V);
1438
1439 if (Instruction *Result = foldPHIArgZextsIntoPHI(PN))
1440 return Result;
1441
1442 if (Instruction *Result = foldPHIArgIntToPtrToPHI(PN))
1443 return Result;
1444
1445 // If all PHI operands are the same operation, pull them through the PHI,
1446 // reducing code size.
1447 auto *Inst0 = dyn_cast<Instruction>(PN.getIncomingValue(0));
1448 auto *Inst1 = dyn_cast<Instruction>(PN.getIncomingValue(1));
1449 if (Inst0 && Inst1 && Inst0->getOpcode() == Inst1->getOpcode() &&
1450 Inst0->hasOneUser())
1451 if (Instruction *Result = foldPHIArgOpIntoPHI(PN))
1452 return Result;
1453
1454 // If the incoming values are pointer casts of the same original value,
1455 // replace the phi with a single cast iff we can insert a non-PHI instruction.
1456 if (PN.getType()->isPointerTy() &&
1457 PN.getParent()->getFirstInsertionPt() != PN.getParent()->end()) {
1458 Value *IV0 = PN.getIncomingValue(0);
1459 Value *IV0Stripped = IV0->stripPointerCasts();
1460 // Set to keep track of values known to be equal to IV0Stripped after
1461 // stripping pointer casts.
1462 SmallPtrSet<Value *, 4> CheckedIVs;
1463 CheckedIVs.insert(IV0);
1464 if (IV0 != IV0Stripped &&
1465 all_of(PN.incoming_values(), [&CheckedIVs, IV0Stripped](Value *IV) {
1466 return !CheckedIVs.insert(IV).second ||
1467 IV0Stripped == IV->stripPointerCasts();
1468 })) {
1469 return CastInst::CreatePointerCast(IV0Stripped, PN.getType());
1470 }
1471 }
1472
1473 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
1474 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
1475 // PHI)... break the cycle.
1476 if (PN.hasOneUse()) {
1477 if (foldIntegerTypedPHI(PN))
1478 return nullptr;
1479
1480 Instruction *PHIUser = cast<Instruction>(PN.user_back());
1481 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
1482 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
1483 PotentiallyDeadPHIs.insert(&PN);
1484 if (isDeadPHICycle(PU, PotentiallyDeadPHIs))
1485 return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
1486 }
1487
1488 // If this phi has a single use, and if that use just computes a value for
1489 // the next iteration of a loop, delete the phi. This occurs with unused
1490 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
1491 // common case here is good because the only other things that catch this
1492 // are induction variable analysis (sometimes) and ADCE, which is only run
1493 // late.
1494 if (PHIUser->hasOneUse() &&
1495 (isa<BinaryOperator>(PHIUser) || isa<UnaryOperator>(PHIUser) ||
1496 isa<GetElementPtrInst>(PHIUser)) &&
1497 PHIUser->user_back() == &PN) {
1498 return replaceInstUsesWith(PN, PoisonValue::get(PN.getType()));
1499 }
1500 }
1501
1502 // When a PHI is used only to be compared with zero, it is safe to replace
1503 // an incoming value proved as known nonzero with any non-zero constant.
1504 // For example, in the code below, the incoming value %v can be replaced
1505 // with any non-zero constant based on the fact that the PHI is only used to
1506 // be compared with zero and %v is a known non-zero value:
1507 // %v = select %cond, 1, 2
1508 // %p = phi [%v, BB] ...
1509 // icmp eq, %p, 0
1510 // FIXME: To be simple, handle only integer type for now.
1511 // This handles a small number of uses to keep the complexity down, and an
1512 // icmp(or(phi)) can equally be replaced with any non-zero constant as the
1513 // "or" will only add bits.
1514 if (!PN.hasNUsesOrMore(3)) {
1515 SmallVector<Instruction *> DropPoisonFlags;
1516 bool AllUsesOfPhiEndsInCmp = all_of(PN.users(), [&](User *U) {
1517 auto *CmpInst = dyn_cast<ICmpInst>(U);
1518 if (!CmpInst) {
1519 // This is always correct, as OR only adds bits and we are checking
1520 // against 0.
1521 if (U->hasOneUse() && match(U, m_c_Or(m_Specific(&PN), m_Value()))) {
1522 DropPoisonFlags.push_back(cast<Instruction>(U));
1523 CmpInst = dyn_cast<ICmpInst>(U->user_back());
1524 }
1525 }
1526 if (!CmpInst || !isa<IntegerType>(PN.getType()) ||
1527 !CmpInst->isEquality() || !match(CmpInst->getOperand(1), m_Zero())) {
1528 return false;
1529 }
1530 return true;
1531 });
1532 // All uses of PHI results in a compare with zero.
1533 if (AllUsesOfPhiEndsInCmp) {
1534 ConstantInt *NonZeroConst = nullptr;
1535 bool MadeChange = false;
1536 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1537 Instruction *CtxI = PN.getIncomingBlock(I)->getTerminator();
1538 Value *VA = PN.getIncomingValue(I);
1539 if (isKnownNonZero(VA, getSimplifyQuery().getWithInstruction(CtxI))) {
1540 if (!NonZeroConst)
1541 NonZeroConst = getAnyNonZeroConstInt(PN);
1542 if (NonZeroConst != VA) {
1543 replaceOperand(PN, I, NonZeroConst);
1544 // The "disjoint" flag may no longer hold after the transform.
1545 for (Instruction *I : DropPoisonFlags)
1546 I->dropPoisonGeneratingFlags();
1547 MadeChange = true;
1548 }
1549 }
1550 }
1551 if (MadeChange)
1552 return &PN;
1553 }
1554 }
1555
1556 // We sometimes end up with phi cycles that non-obviously end up being the
1557 // same value, for example:
1558 // z = some value; x = phi (y, z); y = phi (x, z)
1559 // where the phi nodes don't necessarily need to be in the same block. Do a
1560 // quick check to see if the PHI node only contains a single non-phi value, if
1561 // so, scan to see if the phi cycle is actually equal to that value. If the
1562 // phi has no non-phi values then allow the "NonPhiInVal" to be set later if
1563 // one of the phis itself does not have a single input.
1564 {
1565 unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
1566 // Scan for the first non-phi operand.
1567 while (InValNo != NumIncomingVals &&
1568 isa<PHINode>(PN.getIncomingValue(InValNo)))
1569 ++InValNo;
1570
1571 Value *NonPhiInVal =
1572 InValNo != NumIncomingVals ? PN.getIncomingValue(InValNo) : nullptr;
1573
1574 // Scan the rest of the operands to see if there are any conflicts, if so
1575 // there is no need to recursively scan other phis.
1576 if (NonPhiInVal)
1577 for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
1578 Value *OpVal = PN.getIncomingValue(InValNo);
1579 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
1580 break;
1581 }
1582
1583 // If we scanned over all operands, then we have one unique value plus
1584 // phi values. Scan PHI nodes to see if they all merge in each other or
1585 // the value.
1586 if (InValNo == NumIncomingVals) {
1587 SmallPtrSet<PHINode *, 16> ValueEqualPHIs;
1588 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
1589 return replaceInstUsesWith(PN, NonPhiInVal);
1590 }
1591 }
1592
1593 // If there are multiple PHIs, sort their operands so that they all list
1594 // the blocks in the same order. This will help identical PHIs be eliminated
1595 // by other passes. Other passes shouldn't depend on this for correctness
1596 // however.
1597 auto Res = PredOrder.try_emplace(PN.getParent());
1598 if (!Res.second) {
1599 const auto &Preds = Res.first->second;
1600 for (unsigned I = 0, E = PN.getNumIncomingValues(); I != E; ++I) {
1601 BasicBlock *BBA = PN.getIncomingBlock(I);
1602 BasicBlock *BBB = Preds[I];
1603 if (BBA != BBB) {
1604 Value *VA = PN.getIncomingValue(I);
1605 unsigned J = PN.getBasicBlockIndex(BBB);
1606 Value *VB = PN.getIncomingValue(J);
1607 PN.setIncomingBlock(I, BBB);
1608 PN.setIncomingValue(I, VB);
1609 PN.setIncomingBlock(J, BBA);
1610 PN.setIncomingValue(J, VA);
1611 // NOTE: Instcombine normally would want us to "return &PN" if we
1612 // modified any of the operands of an instruction. However, since we
1613 // aren't adding or removing uses (just rearranging them) we don't do
1614 // this in this case.
1615 }
1616 }
1617 } else {
1618 // Remember the block order of the first encountered phi node.
1619 append_range(Res.first->second, PN.blocks());
1620 }
1621
1622 // Is there an identical PHI node in this basic block?
1623 for (PHINode &IdenticalPN : PN.getParent()->phis()) {
1624 // Ignore the PHI node itself.
1625 if (&IdenticalPN == &PN)
1626 continue;
1627 // Note that even though we've just canonicalized this PHI, due to the
1628 // worklist visitation order, there are no guarantees that *every* PHI
1629 // has been canonicalized, so we can't just compare operands ranges.
1630 if (!PN.isIdenticalToWhenDefined(&IdenticalPN))
1631 continue;
1632 // Just use that PHI instead then.
1633 ++NumPHICSEs;
1634 return replaceInstUsesWith(PN, &IdenticalPN);
1635 }
1636
1637 // If this is an integer PHI and we know that it has an illegal type, see if
1638 // it is only used by trunc or trunc(lshr) operations. If so, we split the
1639 // PHI into the various pieces being extracted. This sort of thing is
1640 // introduced when SROA promotes an aggregate to a single large integer type.
1641 if (PN.getType()->isIntegerTy() &&
1642 !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1643 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1644 return Res;
1645
1646 // Ultimately, try to replace this Phi with a dominating condition.
1647 if (auto *V = simplifyUsingControlFlow(*this, PN, DT))
1648 return replaceInstUsesWith(PN, V);
1649
1650 if (Value *Res = foldDependentIVs(PN, Builder))
1651 return replaceInstUsesWith(PN, Res);
1652
1653 return nullptr;
1654 }
1655