//===- InstCombineVectorOps.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements instcombine for ExtractElement, InsertElement and
// ShuffleVector.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

#define DEBUG_TYPE "instcombine"

using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumAggregateReconstructionsSimplified,
          "Number of aggregate reconstructions turned into reuse of the "
          "original aggregate");

/// Return true if the value is cheaper to scalarize than it is to leave as a
/// vector operation. If the extract index \p EI is a constant integer then
/// some operations may be cheap to scalarize.
///
/// FIXME: It's possible to create more instructions than previously existed.
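///
/// Illustrative example (hypothetical IR, not from a test): a one-use binop
/// with a constant operand and a constant extract index is considered cheap,
/// so
///   %v = add <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
///   %e = extractelement <4 x i32> %v, i64 0
/// can later be rewritten by the caller as
///   %x0 = extractelement <4 x i32> %x, i64 0
///   %e  = add i32 %x0, 1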
static bool cheapToScalarize(Value *V, Value *EI) {
  ConstantInt *CEI = dyn_cast<ConstantInt>(EI);

  // If we can pick a scalar constant value out of a vector, that is free.
  if (auto *C = dyn_cast<Constant>(V))
    return CEI || C->getSplatValue();

  if (CEI && match(V, m_Intrinsic<Intrinsic::stepvector>())) {
    ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
    // The index needs to be less than the minimum number of elements, because
    // for a scalable vector the total element count is only known at run time.
    return CEI->getValue().ult(EC.getKnownMinValue());
  }

  // An insertelement to the same constant index as our extract will simplify
  // to the scalar inserted element. An insertelement to a different constant
  // index is irrelevant to our extract.
  if (match(V, m_InsertElt(m_Value(), m_Value(), m_ConstantInt())))
    return CEI;

  if (match(V, m_OneUse(m_Load(m_Value()))))
    return true;

  if (match(V, m_OneUse(m_UnOp())))
    return true;

  Value *V0, *V1;
  if (match(V, m_OneUse(m_BinOp(m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  CmpPredicate UnusedPred;
  if (match(V, m_OneUse(m_Cmp(UnusedPred, m_Value(V0), m_Value(V1)))))
    if (cheapToScalarize(V0, EI) || cheapToScalarize(V1, EI))
      return true;

  return false;
}

// If we have a PHI node with a vector type that is only used to feed
// itself and to be an operand of an extractelement at a constant location,
// try to replace the PHI of the vector type with a PHI of a scalar type.
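//
// Illustrative example (hypothetical IR): a vector PHI that only feeds its
// own increment and a constant-index extract, e.g.
//   %vec = phi <4 x i32> [ %init, %entry ], [ %vec.next, %loop ]
//   %vec.next = add <4 x i32> %vec, <i32 1, i32 1, i32 1, i32 1>
//   %res = extractelement <4 x i32> %vec, i64 0
// can become a scalar PHI (after the extract of the constant splat folds):
//   %init.elt = extractelement <4 x i32> %init, i64 0   ; in %entry
//   %s = phi i32 [ %init.elt, %entry ], [ %s.next, %loop ]
//   %s.next = add i32 %s, 1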
Instruction *InstCombinerImpl::scalarizePHI(ExtractElementInst &EI,
                                            PHINode *PN) {
  SmallVector<Instruction *, 2> Extracts;
  // The users we want the PHI to have are:
  // 1) The EI ExtractElement (we already know this)
  // 2) Possibly more ExtractElements with the same index.
  // 3) Another operand, which will feed back into the PHI.
  Instruction *PHIUser = nullptr;
  for (auto *U : PN->users()) {
    if (ExtractElementInst *EU = dyn_cast<ExtractElementInst>(U)) {
      if (EI.getIndexOperand() == EU->getIndexOperand())
        Extracts.push_back(EU);
      else
        return nullptr;
    } else if (!PHIUser) {
      PHIUser = cast<Instruction>(U);
    } else {
      return nullptr;
    }
  }

  if (!PHIUser)
    return nullptr;

  // Verify that this PHI user has one use, which is the PHI itself, and that
  // it is a binary operation which is cheap to scalarize; otherwise return
  // nullptr.
  if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
      !(isa<BinaryOperator>(PHIUser)) ||
      !cheapToScalarize(PHIUser, EI.getIndexOperand()))
    return nullptr;

  // Create a scalar PHI node that will replace the vector PHI node
  // just before the current PHI node.
  PHINode *scalarPHI = cast<PHINode>(InsertNewInstWith(
      PHINode::Create(EI.getType(), PN->getNumIncomingValues(), ""),
      PN->getIterator()));
  // Scalarize each PHI operand.
  for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
    Value *PHIInVal = PN->getIncomingValue(i);
    BasicBlock *inBB = PN->getIncomingBlock(i);
    Value *Elt = EI.getIndexOperand();
    // If the operand is the PHI induction variable:
    if (PHIInVal == PHIUser) {
      // Scalarize the binary operation. Its first operand is the
      // scalar PHI, and the second operand is extracted from the other
      // vector operand.
      BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
      unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
      Value *Op = InsertNewInstWith(
          ExtractElementInst::Create(B0->getOperand(opId), Elt,
                                     B0->getOperand(opId)->getName() + ".Elt"),
          B0->getIterator());
      Value *newPHIUser = InsertNewInstWith(
          BinaryOperator::CreateWithCopiedFlags(B0->getOpcode(),
                                                scalarPHI, Op, B0),
          B0->getIterator());
      scalarPHI->addIncoming(newPHIUser, inBB);
    } else {
      // Scalarize PHI input:
      Instruction *newEI = ExtractElementInst::Create(PHIInVal, Elt, "");
      // Insert the new instruction into the predecessor basic block.
      Instruction *pos = dyn_cast<Instruction>(PHIInVal);
      BasicBlock::iterator InsertPos;
      if (pos && !isa<PHINode>(pos)) {
        InsertPos = ++pos->getIterator();
      } else {
        InsertPos = inBB->getFirstInsertionPt();
      }

      InsertNewInstWith(newEI, InsertPos);

      scalarPHI->addIncoming(newEI, inBB);
    }
  }

  for (auto *E : Extracts) {
    replaceInstUsesWith(*E, scalarPHI);
    // Add old extract to worklist for DCE.
    addToWorklist(E);
  }

  return &EI;
}

Instruction *InstCombinerImpl::foldBitcastExtElt(ExtractElementInst &Ext) {
  Value *X;
  uint64_t ExtIndexC;
  if (!match(Ext.getVectorOperand(), m_BitCast(m_Value(X))) ||
      !match(Ext.getIndexOperand(), m_ConstantInt(ExtIndexC)))
    return nullptr;

  ElementCount NumElts =
      cast<VectorType>(Ext.getVectorOperandType())->getElementCount();
  Type *DestTy = Ext.getType();
  unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
  bool IsBigEndian = DL.isBigEndian();

  // If we are casting an integer to a vector and extracting a portion, that is
  // a shift-right and truncate.
  if (X->getType()->isIntegerTy()) {
    assert(isa<FixedVectorType>(Ext.getVectorOperand()->getType()) &&
           "Expected fixed vector type for bitcast from scalar integer");

    // Big endian requires adjusting the extract index since the MSB is at
    // index 0.
    // LittleEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 X to i8
    // BigEndian: extelt (bitcast i32 X to v4i8), 0 -> trunc i32 (X >> 24) to i8
    if (IsBigEndian)
      ExtIndexC = NumElts.getKnownMinValue() - 1 - ExtIndexC;
    unsigned ShiftAmountC = ExtIndexC * DestWidth;
    if ((!ShiftAmountC ||
         isDesirableIntType(X->getType()->getPrimitiveSizeInBits())) &&
        Ext.getVectorOperand()->hasOneUse()) {
      if (ShiftAmountC)
        X = Builder.CreateLShr(X, ShiftAmountC, "extelt.offset");
      if (DestTy->isFloatingPointTy()) {
        Type *DstIntTy = IntegerType::getIntNTy(X->getContext(), DestWidth);
        Value *Trunc = Builder.CreateTrunc(X, DstIntTy);
        return new BitCastInst(Trunc, DestTy);
      }
      return new TruncInst(X, DestTy);
    }
  }

  if (!X->getType()->isVectorTy())
    return nullptr;

  // If this extractelement is using a bitcast from a vector of the same number
  // of elements, see if we can find the source element from the source vector:
  // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
  auto *SrcTy = cast<VectorType>(X->getType());
  ElementCount NumSrcElts = SrcTy->getElementCount();
  if (NumSrcElts == NumElts)
    if (Value *Elt = findScalarElement(X, ExtIndexC))
      return new BitCastInst(Elt, DestTy);

  assert(NumSrcElts.isScalable() == NumElts.isScalable() &&
         "Src and Dst must be the same sort of vector type");

  // If the source elements are wider than the destination, try to shift and
  // truncate a subset of scalar bits of an insert op.
  if (NumSrcElts.getKnownMinValue() < NumElts.getKnownMinValue()) {
    Value *Scalar;
    Value *Vec;
    uint64_t InsIndexC;
    if (!match(X, m_InsertElt(m_Value(Vec), m_Value(Scalar),
                              m_ConstantInt(InsIndexC))))
      return nullptr;

    // The extract must be from the subset of vector elements that we inserted
    // into. Example: if we inserted element 1 of a <2 x i64> and we are
    // extracting an i16 (narrowing ratio = 4), then this extract must be from
    // one of elements 4-7 of the bitcasted vector.
    unsigned NarrowingRatio =
        NumElts.getKnownMinValue() / NumSrcElts.getKnownMinValue();

    if (ExtIndexC / NarrowingRatio != InsIndexC) {
      // Remove the insertelement if we don't use the inserted element.
      // extractelement (bitcast (insertelement (Vec, b)), a) ->
      // extractelement (bitcast (Vec), a)
      // FIXME: this should be moved into SimplifyDemandedVectorElts,
      // once scalable vectors are supported.
      if (X->hasOneUse() && Ext.getVectorOperand()->hasOneUse()) {
        Value *NewBC = Builder.CreateBitCast(Vec, Ext.getVectorOperandType());
        return ExtractElementInst::Create(NewBC, Ext.getIndexOperand());
      }
      return nullptr;
    }

    // We are extracting part of the original scalar. How that scalar is
    // inserted into the vector depends on the endian-ness. Example:
    //              Vector Byte Elt Index:    0  1  2  3  4  5  6  7
    //                                       +--+--+--+--+--+--+--+--+
    // inselt <2 x i32> V, <i32> S, 1:       |V0|V1|V2|V3|S0|S1|S2|S3|
    // extelt <4 x i16> V', 3:               |                 |S2|S3|
    //                                       +--+--+--+--+--+--+--+--+
    // If this is little-endian, S2|S3 are the MSB of the 32-bit 'S' value.
    // If this is big-endian, S2|S3 are the LSB of the 32-bit 'S' value.
    // In this example, we must right-shift little-endian. Big-endian is just a
    // truncate.
    unsigned Chunk = ExtIndexC % NarrowingRatio;
    if (IsBigEndian)
      Chunk = NarrowingRatio - 1 - Chunk;

    // Bail out if this is an FP vector to FP vector sequence. That would take
    // more instructions than we started with unless there is no shift, and it
    // may not be handled as well in the backend.
    bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy();
    bool NeedDestBitcast = DestTy->isFloatingPointTy();
    if (NeedSrcBitcast && NeedDestBitcast)
      return nullptr;

    unsigned SrcWidth = SrcTy->getScalarSizeInBits();
    unsigned ShAmt = Chunk * DestWidth;

    // TODO: This limitation is more strict than necessary. We could sum the
    // number of new instructions and subtract the number eliminated to know if
    // we can proceed.
    if (!X->hasOneUse() || !Ext.getVectorOperand()->hasOneUse())
      if (NeedSrcBitcast || NeedDestBitcast)
        return nullptr;

    if (NeedSrcBitcast) {
      Type *SrcIntTy = IntegerType::getIntNTy(Scalar->getContext(), SrcWidth);
      Scalar = Builder.CreateBitCast(Scalar, SrcIntTy);
    }

    if (ShAmt) {
      // Bail out if we could end with more instructions than we started with.
      if (!Ext.getVectorOperand()->hasOneUse())
        return nullptr;
      Scalar = Builder.CreateLShr(Scalar, ShAmt);
    }

    if (NeedDestBitcast) {
      Type *DestIntTy = IntegerType::getIntNTy(Scalar->getContext(), DestWidth);
      return new BitCastInst(Builder.CreateTrunc(Scalar, DestIntTy), DestTy);
    }
    return new TruncInst(Scalar, DestTy);
  }

  return nullptr;
}

/// Find elements of V demanded by UserInstr.
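///
/// Illustrative example (hypothetical IR): for V of type <4 x i32>, the user
///   extractelement <4 x i32> %V, i64 2
/// demands only element #2, i.e. the returned APInt is 0b0100.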
static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  // Conservatively assume that all elements are needed.
  APInt UsedElts(APInt::getAllOnes(VWidth));

  switch (UserInstr->getOpcode()) {
  case Instruction::ExtractElement: {
    ExtractElementInst *EEI = cast<ExtractElementInst>(UserInstr);
    assert(EEI->getVectorOperand() == V);
    ConstantInt *EEIIndexC = dyn_cast<ConstantInt>(EEI->getIndexOperand());
    if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) {
      UsedElts = APInt::getOneBitSet(VWidth, EEIIndexC->getZExtValue());
    }
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(UserInstr);
    unsigned MaskNumElts =
        cast<FixedVectorType>(UserInstr->getType())->getNumElements();

    UsedElts = APInt(VWidth, 0);
    for (unsigned i = 0; i < MaskNumElts; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u || MaskVal >= 2 * VWidth)
        continue;
      if (Shuffle->getOperand(0) == V && (MaskVal < VWidth))
        UsedElts.setBit(MaskVal);
      if (Shuffle->getOperand(1) == V &&
          ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))
        UsedElts.setBit(MaskVal - VWidth);
    }
    break;
  }
  default:
    break;
  }
  return UsedElts;
}

/// Find the union of elements of V demanded by all its users.
/// If it is known by querying findDemandedEltsBySingleUser that
/// no user demands an element of V, then the corresponding bit
/// remains unset in the returned value.
static APInt findDemandedEltsByAllUsers(Value *V) {
  unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

  APInt UnionUsedElts(VWidth, 0);
  for (const Use &U : V->uses()) {
    if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
      UnionUsedElts |= findDemandedEltsBySingleUser(V, I);
    } else {
      UnionUsedElts = APInt::getAllOnes(VWidth);
      break;
    }

    if (UnionUsedElts.isAllOnes())
      break;
  }

  return UnionUsedElts;
}

/// Given a constant index for an extractelement or insertelement instruction,
/// return it with the canonical type if it isn't already canonical. We
/// arbitrarily pick 64 bit as our canonical type. The actual bitwidth doesn't
/// matter, we just want a consistent type to simplify CSE.
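///
/// Illustrative example (hypothetical IR):
///   extractelement <4 x float> %v, i32 1
/// is canonicalized to
///   extractelement <4 x float> %v, i64 1
/// so that otherwise-identical extracts with i32/i64 indices can CSE.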
static ConstantInt *getPreferredVectorIndex(ConstantInt *IndexC) {
  const unsigned IndexBW = IndexC->getBitWidth();
  if (IndexBW == 64 || IndexC->getValue().getActiveBits() > 64)
    return nullptr;
  return ConstantInt::get(IndexC->getContext(),
                          IndexC->getValue().zextOrTrunc(64));
}

Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
  Value *SrcVec = EI.getVectorOperand();
  Value *Index = EI.getIndexOperand();
  if (Value *V = simplifyExtractElementInst(SrcVec, Index,
                                            SQ.getWithInstruction(&EI)))
    return replaceInstUsesWith(EI, V);

  // extractelt (select %x, %vec1, %vec2), %const ->
  // select %x, %vec1[%const], %vec2[%const]
  // TODO: Support constant folding of multiple select operands:
  // extractelt (select %x, %vec1, %vec2), (select %x, %c1, %c2)
  // If the extractelement will for instance try to do out-of-bounds accesses
  // because of the values of %c1 and/or %c2, the sequence could be optimized
  // early. This is currently not possible because constant folding will reach
  // an unreachable assertion if it doesn't find a constant operand.
  if (SelectInst *SI = dyn_cast<SelectInst>(EI.getVectorOperand()))
    if (SI->getCondition()->getType()->isIntegerTy() &&
        isa<Constant>(EI.getIndexOperand()))
      if (Instruction *R = FoldOpIntoSelect(EI, SI))
        return R;

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  auto *IndexC = dyn_cast<ConstantInt>(Index);
  bool HasKnownValidIndex = false;
  if (IndexC) {
    // Canonicalize the type of constant indices to i64 to simplify CSE.
    if (auto *NewIdx = getPreferredVectorIndex(IndexC))
      return replaceOperand(EI, 1, NewIdx);

    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();
    HasKnownValidIndex = IndexC->getValue().ult(NumElts);

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(SrcVec)) {
      Intrinsic::ID IID = II->getIntrinsicID();
      // The index needs to be less than the minimum number of elements,
      // because for a scalable vector the total element count is only known
      // at run time.
      if (IID == Intrinsic::stepvector && IndexC->getValue().ult(NumElts)) {
        Type *Ty = EI.getType();
        unsigned BitWidth = Ty->getIntegerBitWidth();
        Value *Idx;
        // Return the index when its value does not exceed the allowed limit
        // for the element type of the vector, otherwise return poison.
        if (IndexC->getValue().getActiveBits() <= BitWidth)
          Idx = ConstantInt::get(Ty, IndexC->getValue().zextOrTrunc(BitWidth));
        else
          Idx = PoisonValue::get(Ty);
        return replaceInstUsesWith(EI, Idx);
      }
    }

    // InstSimplify should handle cases where the index is invalid.
    // For a fixed-length vector, it's invalid to extract an out-of-range
    // element.
    if (!EC.isScalable() && IndexC->getValue().uge(NumElts))
      return nullptr;

    if (Instruction *I = foldBitcastExtElt(EI))
      return I;

    // If there's a vector PHI feeding a scalar use through this extractelement
    // instruction, try to scalarize the PHI.
    if (auto *Phi = dyn_cast<PHINode>(SrcVec))
      if (Instruction *ScalarPHI = scalarizePHI(EI, Phi))
        return ScalarPHI;
  }

  // TODO: Come up with an n-ary matcher that subsumes both the unary and
  // binary matchers.
  UnaryOperator *UO;
  if (match(SrcVec, m_UnOp(UO)) && cheapToScalarize(SrcVec, Index)) {
    // extelt (unop X), Index --> unop (extelt X, Index)
    Value *X = UO->getOperand(0);
    Value *E = Builder.CreateExtractElement(X, Index);
    return UnaryOperator::CreateWithCopiedFlags(UO->getOpcode(), E, UO);
  }

  // If the binop is not speculatable, we cannot hoist the extractelement if
  // it may make the operand poison.
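  // For example (illustrative): with an index that is not known to be in
  // range, (extelt X, Index) may be poison, and feeding poison into a udiv
  // is immediate UB, whereas the original vector udiv followed by an extract
  // was merely poison. So we require either a known-valid index or a binop
  // that is safe to speculate.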
  BinaryOperator *BO;
  if (match(SrcVec, m_BinOp(BO)) && cheapToScalarize(SrcVec, Index) &&
      (HasKnownValidIndex ||
       isSafeToSpeculativelyExecuteWithVariableReplaced(BO))) {
    // extelt (binop X, Y), Index --> binop (extelt X, Index), (extelt Y, Index)
    Value *X = BO->getOperand(0), *Y = BO->getOperand(1);
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(), E0, E1, BO);
  }

  Value *X, *Y;
  CmpPredicate Pred;
  if (match(SrcVec, m_Cmp(Pred, m_Value(X), m_Value(Y))) &&
      cheapToScalarize(SrcVec, Index)) {
    // extelt (cmp X, Y), Index --> cmp (extelt X, Index), (extelt Y, Index)
    Value *E0 = Builder.CreateExtractElement(X, Index);
    Value *E1 = Builder.CreateExtractElement(Y, Index);
    CmpInst *SrcCmpInst = cast<CmpInst>(SrcVec);
    return CmpInst::CreateWithCopiedFlags(SrcCmpInst->getOpcode(), Pred, E0, E1,
                                          SrcCmpInst);
  }

  if (auto *I = dyn_cast<Instruction>(SrcVec)) {
    if (auto *IE = dyn_cast<InsertElementInst>(I)) {
      // InstSimplify already handled the case where the indices are constants
      // and equal in value. So if both are constants here, they must be
      // different values; extract from the pre-inserted vector instead.
      if (isa<Constant>(IE->getOperand(2)) && IndexC)
        return replaceOperand(EI, 0, IE->getOperand(0));
    } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
      auto *VecType = cast<VectorType>(GEP->getType());
      ElementCount EC = VecType->getElementCount();
      uint64_t IdxVal = IndexC ? IndexC->getZExtValue() : 0;
      if (IndexC && IdxVal < EC.getKnownMinValue() && GEP->hasOneUse()) {
        // Find out why we have a vector result - these are a few examples:
        //  1. We have a scalar pointer and a vector of indices, or
        //  2. We have a vector of pointers and a scalar index, or
        //  3. We have a vector of pointers and a vector of indices, etc.
        // Here we only consider combining when there is exactly one vector
        // operand, since the optimization is less obviously a win due to
        // needing more than one extractelement.
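        //
        // Illustrative example (hypothetical IR) for case 1:
        //   %gep = getelementptr i32, ptr %base, <2 x i64> %idxs
        //   %p   = extractelement <2 x ptr> %gep, i64 0
        // -->
        //   %idx0 = extractelement <2 x i64> %idxs, i64 0
        //   %p    = getelementptr i32, ptr %base, i64 %idx0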

        unsigned VectorOps =
            llvm::count_if(GEP->operands(), [](const Value *V) {
              return isa<VectorType>(V->getType());
            });
        if (VectorOps == 1) {
          Value *NewPtr = GEP->getPointerOperand();
          if (isa<VectorType>(NewPtr->getType()))
            NewPtr = Builder.CreateExtractElement(NewPtr, IndexC);

          SmallVector<Value *> NewOps;
          for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
            Value *Op = GEP->getOperand(I);
            if (isa<VectorType>(Op->getType()))
              NewOps.push_back(Builder.CreateExtractElement(Op, IndexC));
            else
              NewOps.push_back(Op);
          }

          GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
              GEP->getSourceElementType(), NewPtr, NewOps);
          NewGEP->setNoWrapFlags(GEP->getNoWrapFlags());
          return NewGEP;
        }
      }
    } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      int SplatIndex = getSplatIndex(SVI->getShuffleMask());
      // We know the all-0 splat must be reading from the first operand, even
      // in the case of scalable vectors (vscale is always > 0).
      if (SplatIndex == 0)
        return ExtractElementInst::Create(SVI->getOperand(0),
                                          Builder.getInt64(0));

      if (isa<FixedVectorType>(SVI->getType())) {
        std::optional<int> SrcIdx;
        // getSplatIndex returns -1 to mean not-found.
        if (SplatIndex != -1)
          SrcIdx = SplatIndex;
        else if (ConstantInt *CI = dyn_cast<ConstantInt>(Index))
          SrcIdx = SVI->getMaskValue(CI->getZExtValue());

        if (SrcIdx) {
          Value *Src;
          unsigned LHSWidth =
              cast<FixedVectorType>(SVI->getOperand(0)->getType())
                  ->getNumElements();

          if (*SrcIdx < 0)
            return replaceInstUsesWith(EI, PoisonValue::get(EI.getType()));
          if (*SrcIdx < (int)LHSWidth)
            Src = SVI->getOperand(0);
          else {
            *SrcIdx -= LHSWidth;
            Src = SVI->getOperand(1);
          }
          Type *Int64Ty = Type::getInt64Ty(EI.getContext());
          return ExtractElementInst::Create(
              Src, ConstantInt::get(Int64Ty, *SrcIdx, false));
        }
      }
    } else if (auto *CI = dyn_cast<CastInst>(I)) {
      // Canonicalize extractelement(cast) -> cast(extractelement).
      // Bitcasts can change the number of vector elements, and they cost
      // nothing.
      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
        Value *EE = Builder.CreateExtractElement(CI->getOperand(0), Index);
        return CastInst::Create(CI->getOpcode(), EE, EI.getType());
      }
    }
  }

  // Run demanded elements after other transforms as this can drop flags on
  // binops. If there are two paths to the same final result, we prefer the
  // one which doesn't force us to drop flags.
  if (IndexC) {
    ElementCount EC = EI.getVectorOperandType()->getElementCount();
    unsigned NumElts = EC.getKnownMinValue();
    // This instruction only demands the single element from the input vector.
    // Skip for scalable types; the number of elements is unknown at
    // compile-time.
    if (!EC.isScalable() && NumElts != 1) {
      // If the input vector has a single use, simplify it based on this use
      // property.
      if (SrcVec->hasOneUse()) {
        APInt PoisonElts(NumElts, 0);
        APInt DemandedElts(NumElts, 0);
        DemandedElts.setBit(IndexC->getZExtValue());
        if (Value *V =
                SimplifyDemandedVectorElts(SrcVec, DemandedElts, PoisonElts))
          return replaceOperand(EI, 0, V);
      } else {
        // If the input vector has multiple uses, simplify it based on a union
        // of all elements used.
        APInt DemandedElts = findDemandedEltsByAllUsers(SrcVec);
        if (!DemandedElts.isAllOnes()) {
          APInt PoisonElts(NumElts, 0);
          if (Value *V = SimplifyDemandedVectorElts(
                  SrcVec, DemandedElts, PoisonElts, 0 /* Depth */,
                  true /* AllowMultipleUsers */)) {
            if (V != SrcVec) {
              Worklist.addValue(SrcVec);
              SrcVec->replaceAllUsesWith(V);
              return &EI;
            }
          }
        }
      }
    }
  }
  return nullptr;
}

/// If V is a shuffle of values that ONLY returns elements from either LHS or
/// RHS, return the shuffle mask and true. Otherwise, return false.
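///
/// Illustrative example (hypothetical IR): for LHS/RHS of type <2 x i32>,
///   %v = insertelement <2 x i32> %LHS, i32 %rhs0, i64 1
/// where %rhs0 = extractelement <2 x i32> %RHS, i64 0, yields the mask
/// <0, 2> (element 0 of LHS, then element 0 of RHS).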
static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         SmallVectorImpl<int> &Mask) {
  assert(LHS->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Poison())) {
    Mask.assign(NumElts, -1);
    return true;
  }

  if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i);
    return true;
  }

  if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(i + NumElts);
    return true;
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<PoisonValue>(ScalarOp)) { // inserting poison into vector.
      // We can handle this if the vector we are inserting into is
      // transitively ok.
      if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
        // If so, update the mask to reflect the inserted poison.
        Mask[InsertedIdx] = -1;
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1))) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned NumLHSElts =
            cast<FixedVectorType>(LHS->getType())->getNumElements();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // We can handle this if the vector we are inserting into is
          // transitively ok.
          if (collectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] = ExtractedIdx;
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts;
            }
            return true;
          }
        }
      }
    }
  }

  return false;
}

/// If we have an insertion into a vector that is wider than the vector that we
/// are extracting from, try to widen the source vector to allow a single
/// shufflevector to replace one or more insert/extract pairs.
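///
/// Illustrative example (hypothetical IR): to feed inserts into a <4 x i8>
/// from a <2 x i8> %src, first widen %src with poison elements:
///   %wide = shufflevector <2 x i8> %src, <2 x i8> poison,
///                         <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
/// so later extracts can read from %wide and the insert chain can become a
/// single shuffle.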
static bool replaceExtractElements(InsertElementInst *InsElt,
                                   ExtractElementInst *ExtElt,
                                   InstCombinerImpl &IC) {
  auto *InsVecType = cast<FixedVectorType>(InsElt->getType());
  auto *ExtVecType = cast<FixedVectorType>(ExtElt->getVectorOperandType());
  unsigned NumInsElts = InsVecType->getNumElements();
  unsigned NumExtElts = ExtVecType->getNumElements();

  // The inserted-to vector must be wider than the extracted-from vector.
  if (InsVecType->getElementType() != ExtVecType->getElementType() ||
      NumExtElts >= NumInsElts)
    return false;

  // Create a shuffle mask to widen the extracted-from vector using poison
  // values. The mask selects all of the values of the original vector followed
  // by as many poison values as needed to create a vector of the same length
  // as the inserted-to vector.
  SmallVector<int, 16> ExtendMask;
  for (unsigned i = 0; i < NumExtElts; ++i)
    ExtendMask.push_back(i);
  for (unsigned i = NumExtElts; i < NumInsElts; ++i)
    ExtendMask.push_back(-1);

  Value *ExtVecOp = ExtElt->getVectorOperand();
  auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);
  BasicBlock *InsertionBlock = (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
                                   ? ExtVecOpInst->getParent()
                                   : ExtElt->getParent();

  // TODO: This restriction matches the basic block check below when creating
  // new extractelement instructions. If that limitation is removed, this one
  // could also be removed. But for now, we just bail out to ensure that we
  // will replace the extractelement instruction that is feeding our
  // insertelement instruction. This allows the insertelement to then be
  // replaced by a shufflevector. If the insertelement is not replaced, we can
  // induce infinite looping because there's an optimization for extractelement
  // that will delete our widening shuffle. This would trigger another attempt
  // here to create that shuffle, and we spin forever.
  if (InsertionBlock != InsElt->getParent())
    return false;

  // TODO: This restriction matches the check in visitInsertElementInst() and
  // prevents an infinite loop caused by not turning the extract/insert pair
  // into a shuffle. We really should not need either check, but we're lacking
  // folds for shufflevectors because we're afraid to generate shuffle masks
  // that the backend can't handle.
  if (InsElt->hasOneUse() && isa<InsertElementInst>(InsElt->user_back()))
    return false;

  auto *WideVec = new ShuffleVectorInst(ExtVecOp, ExtendMask);

  // Insert the new shuffle after the vector operand of the extract is defined
  // (as long as it's not a PHI) or at the start of the basic block of the
  // extract, so any subsequent extracts in the same basic block can use it.
  // TODO: Insert before the earliest ExtractElementInst that is replaced.
  if (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))
    WideVec->insertAfter(ExtVecOpInst->getIterator());
  else
    IC.InsertNewInstWith(WideVec, ExtElt->getParent()->getFirstInsertionPt());

  // Replace extracts from the original narrow vector with extracts from the
  // new wide vector.
  for (User *U : ExtVecOp->users()) {
    ExtractElementInst *OldExt = dyn_cast<ExtractElementInst>(U);
    if (!OldExt || OldExt->getParent() != WideVec->getParent())
      continue;
    auto *NewExt = ExtractElementInst::Create(WideVec, OldExt->getOperand(1));
    IC.InsertNewInstWith(NewExt, OldExt->getIterator());
    IC.replaceInstUsesWith(*OldExt, NewExt);
    // Add the old extracts to the worklist for DCE. We can't remove the
    // extracts directly, because they may still be used by the calling code.
    IC.addToWorklist(OldExt);
  }

  return true;
}

/// We are building a shuffle to create V, which is a sequence of
/// insertelement/extractelement pairs. If PermittedRHS is set, then we must
/// either use it or not rely on the second vector source. Return a std::pair
/// containing the left and right vectors of the proposed shuffle (or null),
/// and set the Mask parameter as required.
///
/// Note: we intentionally don't try to fold earlier shuffles since they have
/// often been chosen carefully to be efficiently implementable on the target.
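///
/// Illustrative example (hypothetical IR): the chain
///   %i0 = insertelement <2 x i32> poison, i32 %a0, i64 0
///   %i1 = insertelement <2 x i32> %i0, i32 %b0, i64 1
/// where %a0 and %b0 are constant-index extracts from %A and %B, can be
/// described by the pair (%A, %B) plus a shuffle mask over both sources.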
using ShuffleOps = std::pair<Value *, Value *>;

static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl<int> &Mask,
                                         Value *PermittedRHS,
                                         InstCombinerImpl &IC, bool &Rerun) {
  assert(V->getType()->isVectorTy() && "Invalid shuffle!");
  unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

  if (match(V, m_Poison())) {
    Mask.assign(NumElts, -1);
    return std::make_pair(
        PermittedRHS ? PoisonValue::get(PermittedRHS->getType()) : V, nullptr);
  }

  if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, 0);
    return std::make_pair(V, nullptr);
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);

    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
        unsigned ExtractedIdx =
            cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

        // Either the vector being extracted from or the vector being inserted
        // into must be the RHS vector; otherwise we'd end up with a shuffle of
        // three inputs.
        if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
          Value *RHS = EI->getOperand(0);
          ShuffleOps LR = collectShuffleElements(VecOp, Mask, RHS, IC, Rerun);
          assert(LR.second == nullptr || LR.second == RHS);

          if (LR.first->getType() != RHS->getType()) {
            // Although we are giving up for now, see if we can create extracts
            // that match the inserts for another round of combining.
            if (replaceExtractElements(IEI, EI, IC))
              Rerun = true;

            // We tried our best, but we can't find anything compatible with
            // RHS further up the chain. Return a trivial shuffle.
            for (unsigned i = 0; i < NumElts; ++i)
              Mask[i] = i;
            return std::make_pair(V, nullptr);
          }

          unsigned NumLHSElts =
              cast<FixedVectorType>(RHS->getType())->getNumElements();
          Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;
          return std::make_pair(LR.first, RHS);
        }

        if (VecOp == PermittedRHS) {
          // We've gone as far as we can: anything on the other side of the
          // extractelement will already have been converted into a shuffle.
          unsigned NumLHSElts =
              cast<FixedVectorType>(EI->getOperand(0)->getType())
                  ->getNumElements();
          for (unsigned i = 0; i != NumElts; ++i)
            Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);
          return std::make_pair(EI->getOperand(0), PermittedRHS);
        }

        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (EI->getOperand(0)->getType() == PermittedRHS->getType() &&
            collectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS,
                                         Mask))
          return std::make_pair(EI->getOperand(0), PermittedRHS);
      }
    }
  }

  // Otherwise, we can't do anything fancy. Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i);
  return std::make_pair(V, nullptr);
}

/// Look for a chain of insertvalue's that fully define an aggregate, and trace
/// back the values inserted; see if they were all extracted (via extractvalue)
/// from the same source aggregate at the exact same element indices.
/// If they were, just reuse the source aggregate.
/// This potentially deals with PHI indirections.
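///
/// Illustrative example (hypothetical IR):
///   %e0 = extractvalue { i8, i32 } %agg, 0
///   %e1 = extractvalue { i8, i32 } %agg, 1
///   %t0 = insertvalue { i8, i32 } poison, i8 %e0, 0
///   %t1 = insertvalue { i8, i32 } %t0, i32 %e1, 1
/// Here %t1 reconstructs %agg element-for-element, so all uses of %t1 can
/// simply be replaced with %agg.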
Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
    InsertValueInst &OrigIVI) {
  Type *AggTy = OrigIVI.getType();
  unsigned NumAggElts;
  switch (AggTy->getTypeID()) {
  case Type::StructTyID:
    NumAggElts = AggTy->getStructNumElements();
    break;
  case Type::ArrayTyID:
    NumAggElts = AggTy->getArrayNumElements();
    break;
  default:
    llvm_unreachable("Unhandled aggregate type?");
  }

  // Arbitrary aggregate size cut-off. The motivation for a limit of 2 is to
  // be able to handle the clang C++ exception struct (which is hardcoded as
  // {i8*, i32}).
  // FIXME: any interesting patterns to be caught with a larger limit?
  assert(NumAggElts > 0 && "Aggregate should have elements.");
  if (NumAggElts > 2)
    return nullptr;

  static constexpr auto NotFound = std::nullopt;
  static constexpr auto FoundMismatch = nullptr;

  // Try to find a value for each element of the aggregate.
  // FIXME: deal with more complex, not one-dimensional, aggregate types
  SmallVector<std::optional<Instruction *>, 2> AggElts(NumAggElts, NotFound);

  // Do we know values for each element of the aggregate?
  auto KnowAllElts = [&AggElts]() {
    return !llvm::is_contained(AggElts, NotFound);
  };

  int Depth = 0;

  // Arbitrary `insertvalue` visitation depth limit. Let's be okay with
  // every element being overwritten twice, which should never happen.
  static const int DepthLimit = 2 * NumAggElts;

  // Recurse up the chain of `insertvalue` aggregate operands until either
  // we've reconstructed the full initializer or we can't visit any more
  // `insertvalue`'s.
  for (InsertValueInst *CurrIVI = &OrigIVI;
       Depth < DepthLimit && CurrIVI && !KnowAllElts();
       CurrIVI = dyn_cast<InsertValueInst>(CurrIVI->getAggregateOperand()),
       ++Depth) {
    auto *InsertedValue =
        dyn_cast<Instruction>(CurrIVI->getInsertedValueOperand());
    if (!InsertedValue)
      return nullptr; // Inserted value must be produced by an instruction.

    ArrayRef<unsigned int> Indices = CurrIVI->getIndices();

    // Don't bother with more than single-level aggregates.
    if (Indices.size() != 1)
      return nullptr; // FIXME: deal with more complex aggregates?

    // Now, we may have already previously recorded the value for this element
    // of the aggregate. If we did, that means the CurrIVI will later be
    // overwritten with the already-recorded value. But if not, let's record
    // it!
    std::optional<Instruction *> &Elt = AggElts[Indices.front()];
    Elt = Elt.value_or(InsertedValue);

    // FIXME: should we handle chain-terminating undef base operand?
  }

  // Was that sufficient to deduce the full initializer for the aggregate?
  if (!KnowAllElts())
    return nullptr; // Give up then.

  // We now want to find the source[s] of the aggregate elements we've found.
  // And with "source" we mean the original aggregate[s] from which
  // the inserted elements were extracted. This may require PHI translation.

  enum class AggregateDescription {
    /// When analyzing the value that was inserted into an aggregate, we did
    /// not manage to find a defining `extractvalue` instruction to analyze.
    NotFound,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find defining `extractvalue` instruction[s], and everything
    /// matched perfectly - aggregate type, element insertion/extraction index.
    Found,
    /// When analyzing the value that was inserted into an aggregate, we did
    /// manage to find a defining `extractvalue` instruction, but there was
    /// a mismatch: either the type of the source aggregate of the extraction
    /// didn't match the type of the aggregate of the insertion,
    /// or the extraction/insertion channels mismatched,
    /// or different elements had different source aggregates.
    FoundMismatch
  };
  auto Describe = [](std::optional<Value *> SourceAggregate) {
    if (SourceAggregate == NotFound)
      return AggregateDescription::NotFound;
    if (*SourceAggregate == FoundMismatch)
      return AggregateDescription::FoundMismatch;
    return AggregateDescription::Found;
  };

  // If an aggregate element is defined in UseBB, we can't use it in PredBB.
  bool EltDefinedInUseBB = false;

  // Given the value \p Elt that was being inserted into element \p EltIdx of
  // an aggregate AggTy, see if \p Elt was originally defined by an
  // appropriate extractvalue (same element index, same aggregate type).
  // If found, return the source aggregate from which the extraction was.
  // If \p PredBB is provided, do PHI translation of \p Elt first.
  auto FindSourceAggregate =
      [&](Instruction *Elt, unsigned EltIdx, std::optional<BasicBlock *> UseBB,
          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
    // For now(?), only deal with, at most, a single level of PHI indirection.
    if (UseBB && PredBB) {
      Elt = dyn_cast<Instruction>(Elt->DoPHITranslation(*UseBB, *PredBB));
      if (Elt && Elt->getParent() == *UseBB)
        EltDefinedInUseBB = true;
    }
    // FIXME: deal with multiple levels of PHI indirection?

    // Did we find an extraction?
    auto *EVI = dyn_cast_or_null<ExtractValueInst>(Elt);
    if (!EVI)
      return NotFound;

    Value *SourceAggregate = EVI->getAggregateOperand();

    // Is the extraction from the same type into which the insertion was?
    if (SourceAggregate->getType() != AggTy)
      return FoundMismatch;
    // And the element index doesn't change between extraction and insertion?
    if (EVI->getNumIndices() != 1 || EltIdx != EVI->getIndices().front())
      return FoundMismatch;

    return SourceAggregate; // AggregateDescription::Found
  };

  // Given the elements AggElts that were constructing the aggregate OrigIVI,
  // see if we can find an appropriate source aggregate for each of the
  // elements, and see that it's the same aggregate for each element. If so,
  // return it.
  auto FindCommonSourceAggregate =
      [&](std::optional<BasicBlock *> UseBB,
          std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {
    std::optional<Value *> SourceAggregate;

    for (auto I : enumerate(AggElts)) {
      assert(Describe(SourceAggregate) != AggregateDescription::FoundMismatch &&
             "We don't store nullptr in SourceAggregate!");
      assert((Describe(SourceAggregate) == AggregateDescription::Found) ==
                 (I.index() != 0) &&
             "SourceAggregate should be valid after the first element.");

      // For this element, is there a plausible source aggregate?
      // FIXME: we could special-case undef element, IFF we know that in the
      // source aggregate said element isn't poison.
      std::optional<Value *> SourceAggregateForElement =
          FindSourceAggregate(*I.value(), I.index(), UseBB, PredBB);

      // Okay, what have we found? Does that correlate with previous findings?

      // Regardless of whether or not we have previously found a source
      // aggregate for previous elements (if any), if we didn't find one for
      // this element, passthrough whatever we have just found.
      if (Describe(SourceAggregateForElement) != AggregateDescription::Found)
        return SourceAggregateForElement;

      // Okay, we have found a source aggregate for this element.
      // Let's see what we already know from previous elements, if any.
      switch (Describe(SourceAggregate)) {
      case AggregateDescription::NotFound:
        // This is apparently the first element that we have examined.
        SourceAggregate = SourceAggregateForElement; // Record the aggregate!
        continue; // Great, now look at next element.
      case AggregateDescription::Found:
        // We have previously already successfully examined other elements.
        // Is this the same source aggregate we've found for other elements?
        if (*SourceAggregateForElement != *SourceAggregate)
          return FoundMismatch;
        continue; // Still the same aggregate, look at next element.
      case AggregateDescription::FoundMismatch:
        llvm_unreachable("Can't happen. We would have early-exited then.");
      }
    }

    assert(Describe(SourceAggregate) == AggregateDescription::Found &&
           "Must be a valid Value");
    return *SourceAggregate;
  };

  std::optional<Value *> SourceAggregate;

  // Can we find the source aggregate without looking at predecessors?
  SourceAggregate = FindCommonSourceAggregate(/*UseBB=*/std::nullopt,
                                              /*PredBB=*/std::nullopt);
  if (Describe(SourceAggregate) != AggregateDescription::NotFound) {
    if (Describe(SourceAggregate) == AggregateDescription::FoundMismatch)
      return nullptr; // Conflicting source aggregates!
    ++NumAggregateReconstructionsSimplified;
    return replaceInstUsesWith(OrigIVI, *SourceAggregate);
  }

  // Okay, apparently we need to look at predecessors.

  // We should be smart about picking the "use" basic block, which will be the
  // merge point for the aggregate, where we'll insert the final PHI that will
  // be used instead of OrigIVI. The basic block of OrigIVI is *not* the right
  // choice. We should look at the blocks in which each of the AggElts is being
  // defined; they should all be defined in the same basic block.
  BasicBlock *UseBB = nullptr;

  for (const std::optional<Instruction *> &I : AggElts) {
    BasicBlock *BB = (*I)->getParent();
    // If it's the first instruction we've encountered, record the basic block.
    if (!UseBB) {
      UseBB = BB;
      continue;
    }
    // Otherwise, this must be the same basic block we've seen previously.
    if (UseBB != BB)
      return nullptr;
  }

  // If *all* of the elements are basic-block-independent, meaning they are
  // either function arguments, or constant expressions, then if we didn't
  // handle them without predecessor-aware handling, we won't handle them now.
  if (!UseBB)
    return nullptr;

  // If we didn't manage to find the source aggregate without looking at
  // predecessors, and there are no predecessors to look at, then we're done.
  if (pred_empty(UseBB))
    return nullptr;

  // Arbitrary predecessor count limit.
  static const int PredCountLimit = 64;

  // Cache the (non-uniqified!) list of predecessors in a vector,
  // checking the limit at the same time for efficiency.
  SmallVector<BasicBlock *, 4> Preds; // May have duplicates!
  for (BasicBlock *Pred : predecessors(UseBB)) {
    // Don't bother if there are too many predecessors.
    if (Preds.size() >= PredCountLimit) // FIXME: only count duplicates once?
      return nullptr;
    Preds.emplace_back(Pred);
  }

  // For each predecessor, what is the source aggregate
  // from which all the elements were originally extracted?
  // Note that we want the map to have a stable iteration order!
  SmallMapVector<BasicBlock *, Value *, 4> SourceAggregates;
  bool FoundSrcAgg = false;
  for (BasicBlock *Pred : Preds) {
    std::pair<decltype(SourceAggregates)::iterator, bool> IV =
        SourceAggregates.try_emplace(Pred);
    // Did we already evaluate this predecessor?
    if (!IV.second)
      continue;

    // Let's hope that when coming from predecessor Pred, all elements of the
    // aggregate produced by OrigIVI must have been originally extracted from
    // the same aggregate. Is that so? Can we find said original aggregate?
    SourceAggregate = FindCommonSourceAggregate(UseBB, Pred);
    if (Describe(SourceAggregate) == AggregateDescription::Found) {
      FoundSrcAgg = true;
      IV.first->second = *SourceAggregate;
    } else {
      // If UseBB is the single successor of Pred, we can add an insertvalue
      // to Pred.
      auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
      if (!BI || !BI->isUnconditional())
        return nullptr;
    }
  }

  if (!FoundSrcAgg)
    return nullptr;

  // Do some sanity checks if we need to add insertvalue into predecessors.
  auto OrigBB = OrigIVI.getParent();
  for (auto &It : SourceAggregates) {
    if (Describe(It.second) == AggregateDescription::Found)
      continue;

    // The element is defined in UseBB, so it can't be used in predecessors.
    if (EltDefinedInUseBB)
      return nullptr;

    // Doing this transformation across a loop boundary may create a dead
    // loop, so we should avoid that situation. But LoopInfo is not generally
    // available here, so we must be conservative.
    // If OrigIVI is in UseBB and UseBB is the only successor of PredBB, then
    // PredBB can't be in an inner loop.
    if (UseBB != OrigBB)
      return nullptr;

    // Avoid constructing a constant aggregate because a constant value may
    // expose more optimizations.
    bool ConstAgg = true;
    for (auto Val : AggElts) {
      Value *Elt = (*Val)->DoPHITranslation(UseBB, It.first);
      if (!isa<Constant>(Elt)) {
        ConstAgg = false;
        break;
      }
    }
    if (ConstAgg)
      return nullptr;
  }

  // For predecessors without an appropriate source aggregate, create one in
  // the predecessor.
  for (auto &It : SourceAggregates) {
    if (Describe(It.second) == AggregateDescription::Found)
      continue;

    BasicBlock *Pred = It.first;
    Builder.SetInsertPoint(Pred->getTerminator());
    Value *V = PoisonValue::get(AggTy);
    for (auto [Idx, Val] : enumerate(AggElts)) {
      Value *Elt = (*Val)->DoPHITranslation(UseBB, Pred);
      V = Builder.CreateInsertValue(V, Elt, Idx);
    }

    It.second = V;
  }

  // All good! Now we just need to thread the source aggregates here.
  // Note that we have to insert the new PHI here, ourselves, because we can't
  // rely on InstCombinerImpl::run() inserting it into the right basic block.
  // Note that the same block can be a predecessor more than once,
  // and we need to preserve that invariant for the PHI node.
  BuilderTy::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
  auto *PHI =
      Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
  for (BasicBlock *Pred : Preds)
    PHI->addIncoming(SourceAggregates[Pred], Pred);

  ++NumAggregateReconstructionsSimplified;
  return replaceInstUsesWith(OrigIVI, PHI);
}

/// Try to find redundant insertvalue instructions, like the following ones:
///  %0 = insertvalue { i8, i32 } undef, i8 %x, 0
///  %1 = insertvalue { i8, i32 } %0, i8 %y, 0
/// Here the second instruction inserts values at the same indices as the
/// first one, making the first one redundant.
/// It should be transformed to:
///  %0 = insertvalue { i8, i32 } undef, i8 %y, 0
Instruction *InstCombinerImpl::visitInsertValueInst(InsertValueInst &I) {
  if (Value *V = simplifyInsertValueInst(
          I.getAggregateOperand(), I.getInsertedValueOperand(), I.getIndices(),
          SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  bool IsRedundant = false;
  ArrayRef<unsigned int> FirstIndices = I.getIndices();

  // If there is a chain of insertvalue instructions (each of them except the
  // last one has only one use and it's another insertvalue insn from this
  // chain), check if any of the 'children' uses the same indices as the first
  // instruction. In this case, the first one is redundant.
  Value *V = &I;
  unsigned Depth = 0;
  while (V->hasOneUse() && Depth < 10) {
    User *U = V->user_back();
    auto *UserInsInst = dyn_cast<InsertValueInst>(U);
    if (!UserInsInst || U->getOperand(0) != V)
      break;
    if (UserInsInst->getIndices() == FirstIndices) {
      IsRedundant = true;
      break;
    }
    V = UserInsInst;
    Depth++;
  }

  if (IsRedundant)
    return replaceInstUsesWith(I, I.getOperand(0));

  if (Instruction *NewI = foldAggregateConstructionIntoAggregateReuse(I))
    return NewI;

  return nullptr;
}
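// Illustrative example (hypothetical masks): for two <4 x i32> operands, the
// mask <0, 5, 2, 7> keeps every lane in place (lane i always comes from lane
// i of one of the operands), so the shuffle acts like a per-lane select; the
// mask <1, 5, 2, 7> does not, because lane 0 crosses lanes.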
static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
  // Cannot analyze a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(Shuf.getOperand(0)->getType()))
    return false;

  int MaskSize = Shuf.getShuffleMask().size();
  int VecSize =
      cast<FixedVectorType>(Shuf.getOperand(0)->getType())->getNumElements();

  // A vector select does not change the size of the operands.
  if (MaskSize != VecSize)
    return false;

  // Each mask element must be undefined or choose a vector element from one of
  // the source operands without crossing vector lanes.
  for (int i = 0; i != MaskSize; ++i) {
    int Elt = Shuf.getMaskValue(i);
    if (Elt != -1 && Elt != i && Elt != i + VecSize)
      return false;
  }

  return true;
}

/// Turn a chain of inserts that splats a value into an insert + shuffle:
/// insertelt(insertelt(insertelt(insertelt X, %k, 0), %k, 1), %k, 2) ... ->
/// shufflevector(insertelt(X, %k, 0), poison, zero)
static Instruction *foldInsSequenceIntoSplat(InsertElementInst &InsElt) {
  // We are interested in the last insert in a chain. So if this insert has a
  // single user and that user is an insert, bail.
  if (InsElt.hasOneUse() && isa<InsertElementInst>(InsElt.user_back()))
    return nullptr;

  VectorType *VecTy = InsElt.getType();
  // Cannot handle a scalable type; the number of elements is not a
  // compile-time constant.
  if (isa<ScalableVectorType>(VecTy))
    return nullptr;
  unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();

  // Do not try to do this for a one-element vector, since that's a nop,
  // and will cause an inf-loop.
  if (NumElements == 1)
    return nullptr;

  Value *SplatVal = InsElt.getOperand(1);
  InsertElementInst *CurrIE = &InsElt;
  SmallBitVector ElementPresent(NumElements, false);
  InsertElementInst *FirstIE = nullptr;

  // Walk the chain backwards, keeping track of which indices we inserted into,
  // until we hit something that isn't an insert of the splatted value.
  while (CurrIE) {
    auto *Idx = dyn_cast<ConstantInt>(CurrIE->getOperand(2));
    if (!Idx || CurrIE->getOperand(1) != SplatVal)
      return nullptr;

    auto *NextIE = dyn_cast<InsertElementInst>(CurrIE->getOperand(0));
    // Check that none of the intermediate steps have any additional uses,
    // except for the root insertelement instruction, which can be re-used, if
    // it inserts at position 0.
    if (CurrIE != &InsElt &&
        (!CurrIE->hasOneUse() && (NextIE != nullptr || !Idx->isZero())))
      return nullptr;

    ElementPresent[Idx->getZExtValue()] = true;
    FirstIE = CurrIE;
    CurrIE = NextIE;
  }

  // If this is just a single insertelement (not a sequence), we are done.
  if (FirstIE == &InsElt)
    return nullptr;

  // If we are not inserting into a poison vector, make sure we've seen an
  // insert into every element.
  // TODO: If the base vector is not undef, it might be better to create a
  // splat and then a select-shuffle (blend) with the base vector.
  if (!match(FirstIE->getOperand(0), m_Poison()))
    if (!ElementPresent.all())
      return nullptr;

  // Create the insert + shuffle.
  Type *Int64Ty = Type::getInt64Ty(InsElt.getContext());
  PoisonValue *PoisonVec = PoisonValue::get(VecTy);
  Constant *Zero = ConstantInt::get(Int64Ty, 0);
  if (!cast<ConstantInt>(FirstIE->getOperand(2))->isZero())
    FirstIE = InsertElementInst::Create(PoisonVec, SplatVal, Zero, "",
                                        InsElt.getIterator());

  // Splat from element 0, but replace absent elements with poison in the mask.
  SmallVector<int, 16> Mask(NumElements, 0);
  for (unsigned i = 0; i != NumElements; ++i)
    if (!ElementPresent[i])
      Mask[i] = -1;

  return new ShuffleVectorInst(FirstIE, Mask);
}
1362
1363 /// Try to fold an insert element into an existing splat shuffle by changing
1364 /// the shuffle's mask to include the index of this insert element.
foldInsEltIntoSplat(InsertElementInst & InsElt)1365 static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
1366 // Check if the vector operand of this insert is a canonical splat shuffle.
1367 auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
1368 if (!Shuf || !Shuf->isZeroEltSplat())
1369 return nullptr;
1370
1371 // Bail out early if shuffle is scalable type. The number of elements in
1372 // shuffle mask is unknown at compile-time.
1373 if (isa<ScalableVectorType>(Shuf->getType()))
1374 return nullptr;
1375
1376 // Check for a constant insertion index.
1377 uint64_t IdxC;
1378 if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
1379 return nullptr;
1380
1381 // Check if the splat shuffle's input is the same as this insert's scalar op.
1382 Value *X = InsElt.getOperand(1);
1383 Value *Op0 = Shuf->getOperand(0);
1384 if (!match(Op0, m_InsertElt(m_Undef(), m_Specific(X), m_ZeroInt())))
1385 return nullptr;
1386
1387 // Replace the shuffle mask element at the index of this insert with a zero.
1388 // For example:
1389 // inselt (shuf (inselt undef, X, 0), _, <0,undef,0,undef>), X, 1
1390 // --> shuf (inselt undef, X, 0), poison, <0,0,0,undef>
1391 unsigned NumMaskElts =
1392 cast<FixedVectorType>(Shuf->getType())->getNumElements();
1393 SmallVector<int, 16> NewMask(NumMaskElts);
1394 for (unsigned i = 0; i != NumMaskElts; ++i)
1395 NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);
1396
1397 return new ShuffleVectorInst(Op0, NewMask);
1398 }
1399
1400 /// Try to fold an extract+insert element into an existing identity shuffle by
1401 /// changing the shuffle's mask to include the index of this insert element.
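///
/// For example (an illustrative sketch; value names are hypothetical):
///   %narrow = shufflevector <4 x i32> %x, <4 x i32> poison,
///             <2 x i32> <i32 0, i32 poison>
///   %elt    = extractelement <4 x i32> %x, i64 1
///   %ins    = insertelement <2 x i32> %narrow, i32 %elt, i64 1
/// becomes a single shuffle with lane 1 filled in from %x:
///   %ins = shufflevector <4 x i32> %x, <4 x i32> poison,
///          <2 x i32> <i32 0, i32 1>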
static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
  // Check if the vector operand of this insert is an identity shuffle.
  auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
  if (!Shuf || !match(Shuf->getOperand(1), m_Poison()) ||
      !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
    return nullptr;

  // Bail out early if the shuffle has scalable type; the number of elements
  // in the shuffle mask is unknown at compile time.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return nullptr;

  // Check for a constant insertion index.
  uint64_t IdxC;
  if (!match(InsElt.getOperand(2), m_ConstantInt(IdxC)))
    return nullptr;

  // Check if this insert's scalar op is extracted from the identity shuffle's
  // input vector.
  Value *Scalar = InsElt.getOperand(1);
  Value *X = Shuf->getOperand(0);
  if (!match(Scalar, m_ExtractElt(m_Specific(X), m_SpecificInt(IdxC))))
    return nullptr;

  // Replace the shuffle mask element at the index of this extract+insert with
  // that same index value.
  // For example:
  // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf->getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts);
  ArrayRef<int> OldMask = Shuf->getShuffleMask();
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    if (i != IdxC) {
      // All mask elements besides the inserted element remain the same.
      NewMask[i] = OldMask[i];
    } else if (OldMask[i] == (int)IdxC) {
      // If the mask element was already set, there's nothing to do
      // (demanded elements analysis may unset it later).
      return nullptr;
    } else {
      assert(OldMask[i] == PoisonMaskElem &&
             "Unexpected shuffle mask element for identity shuffle");
      NewMask[i] = IdxC;
    }
  }

  return new ShuffleVectorInst(X, Shuf->getOperand(1), NewMask);
}

/// If we have an insertelement instruction feeding into another insertelement
/// and the 2nd is inserting a constant into the vector, canonicalize that
/// constant insertion before the insertion of a variable:
///
/// insertelement (insertelement X, Y, IdxC1), ScalarC, IdxC2 -->
/// insertelement (insertelement X, ScalarC, IdxC2), Y, IdxC1
///
/// This has the potential of eliminating the 2nd insertelement instruction
/// via constant folding of the scalar constant into a vector constant.
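///
/// For example (an illustrative sketch; value names are hypothetical), with
/// X = zeroinitializer the hoisted constant insert folds away entirely:
///   %ins1 = insertelement <4 x i32> zeroinitializer, i32 %y, i64 0
///   %ins2 = insertelement <4 x i32> %ins1, i32 42, i64 1
/// -->
///   %ins2 = insertelement <4 x i32> <i32 0, i32 42, i32 0, i32 0>,
///           i32 %y, i64 0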
static Instruction *hoistInsEltConst(InsertElementInst &InsElt2,
                                     InstCombiner::BuilderTy &Builder) {
  auto *InsElt1 = dyn_cast<InsertElementInst>(InsElt2.getOperand(0));
  if (!InsElt1 || !InsElt1->hasOneUse())
    return nullptr;

  Value *X, *Y;
  Constant *ScalarC;
  ConstantInt *IdxC1, *IdxC2;
  if (match(InsElt1->getOperand(0), m_Value(X)) &&
      match(InsElt1->getOperand(1), m_Value(Y)) && !isa<Constant>(Y) &&
      match(InsElt1->getOperand(2), m_ConstantInt(IdxC1)) &&
      match(InsElt2.getOperand(1), m_Constant(ScalarC)) &&
      match(InsElt2.getOperand(2), m_ConstantInt(IdxC2)) && IdxC1 != IdxC2) {
    Value *NewInsElt1 = Builder.CreateInsertElement(X, ScalarC, IdxC2);
    return InsertElementInst::Create(NewInsElt1, Y, IdxC1);
  }

  return nullptr;
}

/// insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex
/// --> shufflevector X, CVec', Mask'
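///
/// For example (an illustrative sketch; value names are hypothetical):
///   %shuf = shufflevector <4 x i32> %x, <4 x i32> <i32 1, i32 2, i32 3, i32 4>,
///           <4 x i32> <i32 0, i32 5, i32 2, i32 7>
///   %ins  = insertelement <4 x i32> %shuf, i32 9, i64 0
/// The inserted constant lands in the constant vector and the mask now takes
/// lane 0 from operand 2:
///   %ins = shufflevector <4 x i32> %x, <4 x i32> <i32 9, i32 2, i32 3, i32 4>,
///          <4 x i32> <i32 4, i32 5, i32 2, i32 7>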
static Instruction *foldConstantInsEltIntoShuffle(InsertElementInst &InsElt) {
  auto *Inst = dyn_cast<Instruction>(InsElt.getOperand(0));
  // Bail out if the parent has more than one use. In that case, we'd be
  // replacing the insertelt with a shuffle, and that's not a clear win.
  if (!Inst || !Inst->hasOneUse())
    return nullptr;
  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0))) {
    // The shuffle must have a constant vector operand. The insertelt must have
    // a constant scalar being inserted at a constant position in the vector.
    Constant *ShufConstVec, *InsEltScalar;
    uint64_t InsEltIndex;
    if (!match(Shuf->getOperand(1), m_Constant(ShufConstVec)) ||
        !match(InsElt.getOperand(1), m_Constant(InsEltScalar)) ||
        !match(InsElt.getOperand(2), m_ConstantInt(InsEltIndex)))
      return nullptr;

    // Adding an element to an arbitrary shuffle could be expensive, but a
    // shuffle that selects elements from vectors without crossing lanes is
    // assumed cheap.
    // If we're just adding a constant into that shuffle, it will still be
    // cheap.
    if (!isShuffleEquivalentToSelect(*Shuf))
      return nullptr;

    // From the above 'select' check, we know that the mask has the same number
    // of elements as the vector input operands. We also know that each constant
    // input element is used in its lane and cannot be used more than once by
    // the shuffle. Therefore, replace the constant in the shuffle's constant
    // vector with the insertelt constant. Replace the constant in the shuffle's
    // mask vector with the insertelt index plus the length of the vector
    // (because the constant vector operand of a shuffle is always the 2nd
    // operand).
    ArrayRef<int> Mask = Shuf->getShuffleMask();
    unsigned NumElts = Mask.size();
    SmallVector<Constant *, 16> NewShufElts(NumElts);
    SmallVector<int, 16> NewMaskElts(NumElts);
    for (unsigned I = 0; I != NumElts; ++I) {
      if (I == InsEltIndex) {
        NewShufElts[I] = InsEltScalar;
        NewMaskElts[I] = InsEltIndex + NumElts;
      } else {
        // Copy over the existing values.
        NewShufElts[I] = ShufConstVec->getAggregateElement(I);
        NewMaskElts[I] = Mask[I];
      }

      // Bail if we failed to find an element.
      if (!NewShufElts[I])
        return nullptr;
    }

    // Create new operands for a shuffle that includes the constant of the
    // original insertelt. The old shuffle will be dead now.
    return new ShuffleVectorInst(Shuf->getOperand(0),
                                 ConstantVector::get(NewShufElts), NewMaskElts);
  } else if (auto *IEI = dyn_cast<InsertElementInst>(Inst)) {
    // Transform sequences of insertelement ops with constant data/indexes into
    // a single shuffle op.
    // Cannot handle scalable types; the number of elements needed to create
    // the shuffle mask is not a compile-time constant.
    if (isa<ScalableVectorType>(InsElt.getType()))
      return nullptr;
    unsigned NumElts =
        cast<FixedVectorType>(InsElt.getType())->getNumElements();

    uint64_t InsertIdx[2];
    Constant *Val[2];
    if (!match(InsElt.getOperand(2), m_ConstantInt(InsertIdx[0])) ||
        !match(InsElt.getOperand(1), m_Constant(Val[0])) ||
        !match(IEI->getOperand(2), m_ConstantInt(InsertIdx[1])) ||
        !match(IEI->getOperand(1), m_Constant(Val[1])))
      return nullptr;
    SmallVector<Constant *, 16> Values(NumElts);
    SmallVector<int, 16> Mask(NumElts);
    auto ValI = std::begin(Val);
    // Generate the new constant vector and mask.
    // We have 2 values/masks from the insertelement instructions. Insert them
    // into the new value/mask vectors.
    for (uint64_t I : InsertIdx) {
      if (!Values[I]) {
        Values[I] = *ValI;
        Mask[I] = NumElts + I;
      }
      ++ValI;
    }
    // Remaining values are filled with 'poison' values.
    for (unsigned I = 0; I < NumElts; ++I) {
      if (!Values[I]) {
        Values[I] = PoisonValue::get(InsElt.getType()->getElementType());
        Mask[I] = I;
      }
    }
    // Create new operands for a shuffle that includes the constant of the
    // original insertelt.
    return new ShuffleVectorInst(IEI->getOperand(0),
                                 ConstantVector::get(Values), Mask);
  }
  return nullptr;
}

/// If both the base vector and the inserted element are extended from the same
/// type, do the insert element in the narrow source type followed by extend.
/// TODO: This can be extended to include other cast opcodes, but particularly
/// if we create a wider insertelement, make sure codegen is not harmed.
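///
/// For example (an illustrative sketch; value names are hypothetical):
///   %xext = fpext <4 x float> %x to <4 x double>
///   %yext = fpext float %y to double
///   %ins  = insertelement <4 x double> %xext, double %yext, i64 2
/// -->
///   %narrow = insertelement <4 x float> %x, float %y, i64 2
///   %ins    = fpext <4 x float> %narrow to <4 x double>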
static Instruction *narrowInsElt(InsertElementInst &InsElt,
                                 InstCombiner::BuilderTy &Builder) {
  // We are creating a vector extend. If the original vector extend has another
  // use, that would mean we end up with 2 vector extends, so avoid that.
  // TODO: We could ease the use-check to "if at least one op has one use"
  // (assuming that the source types match - see next TODO comment).
  Value *Vec = InsElt.getOperand(0);
  if (!Vec->hasOneUse())
    return nullptr;

  Value *Scalar = InsElt.getOperand(1);
  Value *X, *Y;
  CastInst::CastOps CastOpcode;
  if (match(Vec, m_FPExt(m_Value(X))) && match(Scalar, m_FPExt(m_Value(Y))))
    CastOpcode = Instruction::FPExt;
  else if (match(Vec, m_SExt(m_Value(X))) && match(Scalar, m_SExt(m_Value(Y))))
    CastOpcode = Instruction::SExt;
  else if (match(Vec, m_ZExt(m_Value(X))) && match(Scalar, m_ZExt(m_Value(Y))))
    CastOpcode = Instruction::ZExt;
  else
    return nullptr;

  // TODO: We can allow mismatched types by creating an intermediate cast.
  if (X->getType()->getScalarType() != Y->getType())
    return nullptr;

  // inselt (ext X), (ext Y), Index --> ext (inselt X, Y, Index)
  Value *NewInsElt = Builder.CreateInsertElement(X, Y, InsElt.getOperand(2));
  return CastInst::Create(CastOpcode, NewInsElt, InsElt.getType());
}

/// If we are inserting 2 halves of a value into adjacent elements of a vector,
/// try to convert to a single insert with appropriate bitcasts.
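///
/// For example (an illustrative little-endian sketch; value names are
/// hypothetical):
///   %hi   = lshr i32 %x, 16
///   %t0   = trunc i32 %x to i16
///   %t1   = trunc i32 %hi to i16
///   %ins0 = insertelement <4 x i16> undef, i16 %t0, i64 0
///   %ins1 = insertelement <4 x i16> %ins0, i16 %t1, i64 1
/// becomes a single insert of the full value into a bitcasted vector:
///   %bc   = bitcast <4 x i16> undef to <2 x i32>
///   %ins  = insertelement <2 x i32> %bc, i32 %x, i64 0
///   %ins1 = bitcast <2 x i32> %ins to <4 x i16>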
static Instruction *foldTruncInsEltPair(InsertElementInst &InsElt,
                                        bool IsBigEndian,
                                        InstCombiner::BuilderTy &Builder) {
  Value *VecOp = InsElt.getOperand(0);
  Value *ScalarOp = InsElt.getOperand(1);
  Value *IndexOp = InsElt.getOperand(2);

  // The pattern depends on endianness because we expect the lower index to be
  // inserted first.
  // Big endian:
  // inselt (inselt BaseVec, (trunc (lshr X, BW/2)), Index0), (trunc X), Index1
  // Little endian:
  // inselt (inselt BaseVec, (trunc X), Index0), (trunc (lshr X, BW/2)), Index1
  // Note: It is not safe to do this transform with an arbitrary base vector
  //       because the bitcast of that vector to fewer/larger elements could
  //       allow poison to spill into an element that was not poison before.
  // TODO: Detect smaller fractions of the scalar.
  // TODO: One-use checks are conservative.
  auto *VTy = dyn_cast<FixedVectorType>(InsElt.getType());
  Value *Scalar0, *BaseVec;
  uint64_t Index0, Index1;
  if (!VTy || (VTy->getNumElements() & 1) ||
      !match(IndexOp, m_ConstantInt(Index1)) ||
      !match(VecOp, m_InsertElt(m_Value(BaseVec), m_Value(Scalar0),
                                m_ConstantInt(Index0))) ||
      !match(BaseVec, m_Undef()))
    return nullptr;

  // The first insert must be to the index one less than this one, and
  // the first insert must be to an even index.
  if (Index0 + 1 != Index1 || Index0 & 1)
    return nullptr;

  // For big endian, the high half of the value should be inserted first.
  // For little endian, the low half of the value should be inserted first.
  Value *X;
  uint64_t ShAmt;
  if (IsBigEndian) {
    if (!match(ScalarOp, m_Trunc(m_Value(X))) ||
        !match(Scalar0, m_Trunc(m_LShr(m_Specific(X), m_ConstantInt(ShAmt)))))
      return nullptr;
  } else {
    if (!match(Scalar0, m_Trunc(m_Value(X))) ||
        !match(ScalarOp, m_Trunc(m_LShr(m_Specific(X), m_ConstantInt(ShAmt)))))
      return nullptr;
  }

  Type *SrcTy = X->getType();
  unsigned ScalarWidth = SrcTy->getScalarSizeInBits();
  unsigned VecEltWidth = VTy->getScalarSizeInBits();
  if (ScalarWidth != VecEltWidth * 2 || ShAmt != VecEltWidth)
    return nullptr;

  // Bitcast the base vector to a vector type with the source element type.
  Type *CastTy = FixedVectorType::get(SrcTy, VTy->getNumElements() / 2);
  Value *CastBaseVec = Builder.CreateBitCast(BaseVec, CastTy);

  // Scale the insert index for a vector with half as many elements.
  // bitcast (inselt (bitcast BaseVec), X, NewIndex)
  uint64_t NewIndex = IsBigEndian ? Index1 / 2 : Index0 / 2;
  Value *NewInsert = Builder.CreateInsertElement(CastBaseVec, X, NewIndex);
  return new BitCastInst(NewInsert, VTy);
}

Instruction *InstCombinerImpl::visitInsertElementInst(InsertElementInst &IE) {
  Value *VecOp = IE.getOperand(0);
  Value *ScalarOp = IE.getOperand(1);
  Value *IdxOp = IE.getOperand(2);

  if (auto *V = simplifyInsertElementInst(
          VecOp, ScalarOp, IdxOp, SQ.getWithInstruction(&IE)))
    return replaceInstUsesWith(IE, V);

  // Canonicalize the type of constant indices to i64 to simplify CSE.
  if (auto *IndexC = dyn_cast<ConstantInt>(IdxOp)) {
    if (auto *NewIdx = getPreferredVectorIndex(IndexC))
      return replaceOperand(IE, 2, NewIdx);

    Value *BaseVec, *OtherScalar;
    uint64_t OtherIndexVal;
    if (match(VecOp, m_OneUse(m_InsertElt(m_Value(BaseVec),
                                          m_Value(OtherScalar),
                                          m_ConstantInt(OtherIndexVal)))) &&
        !isa<Constant>(OtherScalar) && OtherIndexVal > IndexC->getZExtValue()) {
      Value *NewIns = Builder.CreateInsertElement(BaseVec, ScalarOp, IdxOp);
      return InsertElementInst::Create(NewIns, OtherScalar,
                                       Builder.getInt64(OtherIndexVal));
    }
  }

  // If the scalar is bitcast and inserted into undef, do the insert in the
  // source type followed by bitcast.
  // TODO: Generalize for insert into any constant, not just undef?
  Value *ScalarSrc;
  if (match(VecOp, m_Undef()) &&
      match(ScalarOp, m_OneUse(m_BitCast(m_Value(ScalarSrc)))) &&
      (ScalarSrc->getType()->isIntegerTy() ||
       ScalarSrc->getType()->isFloatingPointTy())) {
    // inselt undef, (bitcast ScalarSrc), IdxOp -->
    //   bitcast (inselt undef, ScalarSrc, IdxOp)
    Type *ScalarTy = ScalarSrc->getType();
    Type *VecTy = VectorType::get(ScalarTy, IE.getType()->getElementCount());
    Constant *NewUndef = isa<PoisonValue>(VecOp) ? PoisonValue::get(VecTy)
                                                 : UndefValue::get(VecTy);
    Value *NewInsElt = Builder.CreateInsertElement(NewUndef, ScalarSrc, IdxOp);
    return new BitCastInst(NewInsElt, IE.getType());
  }

  // If the vector and scalar are both bitcast from the same element type, do
  // the insert in that source type followed by bitcast.
  Value *VecSrc;
  if (match(VecOp, m_BitCast(m_Value(VecSrc))) &&
      match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) &&
      (VecOp->hasOneUse() || ScalarOp->hasOneUse()) &&
      VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() &&
      cast<VectorType>(VecSrc->getType())->getElementType() ==
          ScalarSrc->getType()) {
    // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp -->
    //   bitcast (inselt VecSrc, ScalarSrc, IdxOp)
    Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
    return new BitCastInst(NewInsElt, IE.getType());
  }

  // If the inserted element was extracted from some other fixed-length vector
  // and both indexes are valid constants, try to turn this into a shuffle.
  // Cannot handle scalable vector types; the number of elements needed to
  // create the shuffle mask is not a compile-time constant.
  uint64_t InsertedIdx, ExtractedIdx;
  Value *ExtVecOp;
  if (isa<FixedVectorType>(IE.getType()) &&
      match(IdxOp, m_ConstantInt(InsertedIdx)) &&
      match(ScalarOp,
            m_ExtractElt(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
      isa<FixedVectorType>(ExtVecOp->getType()) &&
      ExtractedIdx <
          cast<FixedVectorType>(ExtVecOp->getType())->getNumElements()) {
    // TODO: Looking at the user(s) to determine if this insert is a
    // fold-to-shuffle opportunity does not match the usual instcombine
    // constraints. We should decide if the transform is worthy based only
    // on this instruction and its operands, but that may not work currently.
    //
    // Here, we are trying to avoid creating shuffles before reaching
    // the end of a chain of extract-insert pairs. This is complicated because
    // we do not generally form arbitrary shuffle masks in instcombine
    // (because those may codegen poorly), but collectShuffleElements() does
    // exactly that.
    //
    // The rules for determining what is an acceptable target-independent
    // shuffle mask are fuzzy because they evolve based on the backend's
    // capabilities and real-world impact.
    auto isShuffleRootCandidate = [](InsertElementInst &Insert) {
      if (!Insert.hasOneUse())
        return true;
      auto *InsertUser = dyn_cast<InsertElementInst>(Insert.user_back());
      if (!InsertUser)
        return true;
      return false;
    };

    // Try to form a shuffle from a chain of extract-insert ops.
    if (isShuffleRootCandidate(IE)) {
      bool Rerun = true;
      while (Rerun) {
        Rerun = false;

        SmallVector<int, 16> Mask;
        ShuffleOps LR =
            collectShuffleElements(&IE, Mask, nullptr, *this, Rerun);

        // The proposed shuffle may be trivial, in which case we shouldn't
        // perform the combine.
        if (LR.first != &IE && LR.second != &IE) {
          // We now have a shuffle of LHS, RHS, Mask.
          if (LR.second == nullptr)
            LR.second = PoisonValue::get(LR.first->getType());
          return new ShuffleVectorInst(LR.first, LR.second, Mask);
        }
      }
    }
  }

  if (auto VecTy = dyn_cast<FixedVectorType>(VecOp->getType())) {
    unsigned VWidth = VecTy->getNumElements();
    APInt PoisonElts(VWidth, 0);
    APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
    if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask,
                                              PoisonElts)) {
      if (V != &IE)
        return replaceInstUsesWith(IE, V);
      return &IE;
    }
  }

  if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE))
    return Shuf;

  if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder))
    return NewInsElt;

  if (Instruction *Broadcast = foldInsSequenceIntoSplat(IE))
    return Broadcast;

  if (Instruction *Splat = foldInsEltIntoSplat(IE))
    return Splat;

  if (Instruction *IdentityShuf = foldInsEltIntoIdentityShuffle(IE))
    return IdentityShuf;

  if (Instruction *Ext = narrowInsElt(IE, Builder))
    return Ext;

  if (Instruction *Ext = foldTruncInsEltPair(IE, DL.isBigEndian(), Builder))
    return Ext;

  return nullptr;
}

/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
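///
/// For example (an illustrative sketch): a one-use 'add' can be evaluated
/// with its lanes reordered, but an integer division is rejected when the
/// mask contains poison elements, because shuffling a poison lane into a
/// divisor could introduce immediate undefined behavior:
///   shuffle (udiv %x, <i32 2, i32 4>) with mask <1, poison> --> rejected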
static bool canEvaluateShuffled(Value *V, ArrayRef<int> Mask,
                                unsigned Depth = 5) {
  // We can always reorder the elements of a constant.
  if (isa<Constant>(V))
    return true;

  // We won't reorder vector arguments. No IPO here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Two users may expect different orders of the elements. Don't try it.
  if (!I->hasOneUse())
    return false;

  if (Depth == 0) return false;

  switch (I->getOpcode()) {
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Propagating an undefined shuffle mask element to integer div/rem is not
    // allowed because those opcodes can create immediate undefined behavior
    // from an undefined element in an operand.
    if (llvm::is_contained(Mask, -1))
      return false;
    [[fallthrough]];
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::GetElementPtr: {
    // Bail out if we would create longer vector ops. We could allow creating
    // longer vector ops, but that may result in more expensive codegen.
    Type *ITy = I->getType();
    if (ITy->isVectorTy() &&
        Mask.size() > cast<FixedVectorType>(ITy)->getNumElements())
      return false;
    for (Value *Operand : I->operands()) {
      if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
        return false;
    }
    return true;
  }
  case Instruction::InsertElement: {
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!CI) return false;
    int ElementNumber = CI->getLimitedValue();

    // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
    // can't put an element into multiple indices.
    bool SeenOnce = false;
    for (int I : Mask) {
      if (I == ElementNumber) {
        if (SeenOnce)
          return false;
        SeenOnce = true;
      }
    }
    return canEvaluateShuffled(I->getOperand(0), Mask, Depth - 1);
  }
  }
  return false;
}

/// Rebuild a new instruction just like 'I' but with the new operands given.
/// If the types of the new operands do not match the original instruction's
/// operand types (e.g. because the mask changed the vector length), the new
/// operands' types take precedence.
static Value *buildNew(Instruction *I, ArrayRef<Value *> NewOps,
                       IRBuilderBase &Builder) {
  Builder.SetInsertPoint(I);
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    BinaryOperator *BO = cast<BinaryOperator>(I);
    assert(NewOps.size() == 2 && "binary operator with #ops != 2");
    Value *New = Builder.CreateBinOp(BO->getOpcode(), NewOps[0], NewOps[1]);
    if (auto *NewI = dyn_cast<Instruction>(New)) {
      if (isa<OverflowingBinaryOperator>(BO)) {
        NewI->setHasNoUnsignedWrap(BO->hasNoUnsignedWrap());
        NewI->setHasNoSignedWrap(BO->hasNoSignedWrap());
      }
      if (isa<PossiblyExactOperator>(BO)) {
        NewI->setIsExact(BO->isExact());
      }
      if (isa<FPMathOperator>(BO))
        NewI->copyFastMathFlags(I);
    }
    return New;
  }
  case Instruction::ICmp:
    assert(NewOps.size() == 2 && "icmp with #ops != 2");
    return Builder.CreateICmp(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
                              NewOps[1]);
  case Instruction::FCmp:
    assert(NewOps.size() == 2 && "fcmp with #ops != 2");
    return Builder.CreateFCmp(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
                              NewOps[1]);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt: {
    // It's possible that the mask has a different number of elements from
    // the original cast. We recompute the destination type to match the mask.
    Type *DestTy = VectorType::get(
        I->getType()->getScalarType(),
        cast<VectorType>(NewOps[0]->getType())->getElementCount());
    assert(NewOps.size() == 1 && "cast with #ops != 1");
    return Builder.CreateCast(cast<CastInst>(I)->getOpcode(), NewOps[0],
                              DestTy);
  }
  case Instruction::GetElementPtr: {
    Value *Ptr = NewOps[0];
    ArrayRef<Value *> Idx = NewOps.slice(1);
    return Builder.CreateGEP(cast<GEPOperator>(I)->getSourceElementType(),
                             Ptr, Idx, "",
                             cast<GEPOperator>(I)->getNoWrapFlags());
  }
  }
  llvm_unreachable("failed to rebuild vector instructions");
}

static Value *evaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask,
                                              IRBuilderBase &Builder) {
  // Mask.size() does not need to be equal to the number of vector elements.

  assert(V->getType()->isVectorTy() && "can't reorder non-vector elements");
  Type *EltTy = V->getType()->getScalarType();

  if (isa<PoisonValue>(V))
    return PoisonValue::get(FixedVectorType::get(EltTy, Mask.size()));

  if (match(V, m_Undef()))
    return UndefValue::get(FixedVectorType::get(EltTy, Mask.size()));

  if (isa<ConstantAggregateZero>(V))
    return ConstantAggregateZero::get(FixedVectorType::get(EltTy, Mask.size()));

  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getShuffleVector(C, PoisonValue::get(C->getType()),
                                          Mask);

  Instruction *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::Select:
  case Instruction::GetElementPtr: {
    SmallVector<Value *, 8> NewOps;
    bool NeedsRebuild =
        (Mask.size() !=
         cast<FixedVectorType>(I->getType())->getNumElements());
    for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *V;
      // Recursively call evaluateInDifferentElementOrder on vector arguments
      // as well. E.g. GetElementPtr may have scalar operands even if the
      // return value is a vector, so we need to examine the operand type.
      if (I->getOperand(i)->getType()->isVectorTy())
        V = evaluateInDifferentElementOrder(I->getOperand(i), Mask, Builder);
      else
        V = I->getOperand(i);
      NewOps.push_back(V);
      NeedsRebuild |= (V != I->getOperand(i));
    }
    if (NeedsRebuild)
      return buildNew(I, NewOps, Builder);
    return I;
  }
  case Instruction::InsertElement: {
    int Element = cast<ConstantInt>(I->getOperand(2))->getLimitedValue();

    // The insertelement was inserting at Element. Figure out which element
    // that becomes after shuffling. The answer is guaranteed to be unique
    // by canEvaluateShuffled.
    bool Found = false;
    int Index = 0;
    for (int e = Mask.size(); Index != e; ++Index) {
      if (Mask[Index] == Element) {
        Found = true;
        break;
      }
    }

    // If the element is not in Mask, there is no need to handle operand 1
    // (the element to be inserted). Just evaluate the values in operand 0
    // according to Mask.
    if (!Found)
      return evaluateInDifferentElementOrder(I->getOperand(0), Mask, Builder);

    Value *V = evaluateInDifferentElementOrder(I->getOperand(0), Mask,
                                               Builder);
    Builder.SetInsertPoint(I);
    return Builder.CreateInsertElement(V, I->getOperand(1), Index);
  }
  }
  llvm_unreachable("failed to reorder elements of vector instruction!");
}

// Returns true if the shuffle is extracting a contiguous range of values from
// LHS, for example:
//                 +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
//   Input:        |AA|BB|CC|DD|EE|FF|GG|HH|II|JJ|KK|LL|MM|NN|OO|PP|
//   Shuffles to:  |EE|FF|GG|HH|
//                 +--+--+--+--+
static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                       ArrayRef<int> Mask) {
  unsigned LHSElems =
      cast<FixedVectorType>(SVI.getOperand(0)->getType())->getNumElements();
  unsigned MaskElems = Mask.size();
  unsigned BegIdx = Mask.front();
  unsigned EndIdx = Mask.back();
  if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)
    return false;
  for (unsigned I = 0; I != MaskElems; ++I)
    if (static_cast<unsigned>(Mask[I]) != BegIdx + I)
      return false;
  return true;
}

/// These are the ingredients in an alternate form binary operator as described
/// below.
struct BinopElts {
  BinaryOperator::BinaryOps Opcode;
  Value *Op0;
  Value *Op1;
  BinopElts(BinaryOperator::BinaryOps Opc = (BinaryOperator::BinaryOps)0,
            Value *V0 = nullptr, Value *V1 = nullptr) :
      Opcode(Opc), Op0(V0), Op1(V1) {}
  operator bool() const { return Opcode != 0; }
};

/// Binops may be transformed into binops with different opcodes and operands.
/// Reverse the usual canonicalization to enable folds with the non-canonical
/// form of the binop. If a transform is possible, return the elements of the
/// new binop. If not, return invalid elements.
static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL) {
  Value *BO0 = BO->getOperand(0), *BO1 = BO->getOperand(1);
  Type *Ty = BO->getType();
  switch (BO->getOpcode()) {
  case Instruction::Shl: {
    // shl X, C --> mul X, (1 << C)
    Constant *C;
    if (match(BO1, m_ImmConstant(C))) {
      Constant *ShlOne = ConstantFoldBinaryOpOperands(
          Instruction::Shl, ConstantInt::get(Ty, 1), C, DL);
      assert(ShlOne && "Constant folding of immediate constants failed");
      return {Instruction::Mul, BO0, ShlOne};
    }
    break;
  }
  case Instruction::Or: {
    // or disjoint X, C --> add X, C
    if (cast<PossiblyDisjointInst>(BO)->isDisjoint())
      return {Instruction::Add, BO0, BO1};
    break;
  }
  case Instruction::Sub:
    // sub 0, X --> mul X, -1
    if (match(BO0, m_ZeroInt()))
      return {Instruction::Mul, BO1, ConstantInt::getAllOnesValue(Ty)};
    break;
  default:
    break;
  }
  return {};
}

/// A select shuffle of a select shuffle with a shared operand can be reduced
/// to a single select shuffle. This is an obvious improvement in IR, and the
/// backend is expected to lower select shuffles efficiently.
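///
/// For example (an illustrative sketch; value names are hypothetical):
///   %sel1 = shufflevector <4 x i32> %x, <4 x i32> %y,
///           <4 x i32> <i32 0, i32 5, i32 2, i32 7>
///   %sel2 = shufflevector <4 x i32> %x, <4 x i32> %sel1,
///           <4 x i32> <i32 0, i32 1, i32 6, i32 7>
/// reduces to one select shuffle of the shared operands:
///   %sel2 = shufflevector <4 x i32> %x, <4 x i32> %y,
///           <4 x i32> <i32 0, i32 1, i32 2, i32 7>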
static Instruction *foldSelectShuffleOfSelectShuffle(ShuffleVectorInst &Shuf) {
  assert(Shuf.isSelect() && "Must have select-equivalent shuffle");

  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  SmallVector<int, 16> Mask;
  Shuf.getShuffleMask(Mask);
  unsigned NumElts = Mask.size();

  // Canonicalize a select shuffle with common operand as Op1.
  auto *ShufOp = dyn_cast<ShuffleVectorInst>(Op0);
  if (ShufOp && ShufOp->isSelect() &&
      (ShufOp->getOperand(0) == Op1 || ShufOp->getOperand(1) == Op1)) {
    std::swap(Op0, Op1);
    ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
  }

  ShufOp = dyn_cast<ShuffleVectorInst>(Op1);
  if (!ShufOp || !ShufOp->isSelect() ||
      (ShufOp->getOperand(0) != Op0 && ShufOp->getOperand(1) != Op0))
    return nullptr;

  Value *X = ShufOp->getOperand(0), *Y = ShufOp->getOperand(1);
  SmallVector<int, 16> Mask1;
  ShufOp->getShuffleMask(Mask1);
  assert(Mask1.size() == NumElts && "Vector size changed with select shuffle");

  // Canonicalize common operand (Op0) as X (first operand of first shuffle).
  if (Y == Op0) {
    std::swap(X, Y);
    ShuffleVectorInst::commuteShuffleMask(Mask1, NumElts);
  }

  // If the mask chooses from X (operand 0), it stays the same.
  // If the mask chooses from the earlier shuffle, the other mask value is
  // transferred to the combined select shuffle:
  // shuf X, (shuf X, Y, M1), M --> shuf X, Y, M'
  SmallVector<int, 16> NewMask(NumElts);
  for (unsigned i = 0; i != NumElts; ++i)
    NewMask[i] = Mask[i] < (signed)NumElts ? Mask[i] : Mask1[i];

  // A select mask with undef elements might look like an identity mask.
  assert((ShuffleVectorInst::isSelectMask(NewMask, NumElts) ||
          ShuffleVectorInst::isIdentityMask(NewMask, NumElts)) &&
         "Unexpected shuffle mask");
  return new ShuffleVectorInst(X, Y, NewMask);
}

static Instruction *foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf,
                                                const SimplifyQuery &SQ) {
  assert(Shuf.isSelect() && "Must have select-equivalent shuffle");

  // Are we shuffling together some value and that same value after it has been
  // modified by a binop with a constant?
  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  Constant *C;
  bool Op0IsBinop;
  if (match(Op0, m_BinOp(m_Specific(Op1), m_Constant(C))))
    Op0IsBinop = true;
  else if (match(Op1, m_BinOp(m_Specific(Op0), m_Constant(C))))
    Op0IsBinop = false;
  else
    return nullptr;

  // The identity constant for a binop leaves a variable operand unchanged. For
  // a vector, this is a splat of something like 0, -1, or 1.
  // If there's no identity constant for this binop, we're done.
  auto *BO = cast<BinaryOperator>(Op0IsBinop ? Op0 : Op1);
  BinaryOperator::BinaryOps BOpcode = BO->getOpcode();
  Constant *IdC = ConstantExpr::getBinOpIdentity(BOpcode, Shuf.getType(), true);
  if (!IdC)
    return nullptr;

  Value *X = Op0IsBinop ? Op1 : Op0;

  // Bail out if the non-binop operand might have NaN elements. The transformed
  // floating-point math operation may not preserve the exact NaN bit-pattern
  // of X -- e.g. 'fadd sNaN, 0.0 -> qNaN' -- while the original program would
  // have passed X through unchanged.
  if (Shuf.getType()->getElementType()->isFloatingPointTy() &&
      !isKnownNeverNaN(X, SQ))
    return nullptr;

  // Shuffle identity constants into the lanes that return the original value.
  // Example: shuf (mul X, {-1,-2,-3,-4}), X, {0,5,6,3} --> mul X, {-1,1,1,-4}
  // Example: shuf X, (add X, {-1,-2,-3,-4}), {0,1,6,7} --> add X, {0,0,-3,-4}
  // The existing binop constant vector remains in the same operand position.
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Constant *NewC = Op0IsBinop ? ConstantExpr::getShuffleVector(C, IdC, Mask) :
                                ConstantExpr::getShuffleVector(IdC, C, Mask);

  bool MightCreatePoisonOrUB =
      is_contained(Mask, PoisonMaskElem) &&
      (Instruction::isIntDivRem(BOpcode) || Instruction::isShift(BOpcode));
  if (MightCreatePoisonOrUB)
    NewC = InstCombiner::getSafeVectorConstantForBinop(BOpcode, NewC, true);

  // shuf (bop X, C), X, M --> bop X, C'
  // shuf X, (bop X, C), M --> bop X, C'
  Instruction *NewBO = BinaryOperator::Create(BOpcode, X, NewC);
  NewBO->copyIRFlags(BO);

  // An undef shuffle mask element may propagate as an undef constant element
  // in the new binop. That would produce poison where the original code might
  // not. If we already made a safe constant, then there's no danger.
  if (is_contained(Mask, PoisonMaskElem) && !MightCreatePoisonOrUB)
    NewBO->dropPoisonGeneratingFlags();
  return NewBO;
}

/// If we have an insert of a scalar to a non-zero element of an undefined
/// vector and then shuffle that value, that's the same as inserting to the zero
/// element and shuffling. Splatting from the zero element is recognized as the
/// canonical form of splat.
static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
                                            InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Value *X;
  uint64_t IndexC;

  // Match a shuffle that is a splat to a non-zero element.
  if (!match(Op0, m_OneUse(m_InsertElt(m_Poison(), m_Value(X),
                                       m_ConstantInt(IndexC)))) ||
      !match(Op1, m_Poison()) || match(Mask, m_ZeroMask()) || IndexC == 0)
    return nullptr;

  // Insert into element 0 of a poison vector.
  PoisonValue *PoisonVec = PoisonValue::get(Shuf.getType());
  Value *NewIns = Builder.CreateInsertElement(PoisonVec, X, (uint64_t)0);

  // Splat from element 0. Any mask element that is poison remains poison.
  // For example:
  // shuf (inselt poison, X, 2), _, <2,2,undef>
  //   --> shuf (inselt poison, X, 0), poison, <0,0,undef>
  unsigned NumMaskElts =
      cast<FixedVectorType>(Shuf.getType())->getNumElements();
  SmallVector<int, 16> NewMask(NumMaskElts, 0);
  for (unsigned i = 0; i != NumMaskElts; ++i)
    if (Mask[i] == PoisonMaskElem)
      NewMask[i] = Mask[i];

  return new ShuffleVectorInst(NewIns, NewMask);
}

/// Try to fold shuffles that are the equivalent of a vector select.
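///
/// For example (an illustrative sketch; value names are hypothetical), two
/// binops with the same opcode and a common operand can fold to one binop:
///   %b0 = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
///   %b1 = add <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
///   %s  = shufflevector <4 x i32> %b0, <4 x i32> %b1,
///         <4 x i32> <i32 0, i32 5, i32 2, i32 7>
/// -->
///   %s = add <4 x i32> %x, <i32 1, i32 6, i32 3, i32 8>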
Instruction *InstCombinerImpl::foldSelectShuffle(ShuffleVectorInst &Shuf) {
  if (!Shuf.isSelect())
    return nullptr;

  // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
  // Commuting undef to operand 0 conflicts with another canonicalization.
  unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
  if (!match(Shuf.getOperand(1), m_Undef()) &&
      Shuf.getMaskValue(0) >= (int)NumElts) {
    // TODO: Can we assert that both operands of a shuffle-select are not undef
    // (otherwise, it would have been folded by instsimplify)?
    Shuf.commute();
    return &Shuf;
  }

  if (Instruction *I = foldSelectShuffleOfSelectShuffle(Shuf))
    return I;

  if (Instruction *I = foldSelectShuffleWith1Binop(
          Shuf, getSimplifyQuery().getWithInstruction(&Shuf)))
    return I;

  BinaryOperator *B0, *B1;
  if (!match(Shuf.getOperand(0), m_BinOp(B0)) ||
      !match(Shuf.getOperand(1), m_BinOp(B1)))
    return nullptr;

  // If one operand is "0 - X", allow that to be viewed as "X * -1"
  // (ConstantsAreOp1) by getAlternateBinop below. If the neg is not paired
  // with a multiply, we will exit because C0/C1 will not be set.
  Value *X, *Y;
  Constant *C0 = nullptr, *C1 = nullptr;
  bool ConstantsAreOp1;
  if (match(B0, m_BinOp(m_Constant(C0), m_Value(X))) &&
      match(B1, m_BinOp(m_Constant(C1), m_Value(Y))))
    ConstantsAreOp1 = false;
  else if (match(B0, m_CombineOr(m_BinOp(m_Value(X), m_Constant(C0)),
                                 m_Neg(m_Value(X)))) &&
           match(B1, m_CombineOr(m_BinOp(m_Value(Y), m_Constant(C1)),
                                 m_Neg(m_Value(Y)))))
    ConstantsAreOp1 = true;
  else
    return nullptr;

  // We need matching binops to fold the lanes together.
  BinaryOperator::BinaryOps Opc0 = B0->getOpcode();
  BinaryOperator::BinaryOps Opc1 = B1->getOpcode();
  bool DropNSW = false;
  if (ConstantsAreOp1 && Opc0 != Opc1) {
    // TODO: We drop "nsw" if shift is converted into multiply because it may
    // not be correct when the shift amount is BitWidth - 1. We could examine
    // each vector element to determine if it is safe to keep that flag.
    if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl)
      DropNSW = true;
    if (BinopElts AltB0 = getAlternateBinop(B0, DL)) {
      assert(isa<Constant>(AltB0.Op1) && "Expecting constant with alt binop");
      Opc0 = AltB0.Opcode;
      C0 = cast<Constant>(AltB0.Op1);
    } else if (BinopElts AltB1 = getAlternateBinop(B1, DL)) {
      assert(isa<Constant>(AltB1.Op1) && "Expecting constant with alt binop");
      Opc1 = AltB1.Opcode;
      C1 = cast<Constant>(AltB1.Op1);
    }
  }

  if (Opc0 != Opc1 || !C0 || !C1)
    return nullptr;

  // The opcodes must be the same. Use a new name to make that clear.
  BinaryOperator::BinaryOps BOpc = Opc0;

  // Select the constant elements needed for the single binop.
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  Constant *NewC = ConstantExpr::getShuffleVector(C0, C1, Mask);

  // We are moving a binop after a shuffle. When a shuffle has an undefined
  // mask element, the result is undefined, but it is not poison or undefined
  // behavior. That is not necessarily true for div/rem/shift.
  bool MightCreatePoisonOrUB =
      is_contained(Mask, PoisonMaskElem) &&
      (Instruction::isIntDivRem(BOpc) || Instruction::isShift(BOpc));
  if (MightCreatePoisonOrUB)
    NewC = InstCombiner::getSafeVectorConstantForBinop(BOpc, NewC,
                                                       ConstantsAreOp1);

  Value *V;
  if (X == Y) {
    // Remove a binop and the shuffle by rearranging the constant:
    // shuffle (op V, C0), (op V, C1), M --> op V, C'
    // shuffle (op C0, V), (op C1, V), M --> op C', V
    V = X;
  } else {
    // If there are 2 different variable operands, we must create a new shuffle
    // (select) first, so check uses to ensure that we don't end up with more
    // instructions than we started with.
    if (!B0->hasOneUse() && !B1->hasOneUse())
      return nullptr;

    // If we use the original shuffle mask and op1 is *variable*, we would be
    // putting an undef into operand 1 of div/rem/shift. This is either UB or
    // poison. We do not have to guard against UB when *constants* are op1
    // because safe constants guarantee that we do not overflow sdiv/srem (and
    // there's no danger for other opcodes).
    // TODO: To allow this case, create a new shuffle mask with no undefs.
    if (MightCreatePoisonOrUB && !ConstantsAreOp1)
      return nullptr;

    // Note: In general, we do not create new shuffles in InstCombine because
    // we do not know if a target can lower an arbitrary shuffle optimally.
    // In this case, the shuffle uses the existing mask, so there is no
    // additional risk.

    // Select the variable vectors first, then perform the binop:
    // shuffle (op X, C0), (op Y, C1), M --> op (shuffle X, Y, M), C'
    // shuffle (op C0, X), (op C1, Y), M --> op C', (shuffle X, Y, M)
    V = Builder.CreateShuffleVector(X, Y, Mask);
  }

  Value *NewBO = ConstantsAreOp1 ? Builder.CreateBinOp(BOpc, V, NewC) :
                                   Builder.CreateBinOp(BOpc, NewC, V);

  // Flags are intersected from the 2 source binops. But there are 2 exceptions:
  // 1. If we changed an opcode, poison conditions might have changed.
  // 2. If the shuffle had undef mask elements, the new binop might have undefs
  //    where the original code did not. But if we already made a safe constant,
  //    then there's no danger.
  if (auto *NewI = dyn_cast<Instruction>(NewBO)) {
    NewI->copyIRFlags(B0);
    NewI->andIRFlags(B1);
    if (DropNSW)
      NewI->setHasNoSignedWrap(false);
    if (is_contained(Mask, PoisonMaskElem) && !MightCreatePoisonOrUB)
      NewI->dropPoisonGeneratingFlags();
  }
  return replaceInstUsesWith(Shuf, NewBO);
}

/// Convert a narrowing shuffle of a bitcasted vector into a vector truncate.
/// Example (little endian):
/// shuf (bitcast <4 x i16> X to <8 x i8>), <0, 2, 4, 6> --> trunc X to <4 x i8>
static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
                                     bool IsBigEndian) {
  // This must be a bitcasted shuffle of 1 vector integer operand.
  Type *DestType = Shuf.getType();
  Value *X;
  if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) ||
      !match(Shuf.getOperand(1), m_Poison()) || !DestType->isIntOrIntVectorTy())
    return nullptr;

  // The source type must have the same number of elements as the shuffle,
  // and the source element type must be larger than the shuffle element type.
  Type *SrcType = X->getType();
  if (!SrcType->isVectorTy() || !SrcType->isIntOrIntVectorTy() ||
      cast<FixedVectorType>(SrcType)->getNumElements() !=
          cast<FixedVectorType>(DestType)->getNumElements() ||
      SrcType->getScalarSizeInBits() % DestType->getScalarSizeInBits() != 0)
    return nullptr;

  assert(Shuf.changesLength() && !Shuf.increasesLength() &&
         "Expected a shuffle that decreases length");

  // Last, check that the mask chooses the correct low bits for each narrow
  // element in the result.
  uint64_t TruncRatio =
      SrcType->getScalarSizeInBits() / DestType->getScalarSizeInBits();
  ArrayRef<int> Mask = Shuf.getShuffleMask();
  for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] == PoisonMaskElem)
      continue;
    uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio;
    assert(LSBIndex <= INT32_MAX && "Overflowed 32-bits");
    if (Mask[i] != (int)LSBIndex)
      return nullptr;
  }

  return new TruncInst(X, DestType);
}

/// Match a shuffle-select-shuffle pattern where the shuffles are widening and
/// narrowing (concatenating with poison and extracting back to the original
/// length). This allows replacing the wide select with a narrow select.
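///
/// For example (an illustrative sketch; value names are hypothetical):
///   %widecond = shufflevector <2 x i1> %cond, <2 x i1> poison,
///               <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
///   %widesel  = select <4 x i1> %widecond, <4 x i32> %x, <4 x i32> %y
///   %extract  = shufflevector <4 x i32> %widesel, <4 x i32> poison,
///               <2 x i32> <i32 0, i32 1>
/// becomes a select on the original narrow condition:
///   %narrowx = shufflevector <4 x i32> %x, <4 x i32> poison,
///              <2 x i32> <i32 0, i32 1>
///   %narrowy = shufflevector <4 x i32> %y, <4 x i32> poison,
///              <2 x i32> <i32 0, i32 1>
///   %extract = select <2 x i1> %cond, <2 x i32> %narrowx, <2 x i32> %narrowy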
narrowVectorSelect(ShuffleVectorInst & Shuf,InstCombiner::BuilderTy & Builder)2505 static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
2506 InstCombiner::BuilderTy &Builder) {
2507 // This must be a narrowing identity shuffle. It extracts the 1st N elements
2508 // of the 1st vector operand of a shuffle.
2509 if (!match(Shuf.getOperand(1), m_Poison()) || !Shuf.isIdentityWithExtract())
2510 return nullptr;
2511
2512 // The vector being shuffled must be a vector select that we can eliminate.
2513 // TODO: The one-use requirement could be eased if X and/or Y are constants.
2514 Value *Cond, *X, *Y;
2515 if (!match(Shuf.getOperand(0),
2516 m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))))
2517 return nullptr;
2518
2519 // We need a narrow condition value. It must be extended with poison elements
2520 // and have the same number of elements as this shuffle.
2521 unsigned NarrowNumElts =
2522 cast<FixedVectorType>(Shuf.getType())->getNumElements();
2523 Value *NarrowCond;
2524 if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Poison()))) ||
2525 cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
2526 NarrowNumElts ||
2527 !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
2528 return nullptr;
2529
2530 // shuf (sel (shuf NarrowCond, poison, WideMask), X, Y), poison, NarrowMask)
2531 // -->
2532 // sel NarrowCond, (shuf X, poison, NarrowMask), (shuf Y, poison, NarrowMask)
2533 Value *NarrowX = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
2534 Value *NarrowY = Builder.CreateShuffleVector(Y, Shuf.getShuffleMask());
2535 return SelectInst::Create(NarrowCond, NarrowX, NarrowY);
2536 }
2537
2538 /// Canonicalize FP negate/abs after shuffle.
foldShuffleOfUnaryOps(ShuffleVectorInst & Shuf,InstCombiner::BuilderTy & Builder)2539 static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
2540 InstCombiner::BuilderTy &Builder) {
2541 auto *S0 = dyn_cast<Instruction>(Shuf.getOperand(0));
2542 Value *X;
2543 if (!S0 || !match(S0, m_CombineOr(m_FNeg(m_Value(X)), m_FAbs(m_Value(X)))))
2544 return nullptr;
2545
2546 bool IsFNeg = S0->getOpcode() == Instruction::FNeg;
2547
2548 // Match 2-input (binary) shuffle.
2549 auto *S1 = dyn_cast<Instruction>(Shuf.getOperand(1));
2550 Value *Y;
2551 if (!S1 || !match(S1, m_CombineOr(m_FNeg(m_Value(Y)), m_FAbs(m_Value(Y)))) ||
2552 S0->getOpcode() != S1->getOpcode() ||
2553 (!S0->hasOneUse() && !S1->hasOneUse()))
2554 return nullptr;
2555
2556 // shuf (fneg/fabs X), (fneg/fabs Y), Mask --> fneg/fabs (shuf X, Y, Mask)
2557 Value *NewShuf = Builder.CreateShuffleVector(X, Y, Shuf.getShuffleMask());
2558 Instruction *NewF;
2559 if (IsFNeg) {
2560 NewF = UnaryOperator::CreateFNeg(NewShuf);
2561 } else {
2562 Function *FAbs = Intrinsic::getOrInsertDeclaration(
2563 Shuf.getModule(), Intrinsic::fabs, Shuf.getType());
2564 NewF = CallInst::Create(FAbs, {NewShuf});
2565 }
2566 NewF->copyIRFlags(S0);
2567 NewF->andIRFlags(S1);
2568 return NewF;
2569 }
2570
2571 /// Canonicalize casts after shuffle.
foldCastShuffle(ShuffleVectorInst & Shuf,InstCombiner::BuilderTy & Builder)2572 static Instruction *foldCastShuffle(ShuffleVectorInst &Shuf,
2573 InstCombiner::BuilderTy &Builder) {
2574 auto *Cast0 = dyn_cast<CastInst>(Shuf.getOperand(0));
2575 if (!Cast0)
2576 return nullptr;
2577
2578 // TODO: Allow other opcodes? That would require easing the type restrictions
2579 // below here.
2580 CastInst::CastOps CastOpcode = Cast0->getOpcode();
2581 switch (CastOpcode) {
2582 case Instruction::SExt:
2583 case Instruction::ZExt:
2584 case Instruction::FPToSI:
2585 case Instruction::FPToUI:
2586 case Instruction::SIToFP:
2587 case Instruction::UIToFP:
2588 break;
2589 default:
2590 return nullptr;
2591 }
2592
2593 VectorType *CastSrcTy = cast<VectorType>(Cast0->getSrcTy());
2594 VectorType *ShufTy = Shuf.getType();
2595 VectorType *ShufOpTy = cast<VectorType>(Shuf.getOperand(0)->getType());
2596
2597 // TODO: Allow length-increasing shuffles?
2598 if (ShufTy->getElementCount().getKnownMinValue() >
2599 ShufOpTy->getElementCount().getKnownMinValue())
2600 return nullptr;
2601
2602 // shuffle (cast X), Poison, identity-with-extract-mask -->
2603 // cast (shuffle X, Poison, identity-with-extract-mask).
2604 if (isa<PoisonValue>(Shuf.getOperand(1)) && Cast0->hasOneUse() &&
2605 Shuf.isIdentityWithExtract()) {
2606 auto *NewIns = Builder.CreateShuffleVector(Cast0->getOperand(0),
2607 PoisonValue::get(CastSrcTy),
2608 Shuf.getShuffleMask());
2609 return CastInst::Create(Cast0->getOpcode(), NewIns, Shuf.getType());
2610 }
2611
2612 auto *Cast1 = dyn_cast<CastInst>(Shuf.getOperand(1));
2613 // Do we have 2 matching cast operands?
2614 if (!Cast1 || Cast0->getOpcode() != Cast1->getOpcode() ||
2615 Cast0->getSrcTy() != Cast1->getSrcTy())
2616 return nullptr;
2617
2618 // TODO: Allow element-size-decreasing casts (ex: fptosi float to i8)?
2619 assert(isa<FixedVectorType>(CastSrcTy) && isa<FixedVectorType>(ShufOpTy) &&
2620 "Expected fixed vector operands for casts and binary shuffle");
2621 if (CastSrcTy->getPrimitiveSizeInBits() > ShufOpTy->getPrimitiveSizeInBits())
2622 return nullptr;
2623
2624 // At least one of the operands must have only one use (the shuffle).
2625 if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
2626 return nullptr;
2627
2628 // shuffle (cast X), (cast Y), Mask --> cast (shuffle X, Y, Mask)
2629 Value *X = Cast0->getOperand(0);
2630 Value *Y = Cast1->getOperand(0);
2631 Value *NewShuf = Builder.CreateShuffleVector(X, Y, Shuf.getShuffleMask());
2632 return CastInst::Create(CastOpcode, NewShuf, ShufTy);
2633 }
2634
2635 /// Try to fold an extract subvector operation.
foldIdentityExtractShuffle(ShuffleVectorInst & Shuf)2636 static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
2637 Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
2638 if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Poison()))
2639 return nullptr;
2640
2641 // Check if we are extracting all bits of an inserted scalar:
2642 // extract-subvec (bitcast (inselt ?, X, 0) --> bitcast X to subvec type
2643 Value *X;
2644 if (match(Op0, m_BitCast(m_InsertElt(m_Value(), m_Value(X), m_Zero()))) &&
2645 X->getType()->getPrimitiveSizeInBits() ==
2646 Shuf.getType()->getPrimitiveSizeInBits())
2647 return new BitCastInst(X, Shuf.getType());
2648
2649 // Try to combine 2 shuffles into 1 shuffle by concatenating a shuffle mask.
2650 Value *Y;
2651 ArrayRef<int> Mask;
2652 if (!match(Op0, m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask))))
2653 return nullptr;
2654
2655 // Be conservative with shuffle transforms. If we can't kill the 1st shuffle,
2656 // then combining may result in worse codegen.
2657 if (!Op0->hasOneUse())
2658 return nullptr;
2659
2660 // We are extracting a subvector from a shuffle. Remove excess elements from
2661 // the 1st shuffle mask to eliminate the extract.
2662 //
2663 // This transform is conservatively limited to identity extracts because we do
2664 // not allow arbitrary shuffle mask creation as a target-independent transform
2665 // (because we can't guarantee that will lower efficiently).
2666 //
2667 // If the extracting shuffle has an poison mask element, it transfers to the
2668 // new shuffle mask. Otherwise, copy the original mask element. Example:
2669 // shuf (shuf X, Y, <C0, C1, C2, poison, C4>), poison, <0, poison, 2, 3> -->
2670 // shuf X, Y, <C0, poison, C2, poison>
2671 unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
2672 SmallVector<int, 16> NewMask(NumElts);
2673 assert(NumElts < Mask.size() &&
2674 "Identity with extract must have less elements than its inputs");
2675
2676 for (unsigned i = 0; i != NumElts; ++i) {
2677 int ExtractMaskElt = Shuf.getMaskValue(i);
2678 int MaskElt = Mask[i];
2679 NewMask[i] = ExtractMaskElt == PoisonMaskElem ? ExtractMaskElt : MaskElt;
2680 }
2681 return new ShuffleVectorInst(X, Y, NewMask);
2682 }
2683
/// Try to replace a shuffle with an insertelement or try to replace a shuffle
/// operand with the operand of an insertelement.
static Instruction *foldShuffleWithInsert(ShuffleVectorInst &Shuf,
                                          InstCombinerImpl &IC) {
  Value *V0 = Shuf.getOperand(0), *V1 = Shuf.getOperand(1);
  SmallVector<int, 16> Mask;
  Shuf.getShuffleMask(Mask);

  int NumElts = Mask.size();
  int InpNumElts = cast<FixedVectorType>(V0->getType())->getNumElements();

  // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
  // not be able to handle it there if the insertelement has >1 use.
  // If the shuffle has an insertelement operand but does not choose the
  // inserted scalar element from that value, then we can replace that shuffle
  // operand with the source vector of the insertelement.
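  // Example:
  //   shuf (inselt X, %s, 2), Y, <0, 1, 4, 5> --> shuf X, Y, <0, 1, 4, 5>
  // because mask element 2 is never selected, so %s is irrelevant here.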
  Value *X;
  uint64_t IdxC;
  if (match(V0, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
    // shuf (inselt X, ?, IdxC), ?, Mask --> shuf X, ?, Mask
    if (!is_contained(Mask, (int)IdxC))
      return IC.replaceOperand(Shuf, 0, X);
  }
  if (match(V1, m_InsertElt(m_Value(X), m_Value(), m_ConstantInt(IdxC)))) {
    // Offset the index constant by the vector width because we are checking
    // for accesses to the 2nd vector input of the shuffle.
    IdxC += InpNumElts;
    // shuf ?, (inselt X, ?, IdxC), Mask --> shuf ?, X, Mask
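    // Example: with 4-element inputs, an insert at index 1 into the 2nd
    // shuffle operand is referenced by mask value 4 + 1 = 5, so the fold
    // applies only if 5 does not appear in the mask.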
    if (!is_contained(Mask, (int)IdxC))
      return IC.replaceOperand(Shuf, 1, X);
  }
  // For the rest of the transform, the shuffle must not change vector sizes.
  // TODO: This restriction could be removed if the insert has only one use
  // (because the transform would require a new length-changing shuffle).
  if (NumElts != InpNumElts)
    return nullptr;

  // shuffle (insert ?, Scalar, IndexC), V1, Mask --> insert V1, Scalar, IndexC'
  auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) {
    // We need an insertelement with a constant index.
    if (!match(V0, m_InsertElt(m_Value(), m_Value(Scalar),
                               m_ConstantInt(IndexC))))
      return false;

    // Test the shuffle mask to see if it splices the inserted scalar into the
    // operand 1 vector of the shuffle.
    int NewInsIndex = -1;
    for (int i = 0; i != NumElts; ++i) {
      // Ignore undef mask elements.
      if (Mask[i] == -1)
        continue;

      // The shuffle takes elements of operand 1 without lane changes.
      if (Mask[i] == NumElts + i)
        continue;

      // The shuffle must choose the inserted scalar exactly once.
      if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue())
        return false;

      // The shuffle is placing the inserted scalar into element i.
      NewInsIndex = i;
    }

    assert(NewInsIndex != -1 && "Did not fold shuffle with unused operand?");

    // Index is updated to the potentially translated insertion lane.
    IndexC = ConstantInt::get(IndexC->getIntegerType(), NewInsIndex);
    return true;
  };

  // If the shuffle is unnecessary, insert the scalar operand directly into
  // operand 1 of the shuffle. Example:
  // shuffle (insert ?, S, 1), V1, <1, 5, 6, 7> --> insert V1, S, 0
  Value *Scalar;
  ConstantInt *IndexC;
  if (isShufflingScalarIntoOp1(Scalar, IndexC))
    return InsertElementInst::Create(V1, Scalar, IndexC);

  // Try again after commuting shuffle. Example:
  // shuffle V0, (insert ?, S, 0), <0, 1, 2, 4> -->
  // shuffle (insert ?, S, 0), V0, <4, 5, 6, 0> --> insert V0, S, 3
  std::swap(V0, V1);
  ShuffleVectorInst::commuteShuffleMask(Mask, NumElts);
  if (isShufflingScalarIntoOp1(Scalar, IndexC))
    return InsertElementInst::Create(V1, Scalar, IndexC);

  return nullptr;
}

static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) {
  // Match the operands as identity with padding (also known as concatenation
  // with undef) shuffles of the same source type. The backend is expected to
  // recreate these concatenations from a shuffle of narrow operands.
  auto *Shuffle0 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(0));
  auto *Shuffle1 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(1));
  if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() ||
      !Shuffle1 || !Shuffle1->isIdentityWithPadding())
    return nullptr;

  // We limit this transform to power-of-2 types because we expect that the
  // backend can convert the simplified IR patterns into nodes identical to
  // those produced for the original IR.
  // TODO: If we can verify the same behavior for arbitrary types, the
  // power-of-2 checks can be removed.
  Value *X = Shuffle0->getOperand(0);
  Value *Y = Shuffle1->getOperand(0);
  if (X->getType() != Y->getType() ||
      !isPowerOf2_32(cast<FixedVectorType>(Shuf.getType())->getNumElements()) ||
      !isPowerOf2_32(
          cast<FixedVectorType>(Shuffle0->getType())->getNumElements()) ||
      !isPowerOf2_32(cast<FixedVectorType>(X->getType())->getNumElements()) ||
      match(X, m_Undef()) || match(Y, m_Undef()))
    return nullptr;
  assert(match(Shuffle0->getOperand(1), m_Undef()) &&
         match(Shuffle1->getOperand(1), m_Undef()) &&
         "Unexpected operand for identity shuffle");

  // This is a shuffle of 2 widening shuffles. We can shuffle the narrow source
  // operands directly by adjusting the shuffle mask to account for the
  // narrower types:
  // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
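  // Example: with X and Y of type <4 x i32>, each widened to <8 x i32>:
  //   shuf (widen X), (widen Y), <0, 8, 1, 9> --> shuf X, Y, <0, 4, 1, 5>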
  int NarrowElts = cast<FixedVectorType>(X->getType())->getNumElements();
  int WideElts = cast<FixedVectorType>(Shuffle0->getType())->getNumElements();
  assert(WideElts > NarrowElts && "Unexpected types for identity with padding");

  ArrayRef<int> Mask = Shuf.getShuffleMask();
  SmallVector<int, 16> NewMask(Mask.size(), -1);
  for (int i = 0, e = Mask.size(); i != e; ++i) {
    if (Mask[i] == -1)
      continue;

    // If this shuffle is choosing an undef element from 1 of the sources, that
    // element is undef.
    if (Mask[i] < WideElts) {
      if (Shuffle0->getMaskValue(Mask[i]) == -1)
        continue;
    } else {
      if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1)
        continue;
    }

    // If this shuffle is choosing from the 1st narrow op, the mask element is
    // the same. If this shuffle is choosing from the 2nd narrow op, the mask
    // element is offset down to adjust for the narrow vector widths.
    if (Mask[i] < WideElts) {
      assert(Mask[i] < NarrowElts && "Unexpected shuffle mask");
      NewMask[i] = Mask[i];
    } else {
      assert(Mask[i] < (WideElts + NarrowElts) && "Unexpected shuffle mask");
      NewMask[i] = Mask[i] - (WideElts - NarrowElts);
    }
  }
  return new ShuffleVectorInst(X, Y, NewMask);
}

// Splatting the first element of the result of a BinOp, where any of the
// BinOp's operands are the result of a first-element splat, can be simplified
// to splatting the first element of the BinOp's result.
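// Example (illustrative, writing splat0 for a zero-mask shuffle):
//   splat0 (add (splat0 X), Y) --> splat0 (add X, Y)
// Element 0 of both sides is add(X[0], Y[0]).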
Instruction *InstCombinerImpl::simplifyBinOpSplats(ShuffleVectorInst &SVI) {
  if (!match(SVI.getOperand(1), m_Poison()) ||
      !match(SVI.getShuffleMask(), m_ZeroMask()) ||
      !SVI.getOperand(0)->hasOneUse())
    return nullptr;

  Value *Op0 = SVI.getOperand(0);
  Value *X, *Y;
  if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Poison(), m_ZeroMask()),
                          m_Value(Y))) &&
      !match(Op0, m_BinOp(m_Value(X),
                          m_Shuffle(m_Value(Y), m_Poison(), m_ZeroMask()))))
    return nullptr;
  if (X->getType() != Y->getType())
    return nullptr;

  auto *BinOp = cast<BinaryOperator>(Op0);
  if (!isSafeToSpeculativelyExecuteWithVariableReplaced(BinOp))
    return nullptr;

  Value *NewBO = Builder.CreateBinOp(BinOp->getOpcode(), X, Y);
  if (auto *NewBOI = dyn_cast<Instruction>(NewBO))
    NewBOI->copyIRFlags(BinOp);

  return new ShuffleVectorInst(NewBO, SVI.getShuffleMask());
}

Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  SimplifyQuery ShufQuery = SQ.getWithInstruction(&SVI);
  if (auto *V = simplifyShuffleVectorInst(LHS, RHS, SVI.getShuffleMask(),
                                          SVI.getType(), ShufQuery))
    return replaceInstUsesWith(SVI, V);

  if (Instruction *I = simplifyBinOpSplats(SVI))
    return I;

  // Canonicalize splat shuffle to use poison RHS. Handle this explicitly in
  // order to support scalable vectors.
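  // Example: shuf X, Y, zeroinitializer --> shuf X, poison, zeroinitializer
  // (a zero mask reads only element 0 of the 1st operand, so RHS is unused).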
  if (match(SVI.getShuffleMask(), m_ZeroMask()) && !isa<PoisonValue>(RHS))
    return replaceOperand(SVI, 1, PoisonValue::get(RHS->getType()));

  if (isa<ScalableVectorType>(LHS->getType()))
    return nullptr;

  unsigned VWidth = cast<FixedVectorType>(SVI.getType())->getNumElements();
  unsigned LHSWidth = cast<FixedVectorType>(LHS->getType())->getNumElements();

  // shuffle (bitcast X), (bitcast Y), Mask --> bitcast (shuffle X, Y, Mask)
  //
  // If X and Y are of the same (vector) type, and the element size is not
  // changed by the bitcasts, we can distribute the bitcasts through the
  // shuffle, hopefully reducing the number of instructions. We make sure that
  // at least one bitcast only has one use, so we don't *increase* the number
  // of instructions here.
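  // Example: with X and Y of type <4 x i32>:
  //   shuf (bitcast X to <4 x float>), (bitcast Y to <4 x float>), <0,4,1,5>
  //   --> bitcast (shuf X, Y, <0,4,1,5>) to <4 x float>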
  Value *X, *Y;
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_BitCast(m_Value(Y))) &&
      X->getType()->isVectorTy() && X->getType() == Y->getType() &&
      X->getType()->getScalarSizeInBits() ==
          SVI.getType()->getScalarSizeInBits() &&
      (LHS->hasOneUse() || RHS->hasOneUse())) {
    Value *V = Builder.CreateShuffleVector(X, Y, SVI.getShuffleMask(),
                                           SVI.getName() + ".uncasted");
    return new BitCastInst(V, SVI.getType());
  }

  ArrayRef<int> Mask = SVI.getShuffleMask();

  // Peek through a bitcasted shuffle operand by scaling the mask. If the
  // simulated shuffle can simplify, then this shuffle is unnecessary:
  // shuf (bitcast X), undef, Mask --> bitcast X'
  // TODO: This could be extended to allow length-changing shuffles.
  //       The transform might also be obsoleted if we allowed canonicalization
  //       of bitcasted shuffles.
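  // Example: if X is <2 x i64> and LHS = bitcast X to <4 x i32>, the mask
  // <0, 1, 2, 3> scales to the identity mask <0, 1> on X; the simulated
  // shuffle then simplifies to X, and the whole shuffle becomes
  // 'bitcast X to <4 x i32>'.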
  if (match(LHS, m_BitCast(m_Value(X))) && match(RHS, m_Undef()) &&
      X->getType()->isVectorTy() && VWidth == LHSWidth) {
    // Try to create a scaled mask constant.
    auto *XType = cast<FixedVectorType>(X->getType());
    unsigned XNumElts = XType->getNumElements();
    SmallVector<int, 16> ScaledMask;
    if (scaleShuffleMaskElts(XNumElts, Mask, ScaledMask)) {
      // If the shuffled source vector simplifies, cast that value to this
      // shuffle's type.
      if (auto *V = simplifyShuffleVectorInst(X, UndefValue::get(XType),
                                              ScaledMask, XType, ShufQuery))
        return BitCastInst::Create(Instruction::BitCast, V, SVI.getType());
    }
  }

  // shuffle x, x, mask --> shuffle x, undef, mask'
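  // Example: shuf X, X, <0, 5, 2, 7> --> unary shuffle of X with mask
  // <0, 1, 2, 3>, since with identical operands mask value i + width selects
  // the same element as mask value i.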
  if (LHS == RHS) {
    assert(!match(RHS, m_Undef()) &&
           "Shuffle with 2 undef ops not simplified?");
    return new ShuffleVectorInst(LHS, createUnaryMask(Mask, LHSWidth));
  }

  // shuffle undef, x, mask --> shuffle x, undef, mask'
  if (match(LHS, m_Undef())) {
    SVI.commute();
    return &SVI;
  }

  if (Instruction *I = canonicalizeInsertSplat(SVI, Builder))
    return I;

  if (Instruction *I = foldSelectShuffle(SVI))
    return I;

  if (Instruction *I = foldTruncShuffle(SVI, DL.isBigEndian()))
    return I;

  if (Instruction *I = narrowVectorSelect(SVI, Builder))
    return I;

  if (Instruction *I = foldShuffleOfUnaryOps(SVI, Builder))
    return I;

  if (Instruction *I = foldCastShuffle(SVI, Builder))
    return I;

  APInt PoisonElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
  if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, PoisonElts)) {
    if (V != &SVI)
      return replaceInstUsesWith(SVI, V);
    return &SVI;
  }

  if (Instruction *I = foldIdentityExtractShuffle(SVI))
    return I;

  // These transforms have the potential to lose undef knowledge, so they are
  // intentionally placed after SimplifyDemandedVectorElts().
  if (Instruction *I = foldShuffleWithInsert(SVI, *this))
    return I;
  if (Instruction *I = foldIdentityPaddedShuffles(SVI))
    return I;

  if (match(RHS, m_Constant())) {
    if (auto *SI = dyn_cast<SelectInst>(LHS)) {
      // We cannot do this fold for elementwise select since ShuffleVector is
      // not elementwise.
      if (SI->getCondition()->getType()->isIntegerTy() &&
          (isa<PoisonValue>(RHS) ||
           isGuaranteedNotToBePoison(SI->getCondition()))) {
        if (Instruction *I = FoldOpIntoSelect(SVI, SI))
          return I;
      }
    }
    if (auto *PN = dyn_cast<PHINode>(LHS)) {
      if (Instruction *I = foldOpIntoPhi(SVI, PN, /*AllowMultipleUses=*/true))
        return I;
    }
  }

  if (match(RHS, m_Poison()) && canEvaluateShuffled(LHS, Mask)) {
    Value *V = evaluateInDifferentElementOrder(LHS, Mask, Builder);
    return replaceInstUsesWith(SVI, V);
  }

  // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to
  // a non-vector type. We can instead bitcast the original vector followed by
  // an extract of the desired element:
  //
  // %sroa = shufflevector <16 x i8> %in, <16 x i8> undef,
  //                       <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  // %1 = bitcast <4 x i8> %sroa to i32
  // Becomes:
  // %bc = bitcast <16 x i8> %in to <4 x i32>
  // %ext = extractelement <4 x i32> %bc, i32 0
  //
  // If the shuffle is extracting a contiguous range of values from the input
  // vector then each use which is a bitcast of the extracted size can be
  // replaced. This will work if the vector types are compatible, and the begin
  // index is aligned to a value in the casted vector type. If the begin index
  // isn't aligned then we can shuffle the original vector (keeping the same
  // vector type) before extracting.
  //
  // This code will bail out if the target type is fundamentally incompatible
  // with vectors of the source type.
  //
  // Example of <16 x i8>, target type i32:
  // Index range [4,8):            v-----------v Will work.
  //                   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  //        <16 x i8>: |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
  //        <4 x i32>: |           |           |           |           |
  //                   +-----------+-----------+-----------+-----------+
  // Index range [6,10):                 ^-----------^ Needs an extra shuffle.
  // Target type i40:              ^--------------^ Won't work, bail.
  bool MadeChange = false;
  if (isShuffleExtractingFromLHS(SVI, Mask)) {
    Value *V = LHS;
    unsigned MaskElems = Mask.size();
    auto *SrcTy = cast<FixedVectorType>(V->getType());
    unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedValue();
    unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
    assert(SrcElemBitWidth && "vector elements must have a bitwidth");
    unsigned SrcNumElems = SrcTy->getNumElements();
    SmallVector<BitCastInst *, 8> BCs;
    DenseMap<Type *, Value *> NewBCs;
    for (User *U : SVI.users())
      if (BitCastInst *BC = dyn_cast<BitCastInst>(U)) {
        // Only visit bitcasts that weren't previously handled.
        if (BC->use_empty())
          continue;
        // Prefer to combine bitcasts of bitcasts before attempting this fold.
        if (BC->hasOneUse()) {
          auto *BC2 = dyn_cast<BitCastInst>(BC->user_back());
          if (BC2 && isEliminableCastPair(BC, BC2))
            continue;
        }
        BCs.push_back(BC);
      }
    for (BitCastInst *BC : BCs) {
      unsigned BegIdx = Mask.front();
      Type *TgtTy = BC->getDestTy();
      unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy);
      if (!TgtElemBitWidth)
        continue;
      unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;
      bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;
      bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);
      if (!VecBitWidthsEqual)
        continue;
      if (!VectorType::isValidElementType(TgtTy))
        continue;
      auto *CastSrcTy = FixedVectorType::get(TgtTy, TgtNumElems);
      if (!BegIsAligned) {
        // Shuffle the input so [0,MaskElems) contains the output, and
        // [MaskElems,SrcNumElems) is undef.
        SmallVector<int, 16> ShuffleMask(SrcNumElems, -1);
        for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
          ShuffleMask[I] = Idx;
        V = Builder.CreateShuffleVector(V, ShuffleMask,
                                        SVI.getName() + ".extract");
        BegIdx = 0;
      }
      unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
      assert(SrcElemsPerTgtElem);
      BegIdx /= SrcElemsPerTgtElem;
      auto [It, Inserted] = NewBCs.try_emplace(CastSrcTy);
      if (Inserted)
        It->second = Builder.CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
      auto *Ext = Builder.CreateExtractElement(It->second, BegIdx,
                                               SVI.getName() + ".extract");
      // The shufflevector isn't being replaced: the bitcast that used it
      // is. InstCombine will visit the newly-created instructions.
      replaceInstUsesWith(*BC, Ext);
      MadeChange = true;
    }
  }

  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.
  // Cases that might be simplified:
  // 1.
  // x1=shuffle(v1,v2,mask1)
  // x=shuffle(x1,undef,mask)
  // ==>
  // x=shuffle(v1,undef,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  // 2.
  // x1=shuffle(v1,undef,mask1)
  // x=shuffle(x1,x2,mask)
  // where v1.size() == mask1.size()
  // ==>
  // x=shuffle(v1,x2,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
  // 3.
  // x2=shuffle(v2,undef,mask2)
  // x=shuffle(x1,x2,mask)
  // where v2.size() == mask2.size()
  // ==>
  // x=shuffle(x1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
  // 4.
  // x1=shuffle(v1,undef,mask1)
  // x2=shuffle(v2,undef,mask2)
  // x=shuffle(x1,x2,mask)
  // where v1.size() == v2.size()
  // ==>
  // x=shuffle(v1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
  //
  // Here we are really conservative: we avoid producing a shuffle mask that
  // did not appear in the input program, because codegen may not be able to
  // split a merged shuffle back into two cheap shuffles and could produce
  // worse code. As such, we only merge two shuffles if the result is either
  // a splat or one of the input shuffle masks. In that case, merging the
  // shuffles just removes one instruction, which we know is safe. This is
  // good for things like turning: (splat(splat)) -> splat, or
  // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
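  // Example (splat of a splat):
  //   shuf (shuf v1, poison, <0, 0, 0, 0>), poison, <2, 2, 2, 2>
  //   --> shuf v1, poison, <0, 0, 0, 0>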
  ShuffleVectorInst *LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
  ShuffleVectorInst *RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
  if (LHSShuffle)
    if (!match(LHSShuffle->getOperand(1), m_Poison()) &&
        !match(RHS, m_Poison()))
      LHSShuffle = nullptr;
  if (RHSShuffle)
    if (!match(RHSShuffle->getOperand(1), m_Poison()))
      RHSShuffle = nullptr;
  if (!LHSShuffle && !RHSShuffle)
    return MadeChange ? &SVI : nullptr;

  Value *LHSOp0 = nullptr;
  Value *LHSOp1 = nullptr;
  Value *RHSOp0 = nullptr;
  unsigned LHSOp0Width = 0;
  unsigned RHSOp0Width = 0;
  if (LHSShuffle) {
    LHSOp0 = LHSShuffle->getOperand(0);
    LHSOp1 = LHSShuffle->getOperand(1);
    LHSOp0Width = cast<FixedVectorType>(LHSOp0->getType())->getNumElements();
  }
  if (RHSShuffle) {
    RHSOp0 = RHSShuffle->getOperand(0);
    RHSOp0Width = cast<FixedVectorType>(RHSOp0->getType())->getNumElements();
  }
  Value *newLHS = LHS;
  Value *newRHS = RHS;
  if (LHSShuffle) {
    // case 1
    if (match(RHS, m_Poison())) {
      newLHS = LHSOp0;
      newRHS = LHSOp1;
    }
    // case 2 or 4
    else if (LHSOp0Width == LHSWidth) {
      newLHS = LHSOp0;
    }
  }
  // case 3 or 4
  if (RHSShuffle && RHSOp0Width == LHSWidth) {
    newRHS = RHSOp0;
  }
  // case 4
  if (LHSOp0 == RHSOp0) {
    newLHS = LHSOp0;
    newRHS = nullptr;
  }

  if (newLHS == LHS && newRHS == RHS)
    return MadeChange ? &SVI : nullptr;

  ArrayRef<int> LHSMask;
  ArrayRef<int> RHSMask;
  if (newLHS != LHS)
    LHSMask = LHSShuffle->getShuffleMask();
  if (RHSShuffle && newRHS != RHS)
    RHSMask = RHSShuffle->getShuffleMask();

  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
  SmallVector<int, 16> newMask;
  bool isSplat = true;
  int SplatElt = -1;
  // Create a new mask for the new ShuffleVectorInst so that the new
  // ShuffleVectorInst is equivalent to the original one.
  for (unsigned i = 0; i < VWidth; ++i) {
    int eltMask;
    if (Mask[i] < 0) {
      // This element is a poison value.
      eltMask = -1;
    } else if (Mask[i] < (int)LHSWidth) {
      // This element is from the left hand side vector operand.
      //
      // If LHS is going to be replaced (case 1, 2, or 4), calculate the
      // new mask value for the element.
      if (newLHS != LHS) {
        eltMask = LHSMask[Mask[i]];
        // If the value selected is a poison value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)LHSOp0Width && isa<PoisonValue>(LHSOp1))
          eltMask = -1;
      } else
        eltMask = Mask[i];
    } else {
      // This element is from the right hand side vector operand.
      //
      // If the value selected is a poison value, explicitly specify it
      // with a -1 mask value. (case 1)
      if (match(RHS, m_Poison()))
        eltMask = -1;
      // If RHS is going to be replaced (case 3 or 4), calculate the
      // new mask value for the element.
      else if (newRHS != RHS) {
        eltMask = RHSMask[Mask[i] - LHSWidth];
        // If the value selected is a poison value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)RHSOp0Width) {
          assert(match(RHSShuffle->getOperand(1), m_Poison()) &&
                 "should have been checked above");
          eltMask = -1;
        }
      } else
        eltMask = Mask[i] - LHSWidth;

      // If LHS's width is changed, shift the mask value accordingly.
      // If newRHS == nullptr, i.e. LHSOp0 == RHSOp0, we want to remap any
      // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
      // If newRHS == newLHS, we want to remap any references from newRHS to
      // newLHS so that we can properly identify splats that may occur due to
      // obfuscation across the two vectors.
      if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
        eltMask += newLHSWidth;
    }

    // Check if this could still be a splat.
    if (eltMask >= 0) {
      if (SplatElt >= 0 && SplatElt != eltMask)
        isSplat = false;
      SplatElt = eltMask;
    }

    newMask.push_back(eltMask);
  }

  // If the result mask is equal to one of the original shuffle masks,
  // or is a splat, do the replacement.
  if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {
    if (!newRHS)
      newRHS = PoisonValue::get(newLHS->getType());
    return new ShuffleVectorInst(newLHS, newRHS, newMask);
  }

  return MadeChange ? &SVI : nullptr;
}
