1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/STLFunctionalExtras.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/AssumeBundleQueries.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/AttributeMask.h"
31 #include "llvm/IR/Attributes.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/Constant.h"
34 #include "llvm/IR/Constants.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/DebugInfo.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GlobalVariable.h"
40 #include "llvm/IR/InlineAsm.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/IntrinsicsAArch64.h"
47 #include "llvm/IR/IntrinsicsAMDGPU.h"
48 #include "llvm/IR/IntrinsicsARM.h"
49 #include "llvm/IR/IntrinsicsHexagon.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/Metadata.h"
52 #include "llvm/IR/PatternMatch.h"
53 #include "llvm/IR/Statepoint.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/IR/ValueHandle.h"
58 #include "llvm/Support/AtomicOrdering.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/KnownBits.h"
65 #include "llvm/Support/MathExtras.h"
66 #include "llvm/Support/raw_ostream.h"
67 #include "llvm/Transforms/InstCombine/InstCombiner.h"
68 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
69 #include "llvm/Transforms/Utils/Local.h"
70 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
71 #include <algorithm>
72 #include <cassert>
73 #include <cstdint>
74 #include <optional>
75 #include <utility>
76 #include <vector>
77
78 #define DEBUG_TYPE "instcombine"
79 #include "llvm/Transforms/Utils/InstructionWorklist.h"
80
81 using namespace llvm;
82 using namespace PatternMatch;
83
84 STATISTIC(NumSimplified, "Number of library calls simplified");
85
86 static cl::opt<unsigned> GuardWideningWindow(
87 "instcombine-guard-widening-window",
88 cl::init(3),
89 cl::desc("How wide an instruction window to bypass looking for "
90 "another guard"));
91
92 /// Return the specified type promoted as it would be to pass through a va_arg
93 /// area.
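/// For example (illustrative): i1, i8, and i16 promote to i32, while i32, i64,
/// and non-integer types are returned unchanged.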
94 static Type *getPromotedType(Type *Ty) {
95 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
96 if (ITy->getBitWidth() < 32)
97 return Type::getInt32Ty(Ty->getContext());
98 }
99 return Ty;
100 }
101
102 /// Recognize a memcpy/memmove whose source is an otherwise trivially unused alloca.
103 /// TODO: This should probably be integrated with visitAllocSites, but that
104 /// requires a deeper change to allow either unread or unwritten objects.
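/// Illustrative IR shape that this matches:
///   %buf = alloca [16 x i8]
///   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %buf, i64 16, i1 false)
/// Nothing else uses %buf, so the copied bytes are undef and the transfer is a
/// no-op (removed below unless it is volatile).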
105 static bool hasUndefSource(AnyMemTransferInst *MI) {
106 auto *Src = MI->getRawSource();
107 while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
108 if (!Src->hasOneUse())
109 return false;
110 Src = cast<Instruction>(Src)->getOperand(0);
111 }
112 return isa<AllocaInst>(Src) && Src->hasOneUse();
113 }
114
115 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
116 Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
117 MaybeAlign CopyDstAlign = MI->getDestAlign();
118 if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
119 MI->setDestAlignment(DstAlign);
120 return MI;
121 }
122
123 Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
124 MaybeAlign CopySrcAlign = MI->getSourceAlign();
125 if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
126 MI->setSourceAlignment(SrcAlign);
127 return MI;
128 }
129
130 // If we have a store to a location which is known constant, we can conclude
131 // that the store must be storing the constant value (else the memory
132 // wouldn't be constant), and this must be a noop.
133 if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
134 // Set the size of the copy to 0, it will be deleted on the next iteration.
135 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
136 return MI;
137 }
138
139 // If the source is provably undef, the memcpy/memmove doesn't do anything
140 // (unless the transfer is volatile).
141 if (hasUndefSource(MI) && !MI->isVolatile()) {
142 // Set the size of the copy to 0, it will be deleted on the next iteration.
143 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
144 return MI;
145 }
146
147 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
148 // load/store.
149 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
150 if (!MemOpLength) return nullptr;
151
152 // Source and destination pointer types are always "i8*" for intrinsic. See
153 // if the size is something we can handle with a single primitive load/store.
154 // A single load+store correctly handles overlapping memory in the memmove
155 // case.
156 uint64_t Size = MemOpLength->getLimitedValue();
157 assert(Size && "0-sized memory transferring should be removed already.");
158
159 if (Size > 8 || (Size&(Size-1)))
160 return nullptr; // If not 1/2/4/8 bytes, exit.
161
162   // If it is an atomic transfer and the alignment is less than the size, we
163   // would introduce an unaligned memory access, which CodeGen would later turn
164   // into a libcall. That is not an evident performance gain, so disable the
165   // transform for now.
166 if (isa<AtomicMemTransferInst>(MI))
167 if (*CopyDstAlign < Size || *CopySrcAlign < Size)
168 return nullptr;
169
170 // Use an integer load+store unless we can find something better.
171 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
172
173 // If the memcpy has metadata describing the members, see if we can get the
174 // TBAA, scope and noalias tags describing our copy.
175 AAMDNodes AACopyMD = MI->getAAMetadata().adjustForAccess(Size);
176
177 Value *Src = MI->getArgOperand(1);
178 Value *Dest = MI->getArgOperand(0);
179 LoadInst *L = Builder.CreateLoad(IntType, Src);
180 // Alignment from the mem intrinsic will be better, so use it.
181 L->setAlignment(*CopySrcAlign);
182 L->setAAMetadata(AACopyMD);
183 MDNode *LoopMemParallelMD =
184 MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
185 if (LoopMemParallelMD)
186 L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
187 MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
188 if (AccessGroupMD)
189 L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
190
191 StoreInst *S = Builder.CreateStore(L, Dest);
192 // Alignment from the mem intrinsic will be better, so use it.
193 S->setAlignment(*CopyDstAlign);
194 S->setAAMetadata(AACopyMD);
195 if (LoopMemParallelMD)
196 S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
197 if (AccessGroupMD)
198 S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
199 S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
200
201 if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
202 // non-atomics can be volatile
203 L->setVolatile(MT->isVolatile());
204 S->setVolatile(MT->isVolatile());
205 }
206 if (isa<AtomicMemTransferInst>(MI)) {
207 // atomics have to be unordered
208 L->setOrdering(AtomicOrdering::Unordered);
209 S->setOrdering(AtomicOrdering::Unordered);
210 }
211
212 // Set the size of the copy to 0, it will be deleted on the next iteration.
213 MI->setLength(Constant::getNullValue(MemOpLength->getType()));
214 return MI;
215 }
216
217 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
218 const Align KnownAlignment =
219 getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
220 MaybeAlign MemSetAlign = MI->getDestAlign();
221 if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
222 MI->setDestAlignment(KnownAlignment);
223 return MI;
224 }
225
226 // If we have a store to a location which is known constant, we can conclude
227 // that the store must be storing the constant value (else the memory
228 // wouldn't be constant), and this must be a noop.
229 if (!isModSet(AA->getModRefInfoMask(MI->getDest()))) {
230 // Set the size of the copy to 0, it will be deleted on the next iteration.
231 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
232 return MI;
233 }
234
235 // Remove memset with an undef value.
236 // FIXME: This is technically incorrect because it might overwrite a poison
237 // value. Change to PoisonValue once #52930 is resolved.
238 if (isa<UndefValue>(MI->getValue())) {
239 // Set the size of the copy to 0, it will be deleted on the next iteration.
240 MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
241 return MI;
242 }
243
244 // Extract the length and alignment and fill if they are constant.
245 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
246 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
247 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
248 return nullptr;
249 const uint64_t Len = LenC->getLimitedValue();
250 assert(Len && "0-sized memory setting should be removed already.");
251 const Align Alignment = MI->getDestAlign().valueOrOne();
252
253   // If it is an atomic memset and the alignment is less than the size, we would
254   // introduce an unaligned memory access, which CodeGen would later turn into a
255   // libcall. That is not an evident performance gain, so disable the transform
256   // for now.
257 if (isa<AtomicMemSetInst>(MI))
258 if (Alignment < Len)
259 return nullptr;
260
261 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
262 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
263 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
264
265 Value *Dest = MI->getDest();
266
267 // Extract the fill value and store.
268 const uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
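    // E.g. (illustrative): a fill byte of 0xAB with Len == 4 yields 0xABABABAB,
    // i.e. the fill byte replicated across the stored integer.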
269 Constant *FillVal = ConstantInt::get(ITy, Fill);
270 StoreInst *S = Builder.CreateStore(FillVal, Dest, MI->isVolatile());
271 S->copyMetadata(*MI, LLVMContext::MD_DIAssignID);
272 auto replaceOpForAssignmentMarkers = [FillC, FillVal](auto *DbgAssign) {
273 if (llvm::is_contained(DbgAssign->location_ops(), FillC))
274 DbgAssign->replaceVariableLocationOp(FillC, FillVal);
275 };
276 for_each(at::getAssignmentMarkers(S), replaceOpForAssignmentMarkers);
277 for_each(at::getDVRAssignmentMarkers(S), replaceOpForAssignmentMarkers);
278
279 S->setAlignment(Alignment);
280 if (isa<AtomicMemSetInst>(MI))
281 S->setOrdering(AtomicOrdering::Unordered);
282
283 // Set the size of the copy to 0, it will be deleted on the next iteration.
284 MI->setLength(Constant::getNullValue(LenC->getType()));
285 return MI;
286 }
287
288 return nullptr;
289 }
290
291 // TODO, Obvious Missing Transforms:
292 // * Narrow width by halves excluding zero/undef lanes
293 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
294 Value *LoadPtr = II.getArgOperand(0);
295 const Align Alignment =
296 cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
297
298 // If the mask is all ones or undefs, this is a plain vector load of the 1st
299 // argument.
300 if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
301 LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
302 "unmaskedload");
303 L->copyMetadata(II);
304 return L;
305 }
306
307 // If we can unconditionally load from this address, replace with a
308 // load/select idiom. TODO: use DT for context sensitive query
309 if (isDereferenceablePointer(LoadPtr, II.getType(),
310 II.getDataLayout(), &II, &AC)) {
311 LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
312 "unmaskedload");
313 LI->copyMetadata(II);
314 return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
315 }
316
317 return nullptr;
318 }
319
320 // TODO, Obvious Missing Transforms:
321 // * Single constant active lane -> store
322 // * Narrow width by halves excluding zero/undef lanes
323 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
324 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
325 if (!ConstMask)
326 return nullptr;
327
328 // If the mask is all zeros, this instruction does nothing.
329 if (ConstMask->isNullValue())
330 return eraseInstFromFunction(II);
331
332 // If the mask is all ones, this is a plain vector store of the 1st argument.
333 if (ConstMask->isAllOnesValue()) {
334 Value *StorePtr = II.getArgOperand(1);
335 Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
336 StoreInst *S =
337 new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
338 S->copyMetadata(II);
339 return S;
340 }
341
342 if (isa<ScalableVectorType>(ConstMask->getType()))
343 return nullptr;
344
345 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
346 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
347 APInt PoisonElts(DemandedElts.getBitWidth(), 0);
348 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
349 PoisonElts))
350 return replaceOperand(II, 0, V);
351
352 return nullptr;
353 }
354
355 // TODO, Obvious Missing Transforms:
356 // * Single constant active lane load -> load
357 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
358 // * Adjacent vector addresses -> masked.load
359 // * Narrow width by halves excluding zero/undef lanes
360 // * Vector incrementing address -> vector masked load
361 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
362 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
363 if (!ConstMask)
364 return nullptr;
365
366 // Vector splat address w/known mask -> scalar load
367 // Fold the gather to load the source vector first lane
368 // because it is reloading the same value each time
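  // E.g. (illustrative): gather(splat(%p), align, all-true mask, passthru)
  //   --> %s = load %p ; splat(%s)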
369 if (ConstMask->isAllOnesValue())
370 if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
371 auto *VecTy = cast<VectorType>(II.getType());
372 const Align Alignment =
373 cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
374 LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
375 Alignment, "load.scalar");
376 Value *Shuf =
377 Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
378 return replaceInstUsesWith(II, cast<Instruction>(Shuf));
379 }
380
381 return nullptr;
382 }
383
384 // TODO, Obvious Missing Transforms:
385 // * Single constant active lane -> store
386 // * Adjacent vector addresses -> masked.store
387 // * Narrow store width by halves excluding zero/undef lanes
388 // * Vector incrementing address -> vector masked store
389 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
390 auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
391 if (!ConstMask)
392 return nullptr;
393
394 // If the mask is all zeros, a scatter does nothing.
395 if (ConstMask->isNullValue())
396 return eraseInstFromFunction(II);
397
398 // Vector splat address -> scalar store
399 if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
400 // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
401 if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
402 if (maskContainsAllOneOrUndef(ConstMask)) {
403 Align Alignment =
404 cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
405 StoreInst *S = new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false,
406 Alignment);
407 S->copyMetadata(II);
408 return S;
409 }
410 }
411 // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
412 // lastlane), ptr
413 if (ConstMask->isAllOnesValue()) {
414 Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
415 VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
416 ElementCount VF = WideLoadTy->getElementCount();
417 Value *RunTimeVF = Builder.CreateElementCount(Builder.getInt32Ty(), VF);
418 Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
419 Value *Extract =
420 Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
421 StoreInst *S =
422 new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
423 S->copyMetadata(II);
424 return S;
425 }
426 }
427 if (isa<ScalableVectorType>(ConstMask->getType()))
428 return nullptr;
429
430 // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
431 APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
432 APInt PoisonElts(DemandedElts.getBitWidth(), 0);
433 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts,
434 PoisonElts))
435 return replaceOperand(II, 0, V);
436 if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts,
437 PoisonElts))
438 return replaceOperand(II, 1, V);
439
440 return nullptr;
441 }
442
443 /// This function transforms launder.invariant.group and strip.invariant.group
444 /// like:
445 /// launder(launder(%x)) -> launder(%x) (the result is not the argument)
446 /// launder(strip(%x)) -> launder(%x)
447 /// strip(strip(%x)) -> strip(%x) (the result is not the argument)
448 /// strip(launder(%x)) -> strip(%x)
449 /// This is legal because it preserves the most recent information about
450 /// the presence or absence of invariant.group.
451 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
452 InstCombinerImpl &IC) {
453 auto *Arg = II.getArgOperand(0);
454 auto *StrippedArg = Arg->stripPointerCasts();
455 auto *StrippedInvariantGroupsArg = StrippedArg;
456 while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
457 if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
458 Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
459 break;
460 StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
461 }
462 if (StrippedArg == StrippedInvariantGroupsArg)
463 return nullptr; // No launders/strips to remove.
464
465 Value *Result = nullptr;
466
467 if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
468 Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
469 else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
470 Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
471 else
472 llvm_unreachable(
473 "simplifyInvariantGroupIntrinsic only handles launder and strip");
474 if (Result->getType()->getPointerAddressSpace() !=
475 II.getType()->getPointerAddressSpace())
476 Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
477
478 return cast<Instruction>(Result);
479 }
480
481 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
482 assert((II.getIntrinsicID() == Intrinsic::cttz ||
483 II.getIntrinsicID() == Intrinsic::ctlz) &&
484 "Expected cttz or ctlz intrinsic");
485 bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
486 Value *Op0 = II.getArgOperand(0);
487 Value *Op1 = II.getArgOperand(1);
488 Value *X;
489 // ctlz(bitreverse(x)) -> cttz(x)
490 // cttz(bitreverse(x)) -> ctlz(x)
491 if (match(Op0, m_BitReverse(m_Value(X)))) {
492 Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
493 Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
494 return CallInst::Create(F, {X, II.getArgOperand(1)});
495 }
496
497 if (II.getType()->isIntOrIntVectorTy(1)) {
498 // ctlz/cttz i1 Op0 --> not Op0
499 if (match(Op1, m_Zero()))
500 return BinaryOperator::CreateNot(Op0);
501 // If zero is poison, then the input can be assumed to be "true", so the
502 // instruction simplifies to "false".
503 assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
504 return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
505 }
506
507 // If ctlz/cttz is only used as a shift amount, set is_zero_poison to true.
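  // (With a zero input the result equals the bit width, and a shift by the bit
  // width is already poison, so treating the zero case as poison loses nothing.)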
508 if (II.hasOneUse() && match(Op1, m_Zero()) &&
509 match(II.user_back(), m_Shift(m_Value(), m_Specific(&II)))) {
510 II.dropUBImplyingAttrsAndMetadata();
511 return IC.replaceOperand(II, 1, IC.Builder.getTrue());
512 }
513
514 Constant *C;
515
516 if (IsTZ) {
517 // cttz(-x) -> cttz(x)
518 if (match(Op0, m_Neg(m_Value(X))))
519 return IC.replaceOperand(II, 0, X);
520
521 // cttz(-x & x) -> cttz(x)
522 if (match(Op0, m_c_And(m_Neg(m_Value(X)), m_Deferred(X))))
523 return IC.replaceOperand(II, 0, X);
524
525 // cttz(sext(x)) -> cttz(zext(x))
526 if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
527 auto *Zext = IC.Builder.CreateZExt(X, II.getType());
528 auto *CttzZext =
529 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
530 return IC.replaceInstUsesWith(II, CttzZext);
531 }
532
533 // Zext doesn't change the number of trailing zeros, so narrow:
534 // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
535 if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
536 auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
537 IC.Builder.getTrue());
538 auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
539 return IC.replaceInstUsesWith(II, ZextCttz);
540 }
541
542 // cttz(abs(x)) -> cttz(x)
543 // cttz(nabs(x)) -> cttz(x)
544 Value *Y;
545 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
546 if (SPF == SPF_ABS || SPF == SPF_NABS)
547 return IC.replaceOperand(II, 0, X);
548
549 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
550 return IC.replaceOperand(II, 0, X);
551
552 // cttz(shl(%const, %val), 1) --> add(cttz(%const, 1), %val)
553 if (match(Op0, m_Shl(m_ImmConstant(C), m_Value(X))) &&
554 match(Op1, m_One())) {
555 Value *ConstCttz =
556 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
557 return BinaryOperator::CreateAdd(ConstCttz, X);
558 }
559
560 // cttz(lshr exact (%const, %val), 1) --> sub(cttz(%const, 1), %val)
561 if (match(Op0, m_Exact(m_LShr(m_ImmConstant(C), m_Value(X)))) &&
562 match(Op1, m_One())) {
563 Value *ConstCttz =
564 IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, C, Op1);
565 return BinaryOperator::CreateSub(ConstCttz, X);
566 }
567
568 // cttz(add(lshr(UINT_MAX, %val), 1)) --> sub(width, %val)
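    // (Roughly: lshr(UINT_MAX, %val) + 1 == 1 << (width - %val) modulo 2^width,
    // and the trailing-zero count of that value is width - %val.)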
569 if (match(Op0, m_Add(m_LShr(m_AllOnes(), m_Value(X)), m_One()))) {
570 Value *Width =
571 ConstantInt::get(II.getType(), II.getType()->getScalarSizeInBits());
572 return BinaryOperator::CreateSub(Width, X);
573 }
574 } else {
575 // ctlz(lshr(%const, %val), 1) --> add(ctlz(%const, 1), %val)
576 if (match(Op0, m_LShr(m_ImmConstant(C), m_Value(X))) &&
577 match(Op1, m_One())) {
578 Value *ConstCtlz =
579 IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
580 return BinaryOperator::CreateAdd(ConstCtlz, X);
581 }
582
583 // ctlz(shl nuw (%const, %val), 1) --> sub(ctlz(%const, 1), %val)
584 if (match(Op0, m_NUWShl(m_ImmConstant(C), m_Value(X))) &&
585 match(Op1, m_One())) {
586 Value *ConstCtlz =
587 IC.Builder.CreateBinaryIntrinsic(Intrinsic::ctlz, C, Op1);
588 return BinaryOperator::CreateSub(ConstCtlz, X);
589 }
590 }
591
592 KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
593
594 // Create a mask for bits above (ctlz) or below (cttz) the first known one.
595 unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
596 : Known.countMaxLeadingZeros();
597 unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
598 : Known.countMinLeadingZeros();
599
600 // If all bits above (ctlz) or below (cttz) the first known one are known
601 // zero, this value is constant.
602 // FIXME: This should be in InstSimplify because we're replacing an
603 // instruction with a constant.
604 if (PossibleZeros == DefiniteZeros) {
605 auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
606 return IC.replaceInstUsesWith(II, C);
607 }
608
609 // If the input to cttz/ctlz is known to be non-zero,
610 // then change the 'ZeroIsPoison' parameter to 'true'
611 // because we know the zero behavior can't affect the result.
612 if (!Known.One.isZero() ||
613 isKnownNonZero(Op0, IC.getSimplifyQuery().getWithInstruction(&II))) {
614 if (!match(II.getArgOperand(1), m_One()))
615 return IC.replaceOperand(II, 1, IC.Builder.getTrue());
616 }
617
618 // Add range attribute since known bits can't completely reflect what we know.
619 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
620 if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
621 !II.getMetadata(LLVMContext::MD_range)) {
622 ConstantRange Range(APInt(BitWidth, DefiniteZeros),
623 APInt(BitWidth, PossibleZeros + 1));
624 II.addRangeRetAttr(Range);
625 return &II;
626 }
627
628 return nullptr;
629 }
630
631 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
632 assert(II.getIntrinsicID() == Intrinsic::ctpop &&
633 "Expected ctpop intrinsic");
634 Type *Ty = II.getType();
635 unsigned BitWidth = Ty->getScalarSizeInBits();
636 Value *Op0 = II.getArgOperand(0);
637 Value *X, *Y;
638
639 // ctpop(bitreverse(x)) -> ctpop(x)
640 // ctpop(bswap(x)) -> ctpop(x)
641 if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
642 return IC.replaceOperand(II, 0, X);
643
644 // ctpop(rot(x)) -> ctpop(x)
645 if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
646 match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
647 X == Y)
648 return IC.replaceOperand(II, 0, X);
649
650 // ctpop(x | -x) -> bitwidth - cttz(x, false)
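  // (x | -x sets every bit at and above the lowest set bit of x, so its popcount
  // is bitwidth - cttz(x); for x == 0 both sides are 0.)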
651 if (Op0->hasOneUse() &&
652 match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
653 Function *F =
654 Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
655 auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
656 auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
657 return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
658 }
659
660 // ctpop(~x & (x - 1)) -> cttz(x, false)
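  // (~x & (x - 1) sets exactly the trailing-zero bits of x, and is all-ones when
  // x == 0, so its popcount equals cttz(x, false).)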
661 if (match(Op0,
662 m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
663 Function *F =
664 Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
665 return CallInst::Create(F, {X, IC.Builder.getFalse()});
666 }
667
668 // Zext doesn't change the number of set bits, so narrow:
669 // ctpop (zext X) --> zext (ctpop X)
670 if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
671 Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
672 return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
673 }
674
675 KnownBits Known(BitWidth);
676 IC.computeKnownBits(Op0, Known, 0, &II);
677
678 // If all bits are zero except for exactly one fixed bit, then the result
679 // must be 0 or 1, and we can get that answer by shifting to LSB:
680 // ctpop (X & 32) --> (X & 32) >> 5
681   // TODO: Investigate removing this as it's likely unnecessary given the below
682 // `isKnownToBeAPowerOfTwo` check.
683 if ((~Known.Zero).isPowerOf2())
684 return BinaryOperator::CreateLShr(
685 Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));
686
687 // More generally we can also handle non-constant power of 2 patterns such as
688 // shl/shr(Pow2, X), (X & -X), etc... by transforming:
689 // ctpop(Pow2OrZero) --> icmp ne X, 0
690 if (IC.isKnownToBeAPowerOfTwo(Op0, /* OrZero */ true))
691 return CastInst::Create(Instruction::ZExt,
692 IC.Builder.CreateICmp(ICmpInst::ICMP_NE, Op0,
693 Constant::getNullValue(Ty)),
694 Ty);
695
696 // Add range attribute since known bits can't completely reflect what we know.
697 if (BitWidth != 1 && !II.hasRetAttr(Attribute::Range) &&
698 !II.getMetadata(LLVMContext::MD_range)) {
699 ConstantRange Range(APInt(BitWidth, Known.countMinPopulation()),
700 APInt(BitWidth, Known.countMaxPopulation() + 1));
701 II.addRangeRetAttr(Range);
702 return &II;
703 }
704
705 return nullptr;
706 }
707
708 /// Convert a table lookup to shufflevector if the mask is constant.
709 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
710 /// which case we could lower the shufflevector with rev64 instructions
711 /// as it's actually a byte reverse.
712 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
713 InstCombiner::BuilderTy &Builder) {
714 // Bail out if the mask is not a constant.
715 auto *C = dyn_cast<Constant>(II.getArgOperand(1));
716 if (!C)
717 return nullptr;
718
719 auto *VecTy = cast<FixedVectorType>(II.getType());
720 unsigned NumElts = VecTy->getNumElements();
721
722 // Only perform this transformation for <8 x i8> vector types.
723 if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
724 return nullptr;
725
726 int Indexes[8];
727
728 for (unsigned I = 0; I < NumElts; ++I) {
729 Constant *COp = C->getAggregateElement(I);
730
731 if (!COp || !isa<ConstantInt>(COp))
732 return nullptr;
733
734 Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
735
736 // Make sure the mask indices are in range.
737 if ((unsigned)Indexes[I] >= NumElts)
738 return nullptr;
739 }
740
741 auto *V1 = II.getArgOperand(0);
742 auto *V2 = Constant::getNullValue(V1->getType());
743 return Builder.CreateShuffleVector(V1, V2, ArrayRef(Indexes));
744 }
745
746 // Returns true iff the 2 intrinsics have the same operands, limiting the
747 // comparison to the first NumOperands.
748 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
749 unsigned NumOperands) {
750 assert(I.arg_size() >= NumOperands && "Not enough operands");
751 assert(E.arg_size() >= NumOperands && "Not enough operands");
752 for (unsigned i = 0; i < NumOperands; i++)
753 if (I.getArgOperand(i) != E.getArgOperand(i))
754 return false;
755 return true;
756 }
757
758 // Remove trivially empty start/end intrinsic ranges, i.e. a start
759 // immediately followed by an end (ignoring debuginfo or other
760 // start/end intrinsics in between). As this handles only the most trivial
761 // cases, tracking the nesting level is not needed:
762 //
763 // call @llvm.foo.start(i1 0)
764 // call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
765 // call @llvm.foo.end(i1 0)
766 // call @llvm.foo.end(i1 0) ; &I
767 static bool
768 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
769 std::function<bool(const IntrinsicInst &)> IsStart) {
770 // We start from the end intrinsic and scan backwards, so that InstCombine
771 // has already processed (and potentially removed) all the instructions
772 // before the end intrinsic.
773 BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
774 for (; BI != BE; ++BI) {
775 if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
776 if (I->isDebugOrPseudoInst() ||
777 I->getIntrinsicID() == EndI.getIntrinsicID())
778 continue;
779 if (IsStart(*I)) {
780 if (haveSameOperands(EndI, *I, EndI.arg_size())) {
781 IC.eraseInstFromFunction(*I);
782 IC.eraseInstFromFunction(EndI);
783 return true;
784 }
785 // Skip start intrinsics that don't pair with this end intrinsic.
786 continue;
787 }
788 }
789 break;
790 }
791
792 return false;
793 }
794
795 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
796 removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
797 return I.getIntrinsicID() == Intrinsic::vastart ||
798 I.getIntrinsicID() == Intrinsic::vacopy;
799 });
800 return nullptr;
801 }
802
803 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
804 assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
805 Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
806 if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
807 Call.setArgOperand(0, Arg1);
808 Call.setArgOperand(1, Arg0);
809 return &Call;
810 }
811 return nullptr;
812 }
813
814 /// Creates a result tuple for an overflow intrinsic \p II with a given
815 /// \p Result and a constant \p Overflow value.
816 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
817 Constant *Overflow) {
818 Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
819 StructType *ST = cast<StructType>(II->getType());
820 Constant *Struct = ConstantStruct::get(ST, V);
821 return InsertValueInst::Create(Struct, Result, 0);
822 }
823
824 Instruction *
825 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
826 WithOverflowInst *WO = cast<WithOverflowInst>(II);
827 Value *OperationResult = nullptr;
828 Constant *OverflowResult = nullptr;
829 if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
830 WO->getRHS(), *WO, OperationResult, OverflowResult))
831 return createOverflowTuple(WO, OperationResult, OverflowResult);
832 return nullptr;
833 }
834
835 static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
836 Ty = Ty->getScalarType();
837 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
838 }
839
840 static bool inputDenormalIsDAZ(const Function &F, const Type *Ty) {
841 Ty = Ty->getScalarType();
842 return F.getDenormalMode(Ty->getFltSemantics()).inputsAreZero();
843 }
844
845 /// \returns the compare predicate type if the test performed by
846 /// llvm.is.fpclass(x, \p Mask) is equivalent to fcmp o__ x, 0.0 with the
847 /// floating-point environment assumed for \p F for type \p Ty
848 static FCmpInst::Predicate fpclassTestIsFCmp0(FPClassTest Mask,
849 const Function &F, Type *Ty) {
850 switch (static_cast<unsigned>(Mask)) {
851 case fcZero:
852 if (inputDenormalIsIEEE(F, Ty))
853 return FCmpInst::FCMP_OEQ;
854 break;
855 case fcZero | fcSubnormal:
856 if (inputDenormalIsDAZ(F, Ty))
857 return FCmpInst::FCMP_OEQ;
858 break;
859 case fcPositive | fcNegZero:
860 if (inputDenormalIsIEEE(F, Ty))
861 return FCmpInst::FCMP_OGE;
862 break;
863 case fcPositive | fcNegZero | fcNegSubnormal:
864 if (inputDenormalIsDAZ(F, Ty))
865 return FCmpInst::FCMP_OGE;
866 break;
867 case fcPosSubnormal | fcPosNormal | fcPosInf:
868 if (inputDenormalIsIEEE(F, Ty))
869 return FCmpInst::FCMP_OGT;
870 break;
871 case fcNegative | fcPosZero:
872 if (inputDenormalIsIEEE(F, Ty))
873 return FCmpInst::FCMP_OLE;
874 break;
875 case fcNegative | fcPosZero | fcPosSubnormal:
876 if (inputDenormalIsDAZ(F, Ty))
877 return FCmpInst::FCMP_OLE;
878 break;
879 case fcNegSubnormal | fcNegNormal | fcNegInf:
880 if (inputDenormalIsIEEE(F, Ty))
881 return FCmpInst::FCMP_OLT;
882 break;
883 case fcPosNormal | fcPosInf:
884 if (inputDenormalIsDAZ(F, Ty))
885 return FCmpInst::FCMP_OGT;
886 break;
887 case fcNegNormal | fcNegInf:
888 if (inputDenormalIsDAZ(F, Ty))
889 return FCmpInst::FCMP_OLT;
890 break;
891 case ~fcZero & ~fcNan:
892 if (inputDenormalIsIEEE(F, Ty))
893 return FCmpInst::FCMP_ONE;
894 break;
895 case ~(fcZero | fcSubnormal) & ~fcNan:
896 if (inputDenormalIsDAZ(F, Ty))
897 return FCmpInst::FCMP_ONE;
898 break;
899 default:
900 break;
901 }
902
903 return FCmpInst::BAD_FCMP_PREDICATE;
904 }
905
906 Instruction *InstCombinerImpl::foldIntrinsicIsFPClass(IntrinsicInst &II) {
907 Value *Src0 = II.getArgOperand(0);
908 Value *Src1 = II.getArgOperand(1);
909 const ConstantInt *CMask = cast<ConstantInt>(Src1);
910 FPClassTest Mask = static_cast<FPClassTest>(CMask->getZExtValue());
911 const bool IsUnordered = (Mask & fcNan) == fcNan;
912 const bool IsOrdered = (Mask & fcNan) == fcNone;
913 const FPClassTest OrderedMask = Mask & ~fcNan;
914 const FPClassTest OrderedInvertedMask = ~OrderedMask & ~fcNan;
915
916 const bool IsStrict =
917 II.getFunction()->getAttributes().hasFnAttr(Attribute::StrictFP);
918
919 Value *FNegSrc;
920 if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
921 // is.fpclass (fneg x), mask -> is.fpclass x, (fneg mask)
922
923 II.setArgOperand(1, ConstantInt::get(Src1->getType(), fneg(Mask)));
924 return replaceOperand(II, 0, FNegSrc);
925 }
926
927 Value *FAbsSrc;
928 if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
929 II.setArgOperand(1, ConstantInt::get(Src1->getType(), inverse_fabs(Mask)));
930 return replaceOperand(II, 0, FAbsSrc);
931 }
932
933 if ((OrderedMask == fcInf || OrderedInvertedMask == fcInf) &&
934 (IsOrdered || IsUnordered) && !IsStrict) {
935 // is.fpclass(x, fcInf) -> fcmp oeq fabs(x), +inf
936 // is.fpclass(x, ~fcInf) -> fcmp one fabs(x), +inf
937 // is.fpclass(x, fcInf|fcNan) -> fcmp ueq fabs(x), +inf
938 // is.fpclass(x, ~(fcInf|fcNan)) -> fcmp une fabs(x), +inf
939 Constant *Inf = ConstantFP::getInfinity(Src0->getType());
940 FCmpInst::Predicate Pred =
941 IsUnordered ? FCmpInst::FCMP_UEQ : FCmpInst::FCMP_OEQ;
942 if (OrderedInvertedMask == fcInf)
943 Pred = IsUnordered ? FCmpInst::FCMP_UNE : FCmpInst::FCMP_ONE;
944
945 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
946 Value *CmpInf = Builder.CreateFCmp(Pred, Fabs, Inf);
947 CmpInf->takeName(&II);
948 return replaceInstUsesWith(II, CmpInf);
949 }
950
951 if ((OrderedMask == fcPosInf || OrderedMask == fcNegInf) &&
952 (IsOrdered || IsUnordered) && !IsStrict) {
953 // is.fpclass(x, fcPosInf) -> fcmp oeq x, +inf
954 // is.fpclass(x, fcNegInf) -> fcmp oeq x, -inf
955 // is.fpclass(x, fcPosInf|fcNan) -> fcmp ueq x, +inf
956 // is.fpclass(x, fcNegInf|fcNan) -> fcmp ueq x, -inf
957 Constant *Inf =
958 ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
959 Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
960 : Builder.CreateFCmpOEQ(Src0, Inf);
961
962 EqInf->takeName(&II);
963 return replaceInstUsesWith(II, EqInf);
964 }
965
966 if ((OrderedInvertedMask == fcPosInf || OrderedInvertedMask == fcNegInf) &&
967 (IsOrdered || IsUnordered) && !IsStrict) {
968 // is.fpclass(x, ~fcPosInf) -> fcmp one x, +inf
969 // is.fpclass(x, ~fcNegInf) -> fcmp one x, -inf
970 // is.fpclass(x, ~fcPosInf|fcNan) -> fcmp une x, +inf
971 // is.fpclass(x, ~fcNegInf|fcNan) -> fcmp une x, -inf
972 Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
973 OrderedInvertedMask == fcNegInf);
974 Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
975 : Builder.CreateFCmpONE(Src0, Inf);
976 NeInf->takeName(&II);
977 return replaceInstUsesWith(II, NeInf);
978 }
979
980 if (Mask == fcNan && !IsStrict) {
981 // Equivalent of isnan. Replace with standard fcmp if we don't care about FP
982 // exceptions.
983 Value *IsNan =
984 Builder.CreateFCmpUNO(Src0, ConstantFP::getZero(Src0->getType()));
985 IsNan->takeName(&II);
986 return replaceInstUsesWith(II, IsNan);
987 }
988
989 if (Mask == (~fcNan & fcAllFlags) && !IsStrict) {
990 // Equivalent of !isnan. Replace with standard fcmp.
991 Value *FCmp =
992 Builder.CreateFCmpORD(Src0, ConstantFP::getZero(Src0->getType()));
993 FCmp->takeName(&II);
994 return replaceInstUsesWith(II, FCmp);
995 }
996
997 FCmpInst::Predicate PredType = FCmpInst::BAD_FCMP_PREDICATE;
998
999 // Try to replace with an fcmp with 0
1000 //
1001 // is.fpclass(x, fcZero) -> fcmp oeq x, 0.0
1002 // is.fpclass(x, fcZero | fcNan) -> fcmp ueq x, 0.0
1003 // is.fpclass(x, ~fcZero & ~fcNan) -> fcmp one x, 0.0
1004 // is.fpclass(x, ~fcZero) -> fcmp une x, 0.0
1005 //
1006 // is.fpclass(x, fcPosSubnormal | fcPosNormal | fcPosInf) -> fcmp ogt x, 0.0
1007 // is.fpclass(x, fcPositive | fcNegZero) -> fcmp oge x, 0.0
1008 //
1009 // is.fpclass(x, fcNegSubnormal | fcNegNormal | fcNegInf) -> fcmp olt x, 0.0
1010 // is.fpclass(x, fcNegative | fcPosZero) -> fcmp ole x, 0.0
1011 //
1012 if (!IsStrict && (IsOrdered || IsUnordered) &&
1013 (PredType = fpclassTestIsFCmp0(OrderedMask, *II.getFunction(),
1014 Src0->getType())) !=
1015 FCmpInst::BAD_FCMP_PREDICATE) {
1016 Constant *Zero = ConstantFP::getZero(Src0->getType());
1017 // Equivalent of == 0.
1018 Value *FCmp = Builder.CreateFCmp(
1019 IsUnordered ? FCmpInst::getUnorderedPredicate(PredType) : PredType,
1020 Src0, Zero);
1021
1022 FCmp->takeName(&II);
1023 return replaceInstUsesWith(II, FCmp);
1024 }
1025
1026 KnownFPClass Known = computeKnownFPClass(Src0, Mask, &II);
1027
1028 // Clear test bits we know must be false from the source value.
1029 // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
1030 // fp_class (ninf x), ninf|pinf|other -> fp_class (ninf x), other
1031 if ((Mask & Known.KnownFPClasses) != Mask) {
1032 II.setArgOperand(
1033 1, ConstantInt::get(Src1->getType(), Mask & Known.KnownFPClasses));
1034 return &II;
1035 }
1036
1037 // If none of the tests which can return false are possible, fold to true.
1038 // fp_class (nnan x), ~(qnan|snan) -> true
1039 // fp_class (ninf x), ~(ninf|pinf) -> true
1040 if (Mask == Known.KnownFPClasses)
1041 return replaceInstUsesWith(II, ConstantInt::get(II.getType(), true));
1042
1043 return nullptr;
1044 }
1045
1046 static std::optional<bool> getKnownSign(Value *Op, const SimplifyQuery &SQ) {
1047 KnownBits Known = computeKnownBits(Op, /*Depth=*/0, SQ);
1048 if (Known.isNonNegative())
1049 return false;
1050 if (Known.isNegative())
1051 return true;
1052
1053 Value *X, *Y;
1054 if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
1055 return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, SQ.CxtI, SQ.DL);
1056
1057 return std::nullopt;
1058 }
1059
1060 static std::optional<bool> getKnownSignOrZero(Value *Op,
1061 const SimplifyQuery &SQ) {
1062 if (std::optional<bool> Sign = getKnownSign(Op, SQ))
1063 return Sign;
1064
1065 Value *X, *Y;
1066 if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
1067 return isImpliedByDomCondition(ICmpInst::ICMP_SLE, X, Y, SQ.CxtI, SQ.DL);
1068
1069 return std::nullopt;
1070 }
1071
1072 /// Return true if two values \p Op0 and \p Op1 are known to have the same sign.
1073 static bool signBitMustBeTheSame(Value *Op0, Value *Op1,
1074 const SimplifyQuery &SQ) {
1075 std::optional<bool> Known1 = getKnownSign(Op1, SQ);
1076 if (!Known1)
1077 return false;
1078 std::optional<bool> Known0 = getKnownSign(Op0, SQ);
1079 if (!Known0)
1080 return false;
1081 return *Known0 == *Known1;
1082 }
1083
1084 /// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
1085 /// can trigger other combines.
1086 static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
1087 InstCombiner::BuilderTy &Builder) {
1088 Intrinsic::ID MinMaxID = II->getIntrinsicID();
1089 assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
1090 MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
1091 "Expected a min or max intrinsic");
1092
1093 // TODO: Match vectors with undef elements, but undef may not propagate.
1094 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1095 Value *X;
1096 const APInt *C0, *C1;
1097 if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
1098 !match(Op1, m_APInt(C1)))
1099 return nullptr;
1100
1101 // Check for necessary no-wrap and overflow constraints.
1102 bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
1103 auto *Add = cast<BinaryOperator>(Op0);
1104 if ((IsSigned && !Add->hasNoSignedWrap()) ||
1105 (!IsSigned && !Add->hasNoUnsignedWrap()))
1106 return nullptr;
1107
1108 // If the constant difference overflows, then instsimplify should reduce the
1109 // min/max to the add or C1.
1110 bool Overflow;
1111 APInt CDiff =
1112 IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
1113 assert(!Overflow && "Expected simplify of min/max");
1114
1115 // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
1116 // Note: the "mismatched" no-overflow setting does not propagate.
1117 Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
1118 Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
1119 return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
1120 : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
1121 }
1122 /// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
1123 Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
1124 Type *Ty = MinMax1.getType();
1125
1126 // We are looking for a tree of:
1127 // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
1128 // Where the min and max could be reversed
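  // Illustrative example: with i8 operands sign-extended to i32,
  //   smax(smin(add(sext a, sext b), 127), -128)
  // becomes sext(llvm.sadd.sat.i8(a, b)) (NewBitWidth == 8 below).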
1129 Instruction *MinMax2;
1130 BinaryOperator *AddSub;
1131 const APInt *MinValue, *MaxValue;
1132 if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
1133 if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
1134 return nullptr;
1135 } else if (match(&MinMax1,
1136 m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
1137 if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
1138 return nullptr;
1139 } else
1140 return nullptr;
1141
1142 // Check that the constants clamp a saturate, and that the new type would be
1143 // sensible to convert to.
1144 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
1145 return nullptr;
1146   // In what bitwidth can this be treated as saturating arithmetic?
1147 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
1148 // FIXME: This isn't quite right for vectors, but using the scalar type is a
1149 // good first approximation for what should be done there.
1150 if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
1151 return nullptr;
1152
1153 // Also make sure that the inner min/max and the add/sub have one use.
1154 if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
1155 return nullptr;
1156
1157 // Create the new type (which can be a vector type)
1158 Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
1159
1160 Intrinsic::ID IntrinsicID;
1161 if (AddSub->getOpcode() == Instruction::Add)
1162 IntrinsicID = Intrinsic::sadd_sat;
1163 else if (AddSub->getOpcode() == Instruction::Sub)
1164 IntrinsicID = Intrinsic::ssub_sat;
1165 else
1166 return nullptr;
1167
1168 // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
1169 // is usually achieved via a sext from a smaller type.
1170 if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
1171 NewBitWidth ||
1172 ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
1173 return nullptr;
1174
1175 // Finally create and return the sat intrinsic, truncated to the new type
1176 Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
1177 Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
1178 Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
1179 Value *Sat = Builder.CreateCall(F, {AT, BT});
1180 return CastInst::Create(Instruction::SExt, Sat, Ty);
1181 }
1182
1183
1184 /// If we have a clamp pattern like max (min X, 42), 41 -- where the output
1185 /// can only be one of two possible constant values -- turn that into a select
1186 /// of constants.
1187 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
1188 InstCombiner::BuilderTy &Builder) {
1189 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1190 Value *X;
1191 const APInt *C0, *C1;
1192 if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
1193 return nullptr;
1194
1195 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
1196 switch (II->getIntrinsicID()) {
1197 case Intrinsic::smax:
1198 if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
1199 Pred = ICmpInst::ICMP_SGT;
1200 break;
1201 case Intrinsic::smin:
1202 if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
1203 Pred = ICmpInst::ICMP_SLT;
1204 break;
1205 case Intrinsic::umax:
1206 if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
1207 Pred = ICmpInst::ICMP_UGT;
1208 break;
1209 case Intrinsic::umin:
1210 if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
1211 Pred = ICmpInst::ICMP_ULT;
1212 break;
1213 default:
1214 llvm_unreachable("Expected min/max intrinsic");
1215 }
1216 if (Pred == CmpInst::BAD_ICMP_PREDICATE)
1217 return nullptr;
1218
1219 // max (min X, 42), 41 --> X > 41 ? 42 : 41
1220 // min (max X, 42), 43 --> X < 43 ? 42 : 43
1221 Value *Cmp = Builder.CreateICmp(Pred, X, I1);
1222 return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
1223 }
1224
1225 /// If this min/max has a constant operand and an operand that is a matching
1226 /// min/max with a constant operand, constant-fold the 2 constant operands.
1227 static Value *reassociateMinMaxWithConstants(IntrinsicInst *II,
1228 IRBuilderBase &Builder,
1229 const SimplifyQuery &SQ) {
1230 Intrinsic::ID MinMaxID = II->getIntrinsicID();
1231 auto *LHS = dyn_cast<MinMaxIntrinsic>(II->getArgOperand(0));
1232 if (!LHS)
1233 return nullptr;
1234
1235 Constant *C0, *C1;
1236 if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
1237 !match(II->getArgOperand(1), m_ImmConstant(C1)))
1238 return nullptr;
1239
1240 // max (max X, C0), C1 --> max X, (max C0, C1)
1241 // min (min X, C0), C1 --> min X, (min C0, C1)
1242 // umax (smax X, nneg C0), nneg C1 --> smax X, (umax C0, C1)
1243 // smin (umin X, nneg C0), nneg C1 --> umin X, (smin C0, C1)
1244 Intrinsic::ID InnerMinMaxID = LHS->getIntrinsicID();
1245 if (InnerMinMaxID != MinMaxID &&
1246 !(((MinMaxID == Intrinsic::umax && InnerMinMaxID == Intrinsic::smax) ||
1247 (MinMaxID == Intrinsic::smin && InnerMinMaxID == Intrinsic::umin)) &&
1248 isKnownNonNegative(C0, SQ) && isKnownNonNegative(C1, SQ)))
1249 return nullptr;
1250
1251 ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
1252 Value *CondC = Builder.CreateICmp(Pred, C0, C1);
1253 Value *NewC = Builder.CreateSelect(CondC, C0, C1);
1254 return Builder.CreateIntrinsic(InnerMinMaxID, II->getType(),
1255 {LHS->getArgOperand(0), NewC});
1256 }
1257
1258 /// If this min/max has a matching min/max operand with a constant, try to push
1259 /// the constant operand into this instruction. This can enable more folds.
1260 static Instruction *
1261 reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
1262 InstCombiner::BuilderTy &Builder) {
1263 // Match and capture a min/max operand candidate.
1264 Value *X, *Y;
1265 Constant *C;
1266 Instruction *Inner;
1267 if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
1268 m_Instruction(Inner),
1269 m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
1270 m_Value(Y))))
1271 return nullptr;
1272
1273 // The inner op must match. Check for constants to avoid infinite loops.
1274 Intrinsic::ID MinMaxID = II->getIntrinsicID();
1275 auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
1276 if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
1277 match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
1278 return nullptr;
1279
1280 // max (max X, C), Y --> max (max X, Y), C
1281 Function *MinMax =
1282 Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
1283 Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
1284 NewInner->takeName(Inner);
1285 return CallInst::Create(MinMax, {NewInner, C});
1286 }
1287
1288 /// Reduce a sequence of min/max intrinsics with a common operand.
1289 static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
1290 // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
1291 auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
1292 auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
1293 Intrinsic::ID MinMaxID = II->getIntrinsicID();
1294 if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
1295 RHS->getIntrinsicID() != MinMaxID ||
1296 (!LHS->hasOneUse() && !RHS->hasOneUse()))
1297 return nullptr;
1298
1299 Value *A = LHS->getArgOperand(0);
1300 Value *B = LHS->getArgOperand(1);
1301 Value *C = RHS->getArgOperand(0);
1302 Value *D = RHS->getArgOperand(1);
1303
1304 // Look for a common operand.
1305 Value *MinMaxOp = nullptr;
1306 Value *ThirdOp = nullptr;
1307 if (LHS->hasOneUse()) {
1308 // If the LHS is only used in this chain and the RHS is used outside of it,
1309 // reuse the RHS min/max because that will eliminate the LHS.
1310 if (D == A || C == A) {
1311 // min(min(a, b), min(c, a)) --> min(min(c, a), b)
1312 // min(min(a, b), min(a, d)) --> min(min(a, d), b)
1313 MinMaxOp = RHS;
1314 ThirdOp = B;
1315 } else if (D == B || C == B) {
1316 // min(min(a, b), min(c, b)) --> min(min(c, b), a)
1317 // min(min(a, b), min(b, d)) --> min(min(b, d), a)
1318 MinMaxOp = RHS;
1319 ThirdOp = A;
1320 }
1321 } else {
1322 assert(RHS->hasOneUse() && "Expected one-use operand");
1323 // Reuse the LHS. This will eliminate the RHS.
1324 if (D == A || D == B) {
1325 // min(min(a, b), min(c, a)) --> min(min(a, b), c)
1326 // min(min(a, b), min(c, b)) --> min(min(a, b), c)
1327 MinMaxOp = LHS;
1328 ThirdOp = C;
1329 } else if (C == A || C == B) {
1330 // min(min(a, b), min(b, d)) --> min(min(a, b), d)
1331       // min(min(a, b), min(a, d)) --> min(min(a, b), d)
1332 MinMaxOp = LHS;
1333 ThirdOp = D;
1334 }
1335 }
1336
1337 if (!MinMaxOp || !ThirdOp)
1338 return nullptr;
1339
1340 Module *Mod = II->getModule();
1341 Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
1342 return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
1343 }
1344
1345 /// If all arguments of the intrinsic are unary shuffles with the same mask,
1346 /// try to shuffle after the intrinsic.
1347 static Instruction *
1348 foldShuffledIntrinsicOperands(IntrinsicInst *II,
1349 InstCombiner::BuilderTy &Builder) {
1350 // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
1351 // etc. Use llvm::isTriviallyVectorizable() and related to determine
1352 // which intrinsics are safe to shuffle?
1353 switch (II->getIntrinsicID()) {
1354 case Intrinsic::smax:
1355 case Intrinsic::smin:
1356 case Intrinsic::umax:
1357 case Intrinsic::umin:
1358 case Intrinsic::fma:
1359 case Intrinsic::fshl:
1360 case Intrinsic::fshr:
1361 break;
1362 default:
1363 return nullptr;
1364 }
1365
1366 Value *X;
1367 ArrayRef<int> Mask;
1368 if (!match(II->getArgOperand(0),
1369 m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
1370 return nullptr;
1371
1372 // At least 1 operand must have 1 use because we are creating 2 instructions.
1373 if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
1374 return nullptr;
1375
1376 // See if all arguments are shuffled with the same mask.
1377 SmallVector<Value *, 4> NewArgs(II->arg_size());
1378 NewArgs[0] = X;
1379 Type *SrcTy = X->getType();
1380 for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
1381 if (!match(II->getArgOperand(i),
1382 m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
1383 X->getType() != SrcTy)
1384 return nullptr;
1385 NewArgs[i] = X;
1386 }
1387
1388 // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
1389 Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
1390 Value *NewIntrinsic =
1391 Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
1392 return new ShuffleVectorInst(NewIntrinsic, Mask);
1393 }
1394
1395 /// Fold the following cases (applies to both bswap and bitreverse intrinsics):
1396 /// bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
1397 /// bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (ignores multiuse)
1398 template <Intrinsic::ID IntrID>
1399 static Instruction *foldBitOrderCrossLogicOp(Value *V,
1400 InstCombiner::BuilderTy &Builder) {
1401 static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
1402 "This helper only supports BSWAP and BITREVERSE intrinsics");
1403
1404 Value *X, *Y;
1405   // Find the bitwise logic op. Check that it is a BinaryOperator explicitly so
1406   // we don't match a ConstantExpr that isn't meaningful for this transform.
1407 if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) &&
1408 isa<BinaryOperator>(V)) {
1409 Value *OldReorderX, *OldReorderY;
1410 BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();
1411
1412 // If both X and Y are bswap/bitreverse, the transform reduces the number
1413 // of instructions even if there's multiuse.
1414     // If only one operand is bswap/bitreverse, we need to ensure that
1415     // operand has only one use.
1416 if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
1417 match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) {
1418 return BinaryOperator::Create(Op, OldReorderX, OldReorderY);
1419 }
1420
1421 if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
1422 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
1423 return BinaryOperator::Create(Op, OldReorderX, NewReorder);
1424 }
1425
1426 if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
1427 Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
1428 return BinaryOperator::Create(Op, NewReorder, OldReorderY);
1429 }
1430 }
1431 return nullptr;
1432 }
1433
1434 static Value *simplifyReductionOperand(Value *Arg, bool CanReorderLanes) {
1435 if (!CanReorderLanes)
1436 return nullptr;
1437
1438 Value *V;
1439 if (match(Arg, m_VecReverse(m_Value(V))))
1440 return V;
1441
1442 ArrayRef<int> Mask;
1443 if (!isa<FixedVectorType>(Arg->getType()) ||
1444 !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
1445 !cast<ShuffleVectorInst>(Arg)->isSingleSource())
1446 return nullptr;
1447
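  // Check that the mask is a permutation: every lane index is in range and
  // appears exactly once. When CanReorderLanes is set, the caller's reduction
  // is insensitive to lane order, so such a shuffle can simply be dropped.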
1448 int Sz = Mask.size();
1449 SmallBitVector UsedIndices(Sz);
1450 for (int Idx : Mask) {
1451 if (Idx == PoisonMaskElem || UsedIndices.test(Idx))
1452 return nullptr;
1453 UsedIndices.set(Idx);
1454 }
1455
1456   // The shuffle can be removed iff it only reorders elements: no repeats,
1457   // no undefs, and no other changes.
1458 return UsedIndices.all() ? V : nullptr;
1459 }
1460
1461 /// Fold an unsigned minimum of trailing or leading zero bit counts:
1462 /// umin(cttz(CtOp, ZeroUndef), ConstOp) --> cttz(CtOp | (1 << ConstOp))
1463 /// umin(ctlz(CtOp, ZeroUndef), ConstOp) --> ctlz(CtOp | (SignedMin
1464 /// >> ConstOp))
1465 template <Intrinsic::ID IntrID>
1466 static Value *
1467 foldMinimumOverTrailingOrLeadingZeroCount(Value *I0, Value *I1,
1468 const DataLayout &DL,
1469 InstCombiner::BuilderTy &Builder) {
1470 static_assert(IntrID == Intrinsic::cttz || IntrID == Intrinsic::ctlz,
1471 "This helper only supports cttz and ctlz intrinsics");
1472
1473 Value *CtOp;
1474 Value *ZeroUndef;
1475 if (!match(I0,
1476 m_OneUse(m_Intrinsic<IntrID>(m_Value(CtOp), m_Value(ZeroUndef)))))
1477 return nullptr;
1478
1479 unsigned BitWidth = I1->getType()->getScalarSizeInBits();
1480 auto LessBitWidth = [BitWidth](auto &C) { return C.ult(BitWidth); };
1481 if (!match(I1, m_CheckedInt(LessBitWidth)))
1482 // We have a constant >= BitWidth (which can be handled by CVP)
1483 // or a non-splat vector with elements < and >= BitWidth
1484 return nullptr;
1485
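  // Build a constant that forces a set bit at position ConstOp: (1 << ConstOp)
  // for cttz, or (SignedMin >> ConstOp) for ctlz. ORing it into CtOp caps the
  // count at ConstOp, which makes the outer umin redundant.
  // For example (cttz, i32, illustrative):
  //   umin(cttz(x, ZeroUndef), 8) --> cttz(x | 256, true)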
1486 Type *Ty = I1->getType();
1487 Constant *NewConst = ConstantFoldBinaryOpOperands(
1488 IntrID == Intrinsic::cttz ? Instruction::Shl : Instruction::LShr,
1489 IntrID == Intrinsic::cttz
1490 ? ConstantInt::get(Ty, 1)
1491 : ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth)),
1492 cast<Constant>(I1), DL);
1493 return Builder.CreateBinaryIntrinsic(
1494 IntrID, Builder.CreateOr(CtOp, NewConst),
1495 ConstantInt::getTrue(ZeroUndef->getType()));
1496 }
1497
1498 /// CallInst simplification. This mostly only handles folding of intrinsic
1499 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1500 /// lifting.
1501 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
1502 // Don't try to simplify calls without uses. It will not do anything useful,
1503 // but will result in the following folds being skipped.
1504 if (!CI.use_empty()) {
1505 SmallVector<Value *, 4> Args;
1506 Args.reserve(CI.arg_size());
1507 for (Value *Op : CI.args())
1508 Args.push_back(Op);
1509 if (Value *V = simplifyCall(&CI, CI.getCalledOperand(), Args,
1510 SQ.getWithInstruction(&CI)))
1511 return replaceInstUsesWith(CI, V);
1512 }
1513
1514 if (Value *FreedOp = getFreedOperand(&CI, &TLI))
1515 return visitFree(CI, FreedOp);
1516
1517 // If the caller function (i.e. us, the function that contains this CallInst)
1518 // is nounwind, mark the call as nounwind, even if the callee isn't.
1519 if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1520 CI.setDoesNotThrow();
1521 return &CI;
1522 }
1523
1524 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1525 if (!II) return visitCallBase(CI);
1526
1527   // For atomic unordered mem intrinsics, if the length is not positive or is
1528   // not a multiple of the element size, then the behavior is undefined.
1529 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
1530 if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
1531 if (NumBytes->isNegative() ||
1532 (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
1533 CreateNonTerminatorUnreachable(AMI);
1534 assert(AMI->getType()->isVoidTy() &&
1535 "non void atomic unordered mem intrinsic");
1536 return eraseInstFromFunction(*AMI);
1537 }
1538
1539 // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1540 // instead of in visitCallBase.
1541 if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1542 bool Changed = false;
1543
1544 // memmove/cpy/set of zero bytes is a noop.
1545 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1546 if (NumBytes->isNullValue())
1547 return eraseInstFromFunction(CI);
1548 }
1549
1550 // No other transformations apply to volatile transfers.
1551 if (auto *M = dyn_cast<MemIntrinsic>(MI))
1552 if (M->isVolatile())
1553 return nullptr;
1554
1555     // If we have a memmove and the source operand is a constant global,
1556 // then the source and dest pointers can't alias, so we can change this
1557 // into a call to memcpy.
1558 if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1559 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1560 if (GVSrc->isConstant()) {
1561 Module *M = CI.getModule();
1562 Intrinsic::ID MemCpyID =
1563 isa<AtomicMemMoveInst>(MMI)
1564 ? Intrinsic::memcpy_element_unordered_atomic
1565 : Intrinsic::memcpy;
1566 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1567 CI.getArgOperand(1)->getType(),
1568 CI.getArgOperand(2)->getType() };
1569 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1570 Changed = true;
1571 }
1572 }
1573
1574 if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1575 // memmove(x,x,size) -> noop.
1576 if (MTI->getSource() == MTI->getDest())
1577 return eraseInstFromFunction(CI);
1578 }
1579
1580 // If we can determine a pointer alignment that is bigger than currently
1581 // set, update the alignment.
1582 if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1583 if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1584 return I;
1585 } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1586 if (Instruction *I = SimplifyAnyMemSet(MSI))
1587 return I;
1588 }
1589
1590 if (Changed) return II;
1591 }
1592
1593 // For fixed width vector result intrinsics, use the generic demanded vector
1594 // support.
1595 if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
1596 auto VWidth = IIFVTy->getNumElements();
1597 APInt PoisonElts(VWidth, 0);
1598 APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1599 if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, PoisonElts)) {
1600 if (V != II)
1601 return replaceInstUsesWith(*II, V);
1602 return II;
1603 }
1604 }
1605
1606 if (II->isCommutative()) {
1607 if (auto Pair = matchSymmetricPair(II->getOperand(0), II->getOperand(1))) {
1608 replaceOperand(*II, 0, Pair->first);
1609 replaceOperand(*II, 1, Pair->second);
1610 return II;
1611 }
1612
1613 if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
1614 return NewCall;
1615 }
1616
1617   // An unused constrained FP intrinsic call may have a declared side effect,
1618   // which prevents it from being removed. In some cases, however, the side
1619   // effect is actually absent. To detect this case, call
1620   // simplifyConstrainedFPCall. If it returns a replacement, the call may be
1620   // removed.
1621 if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1622 if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
1623 return eraseInstFromFunction(CI);
1624 }
1625
1626 Intrinsic::ID IID = II->getIntrinsicID();
1627 switch (IID) {
1628 case Intrinsic::objectsize: {
1629 SmallVector<Instruction *> InsertedInstructions;
1630 if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false,
1631 &InsertedInstructions)) {
1632 for (Instruction *Inserted : InsertedInstructions)
1633 Worklist.add(Inserted);
1634 return replaceInstUsesWith(CI, V);
1635 }
1636 return nullptr;
1637 }
1638 case Intrinsic::abs: {
1639 Value *IIOperand = II->getArgOperand(0);
1640 bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
1641
1642 // abs(-x) -> abs(x)
1643 // TODO: Copy nsw if it was present on the neg?
1644 Value *X;
1645 if (match(IIOperand, m_Neg(m_Value(X))))
1646 return replaceOperand(*II, 0, X);
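    // abs(select c, x, -x) --> abs(x) and abs(select c, -x, x) --> abs(x):
    // both select arms have the same absolute value, so the select is moot.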
1647 if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
1648 return replaceOperand(*II, 0, X);
1649 if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
1650 return replaceOperand(*II, 0, X);
1651
1652 Value *Y;
1653 // abs(a * abs(b)) -> abs(a * b)
1654 if (match(IIOperand,
1655 m_OneUse(m_c_Mul(m_Value(X),
1656 m_Intrinsic<Intrinsic::abs>(m_Value(Y)))))) {
1657 bool NSW =
1658 cast<Instruction>(IIOperand)->hasNoSignedWrap() && IntMinIsPoison;
1659 auto *XY = NSW ? Builder.CreateNSWMul(X, Y) : Builder.CreateMul(X, Y);
1660 return replaceOperand(*II, 0, XY);
1661 }
1662
1663 if (std::optional<bool> Known =
1664 getKnownSignOrZero(IIOperand, SQ.getWithInstruction(II))) {
1665 // abs(x) -> x if x >= 0 (include abs(x-y) --> x - y where x >= y)
1666 // abs(x) -> x if x > 0 (include abs(x-y) --> x - y where x > y)
1667 if (!*Known)
1668 return replaceInstUsesWith(*II, IIOperand);
1669
1670 // abs(x) -> -x if x < 0
1671     // abs(x) -> -x if x <= 0 (includes abs(x-y) --> y - x where x <= y)
1672 if (IntMinIsPoison)
1673 return BinaryOperator::CreateNSWNeg(IIOperand);
1674 return BinaryOperator::CreateNeg(IIOperand);
1675 }
1676
1677 // abs (sext X) --> zext (abs X*)
1678 // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
1679 if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
1680 Value *NarrowAbs =
1681 Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
1682 return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
1683 }
1684
1685 // Match a complicated way to check if a number is odd/even:
1686 // abs (srem X, 2) --> and X, 1
1687 const APInt *C;
1688 if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
1689 return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
1690
1691 break;
1692 }
1693 case Intrinsic::umin: {
1694 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1695 // umin(x, 1) == zext(x != 0)
1696 if (match(I1, m_One())) {
1697 assert(II->getType()->getScalarSizeInBits() != 1 &&
1698 "Expected simplify of umin with max constant");
1699 Value *Zero = Constant::getNullValue(I0->getType());
1700 Value *Cmp = Builder.CreateICmpNE(I0, Zero);
1701 return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
1702 }
1703 // umin(cttz(x), const) --> cttz(x | (1 << const))
1704 if (Value *FoldedCttz =
1705 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::cttz>(
1706 I0, I1, DL, Builder))
1707 return replaceInstUsesWith(*II, FoldedCttz);
1708 // umin(ctlz(x), const) --> ctlz(x | (SignedMin >> const))
1709 if (Value *FoldedCtlz =
1710 foldMinimumOverTrailingOrLeadingZeroCount<Intrinsic::ctlz>(
1711 I0, I1, DL, Builder))
1712 return replaceInstUsesWith(*II, FoldedCtlz);
1713 [[fallthrough]];
1714 }
1715 case Intrinsic::umax: {
1716 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1717 Value *X, *Y;
1718 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
1719 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1720 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1721 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1722 }
1723 Constant *C;
1724 if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1725 I0->hasOneUse()) {
1726 if (Constant *NarrowC = getLosslessUnsignedTrunc(C, X->getType())) {
1727 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1728 return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1729 }
1730 }
1731 // If both operands of unsigned min/max are sign-extended, it is still ok
1732 // to narrow the operation.
1733 [[fallthrough]];
1734 }
1735 case Intrinsic::smax:
1736 case Intrinsic::smin: {
1737 Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1738 Value *X, *Y;
1739 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
1740 (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1741 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1742 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1743 }
1744
1745 Constant *C;
1746 if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1747 I0->hasOneUse()) {
1748 if (Constant *NarrowC = getLosslessSignedTrunc(C, X->getType())) {
1749 Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1750 return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1751 }
1752 }
1753
1754 // umin(i1 X, i1 Y) -> and i1 X, Y
1755 // smax(i1 X, i1 Y) -> and i1 X, Y
1756 if ((IID == Intrinsic::umin || IID == Intrinsic::smax) &&
1757 II->getType()->isIntOrIntVectorTy(1)) {
1758 return BinaryOperator::CreateAnd(I0, I1);
1759 }
1760
1761 // umax(i1 X, i1 Y) -> or i1 X, Y
1762 // smin(i1 X, i1 Y) -> or i1 X, Y
1763 if ((IID == Intrinsic::umax || IID == Intrinsic::smin) &&
1764 II->getType()->isIntOrIntVectorTy(1)) {
1765 return BinaryOperator::CreateOr(I0, I1);
1766 }
1767
1768 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1769 // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
1770 // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
1771 // TODO: Canonicalize neg after min/max if I1 is constant.
1772 if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
1773 (I0->hasOneUse() || I1->hasOneUse())) {
1774 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1775 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
1776 return BinaryOperator::CreateNSWNeg(InvMaxMin);
1777 }
1778 }
1779
1780 // (umax X, (xor X, Pow2))
1781 // -> (or X, Pow2)
1782 // (umin X, (xor X, Pow2))
1783 // -> (and X, ~Pow2)
1784 // (smax X, (xor X, Pos_Pow2))
1785 // -> (or X, Pos_Pow2)
1786 // (smin X, (xor X, Pos_Pow2))
1787 // -> (and X, ~Pos_Pow2)
1788 // (smax X, (xor X, Neg_Pow2))
1789 // -> (and X, ~Neg_Pow2)
1790 // (smin X, (xor X, Neg_Pow2))
1791 // -> (or X, Neg_Pow2)
1792 if ((match(I0, m_c_Xor(m_Specific(I1), m_Value(X))) ||
1793 match(I1, m_c_Xor(m_Specific(I0), m_Value(X)))) &&
1794 isKnownToBeAPowerOfTwo(X, /* OrZero */ true)) {
1795 bool UseOr = IID == Intrinsic::smax || IID == Intrinsic::umax;
1796 bool UseAndN = IID == Intrinsic::smin || IID == Intrinsic::umin;
1797
1798 if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1799 auto KnownSign = getKnownSign(X, SQ.getWithInstruction(II));
1800 if (KnownSign == std::nullopt) {
1801 UseOr = false;
1802 UseAndN = false;
1803 } else if (*KnownSign /* true is Signed. */) {
1804 UseOr ^= true;
1805 UseAndN ^= true;
1806 Type *Ty = I0->getType();
1807           // A negative power of 2 must be IntMin. It's possible to prove that a
1808           // value is negative and a power of 2 without actually having its known
1809           // bits, so just get the value by hand.
1810 X = Constant::getIntegerValue(
1811 Ty, APInt::getSignedMinValue(Ty->getScalarSizeInBits()));
1812 }
1813 }
1814 if (UseOr)
1815 return BinaryOperator::CreateOr(I0, X);
1816 else if (UseAndN)
1817 return BinaryOperator::CreateAnd(I0, Builder.CreateNot(X));
1818 }
1819
1820 // If we can eliminate ~A and Y is free to invert:
1821 // max ~A, Y --> ~(min A, ~Y)
1822 //
1823 // Examples:
1824 // max ~A, ~Y --> ~(min A, Y)
1825 // max ~A, C --> ~(min A, ~C)
1826 // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
1827 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
1828 Value *A;
1829 if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
1830 !isFreeToInvert(A, A->hasOneUse())) {
1831 if (Value *NotY = getFreelyInverted(Y, Y->hasOneUse(), &Builder)) {
1832 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1833 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
1834 return BinaryOperator::CreateNot(InvMaxMin);
1835 }
1836 }
1837 return nullptr;
1838 };
1839
1840 if (Instruction *I = moveNotAfterMinMax(I0, I1))
1841 return I;
1842 if (Instruction *I = moveNotAfterMinMax(I1, I0))
1843 return I;
1844
1845 if (Instruction *I = moveAddAfterMinMax(II, Builder))
1846 return I;
1847
1848 // minmax (X & NegPow2C, Y & NegPow2C) --> minmax(X, Y) & NegPow2C
1849 const APInt *RHSC;
1850 if (match(I0, m_OneUse(m_And(m_Value(X), m_NegatedPower2(RHSC)))) &&
1851 match(I1, m_OneUse(m_And(m_Value(Y), m_SpecificInt(*RHSC)))))
1852 return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
1853 ConstantInt::get(II->getType(), *RHSC));
1854
1855 // smax(X, -X) --> abs(X)
1856 // smin(X, -X) --> -abs(X)
1857 // umax(X, -X) --> -abs(X)
1858 // umin(X, -X) --> abs(X)
1859 if (isKnownNegation(I0, I1)) {
1860 // We can choose either operand as the input to abs(), but if we can
1861 // eliminate the only use of a value, that's better for subsequent
1862 // transforms/analysis.
1863 if (I0->hasOneUse() && !I1->hasOneUse())
1864 std::swap(I0, I1);
1865
1866 // This is some variant of abs(). See if we can propagate 'nsw' to the abs
1867 // operation and potentially its negation.
1868 bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
1869 Value *Abs = Builder.CreateBinaryIntrinsic(
1870 Intrinsic::abs, I0,
1871 ConstantInt::getBool(II->getContext(), IntMinIsPoison));
1872
1873 // We don't have a "nabs" intrinsic, so negate if needed based on the
1874 // max/min operation.
1875 if (IID == Intrinsic::smin || IID == Intrinsic::umax)
1876 Abs = Builder.CreateNeg(Abs, "nabs", IntMinIsPoison);
1877 return replaceInstUsesWith(CI, Abs);
1878 }
1879
1880 if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
1881 return Sel;
1882
1883 if (Instruction *SAdd = matchSAddSubSat(*II))
1884 return SAdd;
1885
1886 if (Value *NewMinMax = reassociateMinMaxWithConstants(II, Builder, SQ))
1887 return replaceInstUsesWith(*II, NewMinMax);
1888
1889 if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder))
1890 return R;
1891
1892 if (Instruction *NewMinMax = factorizeMinMaxTree(II))
1893 return NewMinMax;
1894
1895 // Try to fold minmax with constant RHS based on range information
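    // For example (illustrative): if I0 is known to lie in [0, 10), then
    // umin(I0, 20) --> I0 and umax(I0, 20) --> 20.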
1896 if (match(I1, m_APIntAllowPoison(RHSC))) {
1897 ICmpInst::Predicate Pred =
1898 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
1899 bool IsSigned = MinMaxIntrinsic::isSigned(IID);
1900 ConstantRange LHS_CR = computeConstantRangeIncludingKnownBits(
1901 I0, IsSigned, SQ.getWithInstruction(II));
1902 if (!LHS_CR.isFullSet()) {
1903 if (LHS_CR.icmp(Pred, *RHSC))
1904 return replaceInstUsesWith(*II, I0);
1905 if (LHS_CR.icmp(ICmpInst::getSwappedPredicate(Pred), *RHSC))
1906 return replaceInstUsesWith(*II,
1907 ConstantInt::get(II->getType(), *RHSC));
1908 }
1909 }
1910
1911 break;
1912 }
1913 case Intrinsic::bitreverse: {
1914 Value *IIOperand = II->getArgOperand(0);
1915 // bitrev (zext i1 X to ?) --> X ? SignBitC : 0
1916 Value *X;
1917 if (match(IIOperand, m_ZExt(m_Value(X))) &&
1918 X->getType()->isIntOrIntVectorTy(1)) {
1919 Type *Ty = II->getType();
1920 APInt SignBit = APInt::getSignMask(Ty->getScalarSizeInBits());
1921 return SelectInst::Create(X, ConstantInt::get(Ty, SignBit),
1922 ConstantInt::getNullValue(Ty));
1923 }
1924
1925 if (Instruction *crossLogicOpFold =
1926 foldBitOrderCrossLogicOp<Intrinsic::bitreverse>(IIOperand, Builder))
1927 return crossLogicOpFold;
1928
1929 break;
1930 }
1931 case Intrinsic::bswap: {
1932 Value *IIOperand = II->getArgOperand(0);
1933
1934 // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as
1935 // inverse-shift-of-bswap:
1936 // bswap (shl X, Y) --> lshr (bswap X), Y
1937 // bswap (lshr X, Y) --> shl (bswap X), Y
1938 Value *X, *Y;
1939 if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
1940 unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits();
1941 if (MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) {
1942 Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
1943 BinaryOperator::BinaryOps InverseShift =
1944 cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
1945 ? Instruction::LShr
1946 : Instruction::Shl;
1947 return BinaryOperator::Create(InverseShift, NewSwap, Y);
1948 }
1949 }
1950
1951 KnownBits Known = computeKnownBits(IIOperand, 0, II);
1952 uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
1953 uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
1954 unsigned BW = Known.getBitWidth();
1955
1956 // bswap(x) -> shift(x) if x has exactly one "active byte"
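    // For example (i32, illustrative): if x is known to have the form
    // 0x0000??00, then bswap(x) == shl nuw x, 8.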
1957 if (BW - LZ - TZ == 8) {
1958 assert(LZ != TZ && "active byte cannot be in the middle");
1959 if (LZ > TZ) // -> shl(x) if the "active byte" is in the low part of x
1960 return BinaryOperator::CreateNUWShl(
1961 IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
1962 // -> lshr(x) if the "active byte" is in the high part of x
1963 return BinaryOperator::CreateExactLShr(
1964 IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
1965 }
1966
1967 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
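    // For example (illustrative): with i32 x truncated to i16, the surviving
    // bytes are the two high bytes of x, back in their original order, so the
    // result is trunc(lshr(x, 16)) and c == 16.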
1968 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1969 unsigned C = X->getType()->getScalarSizeInBits() - BW;
1970 Value *CV = ConstantInt::get(X->getType(), C);
1971 Value *V = Builder.CreateLShr(X, CV);
1972 return new TruncInst(V, IIOperand->getType());
1973 }
1974
1975 if (Instruction *crossLogicOpFold =
1976 foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
1977 return crossLogicOpFold;
1978 }
1979
1980 // Try to fold into bitreverse if bswap is the root of the expression tree.
1981 if (Instruction *BitOp = matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ false,
1982 /*MatchBitReversals*/ true))
1983 return BitOp;
1984 break;
1985 }
1986 case Intrinsic::masked_load:
1987 if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1988 return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1989 break;
1990 case Intrinsic::masked_store:
1991 return simplifyMaskedStore(*II);
1992 case Intrinsic::masked_gather:
1993 return simplifyMaskedGather(*II);
1994 case Intrinsic::masked_scatter:
1995 return simplifyMaskedScatter(*II);
1996 case Intrinsic::launder_invariant_group:
1997 case Intrinsic::strip_invariant_group:
1998 if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1999 return replaceInstUsesWith(*II, SkippedBarrier);
2000 break;
2001 case Intrinsic::powi:
2002 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
2003 // 0 and 1 are handled in instsimplify
2004 // powi(x, -1) -> 1/x
2005 if (Power->isMinusOne())
2006 return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
2007 II->getArgOperand(0), II);
2008 // powi(x, 2) -> x*x
2009 if (Power->equalsInt(2))
2010 return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
2011 II->getArgOperand(0), II);
2012
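      // getValue()[0] is the low bit of the exponent; if it is clear, the
      // exponent is even and the sign of the base cannot affect the result.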
2013 if (!Power->getValue()[0]) {
2014 Value *X;
2015 // If power is even:
2016 // powi(-x, p) -> powi(x, p)
2017 // powi(fabs(x), p) -> powi(x, p)
2018 // powi(copysign(x, y), p) -> powi(x, p)
2019 if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) ||
2020 match(II->getArgOperand(0), m_FAbs(m_Value(X))) ||
2021 match(II->getArgOperand(0),
2022 m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value())))
2023 return replaceOperand(*II, 0, X);
2024 }
2025 }
2026 break;
2027
2028 case Intrinsic::cttz:
2029 case Intrinsic::ctlz:
2030 if (auto *I = foldCttzCtlz(*II, *this))
2031 return I;
2032 break;
2033
2034 case Intrinsic::ctpop:
2035 if (auto *I = foldCtpop(*II, *this))
2036 return I;
2037 break;
2038
2039 case Intrinsic::fshl:
2040 case Intrinsic::fshr: {
2041 Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
2042 Type *Ty = II->getType();
2043 unsigned BitWidth = Ty->getScalarSizeInBits();
2044 Constant *ShAmtC;
2045 if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) {
2046 // Canonicalize a shift amount constant operand to modulo the bit-width.
2047 Constant *WidthC = ConstantInt::get(Ty, BitWidth);
2048 Constant *ModuloC =
2049 ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL);
2050 if (!ModuloC)
2051 return nullptr;
2052 if (ModuloC != ShAmtC)
2053 return replaceOperand(*II, 2, ModuloC);
2054
2055 assert(match(ConstantFoldCompareInstOperands(ICmpInst::ICMP_UGT, WidthC,
2056 ShAmtC, DL),
2057 m_One()) &&
2058 "Shift amount expected to be modulo bitwidth");
2059
2060 // Canonicalize funnel shift right by constant to funnel shift left. This
2061 // is not entirely arbitrary. For historical reasons, the backend may
2062 // recognize rotate left patterns but miss rotate right patterns.
2063 if (IID == Intrinsic::fshr) {
2064 // fshr X, Y, C --> fshl X, Y, (BitWidth - C) if C is not zero.
2065 if (!isKnownNonZero(ShAmtC, SQ.getWithInstruction(II)))
2066 return nullptr;
2067
2068 Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
2069 Module *Mod = II->getModule();
2070 Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
2071 return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
2072 }
2073 assert(IID == Intrinsic::fshl &&
2074 "All funnel shifts by simple constants should go left");
2075
2076 // fshl(X, 0, C) --> shl X, C
2077 // fshl(X, undef, C) --> shl X, C
2078 if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
2079 return BinaryOperator::CreateShl(Op0, ShAmtC);
2080
2081 // fshl(0, X, C) --> lshr X, (BW-C)
2082 // fshl(undef, X, C) --> lshr X, (BW-C)
2083 if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
2084 return BinaryOperator::CreateLShr(Op1,
2085 ConstantExpr::getSub(WidthC, ShAmtC));
2086
2087 // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
2088 if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
2089 Module *Mod = II->getModule();
2090 Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
2091 return CallInst::Create(Bswap, { Op0 });
2092 }
2093 if (Instruction *BitOp =
2094 matchBSwapOrBitReverse(*II, /*MatchBSwaps*/ true,
2095 /*MatchBitReversals*/ true))
2096 return BitOp;
2097 }
2098
2099 // Left or right might be masked.
2100 if (SimplifyDemandedInstructionBits(*II))
2101 return &CI;
2102
2103 // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
2104 // so only the low bits of the shift amount are demanded if the bitwidth is
2105 // a power-of-2.
2106 if (!isPowerOf2_32(BitWidth))
2107 break;
2108 APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
2109 KnownBits Op2Known(BitWidth);
2110 if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
2111 return &CI;
2112 break;
2113 }
2114 case Intrinsic::ptrmask: {
2115 unsigned BitWidth = DL.getPointerTypeSizeInBits(II->getType());
2116 KnownBits Known(BitWidth);
2117 if (SimplifyDemandedInstructionBits(*II, Known))
2118 return II;
2119
2120 Value *InnerPtr, *InnerMask;
2121 bool Changed = false;
2122 // Combine:
2123 // (ptrmask (ptrmask p, A), B)
2124 // -> (ptrmask p, (and A, B))
2125 if (match(II->getArgOperand(0),
2126 m_OneUse(m_Intrinsic<Intrinsic::ptrmask>(m_Value(InnerPtr),
2127 m_Value(InnerMask))))) {
2128 assert(II->getArgOperand(1)->getType() == InnerMask->getType() &&
2129 "Mask types must match");
2130 // TODO: If InnerMask == Op1, we could copy attributes from inner
2131 // callsite -> outer callsite.
2132 Value *NewMask = Builder.CreateAnd(II->getArgOperand(1), InnerMask);
2133 replaceOperand(CI, 0, InnerPtr);
2134 replaceOperand(CI, 1, NewMask);
2135 Changed = true;
2136 }
2137
2138 // See if we can deduce non-null.
2139 if (!CI.hasRetAttr(Attribute::NonNull) &&
2140 (Known.isNonZero() ||
2141 isKnownNonZero(II, getSimplifyQuery().getWithInstruction(II)))) {
2142 CI.addRetAttr(Attribute::NonNull);
2143 Changed = true;
2144 }
2145
2146 unsigned NewAlignmentLog =
2147 std::min(Value::MaxAlignmentExponent,
2148 std::min(BitWidth - 1, Known.countMinTrailingZeros()));
2149     // Known bits will capture whether we had alignment information associated
2150     // with the pointer argument.
2151 if (NewAlignmentLog > Log2(CI.getRetAlign().valueOrOne())) {
2152 CI.addRetAttr(Attribute::getWithAlignment(
2153 CI.getContext(), Align(uint64_t(1) << NewAlignmentLog)));
2154 Changed = true;
2155 }
2156 if (Changed)
2157 return &CI;
2158 break;
2159 }
2160 case Intrinsic::uadd_with_overflow:
2161 case Intrinsic::sadd_with_overflow: {
2162 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2163 return I;
2164
2165 // Given 2 constant operands whose sum does not overflow:
2166 // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
2167 // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
2168 Value *X;
2169 const APInt *C0, *C1;
2170 Value *Arg0 = II->getArgOperand(0);
2171 Value *Arg1 = II->getArgOperand(1);
2172 bool IsSigned = IID == Intrinsic::sadd_with_overflow;
2173 bool HasNWAdd = IsSigned
2174 ? match(Arg0, m_NSWAddLike(m_Value(X), m_APInt(C0)))
2175 : match(Arg0, m_NUWAddLike(m_Value(X), m_APInt(C0)));
2176 if (HasNWAdd && match(Arg1, m_APInt(C1))) {
2177 bool Overflow;
2178 APInt NewC =
2179 IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
2180 if (!Overflow)
2181 return replaceInstUsesWith(
2182 *II, Builder.CreateBinaryIntrinsic(
2183 IID, X, ConstantInt::get(Arg1->getType(), NewC)));
2184 }
2185 break;
2186 }
2187
2188 case Intrinsic::umul_with_overflow:
2189 case Intrinsic::smul_with_overflow:
2190 case Intrinsic::usub_with_overflow:
2191 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2192 return I;
2193 break;
2194
2195 case Intrinsic::ssub_with_overflow: {
2196 if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
2197 return I;
2198
2199 Constant *C;
2200 Value *Arg0 = II->getArgOperand(0);
2201 Value *Arg1 = II->getArgOperand(1);
2202 // Given a constant C that is not the minimum signed value
2203 // for an integer of a given bit width:
2204 //
2205 // ssubo X, C -> saddo X, -C
2206 if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
2207 Value *NegVal = ConstantExpr::getNeg(C);
2208 // Build a saddo call that is equivalent to the discovered
2209 // ssubo call.
2210 return replaceInstUsesWith(
2211 *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
2212 Arg0, NegVal));
2213 }
2214
2215 break;
2216 }
2217
2218 case Intrinsic::uadd_sat:
2219 case Intrinsic::sadd_sat:
2220 case Intrinsic::usub_sat:
2221 case Intrinsic::ssub_sat: {
2222 SaturatingInst *SI = cast<SaturatingInst>(II);
2223 Type *Ty = SI->getType();
2224 Value *Arg0 = SI->getLHS();
2225 Value *Arg1 = SI->getRHS();
2226
2227 // Make use of known overflow information.
2228 OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
2229 Arg0, Arg1, SI);
2230 switch (OR) {
2231 case OverflowResult::MayOverflow:
2232 break;
2233 case OverflowResult::NeverOverflows:
2234 if (SI->isSigned())
2235 return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
2236 else
2237 return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
2238 case OverflowResult::AlwaysOverflowsLow: {
2239 unsigned BitWidth = Ty->getScalarSizeInBits();
2240 APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
2241 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
2242 }
2243 case OverflowResult::AlwaysOverflowsHigh: {
2244 unsigned BitWidth = Ty->getScalarSizeInBits();
2245 APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
2246 return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
2247 }
2248 }
2249
2250     // usub_sat((sub nuw C, A), C1) -> usub_sat(usub_sat(C, C1), A), which after
2251     // constant-folding the inner usub_sat becomes:
2252     // usub_sat((sub nuw C, A), C1) -> usub_sat(C - C1, A) if C1 u< C
2253     // usub_sat((sub nuw C, A), C1) -> 0 otherwise
2254 Constant *C, *C1;
2255 Value *A;
2256 if (IID == Intrinsic::usub_sat &&
2257 match(Arg0, m_NUWSub(m_ImmConstant(C), m_Value(A))) &&
2258 match(Arg1, m_ImmConstant(C1))) {
2259 auto *NewC = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, C, C1);
2260 auto *NewSub =
2261 Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, NewC, A);
2262 return replaceInstUsesWith(*SI, NewSub);
2263 }
2264
2265 // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
2266 if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
2267 C->isNotMinSignedValue()) {
2268 Value *NegVal = ConstantExpr::getNeg(C);
2269 return replaceInstUsesWith(
2270 *II, Builder.CreateBinaryIntrinsic(
2271 Intrinsic::sadd_sat, Arg0, NegVal));
2272 }
2273
2274 // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
2275 // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
2276 // if Val and Val2 have the same sign
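    // For example (illustrative):
    //   sadd.sat(sadd.sat(X, 10), 20) --> sadd.sat(X, 30)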
2277 if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
2278 Value *X;
2279 const APInt *Val, *Val2;
2280 APInt NewVal;
2281 bool IsUnsigned =
2282 IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
2283 if (Other->getIntrinsicID() == IID &&
2284 match(Arg1, m_APInt(Val)) &&
2285 match(Other->getArgOperand(0), m_Value(X)) &&
2286 match(Other->getArgOperand(1), m_APInt(Val2))) {
2287 if (IsUnsigned)
2288 NewVal = Val->uadd_sat(*Val2);
2289 else if (Val->isNonNegative() == Val2->isNonNegative()) {
2290 bool Overflow;
2291 NewVal = Val->sadd_ov(*Val2, Overflow);
2292 if (Overflow) {
2293 // Both adds together may add more than SignedMaxValue
2294 // without saturating the final result.
2295 break;
2296 }
2297 } else {
2298 // Cannot fold saturated addition with different signs.
2299 break;
2300 }
2301
2302 return replaceInstUsesWith(
2303 *II, Builder.CreateBinaryIntrinsic(
2304 IID, X, ConstantInt::get(II->getType(), NewVal)));
2305 }
2306 }
2307 break;
2308 }
2309
2310 case Intrinsic::minnum:
2311 case Intrinsic::maxnum:
2312 case Intrinsic::minimum:
2313 case Intrinsic::maximum: {
2314 Value *Arg0 = II->getArgOperand(0);
2315 Value *Arg1 = II->getArgOperand(1);
2316 Value *X, *Y;
2317 if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
2318 (Arg0->hasOneUse() || Arg1->hasOneUse())) {
2319 // If both operands are negated, invert the call and negate the result:
2320 // min(-X, -Y) --> -(max(X, Y))
2321 // max(-X, -Y) --> -(min(X, Y))
2322 Intrinsic::ID NewIID;
2323 switch (IID) {
2324 case Intrinsic::maxnum:
2325 NewIID = Intrinsic::minnum;
2326 break;
2327 case Intrinsic::minnum:
2328 NewIID = Intrinsic::maxnum;
2329 break;
2330 case Intrinsic::maximum:
2331 NewIID = Intrinsic::minimum;
2332 break;
2333 case Intrinsic::minimum:
2334 NewIID = Intrinsic::maximum;
2335 break;
2336 default:
2337 llvm_unreachable("unexpected intrinsic ID");
2338 }
2339 Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
2340 Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
2341 FNeg->copyIRFlags(II);
2342 return FNeg;
2343 }
2344
2345 // m(m(X, C2), C1) -> m(X, C)
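    // For example (illustrative): maxnum(maxnum(X, 2.0), 1.0) --> maxnum(X, 2.0),
    // since the folded constant is maxnum(1.0, 2.0) == 2.0.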
2346 const APFloat *C1, *C2;
2347 if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
2348 if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
2349 ((match(M->getArgOperand(0), m_Value(X)) &&
2350 match(M->getArgOperand(1), m_APFloat(C2))) ||
2351 (match(M->getArgOperand(1), m_Value(X)) &&
2352 match(M->getArgOperand(0), m_APFloat(C2))))) {
2353 APFloat Res(0.0);
2354 switch (IID) {
2355 case Intrinsic::maxnum:
2356 Res = maxnum(*C1, *C2);
2357 break;
2358 case Intrinsic::minnum:
2359 Res = minnum(*C1, *C2);
2360 break;
2361 case Intrinsic::maximum:
2362 Res = maximum(*C1, *C2);
2363 break;
2364 case Intrinsic::minimum:
2365 Res = minimum(*C1, *C2);
2366 break;
2367 default:
2368 llvm_unreachable("unexpected intrinsic ID");
2369 }
2370 Value *V = Builder.CreateBinaryIntrinsic(
2371 IID, X, ConstantFP::get(Arg0->getType(), Res), II);
2372 // TODO: Conservatively intersecting FMF. If Res == C2, the transform
2373 // was a simplification (so Arg0 and its original flags could
2374 // propagate?)
2375 if (auto *CI = dyn_cast<CallInst>(V))
2376 CI->andIRFlags(M);
2377 return replaceInstUsesWith(*II, V);
2378 }
2379 }
2380
2381 // m((fpext X), (fpext Y)) -> fpext (m(X, Y))
2382 if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) &&
2383 match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) &&
2384 X->getType() == Y->getType()) {
2385 Value *NewCall =
2386 Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
2387 return new FPExtInst(NewCall, II->getType());
2388 }
2389
2390 // max X, -X --> fabs X
2391 // min X, -X --> -(fabs X)
2392 // TODO: Remove one-use limitation? That is obviously better for max,
2393 // hence why we don't check for one-use for that. However,
2394 // it would be an extra instruction for min (fnabs), but
2395 // that is still likely better for analysis and codegen.
2396 auto IsMinMaxOrXNegX = [IID, &X](Value *Op0, Value *Op1) {
2397 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Specific(X)))
2398 return Op0->hasOneUse() ||
2399 (IID != Intrinsic::minimum && IID != Intrinsic::minnum);
2400 return false;
2401 };
2402
2403 if (IsMinMaxOrXNegX(Arg0, Arg1) || IsMinMaxOrXNegX(Arg1, Arg0)) {
2404 Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
2405 if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
2406 R = Builder.CreateFNegFMF(R, II);
2407 return replaceInstUsesWith(*II, R);
2408 }
2409
2410 break;
2411 }
2412 case Intrinsic::matrix_multiply: {
2413 // Optimize negation in matrix multiplication.
2414
2415 // -A * -B -> A * B
2416 Value *A, *B;
2417 if (match(II->getArgOperand(0), m_FNeg(m_Value(A))) &&
2418 match(II->getArgOperand(1), m_FNeg(m_Value(B)))) {
2419 replaceOperand(*II, 0, A);
2420 replaceOperand(*II, 1, B);
2421 return II;
2422 }
2423
2424 Value *Op0 = II->getOperand(0);
2425 Value *Op1 = II->getOperand(1);
2426 Value *OpNotNeg, *NegatedOp;
2427 unsigned NegatedOpArg, OtherOpArg;
2428 if (match(Op0, m_FNeg(m_Value(OpNotNeg)))) {
2429 NegatedOp = Op0;
2430 NegatedOpArg = 0;
2431 OtherOpArg = 1;
2432 } else if (match(Op1, m_FNeg(m_Value(OpNotNeg)))) {
2433 NegatedOp = Op1;
2434 NegatedOpArg = 1;
2435 OtherOpArg = 0;
2436 } else
2437 // Multiplication doesn't have a negated operand.
2438 break;
2439
2440 // Only optimize if the negated operand has only one use.
2441 if (!NegatedOp->hasOneUse())
2442 break;
2443
2444 Value *OtherOp = II->getOperand(OtherOpArg);
2445 VectorType *RetTy = cast<VectorType>(II->getType());
2446 VectorType *NegatedOpTy = cast<VectorType>(NegatedOp->getType());
2447 VectorType *OtherOpTy = cast<VectorType>(OtherOp->getType());
2448 ElementCount NegatedCount = NegatedOpTy->getElementCount();
2449 ElementCount OtherCount = OtherOpTy->getElementCount();
2450 ElementCount RetCount = RetTy->getElementCount();
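    // The cost of the fneg scales with the number of vector elements, so move
    // the negation to whichever value (the other operand or the result) has
    // fewer elements.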
2451 // (-A) * B -> A * (-B), if it is cheaper to negate B and vice versa.
2452 if (ElementCount::isKnownGT(NegatedCount, OtherCount) &&
2453 ElementCount::isKnownLT(OtherCount, RetCount)) {
2454 Value *InverseOtherOp = Builder.CreateFNeg(OtherOp);
2455 replaceOperand(*II, NegatedOpArg, OpNotNeg);
2456 replaceOperand(*II, OtherOpArg, InverseOtherOp);
2457 return II;
2458 }
2459 // (-A) * B -> -(A * B), if it is cheaper to negate the result
2460 if (ElementCount::isKnownGT(NegatedCount, RetCount)) {
2461 SmallVector<Value *, 5> NewArgs(II->args());
2462 NewArgs[NegatedOpArg] = OpNotNeg;
2463 Instruction *NewMul =
2464 Builder.CreateIntrinsic(II->getType(), IID, NewArgs, II);
2465 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(NewMul, II));
2466 }
2467 break;
2468 }
2469 case Intrinsic::fmuladd: {
2470 // Try to simplify the underlying FMul.
2471 if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
2472 II->getFastMathFlags(),
2473 SQ.getWithInstruction(II))) {
2474 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2475 FAdd->copyFastMathFlags(II);
2476 return FAdd;
2477 }
2478
2479 [[fallthrough]];
2480 }
2481 case Intrinsic::fma: {
2482 // fma fneg(x), fneg(y), z -> fma x, y, z
2483 Value *Src0 = II->getArgOperand(0);
2484 Value *Src1 = II->getArgOperand(1);
2485 Value *X, *Y;
2486 if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
2487 replaceOperand(*II, 0, X);
2488 replaceOperand(*II, 1, Y);
2489 return II;
2490 }
2491
2492 // fma fabs(x), fabs(x), z -> fma x, x, z
2493 if (match(Src0, m_FAbs(m_Value(X))) &&
2494 match(Src1, m_FAbs(m_Specific(X)))) {
2495 replaceOperand(*II, 0, X);
2496 replaceOperand(*II, 1, X);
2497 return II;
2498 }
2499
2500 // Try to simplify the underlying FMul. We can only apply simplifications
2501 // that do not require rounding.
2502 if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
2503 II->getFastMathFlags(),
2504 SQ.getWithInstruction(II))) {
2505 auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
2506 FAdd->copyFastMathFlags(II);
2507 return FAdd;
2508 }
2509
2510 // fma x, y, 0 -> fmul x, y
2511 // This is always valid for -0.0, but requires nsz for +0.0 as
2512 // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
2513 if (match(II->getArgOperand(2), m_NegZeroFP()) ||
2514 (match(II->getArgOperand(2), m_PosZeroFP()) &&
2515 II->getFastMathFlags().noSignedZeros()))
2516 return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
2517
2518 break;
2519 }
2520 case Intrinsic::copysign: {
2521 Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
2522 if (std::optional<bool> KnownSignBit = computeKnownFPSignBit(
2523 Sign, /*Depth=*/0, getSimplifyQuery().getWithInstruction(II))) {
2524 if (*KnownSignBit) {
2525 // If we know that the sign argument is negative, reduce to FNABS:
2526 // copysign Mag, -Sign --> fneg (fabs Mag)
2527 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
2528 return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
2529 }
2530
2531 // If we know that the sign argument is positive, reduce to FABS:
2532 // copysign Mag, +Sign --> fabs Mag
2533 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
2534 return replaceInstUsesWith(*II, Fabs);
2535 }
2536
2537 // Propagate sign argument through nested calls:
2538 // copysign Mag, (copysign ?, X) --> copysign Mag, X
2539 Value *X;
2540 if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
2541 return replaceOperand(*II, 1, X);
2542
2543 // Clear sign-bit of constant magnitude:
2544 // copysign -MagC, X --> copysign MagC, X
2545 // TODO: Support constant folding for fabs
2546 const APFloat *MagC;
2547 if (match(Mag, m_APFloat(MagC)) && MagC->isNegative()) {
2548 APFloat PosMagC = *MagC;
2549 PosMagC.clearSign();
2550 return replaceOperand(*II, 0, ConstantFP::get(Mag->getType(), PosMagC));
2551 }
2552
2553 // Peek through changes of magnitude's sign-bit. This call rewrites those:
2554 // copysign (fabs X), Sign --> copysign X, Sign
2555 // copysign (fneg X), Sign --> copysign X, Sign
2556 if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
2557 return replaceOperand(*II, 0, X);
2558
2559 break;
2560 }
2561 case Intrinsic::fabs: {
2562 Value *Cond, *TVal, *FVal;
2563 Value *Arg = II->getArgOperand(0);
2564 Value *X;
2565 // fabs (-X) --> fabs (X)
2566 if (match(Arg, m_FNeg(m_Value(X)))) {
2567 CallInst *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
2568 return replaceInstUsesWith(CI, Fabs);
2569 }
2570
2571 if (match(Arg, m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
2572 // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
2573 if (isa<Constant>(TVal) || isa<Constant>(FVal)) {
2574 CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
2575 CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
2576 SelectInst *SI = SelectInst::Create(Cond, AbsT, AbsF);
2577 FastMathFlags FMF1 = II->getFastMathFlags();
2578 FastMathFlags FMF2 = cast<SelectInst>(Arg)->getFastMathFlags();
2579 FMF2.setNoSignedZeros(false);
2580 SI->setFastMathFlags(FMF1 | FMF2);
2581 return SI;
2582 }
2583 // fabs (select Cond, -FVal, FVal) --> fabs FVal
2584 if (match(TVal, m_FNeg(m_Specific(FVal))))
2585 return replaceOperand(*II, 0, FVal);
2586 // fabs (select Cond, TVal, -TVal) --> fabs TVal
2587 if (match(FVal, m_FNeg(m_Specific(TVal))))
2588 return replaceOperand(*II, 0, TVal);
2589 }
2590
2591 Value *Magnitude, *Sign;
2592 if (match(II->getArgOperand(0),
2593 m_CopySign(m_Value(Magnitude), m_Value(Sign)))) {
2594 // fabs (copysign x, y) -> (fabs x)
2595 CallInst *AbsSign =
2596 Builder.CreateCall(II->getCalledFunction(), {Magnitude});
2597 AbsSign->copyFastMathFlags(II);
2598 return replaceInstUsesWith(*II, AbsSign);
2599 }
2600
2601 [[fallthrough]];
2602 }
2603 case Intrinsic::ceil:
2604 case Intrinsic::floor:
2605 case Intrinsic::round:
2606 case Intrinsic::roundeven:
2607 case Intrinsic::nearbyint:
2608 case Intrinsic::rint:
2609 case Intrinsic::trunc: {
2610 Value *ExtSrc;
2611 if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
2612 // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
2613 Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
2614 return new FPExtInst(NarrowII, II->getType());
2615 }
2616 break;
2617 }
2618 case Intrinsic::cos:
2619 case Intrinsic::amdgcn_cos: {
2620 Value *X, *Sign;
2621 Value *Src = II->getArgOperand(0);
2622 if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X))) ||
2623 match(Src, m_CopySign(m_Value(X), m_Value(Sign)))) {
2624 // cos(-x) --> cos(x)
2625 // cos(fabs(x)) --> cos(x)
2626 // cos(copysign(x, y)) --> cos(x)
2627 return replaceOperand(*II, 0, X);
2628 }
2629 break;
2630 }
2631 case Intrinsic::sin:
2632 case Intrinsic::amdgcn_sin: {
2633 Value *X;
2634 if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
2635 // sin(-x) --> -sin(x)
2636 Value *NewSin = Builder.CreateUnaryIntrinsic(IID, X, II);
2637 return UnaryOperator::CreateFNegFMF(NewSin, II);
2638 }
2639 break;
2640 }
2641 case Intrinsic::ldexp: {
2642 // ldexp(ldexp(x, a), b) -> ldexp(x, a + b)
2643 //
2644 // The danger is if the first ldexp would overflow to infinity or underflow
2645 // to zero, but the combined exponent avoids it. We ignore this with
2646 // reassoc.
2647 //
2648 // It's also safe to fold if we know both exponents are >= 0 or <= 0 since
2649 // it would just double down on the overflow/underflow which would occur
2650 // anyway.
2651 //
2652 // TODO: Could do better if we had range tracking for the input value
2653 // exponent. Also could broaden sign check to cover == 0 case.
2654 Value *Src = II->getArgOperand(0);
2655 Value *Exp = II->getArgOperand(1);
2656 Value *InnerSrc;
2657 Value *InnerExp;
2658 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ldexp>(
2659 m_Value(InnerSrc), m_Value(InnerExp)))) &&
2660 Exp->getType() == InnerExp->getType()) {
2661 FastMathFlags FMF = II->getFastMathFlags();
2662 FastMathFlags InnerFlags = cast<FPMathOperator>(Src)->getFastMathFlags();
2663
2664 if ((FMF.allowReassoc() && InnerFlags.allowReassoc()) ||
2665 signBitMustBeTheSame(Exp, InnerExp, SQ.getWithInstruction(II))) {
2666 // TODO: Add nsw/nuw probably safe if integer type exceeds exponent
2667 // width.
2668 Value *NewExp = Builder.CreateAdd(InnerExp, Exp);
2669 II->setArgOperand(1, NewExp);
2670 II->setFastMathFlags(InnerFlags); // Or the inner flags.
2671 return replaceOperand(*II, 0, InnerSrc);
2672 }
2673 }
2674
2675 // ldexp(x, zext(i1 y)) -> fmul x, (select y, 2.0, 1.0)
2676 // ldexp(x, sext(i1 y)) -> fmul x, (select y, 0.5, 1.0)
2677 Value *ExtSrc;
2678 if (match(Exp, m_ZExt(m_Value(ExtSrc))) &&
2679 ExtSrc->getType()->getScalarSizeInBits() == 1) {
2680 Value *Select =
2681 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 2.0),
2682 ConstantFP::get(II->getType(), 1.0));
2683 return BinaryOperator::CreateFMulFMF(Src, Select, II);
2684 }
2685 if (match(Exp, m_SExt(m_Value(ExtSrc))) &&
2686 ExtSrc->getType()->getScalarSizeInBits() == 1) {
2687 Value *Select =
2688 Builder.CreateSelect(ExtSrc, ConstantFP::get(II->getType(), 0.5),
2689 ConstantFP::get(II->getType(), 1.0));
2690 return BinaryOperator::CreateFMulFMF(Src, Select, II);
2691 }
2692
2693 // ldexp(x, c ? exp : 0) -> c ? ldexp(x, exp) : x
2694 // ldexp(x, c ? 0 : exp) -> c ? x : ldexp(x, exp)
2695     //
2696 // TODO: If we cared, should insert a canonicalize for x
2697 Value *SelectCond, *SelectLHS, *SelectRHS;
2698 if (match(II->getArgOperand(1),
2699 m_OneUse(m_Select(m_Value(SelectCond), m_Value(SelectLHS),
2700 m_Value(SelectRHS))))) {
2701 Value *NewLdexp = nullptr;
2702 Value *Select = nullptr;
2703 if (match(SelectRHS, m_ZeroInt())) {
2704 NewLdexp = Builder.CreateLdexp(Src, SelectLHS);
2705 Select = Builder.CreateSelect(SelectCond, NewLdexp, Src);
2706 } else if (match(SelectLHS, m_ZeroInt())) {
2707 NewLdexp = Builder.CreateLdexp(Src, SelectRHS);
2708 Select = Builder.CreateSelect(SelectCond, Src, NewLdexp);
2709 }
2710
2711 if (NewLdexp) {
2712 Select->takeName(II);
2713 cast<Instruction>(NewLdexp)->copyFastMathFlags(II);
2714 return replaceInstUsesWith(*II, Select);
2715 }
2716 }
2717
2718 break;
2719 }
2720 case Intrinsic::ptrauth_auth:
2721 case Intrinsic::ptrauth_resign: {
2722 // (sign|resign) + (auth|resign) can be folded by omitting the middle
2723 // sign+auth component if the key and discriminator match.
2724 bool NeedSign = II->getIntrinsicID() == Intrinsic::ptrauth_resign;
2725 Value *Ptr = II->getArgOperand(0);
2726 Value *Key = II->getArgOperand(1);
2727 Value *Disc = II->getArgOperand(2);
2728
2729 // AuthKey will be the key we need to end up authenticating against in
2730 // whatever we replace this sequence with.
2731 Value *AuthKey = nullptr, *AuthDisc = nullptr, *BasePtr;
2732 if (const auto *CI = dyn_cast<CallBase>(Ptr)) {
2733 BasePtr = CI->getArgOperand(0);
2734 if (CI->getIntrinsicID() == Intrinsic::ptrauth_sign) {
2735 if (CI->getArgOperand(1) != Key || CI->getArgOperand(2) != Disc)
2736 break;
2737 } else if (CI->getIntrinsicID() == Intrinsic::ptrauth_resign) {
2738 if (CI->getArgOperand(3) != Key || CI->getArgOperand(4) != Disc)
2739 break;
2740 AuthKey = CI->getArgOperand(1);
2741 AuthDisc = CI->getArgOperand(2);
2742 } else
2743 break;
2744 } else if (const auto *PtrToInt = dyn_cast<PtrToIntOperator>(Ptr)) {
2745 // ptrauth constants are equivalent to a call to @llvm.ptrauth.sign for
2746 // our purposes, so check for that too.
2747 const auto *CPA = dyn_cast<ConstantPtrAuth>(PtrToInt->getOperand(0));
2748 if (!CPA || !CPA->isKnownCompatibleWith(Key, Disc, DL))
2749 break;
2750
2751 // resign(ptrauth(p,ks,ds),ks,ds,kr,dr) -> ptrauth(p,kr,dr)
2752 if (NeedSign && isa<ConstantInt>(II->getArgOperand(4))) {
2753 auto *SignKey = cast<ConstantInt>(II->getArgOperand(3));
2754 auto *SignDisc = cast<ConstantInt>(II->getArgOperand(4));
2755 auto *SignAddrDisc = ConstantPointerNull::get(Builder.getPtrTy());
2756 auto *NewCPA = ConstantPtrAuth::get(CPA->getPointer(), SignKey,
2757 SignDisc, SignAddrDisc);
2758 replaceInstUsesWith(
2759 *II, ConstantExpr::getPointerCast(NewCPA, II->getType()));
2760 return eraseInstFromFunction(*II);
2761 }
2762
2763 // auth(ptrauth(p,k,d),k,d) -> p
2764 BasePtr = Builder.CreatePtrToInt(CPA->getPointer(), II->getType());
2765 } else
2766 break;
2767
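    // BasePtr is now the pointer beneath the inner sign/resign (or the pointer
    // held by a ptrauth constant). Pick the replacement intrinsic: it still
    // needs an auth component iff AuthKey is set, and a sign component iff
    // NeedSign is set.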
2768 unsigned NewIntrin;
2769 if (AuthKey && NeedSign) {
2770 // resign(0,1) + resign(1,2) = resign(0, 2)
2771 NewIntrin = Intrinsic::ptrauth_resign;
2772 } else if (AuthKey) {
2773 // resign(0,1) + auth(1) = auth(0)
2774 NewIntrin = Intrinsic::ptrauth_auth;
2775 } else if (NeedSign) {
2776 // sign(0) + resign(0, 1) = sign(1)
2777 NewIntrin = Intrinsic::ptrauth_sign;
2778 } else {
2779 // sign(0) + auth(0) = nop
2780 replaceInstUsesWith(*II, BasePtr);
2781 return eraseInstFromFunction(*II);
2782 }
2783
2784 SmallVector<Value *, 4> CallArgs;
2785 CallArgs.push_back(BasePtr);
2786 if (AuthKey) {
2787 CallArgs.push_back(AuthKey);
2788 CallArgs.push_back(AuthDisc);
2789 }
2790
2791 if (NeedSign) {
2792 CallArgs.push_back(II->getArgOperand(3));
2793 CallArgs.push_back(II->getArgOperand(4));
2794 }
2795
2796 Function *NewFn = Intrinsic::getDeclaration(II->getModule(), NewIntrin);
2797 return CallInst::Create(NewFn, CallArgs);
2798 }
2799 case Intrinsic::arm_neon_vtbl1:
2800 case Intrinsic::aarch64_neon_tbl1:
2801 if (Value *V = simplifyNeonTbl1(*II, Builder))
2802 return replaceInstUsesWith(*II, V);
2803 break;
2804
2805 case Intrinsic::arm_neon_vmulls:
2806 case Intrinsic::arm_neon_vmullu:
2807 case Intrinsic::aarch64_neon_smull:
2808 case Intrinsic::aarch64_neon_umull: {
2809 Value *Arg0 = II->getArgOperand(0);
2810 Value *Arg1 = II->getArgOperand(1);
2811
2812 // Handle mul by zero first:
2813 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
2814 return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
2815 }
2816
2817 // Check for constant LHS & RHS - in this case we just simplify.
2818 bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
2819 IID == Intrinsic::aarch64_neon_umull);
2820 VectorType *NewVT = cast<VectorType>(II->getType());
2821 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2822 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2823 Value *V0 = Builder.CreateIntCast(CV0, NewVT, /*isSigned=*/!Zext);
2824 Value *V1 = Builder.CreateIntCast(CV1, NewVT, /*isSigned=*/!Zext);
2825 return replaceInstUsesWith(CI, Builder.CreateMul(V0, V1));
2826 }
2827
2828 // Couldn't simplify - canonicalize constant to the RHS.
2829 std::swap(Arg0, Arg1);
2830 }
2831
2832 // Handle mul by one:
2833 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
2834 if (ConstantInt *Splat =
2835 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
2836 if (Splat->isOne())
2837 return CastInst::CreateIntegerCast(Arg0, II->getType(),
2838 /*isSigned=*/!Zext);
2839
2840 break;
2841 }
2842 case Intrinsic::arm_neon_aesd:
2843 case Intrinsic::arm_neon_aese:
2844 case Intrinsic::aarch64_crypto_aesd:
2845 case Intrinsic::aarch64_crypto_aese: {
2846 Value *DataArg = II->getArgOperand(0);
2847 Value *KeyArg = II->getArgOperand(1);
2848
2849 // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
2850 Value *Data, *Key;
2851 if (match(KeyArg, m_ZeroInt()) &&
2852 match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
2853 replaceOperand(*II, 0, Data);
2854 replaceOperand(*II, 1, Key);
2855 return II;
2856 }
2857 break;
2858 }
2859 case Intrinsic::hexagon_V6_vandvrt:
2860 case Intrinsic::hexagon_V6_vandvrt_128B: {
2861 // Simplify Q -> V -> Q conversion.
2862 if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2863 Intrinsic::ID ID0 = Op0->getIntrinsicID();
2864 if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
2865 ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
2866 break;
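      // vandqrt expands the predicate Q into a byte vector using Bytes, and
      // vandvrt converts a byte vector back into a predicate using Mask. If
      // each byte of Bytes & Mask is known to contain a set bit, the round
      // trip reproduces Q, so the outer conversion can be dropped.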
2867 Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
2868 uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
2869 uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
2870 // Check if every byte has common bits in Bytes and Mask.
2871 uint64_t C = Bytes1 & Mask1;
2872 if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
2873 return replaceInstUsesWith(*II, Op0->getArgOperand(0));
2874 }
2875 break;
2876 }
2877 case Intrinsic::stackrestore: {
2878 enum class ClassifyResult {
2879 None,
2880 Alloca,
2881 StackRestore,
2882 CallWithSideEffects,
2883 };
2884 auto Classify = [](const Instruction *I) {
2885 if (isa<AllocaInst>(I))
2886 return ClassifyResult::Alloca;
2887
2888 if (auto *CI = dyn_cast<CallInst>(I)) {
2889 if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
2890 if (II->getIntrinsicID() == Intrinsic::stackrestore)
2891 return ClassifyResult::StackRestore;
2892
2893 if (II->mayHaveSideEffects())
2894 return ClassifyResult::CallWithSideEffects;
2895 } else {
2896 // Consider all non-intrinsic calls to be side effects
2897 return ClassifyResult::CallWithSideEffects;
2898 }
2899 }
2900
2901 return ClassifyResult::None;
2902 };
2903
2904 // If the stacksave and the stackrestore are in the same BB, and there is
2905 // no intervening call, alloca, or stackrestore of a different stacksave,
2906 // remove the restore. This can happen when variable allocas are DCE'd.
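    // Illustrative shape of the removable pattern (sketch only):
    //   %sp = call ptr @llvm.stacksave()
    //   ... no allocas, side-effecting calls, or other stackrestores ...
    //   call void @llvm.stackrestore(ptr %sp)   ; <- removable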
2907 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2908 if (SS->getIntrinsicID() == Intrinsic::stacksave &&
2909 SS->getParent() == II->getParent()) {
2910 BasicBlock::iterator BI(SS);
2911 bool CannotRemove = false;
2912 for (++BI; &*BI != II; ++BI) {
2913 switch (Classify(&*BI)) {
2914 case ClassifyResult::None:
2915 // So far so good, look at next instructions.
2916 break;
2917
2918 case ClassifyResult::StackRestore:
2919 // If we found an intervening stackrestore for a different
2920 // stacksave, we can't remove the stackrestore. Otherwise, continue.
2921 if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
2922 CannotRemove = true;
2923 break;
2924
2925 case ClassifyResult::Alloca:
2926 case ClassifyResult::CallWithSideEffects:
2927 // If we found an alloca, a non-intrinsic call, or an intrinsic
2928 // call with side effects, we can't remove the stackrestore.
2929 CannotRemove = true;
2930 break;
2931 }
2932 if (CannotRemove)
2933 break;
2934 }
2935
2936 if (!CannotRemove)
2937 return eraseInstFromFunction(CI);
2938 }
2939 }
2940
2941 // Scan down this block to see if there is another stack restore in the
2942 // same block without an intervening call/alloca.
2943 BasicBlock::iterator BI(II);
2944 Instruction *TI = II->getParent()->getTerminator();
2945 bool CannotRemove = false;
2946 for (++BI; &*BI != TI; ++BI) {
2947 switch (Classify(&*BI)) {
2948 case ClassifyResult::None:
2949 // So far so good, look at next instructions.
2950 break;
2951
2952 case ClassifyResult::StackRestore:
2953 // If there is a stackrestore below this one, remove this one.
2954 return eraseInstFromFunction(CI);
2955
2956 case ClassifyResult::Alloca:
2957 case ClassifyResult::CallWithSideEffects:
2958 // If we found an alloca, a non-intrinsic call, or an intrinsic call
2959 // with side effects (such as llvm.stacksave and llvm.read_register),
2960 // we can't remove the stack restore.
2961 CannotRemove = true;
2962 break;
2963 }
2964 if (CannotRemove)
2965 break;
2966 }
2967
2968 // If the stack restore is in a return, resume, or unwind block and if there
2969 // are no allocas or calls between the restore and the return, nuke the
2970 // restore.
2971 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2972 return eraseInstFromFunction(CI);
2973 break;
2974 }
2975 case Intrinsic::lifetime_end:
2976 // Asan needs to poison memory to detect invalid access which is possible
2977 // even for empty lifetime range.
2978 if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
2979 II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
2980 II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
2981 break;
2982
2983 if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
2984 return I.getIntrinsicID() == Intrinsic::lifetime_start;
2985 }))
2986 return nullptr;
2987 break;
2988 case Intrinsic::assume: {
2989 Value *IIOperand = II->getArgOperand(0);
2990 SmallVector<OperandBundleDef, 4> OpBundles;
2991 II->getOperandBundlesAsDefs(OpBundles);
2992
2993 /// This will remove the boolean Condition from the assume given as
2994 /// argument and remove the assume if it becomes useless.
2995     /// Always returns nullptr so it can be used directly as a return value.
2996 auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
2997 assert(isa<AssumeInst>(Assume));
2998 if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
2999 return eraseInstFromFunction(CI);
3000 replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
3001 return nullptr;
3002 };
3003 // Remove an assume if it is followed by an identical assume.
3004 // TODO: Do we need this? Unless there are conflicting assumptions, the
3005     // computeKnownBits(IIOperand) call below eliminates redundant assumes.
3006 Instruction *Next = II->getNextNonDebugInstruction();
3007 if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
3008 return RemoveConditionFromAssume(Next);
3009
3010 // Canonicalize assume(a && b) -> assume(a); assume(b);
3011 // Note: New assumption intrinsics created here are registered by
3012 // the InstCombineIRInserter object.
3013 FunctionType *AssumeIntrinsicTy = II->getFunctionType();
3014 Value *AssumeIntrinsic = II->getCalledOperand();
3015 Value *A, *B;
3016 if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
3017 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
3018 II->getName());
3019 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
3020 return eraseInstFromFunction(*II);
3021 }
3022 // assume(!(a || b)) -> assume(!a); assume(!b);
3023 if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
3024 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3025 Builder.CreateNot(A), OpBundles, II->getName());
3026 Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
3027 Builder.CreateNot(B), II->getName());
3028 return eraseInstFromFunction(*II);
3029 }
3030
3031 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
3032 // (if assume is valid at the load)
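    // e.g. (illustrative):
    //   %p = load ptr, ptr %addr
    //   %c = icmp ne ptr %p, null
    //   call void @llvm.assume(i1 %c)
    // lets the load be tagged with !nonnull (and !noundef) metadata.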
3033 CmpInst::Predicate Pred;
3034 Instruction *LHS;
3035 if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
3036 Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
3037 LHS->getType()->isPointerTy() &&
3038 isValidAssumeForContext(II, LHS, &DT)) {
3039 MDNode *MD = MDNode::get(II->getContext(), std::nullopt);
3040 LHS->setMetadata(LLVMContext::MD_nonnull, MD);
3041 LHS->setMetadata(LLVMContext::MD_noundef, MD);
3042 return RemoveConditionFromAssume(II);
3043
3044 // TODO: apply nonnull return attributes to calls and invokes
3045 // TODO: apply range metadata for range check patterns?
3046 }
3047
3048 // Separate storage assumptions apply to the underlying allocations, not any
3049 // particular pointer within them. When evaluating the hints for AA purposes
3050     // we call getUnderlyingObject on them; by precomputing the answers here
3051     // we can avoid having to do so repeatedly there.
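    // e.g. (illustrative): a hint naming a GEP, as in
    //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %gep, ptr %q)]
    // is rewritten to name the GEP's underlying allocation instead, so alias
    // analysis does not have to strip the GEP on every query.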
3052 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
3053 OperandBundleUse OBU = II->getOperandBundleAt(Idx);
3054 if (OBU.getTagName() == "separate_storage") {
3055 assert(OBU.Inputs.size() == 2);
3056 auto MaybeSimplifyHint = [&](const Use &U) {
3057 Value *Hint = U.get();
3058 // Not having a limit is safe because InstCombine removes unreachable
3059 // code.
3060 Value *UnderlyingObject = getUnderlyingObject(Hint, /*MaxLookup*/ 0);
3061 if (Hint != UnderlyingObject)
3062 replaceUse(const_cast<Use &>(U), UnderlyingObject);
3063 };
3064 MaybeSimplifyHint(OBU.Inputs[0]);
3065 MaybeSimplifyHint(OBU.Inputs[1]);
3066 }
3067 }
3068
3069 // Convert nonnull assume like:
3070 // %A = icmp ne i32* %PTR, null
3071 // call void @llvm.assume(i1 %A)
3072 // into
3073 // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
3074 if (EnableKnowledgeRetention &&
3075 match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
3076 Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
3077 if (auto *Replacement = buildAssumeFromKnowledge(
3078 {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
3079
3080 Replacement->insertBefore(Next);
3081 AC.registerAssumption(Replacement);
3082 return RemoveConditionFromAssume(II);
3083 }
3084 }
3085
3086 // Convert alignment assume like:
3087 // %B = ptrtoint i32* %A to i64
3088 // %C = and i64 %B, Constant
3089 // %D = icmp eq i64 %C, 0
3090 // call void @llvm.assume(i1 %D)
3091 // into
3092 // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 Constant + 1)]
3093 uint64_t AlignMask;
3094 if (EnableKnowledgeRetention &&
3095 match(IIOperand,
3096 m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
3097 m_Zero())) &&
3098 Pred == CmpInst::ICMP_EQ) {
3099 if (isPowerOf2_64(AlignMask + 1)) {
3100 uint64_t Offset = 0;
3101 match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
3102 if (match(A, m_PtrToInt(m_Value(A)))) {
3103 /// Note: this doesn't preserve the offset information but merges
3104 /// offset and alignment.
3105 /// TODO: we can generate a GEP instead of merging the alignment with
3106 /// the offset.
3107 RetainedKnowledge RK{Attribute::Alignment,
3108 (unsigned)MinAlign(Offset, AlignMask + 1), A};
3109 if (auto *Replacement =
3110 buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
3111
3112 Replacement->insertAfter(II);
3113 AC.registerAssumption(Replacement);
3114 }
3115 return RemoveConditionFromAssume(II);
3116 }
3117 }
3118 }
3119
3120 /// Canonicalize Knowledge in operand bundles.
3121 if (EnableKnowledgeRetention && II->hasOperandBundles()) {
3122 for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
3123 auto &BOI = II->bundle_op_info_begin()[Idx];
3124 RetainedKnowledge RK =
3125 llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
3126 if (BOI.End - BOI.Begin > 2)
3127           continue; // Prevent reducing knowledge in an align with offset, since
3128                     // extracting a RetainedKnowledge from it loses the offset
3129                     // information.
3130 RetainedKnowledge CanonRK =
3131 llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
3132 &getAssumptionCache(),
3133 &getDominatorTree());
3134 if (CanonRK == RK)
3135 continue;
3136 if (!CanonRK) {
3137 if (BOI.End - BOI.Begin > 0) {
3138 Worklist.pushValue(II->op_begin()[BOI.Begin]);
3139 Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
3140 }
3141 continue;
3142 }
3143 assert(RK.AttrKind == CanonRK.AttrKind);
3144 if (BOI.End - BOI.Begin > 0)
3145 II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
3146 if (BOI.End - BOI.Begin > 1)
3147 II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
3148 Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
3149 if (RK.WasOn)
3150 Worklist.pushValue(RK.WasOn);
3151 return II;
3152 }
3153 }
3154
3155 // If there is a dominating assume with the same condition as this one,
3156 // then this one is redundant, and should be removed.
3157 KnownBits Known(1);
3158 computeKnownBits(IIOperand, Known, 0, II);
3159 if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
3160 return eraseInstFromFunction(*II);
3161
3162 // assume(false) is unreachable.
3163 if (match(IIOperand, m_CombineOr(m_Zero(), m_Undef()))) {
3164 CreateNonTerminatorUnreachable(II);
3165 return eraseInstFromFunction(*II);
3166 }
3167
3168 // Update the cache of affected values for this assumption (we might be
3169 // here because we just simplified the condition).
3170 AC.updateAffectedValues(cast<AssumeInst>(II));
3171 break;
3172 }
3173 case Intrinsic::experimental_guard: {
3174 // Is this guard followed by another guard? We scan forward over a small
3175 // fixed window of instructions to handle common cases with conditions
3176 // computed between guards.
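    // Sketch of the intended rewrite (illustrative values):
    //   call void (i1, ...) @llvm.experimental.guard(i1 %a) [ "deopt"() ]
    //   %b = icmp ult i32 %x, 10              ; speculatable, within the window
    //   call void (i1, ...) @llvm.experimental.guard(i1 %b) [ "deopt"() ]
    // becomes a single guard on (%a & %b), with %b's computation moved above
    // the remaining guard; identical back-to-back guards simply drop the second.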
3177 Instruction *NextInst = II->getNextNonDebugInstruction();
3178 for (unsigned i = 0; i < GuardWideningWindow; i++) {
3179 // Note: Using context-free form to avoid compile time blow up
3180 if (!isSafeToSpeculativelyExecute(NextInst))
3181 break;
3182 NextInst = NextInst->getNextNonDebugInstruction();
3183 }
3184 Value *NextCond = nullptr;
3185 if (match(NextInst,
3186 m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
3187 Value *CurrCond = II->getArgOperand(0);
3188
3189       // Remove a guard that is immediately preceded by an identical guard.
3190 // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
3191 if (CurrCond != NextCond) {
3192 Instruction *MoveI = II->getNextNonDebugInstruction();
3193 while (MoveI != NextInst) {
3194 auto *Temp = MoveI;
3195 MoveI = MoveI->getNextNonDebugInstruction();
3196 Temp->moveBefore(II);
3197 }
3198 replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
3199 }
3200 eraseInstFromFunction(*NextInst);
3201 return II;
3202 }
3203 break;
3204 }
3205 case Intrinsic::vector_insert: {
3206 Value *Vec = II->getArgOperand(0);
3207 Value *SubVec = II->getArgOperand(1);
3208 Value *Idx = II->getArgOperand(2);
3209 auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
3210 auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
3211 auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
3212
3213 // Only canonicalize if the destination vector, Vec, and SubVec are all
3214 // fixed vectors.
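    // Illustrative shape of the rewrite (fixed-width types, made-up values):
    //   llvm.vector.insert(<4 x i32> %vec, <2 x i32> %sub, i64 2)
    // first widens %sub to <4 x i32> with poison in the extra lanes, then emits
    //   shufflevector %vec, %widened, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
    // i.e. the original low lanes followed by the inserted ones.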
3215 if (DstTy && VecTy && SubVecTy) {
3216 unsigned DstNumElts = DstTy->getNumElements();
3217 unsigned VecNumElts = VecTy->getNumElements();
3218 unsigned SubVecNumElts = SubVecTy->getNumElements();
3219 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3220
3221 // An insert that entirely overwrites Vec with SubVec is a nop.
3222 if (VecNumElts == SubVecNumElts)
3223 return replaceInstUsesWith(CI, SubVec);
3224
3225 // Widen SubVec into a vector of the same width as Vec, since
3226 // shufflevector requires the two input vectors to be the same width.
3227 // Elements beyond the bounds of SubVec within the widened vector are
3228 // undefined.
3229 SmallVector<int, 8> WidenMask;
3230 unsigned i;
3231 for (i = 0; i != SubVecNumElts; ++i)
3232 WidenMask.push_back(i);
3233 for (; i != VecNumElts; ++i)
3234 WidenMask.push_back(PoisonMaskElem);
3235
3236 Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
3237
3238 SmallVector<int, 8> Mask;
3239 for (unsigned i = 0; i != IdxN; ++i)
3240 Mask.push_back(i);
3241 for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
3242 Mask.push_back(i);
3243 for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
3244 Mask.push_back(i);
3245
3246 Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
3247 return replaceInstUsesWith(CI, Shuffle);
3248 }
3249 break;
3250 }
3251 case Intrinsic::vector_extract: {
3252 Value *Vec = II->getArgOperand(0);
3253 Value *Idx = II->getArgOperand(1);
3254
3255 Type *ReturnType = II->getType();
3256 // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
3257 // ExtractIdx)
3258 unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
3259 Value *InsertTuple, *InsertIdx, *InsertValue;
3260 if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
3261 m_Value(InsertValue),
3262 m_Value(InsertIdx))) &&
3263 InsertValue->getType() == ReturnType) {
3264 unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
3265 // Case where we get the same index right after setting it.
3266 // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
3267 // InsertValue
3268 if (ExtractIdx == Index)
3269 return replaceInstUsesWith(CI, InsertValue);
3270 // If we are getting a different index than what was set in the
3271 // insert.vector intrinsic. We can just set the input tuple to the one up
3272 // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
3273 // InsertIndex), ExtractIndex)
3274 // --> extract.vector(InsertTuple, ExtractIndex)
3275 else
3276 return replaceOperand(CI, 0, InsertTuple);
3277 }
3278
3279 auto *DstTy = dyn_cast<VectorType>(ReturnType);
3280 auto *VecTy = dyn_cast<VectorType>(Vec->getType());
3281
3282 if (DstTy && VecTy) {
3283 auto DstEltCnt = DstTy->getElementCount();
3284 auto VecEltCnt = VecTy->getElementCount();
3285 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
3286
3287 // Extracting the entirety of Vec is a nop.
3288 if (DstEltCnt == VecTy->getElementCount()) {
3289 replaceInstUsesWith(CI, Vec);
3290 return eraseInstFromFunction(CI);
3291 }
3292
3293 // Only canonicalize to shufflevector if the destination vector and
3294 // Vec are fixed vectors.
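      // e.g. (illustrative): extracting <2 x i32> at index 2 from <4 x i32> %v
      // becomes shufflevector %v, poison, <2 x i32> <i32 2, i32 3>.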
3295 if (VecEltCnt.isScalable() || DstEltCnt.isScalable())
3296 break;
3297
3298 SmallVector<int, 8> Mask;
3299 for (unsigned i = 0; i != DstEltCnt.getKnownMinValue(); ++i)
3300 Mask.push_back(IdxN + i);
3301
3302 Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
3303 return replaceInstUsesWith(CI, Shuffle);
3304 }
3305 break;
3306 }
3307 case Intrinsic::vector_reverse: {
3308 Value *BO0, *BO1, *X, *Y;
3309 Value *Vec = II->getArgOperand(0);
3310 if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
3311 auto *OldBinOp = cast<BinaryOperator>(Vec);
3312 if (match(BO0, m_VecReverse(m_Value(X)))) {
3313 // rev(binop rev(X), rev(Y)) --> binop X, Y
3314 if (match(BO1, m_VecReverse(m_Value(Y))))
3315 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
3316 OldBinOp->getOpcode(), X, Y,
3317 OldBinOp, OldBinOp->getName(),
3318 II->getIterator()));
3319 // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
3320 if (isSplatValue(BO1))
3321 return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
3322 OldBinOp->getOpcode(), X, BO1,
3323 OldBinOp, OldBinOp->getName(),
3324 II->getIterator()));
3325 }
3326 // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
3327 if (match(BO1, m_VecReverse(m_Value(Y))) && isSplatValue(BO0))
3328 return replaceInstUsesWith(CI,
3329 BinaryOperator::CreateWithCopiedFlags(
3330 OldBinOp->getOpcode(), BO0, Y, OldBinOp,
3331 OldBinOp->getName(), II->getIterator()));
3332 }
3333 // rev(unop rev(X)) --> unop X
3334 if (match(Vec, m_OneUse(m_UnOp(m_VecReverse(m_Value(X)))))) {
3335 auto *OldUnOp = cast<UnaryOperator>(Vec);
3336 auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
3337 OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(),
3338 II->getIterator());
3339 return replaceInstUsesWith(CI, NewUnOp);
3340 }
3341 break;
3342 }
3343 case Intrinsic::vector_reduce_or:
3344 case Intrinsic::vector_reduce_and: {
3345 // Canonicalize logical or/and reductions:
3346 // Or reduction for i1 is represented as:
3347 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3348 // %res = cmp ne iReduxWidth %val, 0
3349 // And reduction for i1 is represented as:
3350 // %val = bitcast <ReduxWidth x i1> to iReduxWidth
3351     // %res = cmp eq iReduxWidth %val, -1 (all bits set)
3352 Value *Arg = II->getArgOperand(0);
3353 Value *Vect;
3354
3355 if (Value *NewOp =
3356 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3357 replaceUse(II->getOperandUse(0), NewOp);
3358 return II;
3359 }
3360
3361 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3362 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
3363 if (FTy->getElementType() == Builder.getInt1Ty()) {
3364 Value *Res = Builder.CreateBitCast(
3365 Vect, Builder.getIntNTy(FTy->getNumElements()));
3366 if (IID == Intrinsic::vector_reduce_and) {
3367 Res = Builder.CreateICmpEQ(
3368 Res, ConstantInt::getAllOnesValue(Res->getType()));
3369 } else {
3370 assert(IID == Intrinsic::vector_reduce_or &&
3371 "Expected or reduction.");
3372 Res = Builder.CreateIsNotNull(Res);
3373 }
3374 if (Arg != Vect)
3375 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3376 II->getType());
3377 return replaceInstUsesWith(CI, Res);
3378 }
3379 }
3380 [[fallthrough]];
3381 }
3382 case Intrinsic::vector_reduce_add: {
3383 if (IID == Intrinsic::vector_reduce_add) {
3384 // Convert vector_reduce_add(ZExt(<n x i1>)) to
3385 // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3386 // Convert vector_reduce_add(SExt(<n x i1>)) to
3387 // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
3388 // Convert vector_reduce_add(<n x i1>) to
3389 // Trunc(ctpop(bitcast <n x i1> to in)).
3390 Value *Arg = II->getArgOperand(0);
3391 Value *Vect;
3392
3393 if (Value *NewOp =
3394 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3395 replaceUse(II->getOperandUse(0), NewOp);
3396 return II;
3397 }
3398
3399 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3400 if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
3401 if (FTy->getElementType() == Builder.getInt1Ty()) {
3402 Value *V = Builder.CreateBitCast(
3403 Vect, Builder.getIntNTy(FTy->getNumElements()));
3404 Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
3405 if (Res->getType() != II->getType())
3406 Res = Builder.CreateZExtOrTrunc(Res, II->getType());
3407 if (Arg != Vect &&
3408 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
3409 Res = Builder.CreateNeg(Res);
3410 return replaceInstUsesWith(CI, Res);
3411 }
3412 }
3413 }
3414 [[fallthrough]];
3415 }
3416 case Intrinsic::vector_reduce_xor: {
3417 if (IID == Intrinsic::vector_reduce_xor) {
3418 // Exclusive disjunction reduction over the vector with
3419 // (potentially-extended) i1 element type is actually a
3420 // (potentially-extended) arithmetic `add` reduction over the original
3421 // non-extended value:
3422 // vector_reduce_xor(?ext(<n x i1>))
3423 // -->
3424 // ?ext(vector_reduce_add(<n x i1>))
3425 Value *Arg = II->getArgOperand(0);
3426 Value *Vect;
3427
3428 if (Value *NewOp =
3429 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3430 replaceUse(II->getOperandUse(0), NewOp);
3431 return II;
3432 }
3433
3434 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3435 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3436 if (VTy->getElementType() == Builder.getInt1Ty()) {
3437 Value *Res = Builder.CreateAddReduce(Vect);
3438 if (Arg != Vect)
3439 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3440 II->getType());
3441 return replaceInstUsesWith(CI, Res);
3442 }
3443 }
3444 }
3445 [[fallthrough]];
3446 }
3447 case Intrinsic::vector_reduce_mul: {
3448 if (IID == Intrinsic::vector_reduce_mul) {
3449 // Multiplicative reduction over the vector with (potentially-extended)
3450 // i1 element type is actually a (potentially zero-extended)
3451 // logical `and` reduction over the original non-extended value:
3452 // vector_reduce_mul(?ext(<n x i1>))
3453 // -->
3454 // zext(vector_reduce_and(<n x i1>))
3455 Value *Arg = II->getArgOperand(0);
3456 Value *Vect;
3457
3458 if (Value *NewOp =
3459 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3460 replaceUse(II->getOperandUse(0), NewOp);
3461 return II;
3462 }
3463
3464 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3465 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3466 if (VTy->getElementType() == Builder.getInt1Ty()) {
3467 Value *Res = Builder.CreateAndReduce(Vect);
3468 if (Res->getType() != II->getType())
3469 Res = Builder.CreateZExt(Res, II->getType());
3470 return replaceInstUsesWith(CI, Res);
3471 }
3472 }
3473 }
3474 [[fallthrough]];
3475 }
3476 case Intrinsic::vector_reduce_umin:
3477 case Intrinsic::vector_reduce_umax: {
3478 if (IID == Intrinsic::vector_reduce_umin ||
3479 IID == Intrinsic::vector_reduce_umax) {
3480 // UMin/UMax reduction over the vector with (potentially-extended)
3481 // i1 element type is actually a (potentially-extended)
3482 // logical `and`/`or` reduction over the original non-extended value:
3483 // vector_reduce_u{min,max}(?ext(<n x i1>))
3484 // -->
3485 // ?ext(vector_reduce_{and,or}(<n x i1>))
3486 Value *Arg = II->getArgOperand(0);
3487 Value *Vect;
3488
3489 if (Value *NewOp =
3490 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3491 replaceUse(II->getOperandUse(0), NewOp);
3492 return II;
3493 }
3494
3495 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3496 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3497 if (VTy->getElementType() == Builder.getInt1Ty()) {
3498 Value *Res = IID == Intrinsic::vector_reduce_umin
3499 ? Builder.CreateAndReduce(Vect)
3500 : Builder.CreateOrReduce(Vect);
3501 if (Arg != Vect)
3502 Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
3503 II->getType());
3504 return replaceInstUsesWith(CI, Res);
3505 }
3506 }
3507 }
3508 [[fallthrough]];
3509 }
3510 case Intrinsic::vector_reduce_smin:
3511 case Intrinsic::vector_reduce_smax: {
3512 if (IID == Intrinsic::vector_reduce_smin ||
3513 IID == Intrinsic::vector_reduce_smax) {
3514 // SMin/SMax reduction over the vector with (potentially-extended)
3515 // i1 element type is actually a (potentially-extended)
3516 // logical `and`/`or` reduction over the original non-extended value:
3517 // vector_reduce_s{min,max}(<n x i1>)
3518 // -->
3519 // vector_reduce_{or,and}(<n x i1>)
3520 // and
3521 // vector_reduce_s{min,max}(sext(<n x i1>))
3522 // -->
3523 // sext(vector_reduce_{or,and}(<n x i1>))
3524 // and
3525 // vector_reduce_s{min,max}(zext(<n x i1>))
3526 // -->
3527 // zext(vector_reduce_{and,or}(<n x i1>))
3528 Value *Arg = II->getArgOperand(0);
3529 Value *Vect;
3530
3531 if (Value *NewOp =
3532 simplifyReductionOperand(Arg, /*CanReorderLanes=*/true)) {
3533 replaceUse(II->getOperandUse(0), NewOp);
3534 return II;
3535 }
3536
3537 if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
3538 if (auto *VTy = dyn_cast<VectorType>(Vect->getType()))
3539 if (VTy->getElementType() == Builder.getInt1Ty()) {
3540 Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
3541 if (Arg != Vect)
3542 ExtOpc = cast<CastInst>(Arg)->getOpcode();
3543 Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
3544 (ExtOpc == Instruction::CastOps::ZExt))
3545 ? Builder.CreateAndReduce(Vect)
3546 : Builder.CreateOrReduce(Vect);
3547 if (Arg != Vect)
3548 Res = Builder.CreateCast(ExtOpc, Res, II->getType());
3549 return replaceInstUsesWith(CI, Res);
3550 }
3551 }
3552 }
3553 [[fallthrough]];
3554 }
3555 case Intrinsic::vector_reduce_fmax:
3556 case Intrinsic::vector_reduce_fmin:
3557 case Intrinsic::vector_reduce_fadd:
3558 case Intrinsic::vector_reduce_fmul: {
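    // For fadd/fmul the scalar start value is operand 0 and the vector being
    // reduced is operand 1, e.g. llvm.vector.reduce.fadd(float %start, <4 x float> %v),
    // and lanes may only be reordered when the call carries the reassoc flag.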
3559 bool CanReorderLanes = (IID != Intrinsic::vector_reduce_fadd &&
3560 IID != Intrinsic::vector_reduce_fmul) ||
3561 II->hasAllowReassoc();
3562 const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
3563 IID == Intrinsic::vector_reduce_fmul)
3564 ? 1
3565 : 0;
3566 Value *Arg = II->getArgOperand(ArgIdx);
3567 if (Value *NewOp = simplifyReductionOperand(Arg, CanReorderLanes)) {
3568 replaceUse(II->getOperandUse(ArgIdx), NewOp);
3569 return nullptr;
3570 }
3571 break;
3572 }
3573 case Intrinsic::is_fpclass: {
3574 if (Instruction *I = foldIntrinsicIsFPClass(*II))
3575 return I;
3576 break;
3577 }
3578 case Intrinsic::threadlocal_address: {
3579 Align MinAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
3580 MaybeAlign Align = II->getRetAlign();
3581 if (MinAlign > Align.valueOrOne()) {
3582 II->addRetAttr(Attribute::getWithAlignment(II->getContext(), MinAlign));
3583 return II;
3584 }
3585 break;
3586 }
3587 default: {
3588 // Handle target specific intrinsics
3589 std::optional<Instruction *> V = targetInstCombineIntrinsic(*II);
3590 if (V)
3591 return *V;
3592 break;
3593 }
3594 }
3595
3596 // Try to fold intrinsic into select operands. This is legal if:
3597 // * The intrinsic is speculatable.
3598 // * The select condition is not a vector, or the intrinsic does not
3599 // perform cross-lane operations.
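  // e.g. (illustrative):
  //   umin(select(%c, %a, %b), %x) --> select(%c, umin(%a, %x), umin(%b, %x))
  // which is valid because umin is speculatable and operates lane-wise.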
3600 switch (IID) {
3601 case Intrinsic::ctlz:
3602 case Intrinsic::cttz:
3603 case Intrinsic::ctpop:
3604 case Intrinsic::umin:
3605 case Intrinsic::umax:
3606 case Intrinsic::smin:
3607 case Intrinsic::smax:
3608 case Intrinsic::usub_sat:
3609 case Intrinsic::uadd_sat:
3610 case Intrinsic::ssub_sat:
3611 case Intrinsic::sadd_sat:
3612 for (Value *Op : II->args())
3613 if (auto *Sel = dyn_cast<SelectInst>(Op))
3614 if (Instruction *R = FoldOpIntoSelect(*II, Sel))
3615 return R;
3616 [[fallthrough]];
3617 default:
3618 break;
3619 }
3620
3621 if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
3622 return Shuf;
3623
3624 // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
3625 // context, so it is handled in visitCallBase and we should trigger it.
3626 return visitCallBase(*II);
3627 }
3628
3629 // Fence instruction simplification
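// An adjacent fence that is at least as strong (in the same syncscope) makes
// the weaker one redundant, e.g. (illustrative):
//   fence release
//   fence seq_cst      ; subsumes the release ordering, so the first fence can go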
3630 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
3631 auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
3632 // This check is solely here to handle arbitrary target-dependent syncscopes.
3633 // TODO: Can remove if does not matter in practice.
3634 if (NFI && FI.isIdenticalTo(NFI))
3635 return eraseInstFromFunction(FI);
3636
3637 // Returns true if FI1 is identical or stronger fence than FI2.
3638 auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
3639 auto FI1SyncScope = FI1->getSyncScopeID();
3640 // Consider same scope, where scope is global or single-thread.
3641 if (FI1SyncScope != FI2->getSyncScopeID() ||
3642 (FI1SyncScope != SyncScope::System &&
3643 FI1SyncScope != SyncScope::SingleThread))
3644 return false;
3645
3646 return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
3647 };
3648 if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
3649 return eraseInstFromFunction(FI);
3650
3651 if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
3652 if (isIdenticalOrStrongerFence(PFI, &FI))
3653 return eraseInstFromFunction(FI);
3654 return nullptr;
3655 }
3656
3657 // InvokeInst simplification
3658 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
3659 return visitCallBase(II);
3660 }
3661
3662 // CallBrInst simplification
3663 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
3664 return visitCallBase(CBI);
3665 }
3666
3667 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
3668 if (!CI->getCalledFunction()) return nullptr;
3669
3670 // Skip optimizing notail and musttail calls so
3671 // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
3672   // LibCallSimplifier::optimizeCall should try to preserve tail calls though.
3673 if (CI->isMustTailCall() || CI->isNoTailCall())
3674 return nullptr;
3675
3676 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
3677 replaceInstUsesWith(*From, With);
3678 };
3679 auto InstCombineErase = [this](Instruction *I) {
3680 eraseInstFromFunction(*I);
3681 };
3682 LibCallSimplifier Simplifier(DL, &TLI, &AC, ORE, BFI, PSI, InstCombineRAUW,
3683 InstCombineErase);
3684 if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
3685 ++NumSimplified;
3686 return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
3687 }
3688
3689 return nullptr;
3690 }
3691
3692 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
3693 // Strip off at most one level of pointer casts, looking for an alloca. This
3694 // is good enough in practice and simpler than handling any number of casts.
3695 Value *Underlying = TrampMem->stripPointerCasts();
3696 if (Underlying != TrampMem &&
3697 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
3698 return nullptr;
3699 if (!isa<AllocaInst>(Underlying))
3700 return nullptr;
3701
3702 IntrinsicInst *InitTrampoline = nullptr;
3703 for (User *U : TrampMem->users()) {
3704 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3705 if (!II)
3706 return nullptr;
3707 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
3708 if (InitTrampoline)
3709 // More than one init_trampoline writes to this value. Give up.
3710 return nullptr;
3711 InitTrampoline = II;
3712 continue;
3713 }
3714 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
3715 // Allow any number of calls to adjust.trampoline.
3716 continue;
3717 return nullptr;
3718 }
3719
3720 // No call to init.trampoline found.
3721 if (!InitTrampoline)
3722 return nullptr;
3723
3724 // Check that the alloca is being used in the expected way.
3725 if (InitTrampoline->getOperand(0) != TrampMem)
3726 return nullptr;
3727
3728 return InitTrampoline;
3729 }
3730
3731 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
3732 Value *TrampMem) {
3733 // Visit all the previous instructions in the basic block, and try to find a
3734 // init.trampoline which has a direct path to the adjust.trampoline.
3735 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
3736 E = AdjustTramp->getParent()->begin();
3737 I != E;) {
3738 Instruction *Inst = &*--I;
3739     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
3740 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
3741 II->getOperand(0) == TrampMem)
3742 return II;
3743 if (Inst->mayWriteToMemory())
3744 return nullptr;
3745 }
3746 return nullptr;
3747 }
3748
3749 // Given a call to llvm.adjust.trampoline, find and return the corresponding
3750 // call to llvm.init.trampoline if the call to the trampoline can be optimized
3751 // to a direct call to a function. Otherwise return NULL.
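// Schematic pattern being matched (illustrative only):
//   %tramp = alloca [N x i8]
//   call void @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nest_val)
//   %fp = call ptr @llvm.adjust.trampoline(ptr %tramp)
//   call void %fp(...)                ; candidate for a direct call to @f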
3752 static IntrinsicInst *findInitTrampoline(Value *Callee) {
3753 Callee = Callee->stripPointerCasts();
3754 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
3755 if (!AdjustTramp ||
3756 AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
3757 return nullptr;
3758
3759 Value *TrampMem = AdjustTramp->getOperand(0);
3760
3761 if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
3762 return IT;
3763 if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
3764 return IT;
3765 return nullptr;
3766 }
3767
3768 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
3769 const TargetLibraryInfo *TLI) {
3770 // Note: We only handle cases which can't be driven from generic attributes
3771 // here. So, for example, nonnull and noalias (which are common properties
3772 // of some allocation functions) are expected to be handled via annotation
3773 // of the respective allocator declaration with generic attributes.
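  // e.g. (illustrative): for `%p = call noalias ptr @malloc(i64 64)` this adds
  // dereferenceable_or_null(64) (or dereferenceable(64) if the return is already
  // known nonnull), and for aligned allocators with a constant power-of-two
  // alignment argument it adds a matching align attribute.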
3774 bool Changed = false;
3775
3776 if (!Call.getType()->isPointerTy())
3777 return Changed;
3778
3779 std::optional<APInt> Size = getAllocSize(&Call, TLI);
3780 if (Size && *Size != 0) {
3781 // TODO: We really should just emit deref_or_null here and then
3782 // let the generic inference code combine that with nonnull.
3783 if (Call.hasRetAttr(Attribute::NonNull)) {
3784 Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
3785 Call.addRetAttr(Attribute::getWithDereferenceableBytes(
3786 Call.getContext(), Size->getLimitedValue()));
3787 } else {
3788 Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
3789 Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
3790 Call.getContext(), Size->getLimitedValue()));
3791 }
3792 }
3793
3794 // Add alignment attribute if alignment is a power of two constant.
3795 Value *Alignment = getAllocAlignment(&Call, TLI);
3796 if (!Alignment)
3797 return Changed;
3798
3799 ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
3800 if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
3801 uint64_t AlignmentVal = AlignOpC->getZExtValue();
3802 if (llvm::isPowerOf2_64(AlignmentVal)) {
3803 Align ExistingAlign = Call.getRetAlign().valueOrOne();
3804 Align NewAlign = Align(AlignmentVal);
3805 if (NewAlign > ExistingAlign) {
3806 Call.addRetAttr(
3807 Attribute::getWithAlignment(Call.getContext(), NewAlign));
3808 Changed = true;
3809 }
3810 }
3811 }
3812 return Changed;
3813 }
3814
3815 /// Improvements for call, callbr and invoke instructions.
3816 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
3817 bool Changed = annotateAnyAllocSite(Call, &TLI);
3818
3819 // Mark any parameters that are known to be non-null with the nonnull
3820 // attribute. This is helpful for inlining calls to functions with null
3821 // checks on their arguments.
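  // e.g. (illustrative): if %p is known non-null at the call site, then
  //   call void @use(ptr %p)
  // becomes
  //   call void @use(ptr nonnull %p)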
3822 SmallVector<unsigned, 4> ArgNos;
3823 unsigned ArgNo = 0;
3824
3825 for (Value *V : Call.args()) {
3826 if (V->getType()->isPointerTy() &&
3827 !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
3828 isKnownNonZero(V, getSimplifyQuery().getWithInstruction(&Call)))
3829 ArgNos.push_back(ArgNo);
3830 ArgNo++;
3831 }
3832
3833 assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");
3834
3835 if (!ArgNos.empty()) {
3836 AttributeList AS = Call.getAttributes();
3837 LLVMContext &Ctx = Call.getContext();
3838 AS = AS.addParamAttribute(Ctx, ArgNos,
3839 Attribute::get(Ctx, Attribute::NonNull));
3840 Call.setAttributes(AS);
3841 Changed = true;
3842 }
3843
3844 // If the callee is a pointer to a function, attempt to move any casts to the
3845 // arguments of the call/callbr/invoke.
3846 Value *Callee = Call.getCalledOperand();
3847 Function *CalleeF = dyn_cast<Function>(Callee);
3848 if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
3849 transformConstExprCastCall(Call))
3850 return nullptr;
3851
3852 if (CalleeF) {
3853 // Remove the convergent attr on calls when the callee is not convergent.
3854 if (Call.isConvergent() && !CalleeF->isConvergent() &&
3855 !CalleeF->isIntrinsic()) {
3856 LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
3857 << "\n");
3858 Call.setNotConvergent();
3859 return &Call;
3860 }
3861
3862 // If the call and callee calling conventions don't match, and neither one
3863 // of the calling conventions is compatible with C calling convention
3864 // this call must be unreachable, as the call is undefined.
3865 if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
3866 !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
3867 TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
3868 !(Call.getCallingConv() == llvm::CallingConv::C &&
3869 TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
3870 // Only do this for calls to a function with a body. A prototype may
3871 // not actually end up matching the implementation's calling conv for a
3872 // variety of reasons (e.g. it may be written in assembly).
3873 !CalleeF->isDeclaration()) {
3874 Instruction *OldCall = &Call;
3875 CreateNonTerminatorUnreachable(OldCall);
3876 // If OldCall does not return void then replaceInstUsesWith poison.
3877 // This allows ValueHandlers and custom metadata to adjust itself.
3878 if (!OldCall->getType()->isVoidTy())
3879 replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
3880 if (isa<CallInst>(OldCall))
3881 return eraseInstFromFunction(*OldCall);
3882
3883       // We cannot remove an invoke or a callbr, because it would change the
3884 // CFG, just change the callee to a null pointer.
3885 cast<CallBase>(OldCall)->setCalledFunction(
3886 CalleeF->getFunctionType(),
3887 Constant::getNullValue(CalleeF->getType()));
3888 return nullptr;
3889 }
3890 }
3891
3892 // Calling a null function pointer is undefined if a null address isn't
3893 // dereferenceable.
3894 if ((isa<ConstantPointerNull>(Callee) &&
3895 !NullPointerIsDefined(Call.getFunction())) ||
3896 isa<UndefValue>(Callee)) {
3897 // If Call does not return void then replaceInstUsesWith poison.
3898 // This allows ValueHandlers and custom metadata to adjust itself.
3899 if (!Call.getType()->isVoidTy())
3900 replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
3901
3902 if (Call.isTerminator()) {
3903 // Can't remove an invoke or callbr because we cannot change the CFG.
3904 return nullptr;
3905 }
3906
3907 // This instruction is not reachable, just remove it.
3908 CreateNonTerminatorUnreachable(&Call);
3909 return eraseInstFromFunction(Call);
3910 }
3911
3912 if (IntrinsicInst *II = findInitTrampoline(Callee))
3913 return transformCallThroughTrampoline(Call, *II);
3914
3915 if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
3916 InlineAsm *IA = cast<InlineAsm>(Callee);
3917 if (!IA->canThrow()) {
3918 // Normal inline asm calls cannot throw - mark them
3919 // 'nounwind'.
3920 Call.setDoesNotThrow();
3921 Changed = true;
3922 }
3923 }
3924
3925 // Try to optimize the call if possible, we require DataLayout for most of
3926 // this. None of these calls are seen as possibly dead so go ahead and
3927 // delete the instruction now.
3928 if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
3929 Instruction *I = tryOptimizeCall(CI);
3930     // If we changed something, return the result. Otherwise fall through to
3931     // the checks below.
3932 if (I) return eraseInstFromFunction(*I);
3933 }
3934
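  // If an argument carries the 'returned' attribute and its type losslessly
  // casts to the call's type, uses of the call can be forwarded to that
  // argument (illustrative: `%r = call ptr @copy(ptr returned %dst, ...)` lets
  // uses of %r become %dst), except for musttail calls.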
3935 if (!Call.use_empty() && !Call.isMustTailCall())
3936 if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
3937 Type *CallTy = Call.getType();
3938 Type *RetArgTy = ReturnedArg->getType();
3939 if (RetArgTy->canLosslesslyBitCastTo(CallTy))
3940 return replaceInstUsesWith(
3941 Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
3942 }
3943
3944 // Drop unnecessary kcfi operand bundles from calls that were converted
3945 // into direct calls.
3946 auto Bundle = Call.getOperandBundle(LLVMContext::OB_kcfi);
3947 if (Bundle && !Call.isIndirectCall()) {
3948 DEBUG_WITH_TYPE(DEBUG_TYPE "-kcfi", {
3949 if (CalleeF) {
3950 ConstantInt *FunctionType = nullptr;
3951 ConstantInt *ExpectedType = cast<ConstantInt>(Bundle->Inputs[0]);
3952
3953 if (MDNode *MD = CalleeF->getMetadata(LLVMContext::MD_kcfi_type))
3954 FunctionType = mdconst::extract<ConstantInt>(MD->getOperand(0));
3955
3956 if (FunctionType &&
3957 FunctionType->getZExtValue() != ExpectedType->getZExtValue())
3958 dbgs() << Call.getModule()->getName()
3959 << ": warning: kcfi: " << Call.getCaller()->getName()
3960 << ": call to " << CalleeF->getName()
3961 << " using a mismatching function pointer type\n";
3962 }
3963 });
3964
3965 return CallBase::removeOperandBundle(&Call, LLVMContext::OB_kcfi);
3966 }
3967
3968 if (isRemovableAlloc(&Call, &TLI))
3969 return visitAllocSite(Call);
3970
3971 // Handle intrinsics which can be used in both call and invoke context.
3972 switch (Call.getIntrinsicID()) {
3973 case Intrinsic::experimental_gc_statepoint: {
3974 GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
3975 SmallPtrSet<Value *, 32> LiveGcValues;
3976 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3977 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3978
3979 // Remove the relocation if unused.
3980 if (GCR.use_empty()) {
3981 eraseInstFromFunction(GCR);
3982 continue;
3983 }
3984
3985 Value *DerivedPtr = GCR.getDerivedPtr();
3986 Value *BasePtr = GCR.getBasePtr();
3987
3988 // Undef is undef, even after relocation.
3989 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
3990 replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
3991 eraseInstFromFunction(GCR);
3992 continue;
3993 }
3994
3995 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
3996 // The relocation of null will be null for most any collector.
3997 // TODO: provide a hook for this in GCStrategy. There might be some
3998 // weird collector this property does not hold for.
3999 if (isa<ConstantPointerNull>(DerivedPtr)) {
4000 // Use null-pointer of gc_relocate's type to replace it.
4001 replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
4002 eraseInstFromFunction(GCR);
4003 continue;
4004 }
4005
4006 // isKnownNonNull -> nonnull attribute
4007 if (!GCR.hasRetAttr(Attribute::NonNull) &&
4008 isKnownNonZero(DerivedPtr,
4009 getSimplifyQuery().getWithInstruction(&Call))) {
4010 GCR.addRetAttr(Attribute::NonNull);
4011 // We discovered new fact, re-check users.
4012 Worklist.pushUsersToWorkList(GCR);
4013 }
4014 }
4015
4016 // If we have two copies of the same pointer in the statepoint argument
4017 // list, canonicalize to one. This may let us common gc.relocates.
4018 if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
4019 GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
4020 auto *OpIntTy = GCR.getOperand(2)->getType();
4021 GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
4022 }
4023
4024 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
4025 // Canonicalize on the type from the uses to the defs
4026
4027 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
4028 LiveGcValues.insert(BasePtr);
4029 LiveGcValues.insert(DerivedPtr);
4030 }
4031 std::optional<OperandBundleUse> Bundle =
4032 GCSP.getOperandBundle(LLVMContext::OB_gc_live);
4033 unsigned NumOfGCLives = LiveGcValues.size();
4034 if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
4035 break;
4036 // We can reduce the size of gc live bundle.
4037 DenseMap<Value *, unsigned> Val2Idx;
4038 std::vector<Value *> NewLiveGc;
4039 for (Value *V : Bundle->Inputs) {
4040 if (Val2Idx.count(V))
4041 continue;
4042 if (LiveGcValues.count(V)) {
4043 Val2Idx[V] = NewLiveGc.size();
4044 NewLiveGc.push_back(V);
4045 } else
4046 Val2Idx[V] = NumOfGCLives;
4047 }
4048 // Update all gc.relocates
4049 for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
4050 GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
4051 Value *BasePtr = GCR.getBasePtr();
4052 assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
4053 "Missed live gc for base pointer");
4054 auto *OpIntTy1 = GCR.getOperand(1)->getType();
4055 GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
4056 Value *DerivedPtr = GCR.getDerivedPtr();
4057 assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
4058 "Missed live gc for derived pointer");
4059 auto *OpIntTy2 = GCR.getOperand(2)->getType();
4060 GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
4061 }
4062 // Create new statepoint instruction.
4063 OperandBundleDef NewBundle("gc-live", NewLiveGc);
4064 return CallBase::Create(&Call, NewBundle);
4065 }
4066 default: { break; }
4067 }
4068
4069 return Changed ? &Call : nullptr;
4070 }
4071
4072 /// If the callee is a constexpr cast of a function, attempt to move the cast to
4073 /// the arguments of the call/invoke.
4074 /// CallBrInst is not supported.
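/// e.g. (illustrative, typed-pointer style):
///   call void bitcast (void (i8*)* @f to void (i32*)*)(i32* %p)
/// becomes a direct call with the argument cast instead:
///   call void @f(i8* %p.cast)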
4075 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
4076 auto *Callee =
4077 dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
4078 if (!Callee)
4079 return false;
4080
4081 assert(!isa<CallBrInst>(Call) &&
4082 "CallBr's don't have a single point after a def to insert at");
4083
4084 // If this is a call to a thunk function, don't remove the cast. Thunks are
4085 // used to transparently forward all incoming parameters and outgoing return
4086 // values, so it's important to leave the cast in place.
4087 if (Callee->hasFnAttribute("thunk"))
4088 return false;
4089
4090 // If this is a call to a naked function, the assembly might be
4091 // using an argument, or otherwise rely on the frame layout,
4092 // the function prototype will mismatch.
4093 if (Callee->hasFnAttribute(Attribute::Naked))
4094 return false;
4095
4096 // If this is a musttail call, the callee's prototype must match the caller's
4097 // prototype with the exception of pointee types. The code below doesn't
4098 // implement that, so we can't do this transform.
4099 // TODO: Do the transform if it only requires adding pointer casts.
4100 if (Call.isMustTailCall())
4101 return false;
4102
4103 Instruction *Caller = &Call;
4104 const AttributeList &CallerPAL = Call.getAttributes();
4105
4106 // Okay, this is a cast from a function to a different type. Unless doing so
4107 // would cause a type conversion of one of our arguments, change this call to
4108 // be a direct call with arguments casted to the appropriate types.
4109 FunctionType *FT = Callee->getFunctionType();
4110 Type *OldRetTy = Caller->getType();
4111 Type *NewRetTy = FT->getReturnType();
4112
4113 // Check to see if we are changing the return type...
4114 if (OldRetTy != NewRetTy) {
4115
4116 if (NewRetTy->isStructTy())
4117 return false; // TODO: Handle multiple return values.
4118
4119 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
4120 if (Callee->isDeclaration())
4121 return false; // Cannot transform this return value.
4122
4123 if (!Caller->use_empty() &&
4124 // void -> non-void is handled specially
4125 !NewRetTy->isVoidTy())
4126 return false; // Cannot transform this return value.
4127 }
4128
4129 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
4130 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
4131 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
4132 return false; // Attribute not compatible with transformed value.
4133 }
4134
4135 // If the callbase is an invoke instruction, and the return value is
4136 // used by a PHI node in a successor, we cannot change the return type of
4137 // the call because there is no place to put the cast instruction (without
4138 // breaking the critical edge). Bail out in this case.
4139 if (!Caller->use_empty()) {
4140 BasicBlock *PhisNotSupportedBlock = nullptr;
4141 if (auto *II = dyn_cast<InvokeInst>(Caller))
4142 PhisNotSupportedBlock = II->getNormalDest();
4143 if (PhisNotSupportedBlock)
4144 for (User *U : Caller->users())
4145 if (PHINode *PN = dyn_cast<PHINode>(U))
4146 if (PN->getParent() == PhisNotSupportedBlock)
4147 return false;
4148 }
4149 }
4150
4151 unsigned NumActualArgs = Call.arg_size();
4152 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
4153
4154 // Prevent us turning:
4155 // declare void @takes_i32_inalloca(i32* inalloca)
4156 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
4157 //
4158 // into:
4159 // call void @takes_i32_inalloca(i32* null)
4160 //
4161 // Similarly, avoid folding away bitcasts of byval calls.
4162 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
4163 Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
4164 return false;
4165
4166 auto AI = Call.arg_begin();
4167 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
4168 Type *ParamTy = FT->getParamType(i);
4169 Type *ActTy = (*AI)->getType();
4170
4171 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
4172 return false; // Cannot transform this parameter value.
4173
4174 // Check if there are any incompatible attributes we cannot drop safely.
4175 if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
4176 .overlaps(AttributeFuncs::typeIncompatible(
4177 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP)))
4178 return false; // Attribute not compatible with transformed value.
4179
4180 if (Call.isInAllocaArgument(i) ||
4181 CallerPAL.hasParamAttr(i, Attribute::Preallocated))
4182 return false; // Cannot transform to and from inalloca/preallocated.
4183
4184 if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
4185 return false;
4186
4187 if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
4188 Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
4189 return false; // Cannot transform to or from byval.
4190 }
4191
4192 if (Callee->isDeclaration()) {
4193 // Do not delete arguments unless we have a function body.
4194 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
4195 return false;
4196
4197 // If the callee is just a declaration, don't change the varargsness of the
4198 // call. We don't want to introduce a varargs call where one doesn't
4199 // already exist.
4200 if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
4201 return false;
4202
4203 // If both the callee and the cast type are varargs, we still have to make
4204 // sure the number of fixed parameters are the same or we have the same
4205 // ABI issues as if we introduce a varargs call.
4206 if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
4207 FT->getNumParams() != Call.getFunctionType()->getNumParams())
4208 return false;
4209 }
4210
4211 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
4212 !CallerPAL.isEmpty()) {
4213 // In this case we have more arguments than the new function type, but we
4214 // won't be dropping them. Check that these extra arguments have attributes
4215 // that are compatible with being a vararg call argument.
4216 unsigned SRetIdx;
4217 if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
4218 SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
4219 return false;
4220 }
4221
4222 // Okay, we decided that this is a safe thing to do: go ahead and start
4223 // inserting cast instructions as necessary.
4224 SmallVector<Value *, 8> Args;
4225 SmallVector<AttributeSet, 8> ArgAttrs;
4226 Args.reserve(NumActualArgs);
4227 ArgAttrs.reserve(NumActualArgs);
4228
4229 // Get any return attributes.
4230 AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
4231
4232 // If the return value is not being used, the type may not be compatible
4233 // with the existing attributes. Wipe out any problematic attributes.
4234 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
4235
4236 LLVMContext &Ctx = Call.getContext();
4237 AI = Call.arg_begin();
4238 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
4239 Type *ParamTy = FT->getParamType(i);
4240
4241 Value *NewArg = *AI;
4242 if ((*AI)->getType() != ParamTy)
4243 NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
4244 Args.push_back(NewArg);
4245
4246 // Add any parameter attributes except the ones incompatible with the new
4247 // type. Note that we made sure all incompatible ones are safe to drop.
4248 AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
4249 ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP);
4250 ArgAttrs.push_back(
4251 CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
4252 }
4253
4254 // If the function takes more arguments than the call was taking, add them
4255 // now.
4256 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
4257 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
4258 ArgAttrs.push_back(AttributeSet());
4259 }
4260
4261 // If we are removing arguments to the function, emit an obnoxious warning.
4262 if (FT->getNumParams() < NumActualArgs) {
4263 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
4264 if (FT->isVarArg()) {
4265 // Add all of the arguments in their promoted form to the arg list.
4266 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
4267 Type *PTy = getPromotedType((*AI)->getType());
4268 Value *NewArg = *AI;
4269 if (PTy != (*AI)->getType()) {
4270 // Must promote to pass through va_arg area!
4271 Instruction::CastOps opcode =
4272 CastInst::getCastOpcode(*AI, false, PTy, false);
4273 NewArg = Builder.CreateCast(opcode, *AI, PTy);
4274 }
4275 Args.push_back(NewArg);
4276
4277 // Add any parameter attributes.
4278 ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
4279 }
4280 }
4281 }
4282
4283 AttributeSet FnAttrs = CallerPAL.getFnAttrs();
4284
4285 if (NewRetTy->isVoidTy())
4286 Caller->setName(""); // Void type should not have a name.
4287
4288 assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
4289 "missing argument attributes");
4290 AttributeList NewCallerPAL = AttributeList::get(
4291 Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
4292
4293 SmallVector<OperandBundleDef, 1> OpBundles;
4294 Call.getOperandBundlesAsDefs(OpBundles);
4295
4296 CallBase *NewCall;
4297 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
4298 NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
4299 II->getUnwindDest(), Args, OpBundles);
4300 } else {
4301 NewCall = Builder.CreateCall(Callee, Args, OpBundles);
4302 cast<CallInst>(NewCall)->setTailCallKind(
4303 cast<CallInst>(Caller)->getTailCallKind());
4304 }
4305 NewCall->takeName(Caller);
4306 NewCall->setCallingConv(Call.getCallingConv());
4307 NewCall->setAttributes(NewCallerPAL);
4308
4309 // Preserve prof metadata if any.
4310 NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
4311
4312 // Insert a cast of the return type as necessary.
4313 Instruction *NC = NewCall;
4314 Value *NV = NC;
4315 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
4316 if (!NV->getType()->isVoidTy()) {
4317 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
4318 NC->setDebugLoc(Caller->getDebugLoc());
4319
4320 auto OptInsertPt = NewCall->getInsertionPointAfterDef();
4321 assert(OptInsertPt && "No place to insert cast");
4322 InsertNewInstBefore(NC, *OptInsertPt);
4323 Worklist.pushUsersToWorkList(*Caller);
4324 } else {
4325 NV = PoisonValue::get(Caller->getType());
4326 }
4327 }
4328
4329 if (!Caller->use_empty())
4330 replaceInstUsesWith(*Caller, NV);
4331 else if (Caller->hasValueHandle()) {
4332 if (OldRetTy == NV->getType())
4333 ValueHandleBase::ValueIsRAUWd(Caller, NV);
4334 else
4335 // We cannot call ValueIsRAUWd with a different type, and the
4336 // actual tracked value will disappear.
4337 ValueHandleBase::ValueIsDeleted(Caller);
4338 }
4339
4340 eraseInstFromFunction(*Caller);
4341 return true;
4342 }
4343
4344 /// Turn a call to a function created by init_trampoline / adjust_trampoline
4345 /// intrinsic pair into a direct call to the underlying function.
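/// The rewrite splices the trampoline's chain value into the argument list as
/// the callee's 'nest' parameter, e.g. (illustrative):
///   call void %fp(i32 %x)              ; %fp comes from llvm.adjust.trampoline
/// becomes
///   call void @f(ptr nest %chain, i32 %x)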
4346 Instruction *
4347 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
4348 IntrinsicInst &Tramp) {
4349 FunctionType *FTy = Call.getFunctionType();
4350 AttributeList Attrs = Call.getAttributes();
4351
4352 // If the call already has the 'nest' attribute somewhere then give up -
4353 // otherwise 'nest' would occur twice after splicing in the chain.
4354 if (Attrs.hasAttrSomewhere(Attribute::Nest))
4355 return nullptr;
4356
4357 Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
4358 FunctionType *NestFTy = NestF->getFunctionType();
4359
4360 AttributeList NestAttrs = NestF->getAttributes();
4361 if (!NestAttrs.isEmpty()) {
4362 unsigned NestArgNo = 0;
4363 Type *NestTy = nullptr;
4364 AttributeSet NestAttr;
4365
4366 // Look for a parameter marked with the 'nest' attribute.
4367 for (FunctionType::param_iterator I = NestFTy->param_begin(),
4368 E = NestFTy->param_end();
4369 I != E; ++NestArgNo, ++I) {
4370 AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
4371 if (AS.hasAttribute(Attribute::Nest)) {
4372 // Record the parameter type and any other attributes.
4373 NestTy = *I;
4374 NestAttr = AS;
4375 break;
4376 }
4377 }
4378
4379 if (NestTy) {
4380 std::vector<Value*> NewArgs;
4381 std::vector<AttributeSet> NewArgAttrs;
4382 NewArgs.reserve(Call.arg_size() + 1);
4383 NewArgAttrs.reserve(Call.arg_size());
4384
4385 // Insert the nest argument into the call argument list, which may
4386 // mean appending it. Likewise for attributes.
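      // For example (illustrative): with the nest parameter at index 0 the
      // chain value is pushed first and every original argument shifts right
      // by one; with the nest parameter past the last original argument, the
      // chain value is simply appended.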
4387
4388 {
4389 unsigned ArgNo = 0;
4390 auto I = Call.arg_begin(), E = Call.arg_end();
4391 do {
4392 if (ArgNo == NestArgNo) {
4393 // Add the chain argument and attributes.
4394 Value *NestVal = Tramp.getArgOperand(2);
4395 if (NestVal->getType() != NestTy)
4396 NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
4397 NewArgs.push_back(NestVal);
4398 NewArgAttrs.push_back(NestAttr);
4399 }
4400
4401 if (I == E)
4402 break;
4403
4404 // Add the original argument and attributes.
4405 NewArgs.push_back(*I);
4406 NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
4407
4408 ++ArgNo;
4409 ++I;
4410 } while (true);
4411 }
4412
4413 // The trampoline may have been bitcast to a bogus type (FTy).
4414 // Handle this by synthesizing a new function type, equal to FTy
4415 // with the chain parameter inserted.
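      // For example (illustrative; assumes the nest parameter is a ptr): if
      // FTy is void(i32) and the nest parameter sits at index 0, the
      // synthesized type is void(ptr, i32).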
4416
4417 std::vector<Type*> NewTypes;
4418 NewTypes.reserve(FTy->getNumParams()+1);
4419
4420 // Insert the chain's type into the list of parameter types, which may
4421 // mean appending it.
4422 {
4423 unsigned ArgNo = 0;
4424 FunctionType::param_iterator I = FTy->param_begin(),
4425 E = FTy->param_end();
4426
4427 do {
4428 if (ArgNo == NestArgNo)
4429 // Add the chain's type.
4430 NewTypes.push_back(NestTy);
4431
4432 if (I == E)
4433 break;
4434
4435 // Add the original type.
4436 NewTypes.push_back(*I);
4437
4438 ++ArgNo;
4439 ++I;
4440 } while (true);
4441 }
4442
4443 // Replace the trampoline call with a direct call. Let the generic
4444 // code sort out any function type mismatches.
4445 FunctionType *NewFTy =
4446 FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());
4447 AttributeList NewPAL =
4448 AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
4449 Attrs.getRetAttrs(), NewArgAttrs);
4450
4451 SmallVector<OperandBundleDef, 1> OpBundles;
4452 Call.getOperandBundlesAsDefs(OpBundles);
4453
4454 Instruction *NewCaller;
4455 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
4456 NewCaller = InvokeInst::Create(NewFTy, NestF, II->getNormalDest(),
4457 II->getUnwindDest(), NewArgs, OpBundles);
4458 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
4459 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
4460 } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
4461 NewCaller =
4462 CallBrInst::Create(NewFTy, NestF, CBI->getDefaultDest(),
4463 CBI->getIndirectDests(), NewArgs, OpBundles);
4464 cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
4465 cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
4466 } else {
4467 NewCaller = CallInst::Create(NewFTy, NestF, NewArgs, OpBundles);
4468 cast<CallInst>(NewCaller)->setTailCallKind(
4469 cast<CallInst>(Call).getTailCallKind());
4470 cast<CallInst>(NewCaller)->setCallingConv(
4471 cast<CallInst>(Call).getCallingConv());
4472 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
4473 }
4474 NewCaller->setDebugLoc(Call.getDebugLoc());
4475
4476 return NewCaller;
4477 }
4478 }
4479
4480 // Replace the trampoline call with a direct call. Since there is no 'nest'
4481 // parameter, there is no need to adjust the argument list. Let the generic
4482 // code sort out any function type mismatches.
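  // For example (illustrative): call void %adjusted(i32 %x) simply becomes
  // call void @target(i32 %x), keeping the original arguments and attributes.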
4483 Call.setCalledFunction(FTy, NestF);
4484 return &Call;
4485 }
4486