//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
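/// For example, on a little-endian target, bitcasting the constant
/// <2 x i32> <i32 1, i32 2> to i64 folds to i64 0x0000000200000001.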
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as an integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantFoldCastOperand(Instruction::ZExt, Src, Elt->getType(),
                                      DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        // Shift it to the right place, depending on endianness.
        Src = ConstantFoldBinaryOpOperands(
            Instruction::Shl, Src, ConstantInt::get(Src->getType(), ShiftAmt),
            DL);
        assert(Src && "Constant folding cannot fail on plain integers");

        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantFoldBinaryOpOperands(Instruction::Or, Elt, Src, DL);
        assert(Elt && "Constant folding cannot fail on plain integers");
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      APInt Elt = Src->getValue().lshr(ShiftAmt);
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantInt::get(DstEltTy, Elt.trunc(DstBitSize)));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
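/// For example, given the constant expression
///   getelementptr ([5 x i32], ptr @a, i64 0, i64 3)
/// this returns GV = @a and Offset = 12 (three 4-byte elements past the base).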
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantFoldCastOperand(Cast, C, DestTy, DL);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
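/// For example, reading 2 bytes at ByteOffset 1 out of the constant
/// i32 0x11223344 on a little-endian target fills CurPtr with {0x33, 0x22}.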
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if ((CI->getBitWidth() & 7) != 0)
      return false;
    const APInt &Val = CI->getValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = Val.extractBits(8, n * 8).getZExtValue();
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts, EltSize;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
      EltSize = DL.getTypeAllocSize(EltTy);
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
      // TODO: For non-byte-sized vectors, the current implementation assumes
      // there is padding to the next byte boundary between elements.
      if (!DL.typeSizeEqualsStoreSize(EltTy))
        return false;

      EltSize = DL.getTypeStoreSize(EltTy);
    }
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

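/// Fold a load of LoadTy at byte offset Offset by reinterpreting the raw
/// bytes of C. For example, loading an i16 at offset 2 from the constant
/// [4 x i8] c"\01\02\03\04" yields i16 0x0403 on a little-endian target.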
Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not producing an
    // actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(C->getContext(),
                                  DL.getTypeSizeInBits(LoadTy).getFixedValue());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getIntToPtr(Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return PoisonValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return PoisonValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

} // anonymous namespace

// If GV is a constant with an initializer read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
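// For example, for a little-endian global @g = constant i32 0x04030201,
// Offset = 1 yields the byte array {0x02, 0x03, 0x04}.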
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}

/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
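/// For example, with Base = [2 x i32] [i32 1, i32 2], Offset = 4 returns the
/// element i32 2, while Offset = 2 (pointing into the middle of an element)
/// returns nullptr.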
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return poison even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
    return PoisonValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getSignificantBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

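// For example, a load of i32 from getelementptr (i8, ptr @g, i64 4), where
// @g is a constant global with initializer [2 x i32] [i32 1, i32 2], folds
// to i32 2.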
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  // We can only fold loads from constant globals with a definitive
  // initializer. Check this upfront, to skip expensive offset calculations.
  auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (C == GV)
    if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                     Offset, DL))
      return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty, DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, std::move(Offset), DL);
}

Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
                                                 const DataLayout &DL) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  // If padding is needed when storing C to memory, then it isn't considered
  // uniform.
  if (!DL.typeSizeEqualsStoreSize(C->getType()))
    return nullptr;
  if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
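/// For example, on a target with a 64-bit index type, the i16 index in
/// getelementptr (i8, ptr @g, i16 5) would be rewritten as an i64 index
/// before folding.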
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, GEPNoWrapFlags NW,
                         std::optional<ConstantRange> InRange,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType =
          Ops[i]->getType()->isVectorTy() ? IntIdxTy : IntIdxScalarTy;
      Constant *NewIdx = ConstantFoldCastOperand(
          CastInst::getCastOpcode(Ops[i], true, NewType, true), Ops[i], NewType,
          DL);
      if (!NewIdx)
        return nullptr;
      NewIdxs.push_back(NewIdx);
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C =
      ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], NewIdxs, NW, InRange);
  return ConstantFoldConstant(C, DL, TLI);
}

/// If we can symbolically evaluate the GEP constant expression, do so.
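/// For example, gep inbounds (i32, ptr @g, i64 1) would typically be
/// canonicalized to the single-index form gep inbounds (i8, ptr @g, i64 4).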
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, GEP->getNoWrapFlags(),
                                   GEP->getInRange(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset = APInt(
      BitWidth,
      DL.getIndexedOffsetInType(
          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));

  std::optional<ConstantRange> InRange = GEP->getInRange();
  if (InRange)
    InRange = InRange->sextOrTrunc(BitWidth);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  GEPNoWrapFlags NW = GEP->getNoWrapFlags();
  bool Overflow = false;
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    NW &= GEP->getNoWrapFlags();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    // TODO: Try to intersect two inrange attributes?
    if (!InRange) {
      InRange = GEP->getInRange();
      if (InRange)
        // Adjust inrange by offset until now.
        InRange = InRange->sextOrTrunc(BitWidth).subtract(Offset);
    }

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset = Offset.sadd_ov(
        APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps)),
        Overflow);
  }

  // Preserving nusw (without inbounds) also requires that the offset
  // additions did not overflow.
  if (NW.hasNoUnsignedSignedWrap() && !NW.isInBounds() && Overflow)
    NW = NW.withoutNoUnsignedSignedWrap();

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Try to infer inbounds for GEPs of globals.
  // TODO(gep_nowrap): Also infer nuw flag.
  if (!NW.isInBounds() && Offset.isNonNegative()) {
    bool CanBeNull, CanBeFreed;
    uint64_t DerefBytes =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    if (DerefBytes != 0 && !CanBeNull && Offset.sle(DerefBytes))
      NW |= GEPNoWrapFlags::inBounds();
  }

  // Otherwise canonicalize this to a single ptradd.
  LLVMContext &Ctx = Ptr->getContext();
  return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ctx), Ptr,
                                        ConstantInt::get(Ctx, Offset), NW,
                                        InRange);
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI,
                                       bool AllowNonDeterministic) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for denormals
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I,
                                          AllowNonDeterministic);
      }
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    Type *SrcElemTy = GEP->getSourceElementType();
    if (!ConstantExpr::isSupportedGetElementPtr(SrcElemTy))
      return nullptr;

    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(SrcElemTy, Ops[0], Ops.slice(1),
                                          GEP->getNoWrapFlags(),
                                          GEP->getInRange());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI,
                                AllowNonDeterministic);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantFoldSelectInstruction(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res = ConstantFoldInstOperandsImpl(
            CE, CE->getOpcode(), Ops, DL, TLI, /*AllowNonDeterministic=*/true))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not
      // to, because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI,
                                         bool AllowNonDeterministic) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI,
                                      AllowNonDeterministic);
}

Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date; the DataLayout is available
  // here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        if (Constant *C = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                  /*IsSigned*/ false, DL)) {
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantFoldIntegerCast(CE0->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          Constant *C1 = ConstantFoldIntegerCast(CE1->getOperand(0), IntPtrTy,
                                                 /*IsSigned*/ false, DL);
          if (C0 && C1)
            return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      Value *Stripped0 =
          Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 =
          Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
      if (Stripped0 == Stripped1)
        return ConstantInt::getBool(
            Ops0->getContext(),
            ICmpInst::compare(Offset0, Offset1,
                              ICmpInst::getSignedPredicate(Predicate)));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  // Flush any denormal constant float input according to denormal handling
  // mode.
  Ops0 = FlushFPConstant(Ops0, I, /* IsOutput */ false);
  if (!Ops0)
    return nullptr;
  Ops1 = FlushFPConstant(Ops1, I, /* IsOutput */ false);
  if (!Ops1)
    return nullptr;

  return ConstantFoldCompareInstruction(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantFoldUnaryInstruction(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}

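/// Flush a denormal constant FP operand according to the denormal-handling
/// mode of the containing function, or return nullptr if the mode is dynamic.
/// For example, under the preserve-sign mode a negative denormal float
/// constant flushes to -0.0.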
FlushFPConstant(Constant * Operand,const Instruction * I,bool IsOutput)1300 Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
1301 bool IsOutput) {
1302 if (!I || !I->getParent() || !I->getFunction())
1303 return Operand;
1304
1305 ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
1306 if (!CFP)
1307 return Operand;
1308
1309 const APFloat &APF = CFP->getValueAPF();
1310 // TODO: Should this canonicalize nans?
1311 if (!APF.isDenormal())
1312 return Operand;
1313
1314 Type *Ty = CFP->getType();
1315 DenormalMode DenormMode =
1316 I->getFunction()->getDenormalMode(Ty->getFltSemantics());
1317 DenormalMode::DenormalModeKind Mode =
1318 IsOutput ? DenormMode.Output : DenormMode.Input;
1319 switch (Mode) {
1320 default:
1321 llvm_unreachable("unknown denormal mode");
1322 case DenormalMode::Dynamic:
1323 return nullptr;
1324 case DenormalMode::IEEE:
1325 return Operand;
1326 case DenormalMode::PreserveSign:
1327 if (APF.isDenormal()) {
1328 return ConstantFP::get(
1329 Ty->getContext(),
1330 APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
1331 }
1332 return Operand;
1333 case DenormalMode::PositiveZero:
1334 if (APF.isDenormal()) {
1335 return ConstantFP::get(Ty->getContext(),
1336 APFloat::getZero(Ty->getFltSemantics(), false));
1337 }
1338 return Operand;
1339 }
1340 return Operand;
1341 }
1342
ConstantFoldFPInstOperands(unsigned Opcode,Constant * LHS,Constant * RHS,const DataLayout & DL,const Instruction * I,bool AllowNonDeterministic)1343 Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
1344 Constant *RHS, const DataLayout &DL,
1345 const Instruction *I,
1346 bool AllowNonDeterministic) {
1347 if (Instruction::isBinaryOp(Opcode)) {
1348 // Flush denormal inputs if needed.
1349 Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
1350 if (!Op0)
1351 return nullptr;
1352 Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);
1353 if (!Op1)
1354 return nullptr;
1355
1356 // If nsz or an algebraic FMF flag is set, the result of the FP operation
1357 // may change due to future optimization. Don't constant fold them if
1358 // non-deterministic results are not allowed.
1359 if (!AllowNonDeterministic)
1360 if (auto *FP = dyn_cast_or_null<FPMathOperator>(I))
1361 if (FP->hasNoSignedZeros() || FP->hasAllowReassoc() ||
1362 FP->hasAllowContract() || FP->hasAllowReciprocal())
1363 return nullptr;
1364
1365 // Calculate constant result.
1366 Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
1367 if (!C)
1368 return nullptr;
1369
1370 // Flush denormal output if needed.
1371 C = FlushFPConstant(C, I, /* IsOutput */ true);
1372 if (!C)
1373 return nullptr;
1374
1375 // The precise NaN value is non-deterministic.
1376 if (!AllowNonDeterministic && C->isNaN())
1377 return nullptr;
1378
1379 return C;
1380 }
1381 // If instruction lacks a parent/function and the denormal mode cannot be
1382 // determined, use the default (IEEE).
1383 return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
1384 }
1385
ConstantFoldCastOperand(unsigned Opcode,Constant * C,Type * DestTy,const DataLayout & DL)1386 Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
1387 Type *DestTy, const DataLayout &DL) {
1388 assert(Instruction::isCast(Opcode));
1389 switch (Opcode) {
1390 default:
1391 llvm_unreachable("Missing case");
1392 case Instruction::PtrToInt:
1393 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1394 Constant *FoldedValue = nullptr;
1395 // If the input is a inttoptr, eliminate the pair. This requires knowing
1396 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
1397 if (CE->getOpcode() == Instruction::IntToPtr) {
1398 // zext/trunc the inttoptr to pointer size.
1399 FoldedValue = ConstantFoldIntegerCast(CE->getOperand(0),
1400 DL.getIntPtrType(CE->getType()),
1401 /*IsSigned=*/false, DL);
1402 } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
1403 // If we have GEP, we can perform the following folds:
1404 // (ptrtoint (gep null, x)) -> x
1405 // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
1406 unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
1407 APInt BaseOffset(BitWidth, 0);
1408 auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
1409 DL, BaseOffset, /*AllowNonInbounds=*/true));
1410 if (Base->isNullValue()) {
1411 FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
1412 } else {
1413 // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
1414 if (GEP->getNumIndices() == 1 &&
1415 GEP->getSourceElementType()->isIntegerTy(8)) {
1416 auto *Ptr = cast<Constant>(GEP->getPointerOperand());
1417 auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
1418 Type *IntIdxTy = DL.getIndexType(Ptr->getType());
1419 if (Sub && Sub->getType() == IntIdxTy &&
1420 Sub->getOpcode() == Instruction::Sub &&
1421 Sub->getOperand(0)->isNullValue())
1422 FoldedValue = ConstantExpr::getSub(
1423 ConstantExpr::getPtrToInt(Ptr, IntIdxTy), Sub->getOperand(1));
1424 }
1425 }
1426 }
1427 if (FoldedValue) {
1428 // Do a zext or trunc to get to the ptrtoint dest size.
1429 return ConstantFoldIntegerCast(FoldedValue, DestTy, /*IsSigned=*/false,
1430 DL);
1431 }
1432 }
1433 break;
1434 case Instruction::IntToPtr:
1435 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1436 // the int size is >= the ptr size and the address spaces are the same.
1437 // This requires knowing the width of a pointer, so it can't be done in
1438 // ConstantExpr::getCast.
1439 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1440 if (CE->getOpcode() == Instruction::PtrToInt) {
1441 Constant *SrcPtr = CE->getOperand(0);
1442 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1443 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1444
1445 if (MidIntSize >= SrcPtrSize) {
1446 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1447 if (SrcAS == DestTy->getPointerAddressSpace())
1448 return FoldBitCast(CE->getOperand(0), DestTy, DL);
1449 }
1450 }
1451 }
1452 break;
1453 case Instruction::Trunc:
1454 case Instruction::ZExt:
1455 case Instruction::SExt:
1456 case Instruction::FPTrunc:
1457 case Instruction::FPExt:
1458 case Instruction::UIToFP:
1459 case Instruction::SIToFP:
1460 case Instruction::FPToUI:
1461 case Instruction::FPToSI:
1462 case Instruction::AddrSpaceCast:
1463 break;
1464 case Instruction::BitCast:
1465 return FoldBitCast(C, DestTy, DL);
1466 }
1467
1468 if (ConstantExpr::isDesirableCastOp(Opcode))
1469 return ConstantExpr::getCast(Opcode, C, DestTy);
1470 return ConstantFoldCastInstruction(Opcode, C, DestTy);
1471 }
1472
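/// Fold an integer cast of C to DestTy, choosing trunc, sext or zext from the
/// relative bit widths (and IsSigned for the widening case); returns C
/// unchanged when the source and destination types already match.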
1473 Constant *llvm::ConstantFoldIntegerCast(Constant *C, Type *DestTy,
1474 bool IsSigned, const DataLayout &DL) {
1475 Type *SrcTy = C->getType();
1476 if (SrcTy == DestTy)
1477 return C;
1478 if (SrcTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1479 return ConstantFoldCastOperand(Instruction::Trunc, C, DestTy, DL);
1480 if (IsSigned)
1481 return ConstantFoldCastOperand(Instruction::SExt, C, DestTy, DL);
1482 return ConstantFoldCastOperand(Instruction::ZExt, C, DestTy, DL);
1483 }
1484
1485 //===----------------------------------------------------------------------===//
1486 // Constant Folding for Calls
1487 //
1488
1489 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1490 if (Call->isNoBuiltin())
1491 return false;
1492 if (Call->getFunctionType() != F->getFunctionType())
1493 return false;
1494 switch (F->getIntrinsicID()) {
1495 // Operations that do not operate on floating-point numbers and do not depend
1496 // on the FP environment can be folded even in strictfp functions.
1497 case Intrinsic::bswap:
1498 case Intrinsic::ctpop:
1499 case Intrinsic::ctlz:
1500 case Intrinsic::cttz:
1501 case Intrinsic::fshl:
1502 case Intrinsic::fshr:
1503 case Intrinsic::launder_invariant_group:
1504 case Intrinsic::strip_invariant_group:
1505 case Intrinsic::masked_load:
1506 case Intrinsic::get_active_lane_mask:
1507 case Intrinsic::abs:
1508 case Intrinsic::smax:
1509 case Intrinsic::smin:
1510 case Intrinsic::umax:
1511 case Intrinsic::umin:
1512 case Intrinsic::scmp:
1513 case Intrinsic::ucmp:
1514 case Intrinsic::sadd_with_overflow:
1515 case Intrinsic::uadd_with_overflow:
1516 case Intrinsic::ssub_with_overflow:
1517 case Intrinsic::usub_with_overflow:
1518 case Intrinsic::smul_with_overflow:
1519 case Intrinsic::umul_with_overflow:
1520 case Intrinsic::sadd_sat:
1521 case Intrinsic::uadd_sat:
1522 case Intrinsic::ssub_sat:
1523 case Intrinsic::usub_sat:
1524 case Intrinsic::smul_fix:
1525 case Intrinsic::smul_fix_sat:
1526 case Intrinsic::bitreverse:
1527 case Intrinsic::is_constant:
1528 case Intrinsic::vector_reduce_add:
1529 case Intrinsic::vector_reduce_mul:
1530 case Intrinsic::vector_reduce_and:
1531 case Intrinsic::vector_reduce_or:
1532 case Intrinsic::vector_reduce_xor:
1533 case Intrinsic::vector_reduce_smin:
1534 case Intrinsic::vector_reduce_smax:
1535 case Intrinsic::vector_reduce_umin:
1536 case Intrinsic::vector_reduce_umax:
1537 // Target intrinsics
1538 case Intrinsic::amdgcn_perm:
1539 case Intrinsic::amdgcn_wave_reduce_umin:
1540 case Intrinsic::amdgcn_wave_reduce_umax:
1541 case Intrinsic::amdgcn_s_wqm:
1542 case Intrinsic::amdgcn_s_quadmask:
1543 case Intrinsic::amdgcn_s_bitreplicate:
1544 case Intrinsic::arm_mve_vctp8:
1545 case Intrinsic::arm_mve_vctp16:
1546 case Intrinsic::arm_mve_vctp32:
1547 case Intrinsic::arm_mve_vctp64:
1548 case Intrinsic::aarch64_sve_convert_from_svbool:
1549 // WebAssembly float semantics are always known
1550 case Intrinsic::wasm_trunc_signed:
1551 case Intrinsic::wasm_trunc_unsigned:
1552 return true;
1553
1554 // Floating point operations cannot be folded in strictfp functions in the
1555 // general case. They can be folded if the FP environment is known to the compiler.
1556 case Intrinsic::minnum:
1557 case Intrinsic::maxnum:
1558 case Intrinsic::minimum:
1559 case Intrinsic::maximum:
1560 case Intrinsic::log:
1561 case Intrinsic::log2:
1562 case Intrinsic::log10:
1563 case Intrinsic::exp:
1564 case Intrinsic::exp2:
1565 case Intrinsic::exp10:
1566 case Intrinsic::sqrt:
1567 case Intrinsic::sin:
1568 case Intrinsic::cos:
1569 case Intrinsic::pow:
1570 case Intrinsic::powi:
1571 case Intrinsic::ldexp:
1572 case Intrinsic::fma:
1573 case Intrinsic::fmuladd:
1574 case Intrinsic::frexp:
1575 case Intrinsic::fptoui_sat:
1576 case Intrinsic::fptosi_sat:
1577 case Intrinsic::convert_from_fp16:
1578 case Intrinsic::convert_to_fp16:
1579 case Intrinsic::amdgcn_cos:
1580 case Intrinsic::amdgcn_cubeid:
1581 case Intrinsic::amdgcn_cubema:
1582 case Intrinsic::amdgcn_cubesc:
1583 case Intrinsic::amdgcn_cubetc:
1584 case Intrinsic::amdgcn_fmul_legacy:
1585 case Intrinsic::amdgcn_fma_legacy:
1586 case Intrinsic::amdgcn_fract:
1587 case Intrinsic::amdgcn_sin:
1588 // The intrinsics below depend on the rounding mode in MXCSR.
1589 case Intrinsic::x86_sse_cvtss2si:
1590 case Intrinsic::x86_sse_cvtss2si64:
1591 case Intrinsic::x86_sse_cvttss2si:
1592 case Intrinsic::x86_sse_cvttss2si64:
1593 case Intrinsic::x86_sse2_cvtsd2si:
1594 case Intrinsic::x86_sse2_cvtsd2si64:
1595 case Intrinsic::x86_sse2_cvttsd2si:
1596 case Intrinsic::x86_sse2_cvttsd2si64:
1597 case Intrinsic::x86_avx512_vcvtss2si32:
1598 case Intrinsic::x86_avx512_vcvtss2si64:
1599 case Intrinsic::x86_avx512_cvttss2si:
1600 case Intrinsic::x86_avx512_cvttss2si64:
1601 case Intrinsic::x86_avx512_vcvtsd2si32:
1602 case Intrinsic::x86_avx512_vcvtsd2si64:
1603 case Intrinsic::x86_avx512_cvttsd2si:
1604 case Intrinsic::x86_avx512_cvttsd2si64:
1605 case Intrinsic::x86_avx512_vcvtss2usi32:
1606 case Intrinsic::x86_avx512_vcvtss2usi64:
1607 case Intrinsic::x86_avx512_cvttss2usi:
1608 case Intrinsic::x86_avx512_cvttss2usi64:
1609 case Intrinsic::x86_avx512_vcvtsd2usi32:
1610 case Intrinsic::x86_avx512_vcvtsd2usi64:
1611 case Intrinsic::x86_avx512_cvttsd2usi:
1612 case Intrinsic::x86_avx512_cvttsd2usi64:
1613 return !Call->isStrictFP();
1614
1615 // Sign operations are actually bitwise operations; they do not raise
1616 // exceptions even for SNaNs.
1617 case Intrinsic::fabs:
1618 case Intrinsic::copysign:
1619 case Intrinsic::is_fpclass:
1620 // Non-constrained variants of rounding operations imply the default FP
1621 // environment, so they can be folded in any case.
1622 case Intrinsic::ceil:
1623 case Intrinsic::floor:
1624 case Intrinsic::round:
1625 case Intrinsic::roundeven:
1626 case Intrinsic::trunc:
1627 case Intrinsic::nearbyint:
1628 case Intrinsic::rint:
1629 case Intrinsic::canonicalize:
1630 // Constrained intrinsics can be folded if the FP environment is known
1631 // to the compiler.
1632 case Intrinsic::experimental_constrained_fma:
1633 case Intrinsic::experimental_constrained_fmuladd:
1634 case Intrinsic::experimental_constrained_fadd:
1635 case Intrinsic::experimental_constrained_fsub:
1636 case Intrinsic::experimental_constrained_fmul:
1637 case Intrinsic::experimental_constrained_fdiv:
1638 case Intrinsic::experimental_constrained_frem:
1639 case Intrinsic::experimental_constrained_ceil:
1640 case Intrinsic::experimental_constrained_floor:
1641 case Intrinsic::experimental_constrained_round:
1642 case Intrinsic::experimental_constrained_roundeven:
1643 case Intrinsic::experimental_constrained_trunc:
1644 case Intrinsic::experimental_constrained_nearbyint:
1645 case Intrinsic::experimental_constrained_rint:
1646 case Intrinsic::experimental_constrained_fcmp:
1647 case Intrinsic::experimental_constrained_fcmps:
1648 return true;
1649 default:
1650 return false;
1651 case Intrinsic::not_intrinsic: break;
1652 }
1653
1654 if (!F->hasName() || Call->isStrictFP())
1655 return false;
1656
1657 // In these cases, checking the length is required. We don't want to return
1658 // true for a name like "cos\0blah", which strcmp would consider equal to
1659 // "cos" but which has length 8.
1660 StringRef Name = F->getName();
1661 switch (Name[0]) {
1662 default:
1663 return false;
1664 case 'a':
1665 return Name == "acos" || Name == "acosf" ||
1666 Name == "asin" || Name == "asinf" ||
1667 Name == "atan" || Name == "atanf" ||
1668 Name == "atan2" || Name == "atan2f";
1669 case 'c':
1670 return Name == "ceil" || Name == "ceilf" ||
1671 Name == "cos" || Name == "cosf" ||
1672 Name == "cosh" || Name == "coshf";
1673 case 'e':
1674 return Name == "exp" || Name == "expf" ||
1675 Name == "exp2" || Name == "exp2f";
1676 case 'f':
1677 return Name == "fabs" || Name == "fabsf" ||
1678 Name == "floor" || Name == "floorf" ||
1679 Name == "fmod" || Name == "fmodf";
1680 case 'l':
1681 return Name == "log" || Name == "logf" || Name == "log2" ||
1682 Name == "log2f" || Name == "log10" || Name == "log10f" ||
1683 Name == "logl";
1684 case 'n':
1685 return Name == "nearbyint" || Name == "nearbyintf";
1686 case 'p':
1687 return Name == "pow" || Name == "powf";
1688 case 'r':
1689 return Name == "remainder" || Name == "remainderf" ||
1690 Name == "rint" || Name == "rintf" ||
1691 Name == "round" || Name == "roundf";
1692 case 's':
1693 return Name == "sin" || Name == "sinf" ||
1694 Name == "sinh" || Name == "sinhf" ||
1695 Name == "sqrt" || Name == "sqrtf";
1696 case 't':
1697 return Name == "tan" || Name == "tanf" ||
1698 Name == "tanh" || Name == "tanhf" ||
1699 Name == "trunc" || Name == "truncf";
1700 case '_':
1701 // Check for various function names that get used for the math functions
1702 // when the header files are preprocessed with the macro
1703 // __FINITE_MATH_ONLY__ enabled.
1704 // The '12' here is the length of the shortest name that can match.
1705 // We need to check the size before looking at Name[1] and Name[2]
1706 // so we may as well check a limit that will eliminate mismatches.
1707 if (Name.size() < 12 || Name[1] != '_')
1708 return false;
1709 switch (Name[2]) {
1710 default:
1711 return false;
1712 case 'a':
1713 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1714 Name == "__asin_finite" || Name == "__asinf_finite" ||
1715 Name == "__atan2_finite" || Name == "__atan2f_finite";
1716 case 'c':
1717 return Name == "__cosh_finite" || Name == "__coshf_finite";
1718 case 'e':
1719 return Name == "__exp_finite" || Name == "__expf_finite" ||
1720 Name == "__exp2_finite" || Name == "__exp2f_finite";
1721 case 'l':
1722 return Name == "__log_finite" || Name == "__logf_finite" ||
1723 Name == "__log10_finite" || Name == "__log10f_finite";
1724 case 'p':
1725 return Name == "__pow_finite" || Name == "__powf_finite";
1726 case 's':
1727 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1728 }
1729 }
1730 }
1731
1732 namespace {
1733
1734 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1735 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1736 APFloat APF(V);
1737 bool unused;
1738 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1739 return ConstantFP::get(Ty->getContext(), APF);
1740 }
1741 if (Ty->isDoubleTy())
1742 return ConstantFP::get(Ty->getContext(), APFloat(V));
1743 llvm_unreachable("Can only constant fold half/float/double");
1744 }
1745
1746 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1747 Constant *GetConstantFoldFPValue128(float128 V, Type *Ty) {
1748 if (Ty->isFP128Ty())
1749 return ConstantFP::get(Ty, V);
1750 llvm_unreachable("Can only constant fold fp128");
1751 }
1752 #endif
1753
1754 /// Clear the floating-point exception state.
1755 inline void llvm_fenv_clearexcept() {
1756 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1757 feclearexcept(FE_ALL_EXCEPT);
1758 #endif
1759 errno = 0;
1760 }
1761
1762 /// Test if a floating-point exception was raised.
1763 inline bool llvm_fenv_testexcept() {
1764 int errno_val = errno;
1765 if (errno_val == ERANGE || errno_val == EDOM)
1766 return true;
1767 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1768 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1769 return true;
1770 #endif
1771 return false;
1772 }
1773
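/// Evaluate a unary libm-style function on V using the host's native double
/// implementation, bailing out (returning nullptr) if the evaluation sets
/// errno or raises a floating-point exception other than inexact.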
1774 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1775 Type *Ty) {
1776 llvm_fenv_clearexcept();
1777 double Result = NativeFP(V.convertToDouble());
1778 if (llvm_fenv_testexcept()) {
1779 llvm_fenv_clearexcept();
1780 return nullptr;
1781 }
1782
1783 return GetConstantFoldFPValue(Result, Ty);
1784 }
1785
1786 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
1787 Constant *ConstantFoldFP128(float128 (*NativeFP)(float128), const APFloat &V,
1788 Type *Ty) {
1789 llvm_fenv_clearexcept();
1790 float128 Result = NativeFP(V.convertToQuad());
1791 if (llvm_fenv_testexcept()) {
1792 llvm_fenv_clearexcept();
1793 return nullptr;
1794 }
1795
1796 return GetConstantFoldFPValue128(Result, Ty);
1797 }
1798 #endif
1799
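/// Binary counterpart of ConstantFoldFP: evaluate NativeFP on two operands
/// converted to host double, with the same errno/FP-exception bailout.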
1800 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1801 const APFloat &V, const APFloat &W, Type *Ty) {
1802 llvm_fenv_clearexcept();
1803 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1804 if (llvm_fenv_testexcept()) {
1805 llvm_fenv_clearexcept();
1806 return nullptr;
1807 }
1808
1809 return GetConstantFoldFPValue(Result, Ty);
1810 }
1811
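/// Fold an integer vector reduction intrinsic by accumulating the elements of
/// a constant fixed-width vector, e.g. vector_reduce_add(<1, 2, 3>) -> 6.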
1812 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1813 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1814 if (!VT)
1815 return nullptr;
1816
1817 // This isn't strictly necessary, but handle the special/common case of zero:
1818 // all integer reductions of a zero input produce zero.
1819 if (isa<ConstantAggregateZero>(Op))
1820 return ConstantInt::get(VT->getElementType(), 0);
1821
1822 // This is the same as the underlying binops - poison propagates.
1823 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1824 return PoisonValue::get(VT->getElementType());
1825
1826 // TODO: Handle undef.
1827 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1828 return nullptr;
1829
1830 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1831 if (!EltC)
1832 return nullptr;
1833
1834 APInt Acc = EltC->getValue();
1835 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1836 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1837 return nullptr;
1838 const APInt &X = EltC->getValue();
1839 switch (IID) {
1840 case Intrinsic::vector_reduce_add:
1841 Acc = Acc + X;
1842 break;
1843 case Intrinsic::vector_reduce_mul:
1844 Acc = Acc * X;
1845 break;
1846 case Intrinsic::vector_reduce_and:
1847 Acc = Acc & X;
1848 break;
1849 case Intrinsic::vector_reduce_or:
1850 Acc = Acc | X;
1851 break;
1852 case Intrinsic::vector_reduce_xor:
1853 Acc = Acc ^ X;
1854 break;
1855 case Intrinsic::vector_reduce_smin:
1856 Acc = APIntOps::smin(Acc, X);
1857 break;
1858 case Intrinsic::vector_reduce_smax:
1859 Acc = APIntOps::smax(Acc, X);
1860 break;
1861 case Intrinsic::vector_reduce_umin:
1862 Acc = APIntOps::umin(Acc, X);
1863 break;
1864 case Intrinsic::vector_reduce_umax:
1865 Acc = APIntOps::umax(Acc, X);
1866 break;
1867 }
1868 }
1869
1870 return ConstantInt::get(Op->getContext(), Acc);
1871 }
1872
1873 /// Attempt to fold an SSE floating point to integer conversion of a constant
1874 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1875 /// used (toward nearest, ties to even). This matches the behavior of the
1876 /// non-truncating SSE instructions in the default rounding mode. The desired
1877 /// integer type Ty is used to select how many bits are available for the
1878 /// result. Returns null if the conversion cannot be performed, otherwise
1879 /// returns the Constant value resulting from the conversion.
1880 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1881 Type *Ty, bool IsSigned) {
1882 // All of these conversion intrinsics form an integer of at most 64 bits.
1883 unsigned ResultWidth = Ty->getIntegerBitWidth();
1884 assert(ResultWidth <= 64 &&
1885 "Can only constant fold conversions to 64 and 32 bit ints");
1886
1887 uint64_t UIntVal;
1888 bool isExact = false;
1889 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1890 : APFloat::rmNearestTiesToEven;
1891 APFloat::opStatus status =
1892 Val.convertToInteger(MutableArrayRef(UIntVal), ResultWidth,
1893 IsSigned, mode, &isExact);
1894 if (status != APFloat::opOK &&
1895 (!roundTowardZero || status != APFloat::opInexact))
1896 return nullptr;
1897 return ConstantInt::get(Ty, UIntVal, IsSigned);
1898 }
1899
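/// Return the value of Op as a host double. This is exact for types no wider
/// than double; wider types are rounded to the nearest double (ties to even).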
1900 double getValueAsDouble(ConstantFP *Op) {
1901 Type *Ty = Op->getType();
1902
1903 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1904 return Op->getValueAPF().convertToDouble();
1905
1906 bool unused;
1907 APFloat APF = Op->getValueAPF();
1908 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1909 return APF.convertToDouble();
1910 }
1911
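/// If Op is a ConstantInt, point C at its value; if Op is undef, set C to
/// null. Returns false for any other constant, in which case C is unusable.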
1912 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1913 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1914 C = &CI->getValue();
1915 return true;
1916 }
1917 if (isa<UndefValue>(Op)) {
1918 C = nullptr;
1919 return true;
1920 }
1921 return false;
1922 }
1923
1924 /// Checks if the given intrinsic call, which evaluates to constant, is allowed
1925 /// to be folded.
1926 ///
1927 /// \param CI Constrained intrinsic call.
1928 /// \param St Exception flags raised during constant evaluation.
1929 static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1930 APFloat::opStatus St) {
1931 std::optional<RoundingMode> ORM = CI->getRoundingMode();
1932 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1933
1934 // If the operation does not change exception status flags, it is safe
1935 // to fold.
1936 if (St == APFloat::opStatus::opOK)
1937 return true;
1938
1939 // If the evaluation raised an FP exception, the result can depend on the
1940 // rounding mode. If the latter is unknown, folding is not possible.
1941 if (ORM && *ORM == RoundingMode::Dynamic)
1942 return false;
1943
1944 // If FP exceptions are ignored, fold the call, even if such an exception is
1945 // raised.
1946 if (EB && *EB != fp::ExceptionBehavior::ebStrict)
1947 return true;
1948
1949 // Leave the calculation to runtime so that the exception flags are set
1950 // correctly in hardware.
1951 return false;
1952 }
1953
1954 /// Returns the rounding mode that should be used for constant evaluation.
1955 static RoundingMode
1956 getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1957 std::optional<RoundingMode> ORM = CI->getRoundingMode();
1958 if (!ORM || *ORM == RoundingMode::Dynamic)
1959 // Even if the rounding mode is unknown, try evaluating the operation.
1960 // If it does not raise an inexact exception, rounding was not applied,
1961 // so the result is exact and does not depend on the rounding mode. Whether
1962 // other FP exceptions are raised does not depend on the rounding mode either.
1963 return RoundingMode::NearestTiesToEven;
1964 return *ORM;
1965 }
1966
1967 /// Try to constant fold llvm.canonicalize for the given caller and value.
1968 static Constant *constantFoldCanonicalize(const Type *Ty, const CallBase *CI,
1969 const APFloat &Src) {
1970 // Zero, positive and negative, is always OK to fold.
1971 if (Src.isZero()) {
1972 // Get a fresh 0, since ppc_fp128 does have non-canonical zeros.
1973 return ConstantFP::get(
1974 CI->getContext(),
1975 APFloat::getZero(Src.getSemantics(), Src.isNegative()));
1976 }
1977
1978 if (!Ty->isIEEELikeFPTy())
1979 return nullptr;
1980
1981 // Zero is always canonical and the sign must be preserved.
1982 //
1983 // Denorms and NaNs may have special encodings, but it should be OK to fold a
1984 // totally average number.
1985 if (Src.isNormal() || Src.isInfinity())
1986 return ConstantFP::get(CI->getContext(), Src);
1987
1988 if (Src.isDenormal() && CI->getParent() && CI->getFunction()) {
1989 DenormalMode DenormMode =
1990 CI->getFunction()->getDenormalMode(Src.getSemantics());
1991
1992 if (DenormMode == DenormalMode::getIEEE())
1993 return ConstantFP::get(CI->getContext(), Src);
1994
1995 if (DenormMode.Input == DenormalMode::Dynamic)
1996 return nullptr;
1997
1998 // Bail out if we cannot tell whether the value is flushed on input or output.
1999 if ((DenormMode.Input == DenormalMode::Dynamic &&
2000 DenormMode.Output == DenormalMode::IEEE) ||
2001 (DenormMode.Input == DenormalMode::IEEE &&
2002 DenormMode.Output == DenormalMode::Dynamic))
2003 return nullptr;
2004
2005 bool IsPositive =
2006 (!Src.isNegative() || DenormMode.Input == DenormalMode::PositiveZero ||
2007 (DenormMode.Output == DenormalMode::PositiveZero &&
2008 DenormMode.Input == DenormalMode::IEEE));
2009
2010 return ConstantFP::get(CI->getContext(),
2011 APFloat::getZero(Src.getSemantics(), !IsPositive));
2012 }
2013
2014 return nullptr;
2015 }
2016
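/// Fold a call to an intrinsic or library function with one scalar constant
/// operand, returning nullptr if the call cannot be evaluated at compile time.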
2017 static Constant *ConstantFoldScalarCall1(StringRef Name,
2018 Intrinsic::ID IntrinsicID,
2019 Type *Ty,
2020 ArrayRef<Constant *> Operands,
2021 const TargetLibraryInfo *TLI,
2022 const CallBase *Call) {
2023 assert(Operands.size() == 1 && "Wrong number of operands.");
2024
2025 if (IntrinsicID == Intrinsic::is_constant) {
2026 // We know we have a "Constant" argument. But we want to only
2027 // return true for manifest constants, not those that depend on
2028 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
2029 if (Operands[0]->isManifestConstant())
2030 return ConstantInt::getTrue(Ty->getContext());
2031 return nullptr;
2032 }
2033
2034 if (isa<PoisonValue>(Operands[0])) {
2035 // TODO: All of these operations should probably propagate poison.
2036 if (IntrinsicID == Intrinsic::canonicalize)
2037 return PoisonValue::get(Ty);
2038 }
2039
2040 if (isa<UndefValue>(Operands[0])) {
2041 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
2042 // ctpop() is between 0 and bitwidth, pick 0 for undef.
2043 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
2044 if (IntrinsicID == Intrinsic::cos ||
2045 IntrinsicID == Intrinsic::ctpop ||
2046 IntrinsicID == Intrinsic::fptoui_sat ||
2047 IntrinsicID == Intrinsic::fptosi_sat ||
2048 IntrinsicID == Intrinsic::canonicalize)
2049 return Constant::getNullValue(Ty);
2050 if (IntrinsicID == Intrinsic::bswap ||
2051 IntrinsicID == Intrinsic::bitreverse ||
2052 IntrinsicID == Intrinsic::launder_invariant_group ||
2053 IntrinsicID == Intrinsic::strip_invariant_group)
2054 return Operands[0];
2055 }
2056
2057 if (isa<ConstantPointerNull>(Operands[0])) {
2058 // launder(null) == null == strip(null) iff in addrspace 0
2059 if (IntrinsicID == Intrinsic::launder_invariant_group ||
2060 IntrinsicID == Intrinsic::strip_invariant_group) {
2061 // If the instruction is not yet placed in a basic block (e.g. when cloning
2062 // a function during inlining), Call's caller may not be available.
2063 // So check Call's BB first before querying Call->getCaller.
2064 const Function *Caller =
2065 Call->getParent() ? Call->getCaller() : nullptr;
2066 if (Caller &&
2067 !NullPointerIsDefined(
2068 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
2069 return Operands[0];
2070 }
2071 return nullptr;
2072 }
2073 }
2074
2075 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
2076 if (IntrinsicID == Intrinsic::convert_to_fp16) {
2077 APFloat Val(Op->getValueAPF());
2078
2079 bool lost = false;
2080 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
2081
2082 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
2083 }
2084
2085 APFloat U = Op->getValueAPF();
2086
2087 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
2088 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
2089 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
2090
2091 if (U.isNaN())
2092 return nullptr;
2093
2094 unsigned Width = Ty->getIntegerBitWidth();
2095 APSInt Int(Width, !Signed);
2096 bool IsExact = false;
2097 APFloat::opStatus Status =
2098 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2099
2100 if (Status == APFloat::opOK || Status == APFloat::opInexact)
2101 return ConstantInt::get(Ty, Int);
2102
2103 return nullptr;
2104 }
2105
2106 if (IntrinsicID == Intrinsic::fptoui_sat ||
2107 IntrinsicID == Intrinsic::fptosi_sat) {
2108 // convertToInteger() already has the desired saturation semantics.
2109 APSInt Int(Ty->getIntegerBitWidth(),
2110 IntrinsicID == Intrinsic::fptoui_sat);
2111 bool IsExact;
2112 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
2113 return ConstantInt::get(Ty, Int);
2114 }
2115
2116 if (IntrinsicID == Intrinsic::canonicalize)
2117 return constantFoldCanonicalize(Ty, Call, U);
2118
2119 #if defined(HAS_IEE754_FLOAT128) && defined(HAS_LOGF128)
2120 if (Ty->isFP128Ty()) {
2121 if (IntrinsicID == Intrinsic::log) {
2122 float128 Result = logf128(Op->getValueAPF().convertToQuad());
2123 return GetConstantFoldFPValue128(Result, Ty);
2124 }
2125
2126 LibFunc Fp128Func = NotLibFunc;
2127 if (TLI->getLibFunc(Name, Fp128Func) && TLI->has(Fp128Func) &&
2128 Fp128Func == LibFunc_logl)
2129 return ConstantFoldFP128(logf128, Op->getValueAPF(), Ty);
2130 }
2131 #endif
2132
2133 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2134 return nullptr;
2135
2136 // Use internal versions of these intrinsics.
2137
2138 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
2139 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2140 return ConstantFP::get(Ty->getContext(), U);
2141 }
2142
2143 if (IntrinsicID == Intrinsic::round) {
2144 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2145 return ConstantFP::get(Ty->getContext(), U);
2146 }
2147
2148 if (IntrinsicID == Intrinsic::roundeven) {
2149 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2150 return ConstantFP::get(Ty->getContext(), U);
2151 }
2152
2153 if (IntrinsicID == Intrinsic::ceil) {
2154 U.roundToIntegral(APFloat::rmTowardPositive);
2155 return ConstantFP::get(Ty->getContext(), U);
2156 }
2157
2158 if (IntrinsicID == Intrinsic::floor) {
2159 U.roundToIntegral(APFloat::rmTowardNegative);
2160 return ConstantFP::get(Ty->getContext(), U);
2161 }
2162
2163 if (IntrinsicID == Intrinsic::trunc) {
2164 U.roundToIntegral(APFloat::rmTowardZero);
2165 return ConstantFP::get(Ty->getContext(), U);
2166 }
2167
2168 if (IntrinsicID == Intrinsic::fabs) {
2169 U.clearSign();
2170 return ConstantFP::get(Ty->getContext(), U);
2171 }
2172
2173 if (IntrinsicID == Intrinsic::amdgcn_fract) {
2174 // The v_fract instruction behaves like the OpenCL spec, which defines
2175 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
2176 // there to prevent fract(-small) from returning 1.0. It returns the
2177 // largest positive floating-point number less than 1.0."
2178 APFloat FloorU(U);
2179 FloorU.roundToIntegral(APFloat::rmTowardNegative);
2180 APFloat FractU(U - FloorU);
2181 APFloat AlmostOne(U.getSemantics(), 1);
2182 AlmostOne.next(/*nextDown*/ true);
2183 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
2184 }
2185
2186 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
2187 // raise FP exceptions, unless the argument is signaling NaN.
2188
2189 std::optional<APFloat::roundingMode> RM;
2190 switch (IntrinsicID) {
2191 default:
2192 break;
2193 case Intrinsic::experimental_constrained_nearbyint:
2194 case Intrinsic::experimental_constrained_rint: {
2195 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2196 RM = CI->getRoundingMode();
2197 if (!RM || *RM == RoundingMode::Dynamic)
2198 return nullptr;
2199 break;
2200 }
2201 case Intrinsic::experimental_constrained_round:
2202 RM = APFloat::rmNearestTiesToAway;
2203 break;
2204 case Intrinsic::experimental_constrained_ceil:
2205 RM = APFloat::rmTowardPositive;
2206 break;
2207 case Intrinsic::experimental_constrained_floor:
2208 RM = APFloat::rmTowardNegative;
2209 break;
2210 case Intrinsic::experimental_constrained_trunc:
2211 RM = APFloat::rmTowardZero;
2212 break;
2213 }
2214 if (RM) {
2215 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2216 if (U.isFinite()) {
2217 APFloat::opStatus St = U.roundToIntegral(*RM);
2218 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2219 St == APFloat::opInexact) {
2220 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2221 if (EB && *EB == fp::ebStrict)
2222 return nullptr;
2223 }
2224 } else if (U.isSignaling()) {
2225 std::optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2226 if (EB && *EB != fp::ebIgnore)
2227 return nullptr;
2228 U = APFloat::getQNaN(U.getSemantics());
2229 }
2230 return ConstantFP::get(Ty->getContext(), U);
2231 }
2232
2233 // We only fold functions with finite arguments. Folding NaN and inf is
2234 // likely to be aborted with an exception anyway, and some host libms
2235 // have known errors raising exceptions.
2236 if (!U.isFinite())
2237 return nullptr;
2238
2239 // Currently APFloat versions of these functions do not exist, so we use
2240 // the host native double versions. Float versions are not called
2241 // directly but for all these it is true that (float)(f((double)arg)) ==
2242 // f(arg). Long double is not supported yet.
2243 const APFloat &APF = Op->getValueAPF();
2244
2245 switch (IntrinsicID) {
2246 default: break;
2247 case Intrinsic::log:
2248 return ConstantFoldFP(log, APF, Ty);
2249 case Intrinsic::log2:
2250 // TODO: What about hosts that lack a C99 library?
2251 return ConstantFoldFP(log2, APF, Ty);
2252 case Intrinsic::log10:
2253 // TODO: What about hosts that lack a C99 library?
2254 return ConstantFoldFP(log10, APF, Ty);
2255 case Intrinsic::exp:
2256 return ConstantFoldFP(exp, APF, Ty);
2257 case Intrinsic::exp2:
2258 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2259 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2260 case Intrinsic::exp10:
2261 // Fold exp10(x) as pow(10, x), in case the host lacks a C99 library.
2262 return ConstantFoldBinaryFP(pow, APFloat(10.0), APF, Ty);
2263 case Intrinsic::sin:
2264 return ConstantFoldFP(sin, APF, Ty);
2265 case Intrinsic::cos:
2266 return ConstantFoldFP(cos, APF, Ty);
2267 case Intrinsic::sqrt:
2268 return ConstantFoldFP(sqrt, APF, Ty);
2269 case Intrinsic::amdgcn_cos:
2270 case Intrinsic::amdgcn_sin: {
2271 double V = getValueAsDouble(Op);
2272 if (V < -256.0 || V > 256.0)
2273 // The gfx8 and gfx9 architectures handle arguments outside the range
2274 // [-256, 256] differently. This should be a rare case so bail out
2275 // rather than trying to handle the difference.
2276 return nullptr;
2277 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2278 double V4 = V * 4.0;
2279 if (V4 == floor(V4)) {
2280 // Force exact results for quarter-integer inputs.
2281 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2282 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2283 } else {
2284 if (IsCos)
2285 V = cos(V * 2.0 * numbers::pi);
2286 else
2287 V = sin(V * 2.0 * numbers::pi);
2288 }
2289 return GetConstantFoldFPValue(V, Ty);
2290 }
2291 }
2292
2293 if (!TLI)
2294 return nullptr;
2295
2296 LibFunc Func = NotLibFunc;
2297 if (!TLI->getLibFunc(Name, Func))
2298 return nullptr;
2299
2300 switch (Func) {
2301 default:
2302 break;
2303 case LibFunc_acos:
2304 case LibFunc_acosf:
2305 case LibFunc_acos_finite:
2306 case LibFunc_acosf_finite:
2307 if (TLI->has(Func))
2308 return ConstantFoldFP(acos, APF, Ty);
2309 break;
2310 case LibFunc_asin:
2311 case LibFunc_asinf:
2312 case LibFunc_asin_finite:
2313 case LibFunc_asinf_finite:
2314 if (TLI->has(Func))
2315 return ConstantFoldFP(asin, APF, Ty);
2316 break;
2317 case LibFunc_atan:
2318 case LibFunc_atanf:
2319 if (TLI->has(Func))
2320 return ConstantFoldFP(atan, APF, Ty);
2321 break;
2322 case LibFunc_ceil:
2323 case LibFunc_ceilf:
2324 if (TLI->has(Func)) {
2325 U.roundToIntegral(APFloat::rmTowardPositive);
2326 return ConstantFP::get(Ty->getContext(), U);
2327 }
2328 break;
2329 case LibFunc_cos:
2330 case LibFunc_cosf:
2331 if (TLI->has(Func))
2332 return ConstantFoldFP(cos, APF, Ty);
2333 break;
2334 case LibFunc_cosh:
2335 case LibFunc_coshf:
2336 case LibFunc_cosh_finite:
2337 case LibFunc_coshf_finite:
2338 if (TLI->has(Func))
2339 return ConstantFoldFP(cosh, APF, Ty);
2340 break;
2341 case LibFunc_exp:
2342 case LibFunc_expf:
2343 case LibFunc_exp_finite:
2344 case LibFunc_expf_finite:
2345 if (TLI->has(Func))
2346 return ConstantFoldFP(exp, APF, Ty);
2347 break;
2348 case LibFunc_exp2:
2349 case LibFunc_exp2f:
2350 case LibFunc_exp2_finite:
2351 case LibFunc_exp2f_finite:
2352 if (TLI->has(Func))
2353 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2354 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2355 break;
2356 case LibFunc_fabs:
2357 case LibFunc_fabsf:
2358 if (TLI->has(Func)) {
2359 U.clearSign();
2360 return ConstantFP::get(Ty->getContext(), U);
2361 }
2362 break;
2363 case LibFunc_floor:
2364 case LibFunc_floorf:
2365 if (TLI->has(Func)) {
2366 U.roundToIntegral(APFloat::rmTowardNegative);
2367 return ConstantFP::get(Ty->getContext(), U);
2368 }
2369 break;
2370 case LibFunc_log:
2371 case LibFunc_logf:
2372 case LibFunc_log_finite:
2373 case LibFunc_logf_finite:
2374 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2375 return ConstantFoldFP(log, APF, Ty);
2376 break;
2377 case LibFunc_log2:
2378 case LibFunc_log2f:
2379 case LibFunc_log2_finite:
2380 case LibFunc_log2f_finite:
2381 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2382 // TODO: What about hosts that lack a C99 library?
2383 return ConstantFoldFP(log2, APF, Ty);
2384 break;
2385 case LibFunc_log10:
2386 case LibFunc_log10f:
2387 case LibFunc_log10_finite:
2388 case LibFunc_log10f_finite:
2389 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2390 // TODO: What about hosts that lack a C99 library?
2391 return ConstantFoldFP(log10, APF, Ty);
2392 break;
2393 case LibFunc_logl:
2394 return nullptr;
2395 case LibFunc_nearbyint:
2396 case LibFunc_nearbyintf:
2397 case LibFunc_rint:
2398 case LibFunc_rintf:
2399 if (TLI->has(Func)) {
2400 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2401 return ConstantFP::get(Ty->getContext(), U);
2402 }
2403 break;
2404 case LibFunc_round:
2405 case LibFunc_roundf:
2406 if (TLI->has(Func)) {
2407 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2408 return ConstantFP::get(Ty->getContext(), U);
2409 }
2410 break;
2411 case LibFunc_sin:
2412 case LibFunc_sinf:
2413 if (TLI->has(Func))
2414 return ConstantFoldFP(sin, APF, Ty);
2415 break;
2416 case LibFunc_sinh:
2417 case LibFunc_sinhf:
2418 case LibFunc_sinh_finite:
2419 case LibFunc_sinhf_finite:
2420 if (TLI->has(Func))
2421 return ConstantFoldFP(sinh, APF, Ty);
2422 break;
2423 case LibFunc_sqrt:
2424 case LibFunc_sqrtf:
2425 if (!APF.isNegative() && TLI->has(Func))
2426 return ConstantFoldFP(sqrt, APF, Ty);
2427 break;
2428 case LibFunc_tan:
2429 case LibFunc_tanf:
2430 if (TLI->has(Func))
2431 return ConstantFoldFP(tan, APF, Ty);
2432 break;
2433 case LibFunc_tanh:
2434 case LibFunc_tanhf:
2435 if (TLI->has(Func))
2436 return ConstantFoldFP(tanh, APF, Ty);
2437 break;
2438 case LibFunc_trunc:
2439 case LibFunc_truncf:
2440 if (TLI->has(Func)) {
2441 U.roundToIntegral(APFloat::rmTowardZero);
2442 return ConstantFP::get(Ty->getContext(), U);
2443 }
2444 break;
2445 }
2446 return nullptr;
2447 }
2448
2449 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2450 switch (IntrinsicID) {
2451 case Intrinsic::bswap:
2452 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2453 case Intrinsic::ctpop:
2454 return ConstantInt::get(Ty, Op->getValue().popcount());
2455 case Intrinsic::bitreverse:
2456 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2457 case Intrinsic::convert_from_fp16: {
2458 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2459
2460 bool lost = false;
2461 APFloat::opStatus status = Val.convert(
2462 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2463
2464 // Conversion is always precise.
2465 (void)status;
2466 assert(status != APFloat::opInexact && !lost &&
2467 "Precision lost during fp16 constfolding");
2468
2469 return ConstantFP::get(Ty->getContext(), Val);
2470 }
2471
2472 case Intrinsic::amdgcn_s_wqm: {
2473 uint64_t Val = Op->getZExtValue();
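// Whole quad mode: any set bit forces its entire 4-bit group to all-ones.
// The first step ORs each bit with its partner within a pair, the second
// ORs the pairs within each quad, e.g. 0b0100 -> 0b1100 -> 0b1111.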
2474 Val |= (Val & 0x5555555555555555ULL) << 1 |
2475 ((Val >> 1) & 0x5555555555555555ULL);
2476 Val |= (Val & 0x3333333333333333ULL) << 2 |
2477 ((Val >> 2) & 0x3333333333333333ULL);
2478 return ConstantInt::get(Ty, Val);
2479 }
2480
2481 case Intrinsic::amdgcn_s_quadmask: {
2482 uint64_t Val = Op->getZExtValue();
2483 uint64_t QuadMask = 0;
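// Produce one result bit per 4-bit group of the input: bit I is set iff
// the I-th quad is nonzero, e.g. 0x0F10 -> 0b0110.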
2484 for (unsigned I = 0; I < Op->getBitWidth() / 4; ++I, Val >>= 4) {
2485 if (!(Val & 0xF))
2486 continue;
2487
2488 QuadMask |= (1ULL << I);
2489 }
2490 return ConstantInt::get(Ty, QuadMask);
2491 }
2492
2493 case Intrinsic::amdgcn_s_bitreplicate: {
2494 uint64_t Val = Op->getZExtValue();
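// Double each of the low 32 bits: spread the bits apart with log2(32)
// masked shift steps so bit K lands at position 2*K, then OR in a copy
// shifted left by one so it also occupies 2*K+1, e.g. 0b10 -> 0b1100.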
2495 Val = (Val & 0x000000000000FFFFULL) | (Val & 0x00000000FFFF0000ULL) << 16;
2496 Val = (Val & 0x000000FF000000FFULL) | (Val & 0x0000FF000000FF00ULL) << 8;
2497 Val = (Val & 0x000F000F000F000FULL) | (Val & 0x00F000F000F000F0ULL) << 4;
2498 Val = (Val & 0x0303030303030303ULL) | (Val & 0x0C0C0C0C0C0C0C0CULL) << 2;
2499 Val = (Val & 0x1111111111111111ULL) | (Val & 0x2222222222222222ULL) << 1;
2500 Val = Val | Val << 1;
2501 return ConstantInt::get(Ty, Val);
2502 }
2503
2504 default:
2505 return nullptr;
2506 }
2507 }
2508
2509 switch (IntrinsicID) {
2510 default: break;
2511 case Intrinsic::vector_reduce_add:
2512 case Intrinsic::vector_reduce_mul:
2513 case Intrinsic::vector_reduce_and:
2514 case Intrinsic::vector_reduce_or:
2515 case Intrinsic::vector_reduce_xor:
2516 case Intrinsic::vector_reduce_smin:
2517 case Intrinsic::vector_reduce_smax:
2518 case Intrinsic::vector_reduce_umin:
2519 case Intrinsic::vector_reduce_umax:
2520 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2521 return C;
2522 break;
2523 }
2524
2525 // Support ConstantVector in case we have an Undef among the elements.
2526 if (isa<ConstantVector>(Operands[0]) ||
2527 isa<ConstantDataVector>(Operands[0])) {
2528 auto *Op = cast<Constant>(Operands[0]);
2529 switch (IntrinsicID) {
2530 default: break;
2531 case Intrinsic::x86_sse_cvtss2si:
2532 case Intrinsic::x86_sse_cvtss2si64:
2533 case Intrinsic::x86_sse2_cvtsd2si:
2534 case Intrinsic::x86_sse2_cvtsd2si64:
2535 if (ConstantFP *FPOp =
2536 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2537 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2538 /*roundTowardZero=*/false, Ty,
2539 /*IsSigned*/true);
2540 break;
2541 case Intrinsic::x86_sse_cvttss2si:
2542 case Intrinsic::x86_sse_cvttss2si64:
2543 case Intrinsic::x86_sse2_cvttsd2si:
2544 case Intrinsic::x86_sse2_cvttsd2si64:
2545 if (ConstantFP *FPOp =
2546 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2547 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2548 /*roundTowardZero=*/true, Ty,
2549 /*IsSigned*/true);
2550 break;
2551 }
2552 }
2553
2554 return nullptr;
2555 }
2556
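/// Evaluate a constrained FP comparison intrinsic on two constant operands,
/// folding only when any raised invalid-operation exception may be ignored.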
2557 static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2,
2558 const ConstrainedFPIntrinsic *Call) {
2559 APFloat::opStatus St = APFloat::opOK;
2560 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call);
2561 FCmpInst::Predicate Cond = FCmp->getPredicate();
2562 if (FCmp->isSignaling()) {
2563 if (Op1.isNaN() || Op2.isNaN())
2564 St = APFloat::opInvalidOp;
2565 } else {
2566 if (Op1.isSignaling() || Op2.isSignaling())
2567 St = APFloat::opInvalidOp;
2568 }
2569 bool Result = FCmpInst::compare(Op1, Op2, Cond);
2570 if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St))
2571 return ConstantInt::get(Call->getType()->getScalarType(), Result);
2572 return nullptr;
2573 }
2574
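/// Fold a two-argument library call (pow, fmod, remainder, atan2 and their
/// variants) when both operands are floating-point constants.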
2575 static Constant *ConstantFoldLibCall2(StringRef Name, Type *Ty,
2576 ArrayRef<Constant *> Operands,
2577 const TargetLibraryInfo *TLI) {
2578 if (!TLI)
2579 return nullptr;
2580
2581 LibFunc Func = NotLibFunc;
2582 if (!TLI->getLibFunc(Name, Func))
2583 return nullptr;
2584
2585 const auto *Op1 = dyn_cast<ConstantFP>(Operands[0]);
2586 if (!Op1)
2587 return nullptr;
2588
2589 const auto *Op2 = dyn_cast<ConstantFP>(Operands[1]);
2590 if (!Op2)
2591 return nullptr;
2592
2593 const APFloat &Op1V = Op1->getValueAPF();
2594 const APFloat &Op2V = Op2->getValueAPF();
2595
2596 switch (Func) {
2597 default:
2598 break;
2599 case LibFunc_pow:
2600 case LibFunc_powf:
2601 case LibFunc_pow_finite:
2602 case LibFunc_powf_finite:
2603 if (TLI->has(Func))
2604 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2605 break;
2606 case LibFunc_fmod:
2607 case LibFunc_fmodf:
2608 if (TLI->has(Func)) {
2609 APFloat V = Op1->getValueAPF();
2610 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2611 return ConstantFP::get(Ty->getContext(), V);
2612 }
2613 break;
2614 case LibFunc_remainder:
2615 case LibFunc_remainderf:
2616 if (TLI->has(Func)) {
2617 APFloat V = Op1->getValueAPF();
2618 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2619 return ConstantFP::get(Ty->getContext(), V);
2620 }
2621 break;
2622 case LibFunc_atan2:
2623 case LibFunc_atan2f:
2624 // atan2(+/-0.0, +/-0.0) is known to raise an exception on some libm
2625 // (Solaris), so we do not assume a known result for that.
2626 if (Op1V.isZero() && Op2V.isZero())
2627 return nullptr;
2628 [[fallthrough]];
2629 case LibFunc_atan2_finite:
2630 case LibFunc_atan2f_finite:
2631 if (TLI->has(Func))
2632 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2633 break;
2634 }
2635
2636 return nullptr;
2637 }
2638
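/// Fold a two-operand intrinsic call with constant operands; handles the
/// floating-point intrinsics (including the constrained forms) as well as the
/// integer min/max, overflow, saturating and bit-counting intrinsics.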
2639 static Constant *ConstantFoldIntrinsicCall2(Intrinsic::ID IntrinsicID, Type *Ty,
2640 ArrayRef<Constant *> Operands,
2641 const CallBase *Call) {
2642 assert(Operands.size() == 2 && "Wrong number of operands.");
2643
2644 if (Ty->isFloatingPointTy()) {
2645 // TODO: We should have undef handling for all of the FP intrinsics that
2646 // are attempted to be folded in this function.
2647 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2648 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2649 switch (IntrinsicID) {
2650 case Intrinsic::maxnum:
2651 case Intrinsic::minnum:
2652 case Intrinsic::maximum:
2653 case Intrinsic::minimum:
2654 // If one argument is undef, return the other argument.
2655 if (IsOp0Undef)
2656 return Operands[1];
2657 if (IsOp1Undef)
2658 return Operands[0];
2659 break;
2660 }
2661 }
2662
2663 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2664 const APFloat &Op1V = Op1->getValueAPF();
2665
2666 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2667 if (Op2->getType() != Op1->getType())
2668 return nullptr;
2669 const APFloat &Op2V = Op2->getValueAPF();
2670
2671 if (const auto *ConstrIntr =
2672 dyn_cast_if_present<ConstrainedFPIntrinsic>(Call)) {
2673 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2674 APFloat Res = Op1V;
2675 APFloat::opStatus St;
2676 switch (IntrinsicID) {
2677 default:
2678 return nullptr;
2679 case Intrinsic::experimental_constrained_fadd:
2680 St = Res.add(Op2V, RM);
2681 break;
2682 case Intrinsic::experimental_constrained_fsub:
2683 St = Res.subtract(Op2V, RM);
2684 break;
2685 case Intrinsic::experimental_constrained_fmul:
2686 St = Res.multiply(Op2V, RM);
2687 break;
2688 case Intrinsic::experimental_constrained_fdiv:
2689 St = Res.divide(Op2V, RM);
2690 break;
2691 case Intrinsic::experimental_constrained_frem:
2692 St = Res.mod(Op2V);
2693 break;
2694 case Intrinsic::experimental_constrained_fcmp:
2695 case Intrinsic::experimental_constrained_fcmps:
2696 return evaluateCompare(Op1V, Op2V, ConstrIntr);
2697 }
2698 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2699 St))
2700 return ConstantFP::get(Ty->getContext(), Res);
2701 return nullptr;
2702 }
2703
2704 switch (IntrinsicID) {
2705 default:
2706 break;
2707 case Intrinsic::copysign:
2708 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2709 case Intrinsic::minnum:
2710 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2711 case Intrinsic::maxnum:
2712 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2713 case Intrinsic::minimum:
2714 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2715 case Intrinsic::maximum:
2716 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2717 }
2718
2719 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2720 return nullptr;
2721
2722 switch (IntrinsicID) {
2723 default:
2724 break;
2725 case Intrinsic::pow:
2726 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2727 case Intrinsic::amdgcn_fmul_legacy:
2728 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2729 // NaN or infinity, gives +0.0.
2730 if (Op1V.isZero() || Op2V.isZero())
2731 return ConstantFP::getZero(Ty);
2732 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2733 }
2734
2735 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2736 switch (IntrinsicID) {
2737 case Intrinsic::ldexp: {
2738 return ConstantFP::get(
2739 Ty->getContext(),
2740 scalbn(Op1V, Op2C->getSExtValue(), APFloat::rmNearestTiesToEven));
2741 }
2742 case Intrinsic::is_fpclass: {
2743 FPClassTest Mask = static_cast<FPClassTest>(Op2C->getZExtValue());
2744 bool Result =
2745 ((Mask & fcSNan) && Op1V.isNaN() && Op1V.isSignaling()) ||
2746 ((Mask & fcQNan) && Op1V.isNaN() && !Op1V.isSignaling()) ||
2747 ((Mask & fcNegInf) && Op1V.isNegInfinity()) ||
2748 ((Mask & fcNegNormal) && Op1V.isNormal() && Op1V.isNegative()) ||
2749 ((Mask & fcNegSubnormal) && Op1V.isDenormal() && Op1V.isNegative()) ||
2750 ((Mask & fcNegZero) && Op1V.isZero() && Op1V.isNegative()) ||
2751 ((Mask & fcPosZero) && Op1V.isZero() && !Op1V.isNegative()) ||
2752 ((Mask & fcPosSubnormal) && Op1V.isDenormal() && !Op1V.isNegative()) ||
2753 ((Mask & fcPosNormal) && Op1V.isNormal() && !Op1V.isNegative()) ||
2754 ((Mask & fcPosInf) && Op1V.isPosInfinity());
2755 return ConstantInt::get(Ty, Result);
2756 }
2757 case Intrinsic::powi: {
2758 int Exp = static_cast<int>(Op2C->getSExtValue());
2759 switch (Ty->getTypeID()) {
2760 case Type::HalfTyID:
2761 case Type::FloatTyID: {
2762 APFloat Res(static_cast<float>(std::pow(Op1V.convertToFloat(), Exp)));
2763 if (Ty->isHalfTy()) {
2764 bool Unused;
2765 Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
2766 &Unused);
2767 }
2768 return ConstantFP::get(Ty->getContext(), Res);
2769 }
2770 case Type::DoubleTyID:
2771 return ConstantFP::get(Ty, std::pow(Op1V.convertToDouble(), Exp));
2772 default:
2773 return nullptr;
2774 }
2775 }
2776 default:
2777 break;
2778 }
2779 }
2780 return nullptr;
2781 }
2782
2783 if (Operands[0]->getType()->isIntegerTy() &&
2784 Operands[1]->getType()->isIntegerTy()) {
2785 const APInt *C0, *C1;
2786 if (!getConstIntOrUndef(Operands[0], C0) ||
2787 !getConstIntOrUndef(Operands[1], C1))
2788 return nullptr;
2789
2790 switch (IntrinsicID) {
2791 default: break;
2792 case Intrinsic::smax:
2793 case Intrinsic::smin:
2794 case Intrinsic::umax:
2795 case Intrinsic::umin:
2796 // This is the same as for binary ops - poison propagates.
2797 // TODO: Poison handling should be consolidated.
2798 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2799 return PoisonValue::get(Ty);
2800
2801 if (!C0 && !C1)
2802 return UndefValue::get(Ty);
2803 if (!C0 || !C1)
2804 return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty);
2805 return ConstantInt::get(
2806 Ty, ICmpInst::compare(*C0, *C1,
2807 MinMaxIntrinsic::getPredicate(IntrinsicID))
2808 ? *C0
2809 : *C1);
2810
2811 case Intrinsic::scmp:
2812 case Intrinsic::ucmp:
2813 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2814 return PoisonValue::get(Ty);
2815
2816 if (!C0 || !C1)
2817 return ConstantInt::get(Ty, 0);
2818
2819 int Res;
2820 if (IntrinsicID == Intrinsic::scmp)
2821 Res = C0->sgt(*C1) ? 1 : C0->slt(*C1) ? -1 : 0;
2822 else
2823 Res = C0->ugt(*C1) ? 1 : C0->ult(*C1) ? -1 : 0;
2824 return ConstantInt::get(Ty, Res, /*IsSigned=*/true);
2825
2826 case Intrinsic::usub_with_overflow:
2827 case Intrinsic::ssub_with_overflow:
2828 // X - undef -> { 0, false }
2829 // undef - X -> { 0, false }
2830 if (!C0 || !C1)
2831 return Constant::getNullValue(Ty);
2832 [[fallthrough]];
2833 case Intrinsic::uadd_with_overflow:
2834 case Intrinsic::sadd_with_overflow:
2835 // X + undef -> { -1, false }
2836 // undef + X -> { -1, false }
2837 if (!C0 || !C1) {
2838 return ConstantStruct::get(
2839 cast<StructType>(Ty),
2840 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2841 Constant::getNullValue(Ty->getStructElementType(1))});
2842 }
2843 [[fallthrough]];
2844 case Intrinsic::smul_with_overflow:
2845 case Intrinsic::umul_with_overflow: {
2846 // undef * X -> { 0, false }
2847 // X * undef -> { 0, false }
2848 if (!C0 || !C1)
2849 return Constant::getNullValue(Ty);
2850
2851 APInt Res;
2852 bool Overflow;
2853 switch (IntrinsicID) {
2854 default: llvm_unreachable("Invalid case");
2855 case Intrinsic::sadd_with_overflow:
2856 Res = C0->sadd_ov(*C1, Overflow);
2857 break;
2858 case Intrinsic::uadd_with_overflow:
2859 Res = C0->uadd_ov(*C1, Overflow);
2860 break;
2861 case Intrinsic::ssub_with_overflow:
2862 Res = C0->ssub_ov(*C1, Overflow);
2863 break;
2864 case Intrinsic::usub_with_overflow:
2865 Res = C0->usub_ov(*C1, Overflow);
2866 break;
2867 case Intrinsic::smul_with_overflow:
2868 Res = C0->smul_ov(*C1, Overflow);
2869 break;
2870 case Intrinsic::umul_with_overflow:
2871 Res = C0->umul_ov(*C1, Overflow);
2872 break;
2873 }
2874 Constant *Ops[] = {
2875 ConstantInt::get(Ty->getContext(), Res),
2876 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2877 };
2878 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2879 }
2880 case Intrinsic::uadd_sat:
2881 case Intrinsic::sadd_sat:
2882 // This is the same as for binary ops - poison propagates.
2883 // TODO: Poison handling should be consolidated.
2884 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2885 return PoisonValue::get(Ty);
2886
2887 if (!C0 && !C1)
2888 return UndefValue::get(Ty);
2889 if (!C0 || !C1)
2890 return Constant::getAllOnesValue(Ty);
2891 if (IntrinsicID == Intrinsic::uadd_sat)
2892 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2893 else
2894 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2895 case Intrinsic::usub_sat:
2896 case Intrinsic::ssub_sat:
2897 // This is the same as for binary ops - poison propagates.
2898 // TODO: Poison handling should be consolidated.
2899 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2900 return PoisonValue::get(Ty);
2901
2902 if (!C0 && !C1)
2903 return UndefValue::get(Ty);
2904 if (!C0 || !C1)
2905 return Constant::getNullValue(Ty);
2906 if (IntrinsicID == Intrinsic::usub_sat)
2907 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2908 else
2909 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2910 case Intrinsic::cttz:
2911 case Intrinsic::ctlz:
2912 assert(C1 && "Must be constant int");
2913
2914 // cttz(0, 1) and ctlz(0, 1) are poison.
2915 if (C1->isOne() && (!C0 || C0->isZero()))
2916 return PoisonValue::get(Ty);
2917 if (!C0)
2918 return Constant::getNullValue(Ty);
2919 if (IntrinsicID == Intrinsic::cttz)
2920 return ConstantInt::get(Ty, C0->countr_zero());
2921 else
2922 return ConstantInt::get(Ty, C0->countl_zero());
2923
2924 case Intrinsic::abs:
2925 assert(C1 && "Must be constant int");
2926 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1");
2927
2928 // Undef or minimum val operand with poison min --> undef
2929 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
2930 return UndefValue::get(Ty);
2931
2932 // Undef operand with no poison min --> 0 (sign bit must be clear)
2933 if (!C0)
2934 return Constant::getNullValue(Ty);
2935
2936 return ConstantInt::get(Ty, C0->abs());
2937 case Intrinsic::amdgcn_wave_reduce_umin:
2938 case Intrinsic::amdgcn_wave_reduce_umax:
2939 return dyn_cast<Constant>(Operands[0]);
2940 }
2941
2942 return nullptr;
2943 }
2944
2945 // Support ConstantVector in case we have an Undef among the elements.
2946 if ((isa<ConstantVector>(Operands[0]) ||
2947 isa<ConstantDataVector>(Operands[0])) &&
2948 // Check for default rounding mode.
2949 // FIXME: Support other rounding modes?
2950 isa<ConstantInt>(Operands[1]) &&
2951 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2952 auto *Op = cast<Constant>(Operands[0]);
2953 switch (IntrinsicID) {
2954 default: break;
2955 case Intrinsic::x86_avx512_vcvtss2si32:
2956 case Intrinsic::x86_avx512_vcvtss2si64:
2957 case Intrinsic::x86_avx512_vcvtsd2si32:
2958 case Intrinsic::x86_avx512_vcvtsd2si64:
2959 if (ConstantFP *FPOp =
2960 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2961 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2962 /*roundTowardZero=*/false, Ty,
2963 /*IsSigned*/true);
2964 break;
2965 case Intrinsic::x86_avx512_vcvtss2usi32:
2966 case Intrinsic::x86_avx512_vcvtss2usi64:
2967 case Intrinsic::x86_avx512_vcvtsd2usi32:
2968 case Intrinsic::x86_avx512_vcvtsd2usi64:
2969 if (ConstantFP *FPOp =
2970 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2971 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2972 /*roundTowardZero=*/false, Ty,
2973 /*IsSigned*/false);
2974 break;
2975 case Intrinsic::x86_avx512_cvttss2si:
2976 case Intrinsic::x86_avx512_cvttss2si64:
2977 case Intrinsic::x86_avx512_cvttsd2si:
2978 case Intrinsic::x86_avx512_cvttsd2si64:
2979 if (ConstantFP *FPOp =
2980 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2981 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2982 /*roundTowardZero=*/true, Ty,
2983 /*IsSigned*/true);
2984 break;
2985 case Intrinsic::x86_avx512_cvttss2usi:
2986 case Intrinsic::x86_avx512_cvttss2usi64:
2987 case Intrinsic::x86_avx512_cvttsd2usi:
2988 case Intrinsic::x86_avx512_cvttsd2usi64:
2989 if (ConstantFP *FPOp =
2990 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2991 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2992 /*roundTowardZero=*/true, Ty,
2993 /*IsSigned*/false);
2994 break;
2995 }
2996 }
2997 return nullptr;
2998 }
2999
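/// Evaluate the amdgcn cube intrinsics: pick the major axis (the input with
/// the largest magnitude, ties going to the later operand) and derive the
/// face id, the major-axis value and the S/T face coordinates from it.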
3000 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
3001 const APFloat &S0,
3002 const APFloat &S1,
3003 const APFloat &S2) {
3004 unsigned ID;
3005 const fltSemantics &Sem = S0.getSemantics();
3006 APFloat MA(Sem), SC(Sem), TC(Sem);
3007 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
3008 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
3009 // S2 < 0
3010 ID = 5;
3011 SC = -S0;
3012 } else {
3013 ID = 4;
3014 SC = S0;
3015 }
3016 MA = S2;
3017 TC = -S1;
3018 } else if (abs(S1) >= abs(S0)) {
3019 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
3020 // S1 < 0
3021 ID = 3;
3022 TC = -S2;
3023 } else {
3024 ID = 2;
3025 TC = S2;
3026 }
3027 MA = S1;
3028 SC = S0;
3029 } else {
3030 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
3031 // S0 < 0
3032 ID = 1;
3033 SC = S2;
3034 } else {
3035 ID = 0;
3036 SC = -S2;
3037 }
3038 MA = S0;
3039 TC = -S1;
3040 }
3041 switch (IntrinsicID) {
3042 default:
3043 llvm_unreachable("unhandled amdgcn cube intrinsic");
3044 case Intrinsic::amdgcn_cubeid:
3045 return APFloat(Sem, ID);
3046 case Intrinsic::amdgcn_cubema:
3047 return MA + MA;
3048 case Intrinsic::amdgcn_cubesc:
3049 return SC;
3050 case Intrinsic::amdgcn_cubetc:
3051 return TC;
3052 }
3053 }
3054
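/// Fold amdgcn.perm: each selector byte in the third operand picks a result
/// byte from the 8-byte value formed by the first two operands (selectors
/// 8..11 replicate a sign bit, 12 yields 0x00, and 13..255 yield 0xff).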
3055 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
3056 Type *Ty) {
3057 const APInt *C0, *C1, *C2;
3058 if (!getConstIntOrUndef(Operands[0], C0) ||
3059 !getConstIntOrUndef(Operands[1], C1) ||
3060 !getConstIntOrUndef(Operands[2], C2))
3061 return nullptr;
3062
3063 if (!C2)
3064 return UndefValue::get(Ty);
3065
3066 APInt Val(32, 0);
3067 unsigned NumUndefBytes = 0;
3068 for (unsigned I = 0; I < 32; I += 8) {
3069 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
3070 unsigned B = 0;
3071
3072 if (Sel >= 13)
3073 B = 0xff;
3074 else if (Sel == 12)
3075 B = 0x00;
3076 else {
3077 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
3078 if (!Src)
3079 ++NumUndefBytes;
3080 else if (Sel < 8)
3081 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
3082 else
3083 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
3084 }
3085
3086 Val.insertBits(B, I, 8);
3087 }
3088
3089 if (NumUndefBytes == 4)
3090 return UndefValue::get(Ty);
3091
3092 return ConstantInt::get(Ty, Val);
3093 }
3094
ConstantFoldScalarCall3(StringRef Name,Intrinsic::ID IntrinsicID,Type * Ty,ArrayRef<Constant * > Operands,const TargetLibraryInfo * TLI,const CallBase * Call)3095 static Constant *ConstantFoldScalarCall3(StringRef Name,
3096 Intrinsic::ID IntrinsicID,
3097 Type *Ty,
3098 ArrayRef<Constant *> Operands,
3099 const TargetLibraryInfo *TLI,
3100 const CallBase *Call) {
3101 assert(Operands.size() == 3 && "Wrong number of operands.");
3102
3103 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
3104 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
3105 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
3106 const APFloat &C1 = Op1->getValueAPF();
3107 const APFloat &C2 = Op2->getValueAPF();
3108 const APFloat &C3 = Op3->getValueAPF();
3109
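
        // Constrained-FP calls can only be folded if doing so is known not
        // to change observable behaviour; mayFoldConstrained() checks the
        // operation's status flags against the call's exception behaviour.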
        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
          APFloat Res = C1;
          APFloat::opStatus St;
          switch (IntrinsicID) {
          default:
            return nullptr;
          case Intrinsic::experimental_constrained_fma:
          case Intrinsic::experimental_constrained_fmuladd:
            St = Res.fusedMultiplyAdd(C2, C3, RM);
            break;
          }
          if (mayFoldConstrained(
                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
            return ConstantFP::get(Ty->getContext(), Res);
          return nullptr;
        }

        switch (IntrinsicID) {
        default: break;
        case Intrinsic::amdgcn_fma_legacy: {
          // The legacy behaviour is that multiplying +/- 0.0 by anything, even
          // NaN or infinity, gives +0.0.
          if (C1.isZero() || C2.isZero()) {
            // It's tempting to just return C3 here, but that would give the
            // wrong result if C3 was -0.0.
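            // (+0.0 + -0.0 is +0.0 under the default rounding mode, so the
            // addition below canonicalizes -0.0 while leaving every other
            // C3 value unchanged.)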
            return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
          }
          [[fallthrough]];
        }
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = C1;
          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        case Intrinsic::amdgcn_cubeid:
        case Intrinsic::amdgcn_cubema:
        case Intrinsic::amdgcn_cubesc:
        case Intrinsic::amdgcn_cubetc: {
          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::smul_fix ||
      IntrinsicID == Intrinsic::smul_fix_sat) {
    // poison * C -> poison
    // C * poison -> poison
    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
      return PoisonValue::get(Ty);

    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    // undef * C -> 0
    // C * undef -> 0
    if (!C0 || !C1)
      return Constant::getNullValue(Ty);

    // This code performs rounding towards negative infinity in case the result
    // cannot be represented exactly for the given scale. Targets that do care
    // about rounding should use a target hook for specifying how rounding
    // should be done, and provide their own folding to be consistent with
    // rounding. This is the same approach as used by
    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
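    // For example, with Width = 8 and Scale = 2:
    //   smul.fix(i8 7, i8 3, 2)  = (7 * 3) >> 2 = 21 >> 2 = 5    (5.25 -> 5)
    //   smul.fix(i8 -7, i8 3, 2) = (-21) ashr 2 = -6             (-5.25 -> -6)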
    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
    unsigned Width = C0->getBitWidth();
    assert(Scale < Width && "Illegal scale.");
    unsigned ExtendedWidth = Width * 2;
    APInt Product =
        (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale);
    if (IntrinsicID == Intrinsic::smul_fix_sat) {
      APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth);
      APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth);
      Product = APIntOps::smin(Product, Max);
      Product = APIntOps::smax(Product, Min);
    }
    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
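    // e.g. fshl(i8 0xAB, i8 0xCD, 3) = (0xAB << 3) | (0xCD >> 5) = 0x5E, the
    // high byte of the 16-bit concatenation 0xABCD shifted left by 3.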
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  if (IntrinsicID == Intrinsic::amdgcn_perm)
    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);

  return nullptr;
}
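
// Dispatch a scalar call to the arity-specific folders. Two-operand calls
// first try the libcall table, then the generic intrinsic folder.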
static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2) {
    if (Constant *FoldedLibCall =
            ConstantFoldLibCall2(Name, Ty, Operands, TLI)) {
      return FoldedLibCall;
    }
    return ConstantFoldIntrinsicCall2(IntrinsicID, Ty, Operands, Call);
  }

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldFixedVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  SmallVector<Constant *, 4> Result(FVTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = FVTy->getElementType();

  switch (IntrinsicID) {
  case Intrinsic::masked_load: {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
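
    // Fold lane by lane: a known-true mask lane takes the loaded element, a
    // known-false lane takes the passthru element, and any lane that cannot
    // be resolved aborts the fold.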
    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // The undef lane has been resolved; skip the null/one checks below,
        // which would otherwise reject it.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();
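
      // VCTP produces a predicate with the low Limit lanes set; materialize
      // it directly as a vector of i1 constants.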
      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();
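
      // Lane i of llvm.get.active.lane.mask is (Base + i < Limit); evaluate
      // each lane directly from the constant operands.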
      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }
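
  // For any other intrinsic, fold lane by lane: gather the I-th element of
  // each vector operand and fold the resulting scalar call.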
  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

static Constant *ConstantFoldScalableVectorCall(
    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
    ArrayRef<Constant *> Operands, const DataLayout &DL,
    const TargetLibraryInfo *TLI, const CallBase *Call) {
  switch (IntrinsicID) {
  case Intrinsic::aarch64_sve_convert_from_svbool: {
    auto *Src = dyn_cast<Constant>(Operands[0]);
    if (!Src || !Src->isNullValue())
      break;

    return ConstantInt::getFalse(SVTy);
  }
  default:
    break;
  }
  return nullptr;
}
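
/// Fold a scalar llvm.frexp call: split Op into a mantissa and an integral
/// power-of-two exponent such that Op == Mant * 2^Exp, with |Mant| in
/// [0.5, 1.0) for finite non-zero inputs.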
static std::pair<Constant *, Constant *>
ConstantFoldScalarFrexpCall(Constant *Op, Type *IntTy) {
  if (isa<PoisonValue>(Op))
    return {Op, PoisonValue::get(IntTy)};

  auto *ConstFP = dyn_cast<ConstantFP>(Op);
  if (!ConstFP)
    return {};

  const APFloat &U = ConstFP->getValueAPF();
  int FrexpExp;
  APFloat FrexpMant = frexp(U, FrexpExp, APFloat::rmNearestTiesToEven);
  Constant *Result0 = ConstantFP::get(ConstFP->getType(), FrexpMant);

  // The exponent is an "unspecified value" for inf/nan. We use zero to avoid
  // using undef.
  Constant *Result1 = FrexpMant.isFinite() ? ConstantInt::get(IntTy, FrexpExp)
                                           : ConstantInt::getNullValue(IntTy);
  return {Result0, Result1};
}

/// Handle intrinsics that return tuples, which may be tuples of vectors.
static Constant *
ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
                       StructType *StTy, ArrayRef<Constant *> Operands,
                       const DataLayout &DL, const TargetLibraryInfo *TLI,
                       const CallBase *Call) {

  switch (IntrinsicID) {
  case Intrinsic::frexp: {
    Type *Ty0 = StTy->getContainedType(0);
    Type *Ty1 = StTy->getContainedType(1)->getScalarType();

    if (auto *FVTy0 = dyn_cast<FixedVectorType>(Ty0)) {
      SmallVector<Constant *, 4> Results0(FVTy0->getNumElements());
      SmallVector<Constant *, 4> Results1(FVTy0->getNumElements());

      for (unsigned I = 0, E = FVTy0->getNumElements(); I != E; ++I) {
        Constant *Lane = Operands[0]->getAggregateElement(I);
        std::tie(Results0[I], Results1[I]) =
            ConstantFoldScalarFrexpCall(Lane, Ty1);
        if (!Results0[I])
          return nullptr;
      }

      return ConstantStruct::get(StTy, ConstantVector::get(Results0),
                                 ConstantVector::get(Results1));
    }

    auto [Result0, Result1] = ConstantFoldScalarFrexpCall(Operands[0], Ty1);
    if (!Result0)
      return nullptr;
    return ConstantStruct::get(StTy, Result0, Result1);
  }
  default:
    // TODO: Constant folding of vector intrinsics that fall through here does
    // not work (e.g. overflow intrinsics)
    return ConstantFoldScalarCall(Name, IntrinsicID, StTy, Operands, TLI, Call);
  }

  return nullptr;
}

} // end anonymous namespace

Constant *llvm::ConstantFoldBinaryIntrinsic(Intrinsic::ID ID, Constant *LHS,
                                            Constant *RHS, Type *Ty,
                                            Instruction *FMFSource) {
  return ConstantFoldIntrinsicCall2(ID, Ty, {LHS, RHS},
                                    dyn_cast_if_present<CallBase>(FMFSource));
}
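
// Entry point for call folding; dispatches on the callee's return type to
// the fixed-vector, scalable-vector, struct, or scalar folder. For instance,
// a call of float @llvm.fma.f32(float 2.0, float 3.0, float 1.0) folds to
// the constant 7.0.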
Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI,
                                 bool AllowNonDeterministic) {
  if (Call->isNoBuiltin())
    return nullptr;
  if (!F->hasName())
    return nullptr;

  // If this is not an intrinsic and not recognized as a library call, bail
  // out.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (IID == Intrinsic::not_intrinsic) {
    if (!TLI)
      return nullptr;
    LibFunc LibF;
    if (!TLI->getLibFunc(*F, LibF))
      return nullptr;
  }

  // Conservatively assume that floating-point libcalls may be
  // non-deterministic.
  Type *Ty = F->getReturnType();
  if (!AllowNonDeterministic && Ty->isFPOrFPVectorTy())
    return nullptr;

  StringRef Name = F->getName();
  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
    return ConstantFoldFixedVectorCall(
        Name, IID, FVTy, Operands, F->getDataLayout(), TLI, Call);

  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
    return ConstantFoldScalableVectorCall(
        Name, IID, SVTy, Operands, F->getDataLayout(), TLI, Call);

  if (auto *StTy = dyn_cast<StructType>(Ty))
    return ConstantFoldStructCall(Name, IID, StTy, Operands,
                                  F->getDataLayout(), TLI, Call);

  // TODO: If this is a library function, we already discovered that above,
  // so we should pass the LibFunc, not the name (and it might be better
  // still to separate intrinsic handling from libcalls).
  return ConstantFoldScalarCall(Name, IID, Ty, Operands, TLI, Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->arg_size() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
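        // log*() reports an error only for a zero argument (pole error) or a
        // negative one (domain error); NaN inputs propagate quietly.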
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_atan:
      case LibFunc_atanf:
      case LibFunc_atanl:
        // Per POSIX, this MAY fail if Op is denormal. We choose to treat it
        // as never failing.
        return true;

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
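        // fmod()/remainder() raise a domain error only for an infinite
        // dividend or a zero divisor; NaN operands simply propagate.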
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2l:
        // Although IEEE-754 says atan2(+/-0.0, +/-0.0) are well-defined, and
        // GLIBC and MSVC do not appear to raise an error on those, we
        // cannot rely on that behavior. POSIX and C11 say that a domain error
        // may occur, so allow for that possibility.
        return !Op0.isZero() || !Op1.isZero();

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}