//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zext(Result.getBitWidth());
  }

  return nullptr;
}
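// For instance, on a little-endian target the routine above packs
//   <2 x i16> <i16 0x1111, i16 0x2222>
// into the 32-bit value 0x22221111: element 1 is consumed first and element 0
// last, so the low-indexed element ends up in the low-order bits.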
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
    return Res;

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point elements, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }
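  // As a concrete example of the vector->scalar path above, on a
  // little-endian target
  //   bitcast (<4 x i8> <i8 1, i8 2, i8 3, i8 4> to i32)
  // folds to i32 67305985 (0x04030201).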
  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if we
  // have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(
          Src, ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
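// For example, given a global @a of type [5 x i32], the constant
//   getelementptr inbounds ([5 x i32], ptr @a, i64 0, i64 3)
// is reported as @a with a byte offset of 12.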
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    if (SrcTy == DestTy)
      return C;

    TypeSize DestSize = DL.getTypeSizeInBits(DestTy);
    TypeSize SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (!TypeSize::isKnownGE(SrcSize, DestSize))
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
      return Res;

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      // For non-byte-sized vector elements, the first element is not
      // necessarily located at the vector base address.
      if (auto *VT = dyn_cast<VectorType>(SrcTy))
        if (!DL.typeSizeEqualsStoreSize(VT->getElementType()))
          return nullptr;

      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}
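// For example, simulating an i32 load from the constant
//   { i32, i64 } { i32 7, i64 0 }
// drills into the leading element and yields i32 7, while an i64 load from
// the same constant fails: the first element is too small to cover it.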
namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }
  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
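// For instance, reading the constant i32 42 copies the bytes 2A 00 00 00 into
// CurPtr on a little-endian target, and 00 00 00 2A on a big-endian one.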
Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We don't expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a non-integer load, we can try folding it as an int load and
    // then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to produce
    // an actual new load.
    if (!LoadTy->isFloatingPointTy() && !LoadTy->isPointerTy() &&
        !LoadTy->isVectorTy())
      return nullptr;

    Type *MapTy = Type::getIntNTy(
        C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers and then do the vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // TODO: We should be able to support scalable types.
  TypeSize InitializerSize = DL.getTypeAllocSize(C->getType());
  if (InitializerSize.isScalable())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset >= (int64_t)InitializerSize.getFixedValue())
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
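// For example, loading an i16 at byte offset 1 from the constant
//   [4 x i8] c"\01\02\03\04"
// on a little-endian target reads the raw bytes 02 03 and reassembles them
// into i16 0x0302.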
} // anonymous namespace

// If GV is a constant with an initializer, read its representation starting
// at Offset and return it as a constant array of unsigned char. Otherwise
// return null.
Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
                                        uint64_t Offset) {
  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
    return nullptr;

  const DataLayout &DL = GV->getParent()->getDataLayout();
  Constant *Init = const_cast<Constant *>(GV->getInitializer());
  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
  if (InitSize < Offset)
    return nullptr;

  uint64_t NBytes = InitSize - Offset;
  if (NBytes > UINT16_MAX)
    // Bail for large initializers in excess of 64K to avoid allocating
    // too much memory.
    // Offset is assumed to be less than or equal to InitSize (this
    // is enforced in ReadDataFromGlobal).
    return nullptr;

  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
  unsigned char *CurPtr = RawBytes.data();

  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
    return nullptr;

  return ConstantDataArray::get(GV->getContext(), RawBytes);
}

/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}
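// As an illustration, with a typical data layout, Base =
//   { i32, [2 x i64] } { i32 1, [2 x i64] [i64 2, i64 3] }
// and Offset = 16 (the start of the second array element), the walk above
// returns i64 3; an offset landing in padding or mid-element returns nullptr.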
Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const APInt &Offset,
                                          const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Explicitly check for out-of-bounds access, so we return undef even if the
  // constant is a uniform value.
  TypeSize Size = DL.getTypeAllocSize(C->getType());
  if (!Size.isScalable() && Offset.sge(Size.getFixedSize()))
    return UndefValue::get(Ty);

  // Try an offset-independent fold of a uniform value.
  if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
    return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getMinSignedBits() <= 64)
    if (Constant *Result =
            FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL))
      return Result;

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                          const DataLayout &DL) {
  return ConstantFoldLoadFromConst(C, Ty, APInt(64, 0), DL);
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             APInt Offset,
                                             const DataLayout &DL) {
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Constant *Result =
              ConstantFoldLoadFromConst(GV->getInitializer(), Ty, Offset, DL))
        return Result;

  // If this load comes from anywhere in a uniform constant global, the value
  // is always the same, regardless of the loaded offset.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (Constant *Res =
              ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty))
        return Res;
    }
  }

  return nullptr;
}

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
}

Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
  if (isa<PoisonValue>(C))
    return PoisonValue::get(Ty);
  if (isa<UndefValue>(C))
    return UndefValue::get(Ty);
  if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
    return Constant::getNullValue(Ty);
  if (C->isAllOnesValue() &&
      (Ty->isIntOrIntVectorTy() || Ty->isFPOrFPVectorTy()))
    return Constant::getAllOnesValue(Ty);
  return nullptr;
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the DataLayout to reason about offsets and sizes.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2; pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
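// For instance, given two i8 pointers into the same global, the Sub case
// above folds (ptrtoint &g[40]) - (ptrtoint &g[8]) to the constant 32.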
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    Ptr = ConstantExpr::getPointerCast(
        Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
                                                 OldPtrTy->getAddressSpace()));
  }
  return Ptr;
}
/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(llvm::drop_begin(GEP->operands()));

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.

  // For GEPs of GlobalValues, use the value type even for opaque pointers.
  // Otherwise use an i8 GEP.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  else if (!PTy->isOpaque())
    SrcElemTy = PTy->getNonOpaquePointerElementType();
  else
    SrcElemTy = Type::getInt8Ty(Ptr->getContext());

  if (!SrcElemTy->isSized())
    return nullptr;

  Type *ElemTy = SrcElemTy;
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (Offset != 0)
    return nullptr;

  // Try to add additional zero indices to reach the desired result element
  // type.
  // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
  // we'll have to insert a bitcast anyway?
  while (ElemTy != ResElemTy) {
    Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
    if (!NextTy)
      break;

    Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
    ElemTy = NextTy;
  }

  SmallVector<Constant *, 32> NewIdxs;
  for (const APInt &Index : Indices)
    NewIdxs.push_back(ConstantInt::get(
        Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(
      cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
      "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (C->getType() != ResTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}
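// As a concrete case of the integer-base fold in the function above, with
// 64-bit pointers
//   getelementptr (i8, ptr inttoptr (i64 4 to ptr), i64 8)
// evaluates to inttoptr (i64 12 to ptr), provided the pointer type is not
// non-integral.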
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode)) {
    switch (Opcode) {
    default:
      break;
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
      // Handle floating point instructions separately to account for
      // denormals.
      // TODO: If a constant expression is being folded rather than an
      // instruction, denormals will not be flushed/treated as zero.
      if (const auto *I = dyn_cast<Instruction>(InstOrCE)) {
        return ConstantFoldFPInstOperands(Opcode, Ops[0], Ops[1], DL, I);
      }
    }
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);
  }

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0],
                                             Ops[1], DL, TLI);
    return CE->getWithOperands(Ops);
  }

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: {
    auto *C = cast<CmpInst>(InstOrCE);
    return ConstantFoldCompareInstOperands(C->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI, C);
  }
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantFoldExtractValueInstruction(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::InsertValue:
    return ConstantFoldInsertValueInstruction(
        Ops[0], Ops[1], cast<InsertValueInst>(InstOrCE)->getIndices());
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  case Instruction::Load: {
    const auto *LI = dyn_cast<LoadInst>(InstOrCE);
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }
  }
}
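// For example, invoked with Opcode == Instruction::Add and operands i32 2 and
// i32 3, the binary-operator path above folds to i32 5; invoked on a volatile
// load it returns nullptr, since there is no constant form for it.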
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Res =
            ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI))
      return Res;
    return const_cast<Constant *>(C);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}
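// For instance, ConstantFoldInstruction folds a phi whose incoming values are
// i32 7 and undef to i32 7, and a phi whose incoming values are all undef to
// undef.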
Constant *llvm::ConstantFoldCompareInstOperands(
    unsigned IntPredicate, Constant *Ops0, Constant *Ops1,
    const DataLayout &DL, const TargetLibraryInfo *TLI, const Instruction *I) {
  CmpInst::Predicate Predicate = (CmpInst::Predicate)IntPredicate;
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size,
        // otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }

    // Convert pointer comparison (base+offset1) pred (base+offset2) into
    // offset1 pred offset2, for the case where the offset is inbounds. This
    // only works for equality and unsigned comparison, as inbounds permits
    // crossing the sign boundary. However, the offset comparison itself is
    // signed.
    if (Ops0->getType()->isPointerTy() && !ICmpInst::isSigned(Predicate)) {
      unsigned IndexWidth = DL.getIndexTypeSizeInBits(Ops0->getType());
      APInt Offset0(IndexWidth, 0);
      Value *Stripped0 =
          Ops0->stripAndAccumulateInBoundsConstantOffsets(DL, Offset0);
      APInt Offset1(IndexWidth, 0);
      Value *Stripped1 =
          Ops1->stripAndAccumulateInBoundsConstantOffsets(DL, Offset1);
      if (Stripped0 == Stripped1)
        return ConstantExpr::getCompare(
            ICmpInst::getSignedPredicate(Predicate),
            ConstantInt::get(CE0->getContext(), Offset0),
            ConstantInt::get(CE0->getContext(), Offset1));
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate(Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  // Flush any denormal constant float input according to denormal handling
  // mode.
  Ops0 = FlushFPConstant(Ops0, I, /* IsOutput */ false);
  Ops1 = FlushFPConstant(Ops1, I, /* IsOutput */ false);

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  if (ConstantExpr::isDesirableBinOp(Opcode))
    return ConstantExpr::get(Opcode, LHS, RHS);
  return ConstantFoldBinaryInstruction(Opcode, LHS, RHS);
}

Constant *llvm::FlushFPConstant(Constant *Operand, const Instruction *I,
                                bool IsOutput) {
  if (!I || !I->getParent() || !I->getFunction())
    return Operand;

  ConstantFP *CFP = dyn_cast<ConstantFP>(Operand);
  if (!CFP)
    return Operand;

  const APFloat &APF = CFP->getValueAPF();
  Type *Ty = CFP->getType();
  DenormalMode DenormMode =
      I->getFunction()->getDenormalMode(Ty->getFltSemantics());
  DenormalMode::DenormalModeKind Mode =
      IsOutput ? DenormMode.Output : DenormMode.Input;
  switch (Mode) {
  default:
    llvm_unreachable("unknown denormal mode");
    return Operand;
  case DenormalMode::IEEE:
    return Operand;
  case DenormalMode::PreserveSign:
    if (APF.isDenormal()) {
      return ConstantFP::get(
          Ty->getContext(),
          APFloat::getZero(Ty->getFltSemantics(), APF.isNegative()));
    }
    return Operand;
  case DenormalMode::PositiveZero:
    if (APF.isDenormal()) {
      return ConstantFP::get(Ty->getContext(),
                             APFloat::getZero(Ty->getFltSemantics(), false));
    }
    return Operand;
  }
  return Operand;
}

Constant *llvm::ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                           Constant *RHS, const DataLayout &DL,
                                           const Instruction *I) {
  if (Instruction::isBinaryOp(Opcode)) {
    // Flush denormal inputs if needed.
    Constant *Op0 = FlushFPConstant(LHS, I, /* IsOutput */ false);
    Constant *Op1 = FlushFPConstant(RHS, I, /* IsOutput */ false);

    // Calculate the constant result.
    Constant *C = ConstantFoldBinaryOpOperands(Opcode, Op0, Op1, DL);
    if (!C)
      return nullptr;

    // Flush the denormal output if needed.
    return FlushFPConstant(C, I, /* IsOutput */ true);
  }
  // If the instruction lacks a parent/function and the denormal mode cannot
  // be determined, use the default (IEEE).
  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
}
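// For example, in a function carrying the attribute
// "denormal-fp-math"="preserve-sign,preserve-sign", a denormal float input
// such as -1.0e-40 is flushed to -0.0 before folding, and a denormal result
// is flushed the same way on output.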
Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires knowing
      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantExpr::getIntegerCast(
            CE->getOperand(0), DL.getIntPtrType(CE->getType()),
            /*IsSigned=*/false);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        } else {
          // ptrtoint (gep i8, Ptr, (sub 0, V)) -> sub (ptrtoint Ptr), V
          if (GEP->getNumIndices() == 1 &&
              GEP->getSourceElementType()->isIntegerTy(8)) {
            auto *Ptr = cast<Constant>(GEP->getPointerOperand());
            auto *Sub = dyn_cast<ConstantExpr>(GEP->getOperand(1));
            Type *IntIdxTy = DL.getIndexType(Ptr->getType());
            if (Sub && Sub->getType() == IntIdxTy &&
                Sub->getOpcode() == Instruction::Sub &&
                Sub->getOperand(0)->isNullValue())
              FoldedValue = ConstantExpr::getSub(
                  ConstantExpr::getPtrToInt(Ptr, IntIdxTy),
                  Sub->getOperand(1));
          }
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
                                            /*IsSigned=*/false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin())
    return false;
  if (Call->getFunctionType() != F->getFunctionType())
    return false;
  switch (F->getIntrinsicID()) {
  // Operations that do not operate on floating-point numbers and do not
  // depend on the FP environment can be folded even in strictfp functions.
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::masked_load:
  case Intrinsic::get_active_lane_mask:
  case Intrinsic::abs:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::bitreverse:
  case Intrinsic::is_constant:
  case Intrinsic::vector_reduce_add:
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_and:
  case Intrinsic::vector_reduce_or:
  case Intrinsic::vector_reduce_xor:
  case Intrinsic::vector_reduce_smin:
  case Intrinsic::vector_reduce_smax:
  case Intrinsic::vector_reduce_umin:
  case Intrinsic::vector_reduce_umax:
  // Target intrinsics
  case Intrinsic::amdgcn_perm:
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64:
  case Intrinsic::aarch64_sve_convert_from_svbool:
  // WebAssembly float semantics are always known
  case Intrinsic::wasm_trunc_signed:
  case Intrinsic::wasm_trunc_unsigned:
    return true;

  // Floating-point operations cannot be folded in strictfp functions in the
  // general case. They can be folded if the FP environment is known to the
  // compiler.
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::fptoui_sat:
  case Intrinsic::fptosi_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::amdgcn_cos:
  case Intrinsic::amdgcn_cubeid:
  case Intrinsic::amdgcn_cubema:
  case Intrinsic::amdgcn_cubesc:
  case Intrinsic::amdgcn_cubetc:
  case Intrinsic::amdgcn_fmul_legacy:
  case Intrinsic::amdgcn_fma_legacy:
  case Intrinsic::amdgcn_fract:
  case Intrinsic::amdgcn_ldexp:
  case Intrinsic::amdgcn_sin:
  // The intrinsics below depend on the rounding mode in MXCSR.
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
    return !Call->isStrictFP();

  // Sign operations are actually bitwise operations; they do not raise
  // exceptions even for sNaNs.
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  // Non-constrained variants of rounding operations imply the default FP
  // environment; they can be folded in any case.
  case Intrinsic::ceil:
  case Intrinsic::floor:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::rint:
  // Constrained intrinsics can be folded if the FP environment is known to
  // the compiler.
  case Intrinsic::experimental_constrained_fma:
  case Intrinsic::experimental_constrained_fmuladd:
  case Intrinsic::experimental_constrained_fadd:
  case Intrinsic::experimental_constrained_fsub:
  case Intrinsic::experimental_constrained_fmul:
  case Intrinsic::experimental_constrained_fdiv:
  case Intrinsic::experimental_constrained_frem:
  case Intrinsic::experimental_constrained_ceil:
  case Intrinsic::experimental_constrained_floor:
  case Intrinsic::experimental_constrained_round:
  case Intrinsic::experimental_constrained_roundeven:
  case Intrinsic::experimental_constrained_trunc:
  case Intrinsic::experimental_constrained_nearbyint:
  case Intrinsic::experimental_constrained_rint:
  case Intrinsic::experimental_constrained_fcmp:
  case Intrinsic::experimental_constrained_fcmps:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName() || Call->isStrictFP())
    return false;

  // In these cases, checking the length is required. We don't want to return
  // true for a name like "cos\0blah", which strcmp would consider equal to
  // "cos" but which has length 8.
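  // (Unlike strcmp, StringRef equality compares both length and contents, so
  // the Name == "..." checks below handle embedded NULs correctly.)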
1659 StringRef Name = F->getName(); 1660 switch (Name[0]) { 1661 default: 1662 return false; 1663 case 'a': 1664 return Name == "acos" || Name == "acosf" || 1665 Name == "asin" || Name == "asinf" || 1666 Name == "atan" || Name == "atanf" || 1667 Name == "atan2" || Name == "atan2f"; 1668 case 'c': 1669 return Name == "ceil" || Name == "ceilf" || 1670 Name == "cos" || Name == "cosf" || 1671 Name == "cosh" || Name == "coshf"; 1672 case 'e': 1673 return Name == "exp" || Name == "expf" || 1674 Name == "exp2" || Name == "exp2f"; 1675 case 'f': 1676 return Name == "fabs" || Name == "fabsf" || 1677 Name == "floor" || Name == "floorf" || 1678 Name == "fmod" || Name == "fmodf"; 1679 case 'l': 1680 return Name == "log" || Name == "logf" || 1681 Name == "log2" || Name == "log2f" || 1682 Name == "log10" || Name == "log10f"; 1683 case 'n': 1684 return Name == "nearbyint" || Name == "nearbyintf"; 1685 case 'p': 1686 return Name == "pow" || Name == "powf"; 1687 case 'r': 1688 return Name == "remainder" || Name == "remainderf" || 1689 Name == "rint" || Name == "rintf" || 1690 Name == "round" || Name == "roundf"; 1691 case 's': 1692 return Name == "sin" || Name == "sinf" || 1693 Name == "sinh" || Name == "sinhf" || 1694 Name == "sqrt" || Name == "sqrtf"; 1695 case 't': 1696 return Name == "tan" || Name == "tanf" || 1697 Name == "tanh" || Name == "tanhf" || 1698 Name == "trunc" || Name == "truncf"; 1699 case '_': 1700 // Check for various function names that get used for the math functions 1701 // when the header files are preprocessed with the macro 1702 // __FINITE_MATH_ONLY__ enabled. 1703 // The '12' here is the length of the shortest name that can match. 1704 // We need to check the size before looking at Name[1] and Name[2] 1705 // so we may as well check a limit that will eliminate mismatches. 1706 if (Name.size() < 12 || Name[1] != '_') 1707 return false; 1708 switch (Name[2]) { 1709 default: 1710 return false; 1711 case 'a': 1712 return Name == "__acos_finite" || Name == "__acosf_finite" || 1713 Name == "__asin_finite" || Name == "__asinf_finite" || 1714 Name == "__atan2_finite" || Name == "__atan2f_finite"; 1715 case 'c': 1716 return Name == "__cosh_finite" || Name == "__coshf_finite"; 1717 case 'e': 1718 return Name == "__exp_finite" || Name == "__expf_finite" || 1719 Name == "__exp2_finite" || Name == "__exp2f_finite"; 1720 case 'l': 1721 return Name == "__log_finite" || Name == "__logf_finite" || 1722 Name == "__log10_finite" || Name == "__log10f_finite"; 1723 case 'p': 1724 return Name == "__pow_finite" || Name == "__powf_finite"; 1725 case 's': 1726 return Name == "__sinh_finite" || Name == "__sinhf_finite"; 1727 } 1728 } 1729 } 1730 1731 namespace { 1732 1733 Constant *GetConstantFoldFPValue(double V, Type *Ty) { 1734 if (Ty->isHalfTy() || Ty->isFloatTy()) { 1735 APFloat APF(V); 1736 bool unused; 1737 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused); 1738 return ConstantFP::get(Ty->getContext(), APF); 1739 } 1740 if (Ty->isDoubleTy()) 1741 return ConstantFP::get(Ty->getContext(), APFloat(V)); 1742 llvm_unreachable("Can only constant fold half/float/double"); 1743 } 1744 1745 /// Clear the floating-point exception state. 1746 inline void llvm_fenv_clearexcept() { 1747 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT 1748 feclearexcept(FE_ALL_EXCEPT); 1749 #endif 1750 errno = 0; 1751 } 1752 1753 /// Test if a floating-point exception was raised. 
1754 inline bool llvm_fenv_testexcept() { 1755 int errno_val = errno; 1756 if (errno_val == ERANGE || errno_val == EDOM) 1757 return true; 1758 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT 1759 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT)) 1760 return true; 1761 #endif 1762 return false; 1763 } 1764 1765 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V, 1766 Type *Ty) { 1767 llvm_fenv_clearexcept(); 1768 double Result = NativeFP(V.convertToDouble()); 1769 if (llvm_fenv_testexcept()) { 1770 llvm_fenv_clearexcept(); 1771 return nullptr; 1772 } 1773 1774 return GetConstantFoldFPValue(Result, Ty); 1775 } 1776 1777 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), 1778 const APFloat &V, const APFloat &W, Type *Ty) { 1779 llvm_fenv_clearexcept(); 1780 double Result = NativeFP(V.convertToDouble(), W.convertToDouble()); 1781 if (llvm_fenv_testexcept()) { 1782 llvm_fenv_clearexcept(); 1783 return nullptr; 1784 } 1785 1786 return GetConstantFoldFPValue(Result, Ty); 1787 } 1788 1789 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) { 1790 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType()); 1791 if (!VT) 1792 return nullptr; 1793 1794 // This isn't strictly necessary, but handle the special/common case of zero: 1795 // all integer reductions of a zero input produce zero. 1796 if (isa<ConstantAggregateZero>(Op)) 1797 return ConstantInt::get(VT->getElementType(), 0); 1798 1799 // This is the same as the underlying binops - poison propagates. 1800 if (isa<PoisonValue>(Op) || Op->containsPoisonElement()) 1801 return PoisonValue::get(VT->getElementType()); 1802 1803 // TODO: Handle undef. 1804 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op)) 1805 return nullptr; 1806 1807 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U)); 1808 if (!EltC) 1809 return nullptr; 1810 1811 APInt Acc = EltC->getValue(); 1812 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) { 1813 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I)))) 1814 return nullptr; 1815 const APInt &X = EltC->getValue(); 1816 switch (IID) { 1817 case Intrinsic::vector_reduce_add: 1818 Acc = Acc + X; 1819 break; 1820 case Intrinsic::vector_reduce_mul: 1821 Acc = Acc * X; 1822 break; 1823 case Intrinsic::vector_reduce_and: 1824 Acc = Acc & X; 1825 break; 1826 case Intrinsic::vector_reduce_or: 1827 Acc = Acc | X; 1828 break; 1829 case Intrinsic::vector_reduce_xor: 1830 Acc = Acc ^ X; 1831 break; 1832 case Intrinsic::vector_reduce_smin: 1833 Acc = APIntOps::smin(Acc, X); 1834 break; 1835 case Intrinsic::vector_reduce_smax: 1836 Acc = APIntOps::smax(Acc, X); 1837 break; 1838 case Intrinsic::vector_reduce_umin: 1839 Acc = APIntOps::umin(Acc, X); 1840 break; 1841 case Intrinsic::vector_reduce_umax: 1842 Acc = APIntOps::umax(Acc, X); 1843 break; 1844 } 1845 } 1846 1847 return ConstantInt::get(Op->getContext(), Acc); 1848 } 1849 1850 /// Attempt to fold an SSE floating point to integer conversion of a constant 1851 /// floating point. If roundTowardZero is false, the default IEEE rounding is 1852 /// used (toward nearest, ties to even). This matches the behavior of the 1853 /// non-truncating SSE instructions in the default rounding mode. The desired 1854 /// integer type Ty is used to select how many bits are available for the 1855 /// result. Returns null if the conversion cannot be performed, otherwise 1856 /// returns the Constant value resulting from the conversion. 
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}

double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

/// Checks if the given intrinsic call, which evaluates to a constant, is
/// allowed to be folded.
///
/// \param CI Constrained intrinsic call.
/// \param St Exception flags raised during constant evaluation.
static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
                               APFloat::opStatus St) {
  Optional<RoundingMode> ORM = CI->getRoundingMode();
  Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();

  // If the operation does not change exception status flags, it is safe
  // to fold.
  if (St == APFloat::opStatus::opOK)
    return true;

  // If evaluation raised an FP exception, the result can depend on the
  // rounding mode. If the latter is unknown, folding is not possible.
  if (ORM && *ORM == RoundingMode::Dynamic)
    return false;

  // If FP exceptions are ignored, fold the call, even if such an exception is
  // raised.
  if (EB && *EB != fp::ExceptionBehavior::ebStrict)
    return true;

  // Leave the calculation to runtime so that the exception flags are
  // correctly set in hardware.
  return false;
}

/// Returns the rounding mode that should be used for constant evaluation.
static RoundingMode
getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
  Optional<RoundingMode> ORM = CI->getRoundingMode();
  if (!ORM || *ORM == RoundingMode::Dynamic)
    // Even if the rounding mode is unknown, try evaluating the operation.
    // If it does not raise the inexact exception, rounding was not applied,
    // so the result is exact and does not depend on the rounding mode.
    // Whether other FP exceptions are raised does not depend on the rounding
    // mode either.
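    // For example, 1.0 + 2.0 is exact under every rounding mode and can be
    // folded to 3.0 even when the dynamic mode is unknown, whereas
    // 1.0 + 0x1p-60 raises the inexact exception, so mayFoldConstrained will
    // reject the fold.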
1940 return RoundingMode::NearestTiesToEven; 1941 return *ORM; 1942 } 1943 1944 static Constant *ConstantFoldScalarCall1(StringRef Name, 1945 Intrinsic::ID IntrinsicID, 1946 Type *Ty, 1947 ArrayRef<Constant *> Operands, 1948 const TargetLibraryInfo *TLI, 1949 const CallBase *Call) { 1950 assert(Operands.size() == 1 && "Wrong number of operands."); 1951 1952 if (IntrinsicID == Intrinsic::is_constant) { 1953 // We know we have a "Constant" argument. But we want to only 1954 // return true for manifest constants, not those that depend on 1955 // constants with unknowable values, e.g. GlobalValue or BlockAddress. 1956 if (Operands[0]->isManifestConstant()) 1957 return ConstantInt::getTrue(Ty->getContext()); 1958 return nullptr; 1959 } 1960 if (isa<UndefValue>(Operands[0])) { 1961 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN. 1962 // ctpop() is between 0 and bitwidth, pick 0 for undef. 1963 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input). 1964 if (IntrinsicID == Intrinsic::cos || 1965 IntrinsicID == Intrinsic::ctpop || 1966 IntrinsicID == Intrinsic::fptoui_sat || 1967 IntrinsicID == Intrinsic::fptosi_sat) 1968 return Constant::getNullValue(Ty); 1969 if (IntrinsicID == Intrinsic::bswap || 1970 IntrinsicID == Intrinsic::bitreverse || 1971 IntrinsicID == Intrinsic::launder_invariant_group || 1972 IntrinsicID == Intrinsic::strip_invariant_group) 1973 return Operands[0]; 1974 } 1975 1976 if (isa<ConstantPointerNull>(Operands[0])) { 1977 // launder(null) == null == strip(null) iff in addrspace 0 1978 if (IntrinsicID == Intrinsic::launder_invariant_group || 1979 IntrinsicID == Intrinsic::strip_invariant_group) { 1980 // If instruction is not yet put in a basic block (e.g. when cloning 1981 // a function during inlining), Call's caller may not be available. 1982 // So check Call's BB first before querying Call->getCaller. 1983 const Function *Caller = 1984 Call->getParent() ? Call->getCaller() : nullptr; 1985 if (Caller && 1986 !NullPointerIsDefined( 1987 Caller, Operands[0]->getType()->getPointerAddressSpace())) { 1988 return Operands[0]; 1989 } 1990 return nullptr; 1991 } 1992 } 1993 1994 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) { 1995 if (IntrinsicID == Intrinsic::convert_to_fp16) { 1996 APFloat Val(Op->getValueAPF()); 1997 1998 bool lost = false; 1999 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost); 2000 2001 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt()); 2002 } 2003 2004 APFloat U = Op->getValueAPF(); 2005 2006 if (IntrinsicID == Intrinsic::wasm_trunc_signed || 2007 IntrinsicID == Intrinsic::wasm_trunc_unsigned) { 2008 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed; 2009 2010 if (U.isNaN()) 2011 return nullptr; 2012 2013 unsigned Width = Ty->getIntegerBitWidth(); 2014 APSInt Int(Width, !Signed); 2015 bool IsExact = false; 2016 APFloat::opStatus Status = 2017 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); 2018 2019 if (Status == APFloat::opOK || Status == APFloat::opInexact) 2020 return ConstantInt::get(Ty, Int); 2021 2022 return nullptr; 2023 } 2024 2025 if (IntrinsicID == Intrinsic::fptoui_sat || 2026 IntrinsicID == Intrinsic::fptosi_sat) { 2027 // convertToInteger() already has the desired saturation semantics. 
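      // For example, with an i8 result, fptosi.sat(300.0) clamps to 127,
      // fptosi.sat(-300.0) clamps to -128, and a NaN input converts to 0.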
2028 APSInt Int(Ty->getIntegerBitWidth(), 2029 IntrinsicID == Intrinsic::fptoui_sat); 2030 bool IsExact; 2031 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact); 2032 return ConstantInt::get(Ty, Int); 2033 } 2034 2035 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 2036 return nullptr; 2037 2038 // Use internal versions of these intrinsics. 2039 2040 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) { 2041 U.roundToIntegral(APFloat::rmNearestTiesToEven); 2042 return ConstantFP::get(Ty->getContext(), U); 2043 } 2044 2045 if (IntrinsicID == Intrinsic::round) { 2046 U.roundToIntegral(APFloat::rmNearestTiesToAway); 2047 return ConstantFP::get(Ty->getContext(), U); 2048 } 2049 2050 if (IntrinsicID == Intrinsic::roundeven) { 2051 U.roundToIntegral(APFloat::rmNearestTiesToEven); 2052 return ConstantFP::get(Ty->getContext(), U); 2053 } 2054 2055 if (IntrinsicID == Intrinsic::ceil) { 2056 U.roundToIntegral(APFloat::rmTowardPositive); 2057 return ConstantFP::get(Ty->getContext(), U); 2058 } 2059 2060 if (IntrinsicID == Intrinsic::floor) { 2061 U.roundToIntegral(APFloat::rmTowardNegative); 2062 return ConstantFP::get(Ty->getContext(), U); 2063 } 2064 2065 if (IntrinsicID == Intrinsic::trunc) { 2066 U.roundToIntegral(APFloat::rmTowardZero); 2067 return ConstantFP::get(Ty->getContext(), U); 2068 } 2069 2070 if (IntrinsicID == Intrinsic::fabs) { 2071 U.clearSign(); 2072 return ConstantFP::get(Ty->getContext(), U); 2073 } 2074 2075 if (IntrinsicID == Intrinsic::amdgcn_fract) { 2076 // The v_fract instruction behaves like the OpenCL spec, which defines 2077 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is 2078 // there to prevent fract(-small) from returning 1.0. It returns the 2079 // largest positive floating-point number less than 1.0." 2080 APFloat FloorU(U); 2081 FloorU.roundToIntegral(APFloat::rmTowardNegative); 2082 APFloat FractU(U - FloorU); 2083 APFloat AlmostOne(U.getSemantics(), 1); 2084 AlmostOne.next(/*nextDown*/ true); 2085 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne)); 2086 } 2087 2088 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not 2089 // raise FP exceptions, unless the argument is signaling NaN. 
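    // (rint is the odd one out: it may raise the inexact exception, which is
    // why it gets a dedicated opInexact check in the constrained handling
    // below.)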
2090 2091 Optional<APFloat::roundingMode> RM; 2092 switch (IntrinsicID) { 2093 default: 2094 break; 2095 case Intrinsic::experimental_constrained_nearbyint: 2096 case Intrinsic::experimental_constrained_rint: { 2097 auto CI = cast<ConstrainedFPIntrinsic>(Call); 2098 RM = CI->getRoundingMode(); 2099 if (!RM || *RM == RoundingMode::Dynamic) 2100 return nullptr; 2101 break; 2102 } 2103 case Intrinsic::experimental_constrained_round: 2104 RM = APFloat::rmNearestTiesToAway; 2105 break; 2106 case Intrinsic::experimental_constrained_ceil: 2107 RM = APFloat::rmTowardPositive; 2108 break; 2109 case Intrinsic::experimental_constrained_floor: 2110 RM = APFloat::rmTowardNegative; 2111 break; 2112 case Intrinsic::experimental_constrained_trunc: 2113 RM = APFloat::rmTowardZero; 2114 break; 2115 } 2116 if (RM) { 2117 auto CI = cast<ConstrainedFPIntrinsic>(Call); 2118 if (U.isFinite()) { 2119 APFloat::opStatus St = U.roundToIntegral(*RM); 2120 if (IntrinsicID == Intrinsic::experimental_constrained_rint && 2121 St == APFloat::opInexact) { 2122 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); 2123 if (EB && *EB == fp::ebStrict) 2124 return nullptr; 2125 } 2126 } else if (U.isSignaling()) { 2127 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior(); 2128 if (EB && *EB != fp::ebIgnore) 2129 return nullptr; 2130 U = APFloat::getQNaN(U.getSemantics()); 2131 } 2132 return ConstantFP::get(Ty->getContext(), U); 2133 } 2134 2135 /// We only fold functions with finite arguments. Folding NaN and inf is 2136 /// likely to be aborted with an exception anyway, and some host libms 2137 /// have known errors raising exceptions. 2138 if (!U.isFinite()) 2139 return nullptr; 2140 2141 /// Currently APFloat versions of these functions do not exist, so we use 2142 /// the host native double versions. Float versions are not called 2143 /// directly but for all these it is true (float)(f((double)arg)) == 2144 /// f(arg). Long double not supported yet. 2145 const APFloat &APF = Op->getValueAPF(); 2146 2147 switch (IntrinsicID) { 2148 default: break; 2149 case Intrinsic::log: 2150 return ConstantFoldFP(log, APF, Ty); 2151 case Intrinsic::log2: 2152 // TODO: What about hosts that lack a C99 library? 2153 return ConstantFoldFP(Log2, APF, Ty); 2154 case Intrinsic::log10: 2155 // TODO: What about hosts that lack a C99 library? 2156 return ConstantFoldFP(log10, APF, Ty); 2157 case Intrinsic::exp: 2158 return ConstantFoldFP(exp, APF, Ty); 2159 case Intrinsic::exp2: 2160 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library. 2161 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); 2162 case Intrinsic::sin: 2163 return ConstantFoldFP(sin, APF, Ty); 2164 case Intrinsic::cos: 2165 return ConstantFoldFP(cos, APF, Ty); 2166 case Intrinsic::sqrt: 2167 return ConstantFoldFP(sqrt, APF, Ty); 2168 case Intrinsic::amdgcn_cos: 2169 case Intrinsic::amdgcn_sin: { 2170 double V = getValueAsDouble(Op); 2171 if (V < -256.0 || V > 256.0) 2172 // The gfx8 and gfx9 architectures handle arguments outside the range 2173 // [-256, 256] differently. This should be a rare case so bail out 2174 // rather than trying to handle the difference. 2175 return nullptr; 2176 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos; 2177 double V4 = V * 4.0; 2178 if (V4 == floor(V4)) { 2179 // Force exact results for quarter-integer inputs. 2180 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 }; 2181 V = SinVals[((int)V4 + (IsCos ? 
1 : 0)) & 3]; 2182 } else { 2183 if (IsCos) 2184 V = cos(V * 2.0 * numbers::pi); 2185 else 2186 V = sin(V * 2.0 * numbers::pi); 2187 } 2188 return GetConstantFoldFPValue(V, Ty); 2189 } 2190 } 2191 2192 if (!TLI) 2193 return nullptr; 2194 2195 LibFunc Func = NotLibFunc; 2196 if (!TLI->getLibFunc(Name, Func)) 2197 return nullptr; 2198 2199 switch (Func) { 2200 default: 2201 break; 2202 case LibFunc_acos: 2203 case LibFunc_acosf: 2204 case LibFunc_acos_finite: 2205 case LibFunc_acosf_finite: 2206 if (TLI->has(Func)) 2207 return ConstantFoldFP(acos, APF, Ty); 2208 break; 2209 case LibFunc_asin: 2210 case LibFunc_asinf: 2211 case LibFunc_asin_finite: 2212 case LibFunc_asinf_finite: 2213 if (TLI->has(Func)) 2214 return ConstantFoldFP(asin, APF, Ty); 2215 break; 2216 case LibFunc_atan: 2217 case LibFunc_atanf: 2218 if (TLI->has(Func)) 2219 return ConstantFoldFP(atan, APF, Ty); 2220 break; 2221 case LibFunc_ceil: 2222 case LibFunc_ceilf: 2223 if (TLI->has(Func)) { 2224 U.roundToIntegral(APFloat::rmTowardPositive); 2225 return ConstantFP::get(Ty->getContext(), U); 2226 } 2227 break; 2228 case LibFunc_cos: 2229 case LibFunc_cosf: 2230 if (TLI->has(Func)) 2231 return ConstantFoldFP(cos, APF, Ty); 2232 break; 2233 case LibFunc_cosh: 2234 case LibFunc_coshf: 2235 case LibFunc_cosh_finite: 2236 case LibFunc_coshf_finite: 2237 if (TLI->has(Func)) 2238 return ConstantFoldFP(cosh, APF, Ty); 2239 break; 2240 case LibFunc_exp: 2241 case LibFunc_expf: 2242 case LibFunc_exp_finite: 2243 case LibFunc_expf_finite: 2244 if (TLI->has(Func)) 2245 return ConstantFoldFP(exp, APF, Ty); 2246 break; 2247 case LibFunc_exp2: 2248 case LibFunc_exp2f: 2249 case LibFunc_exp2_finite: 2250 case LibFunc_exp2f_finite: 2251 if (TLI->has(Func)) 2252 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library. 2253 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty); 2254 break; 2255 case LibFunc_fabs: 2256 case LibFunc_fabsf: 2257 if (TLI->has(Func)) { 2258 U.clearSign(); 2259 return ConstantFP::get(Ty->getContext(), U); 2260 } 2261 break; 2262 case LibFunc_floor: 2263 case LibFunc_floorf: 2264 if (TLI->has(Func)) { 2265 U.roundToIntegral(APFloat::rmTowardNegative); 2266 return ConstantFP::get(Ty->getContext(), U); 2267 } 2268 break; 2269 case LibFunc_log: 2270 case LibFunc_logf: 2271 case LibFunc_log_finite: 2272 case LibFunc_logf_finite: 2273 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2274 return ConstantFoldFP(log, APF, Ty); 2275 break; 2276 case LibFunc_log2: 2277 case LibFunc_log2f: 2278 case LibFunc_log2_finite: 2279 case LibFunc_log2f_finite: 2280 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2281 // TODO: What about hosts that lack a C99 library? 2282 return ConstantFoldFP(Log2, APF, Ty); 2283 break; 2284 case LibFunc_log10: 2285 case LibFunc_log10f: 2286 case LibFunc_log10_finite: 2287 case LibFunc_log10f_finite: 2288 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func)) 2289 // TODO: What about hosts that lack a C99 library? 
2290 return ConstantFoldFP(log10, APF, Ty); 2291 break; 2292 case LibFunc_nearbyint: 2293 case LibFunc_nearbyintf: 2294 case LibFunc_rint: 2295 case LibFunc_rintf: 2296 if (TLI->has(Func)) { 2297 U.roundToIntegral(APFloat::rmNearestTiesToEven); 2298 return ConstantFP::get(Ty->getContext(), U); 2299 } 2300 break; 2301 case LibFunc_round: 2302 case LibFunc_roundf: 2303 if (TLI->has(Func)) { 2304 U.roundToIntegral(APFloat::rmNearestTiesToAway); 2305 return ConstantFP::get(Ty->getContext(), U); 2306 } 2307 break; 2308 case LibFunc_sin: 2309 case LibFunc_sinf: 2310 if (TLI->has(Func)) 2311 return ConstantFoldFP(sin, APF, Ty); 2312 break; 2313 case LibFunc_sinh: 2314 case LibFunc_sinhf: 2315 case LibFunc_sinh_finite: 2316 case LibFunc_sinhf_finite: 2317 if (TLI->has(Func)) 2318 return ConstantFoldFP(sinh, APF, Ty); 2319 break; 2320 case LibFunc_sqrt: 2321 case LibFunc_sqrtf: 2322 if (!APF.isNegative() && TLI->has(Func)) 2323 return ConstantFoldFP(sqrt, APF, Ty); 2324 break; 2325 case LibFunc_tan: 2326 case LibFunc_tanf: 2327 if (TLI->has(Func)) 2328 return ConstantFoldFP(tan, APF, Ty); 2329 break; 2330 case LibFunc_tanh: 2331 case LibFunc_tanhf: 2332 if (TLI->has(Func)) 2333 return ConstantFoldFP(tanh, APF, Ty); 2334 break; 2335 case LibFunc_trunc: 2336 case LibFunc_truncf: 2337 if (TLI->has(Func)) { 2338 U.roundToIntegral(APFloat::rmTowardZero); 2339 return ConstantFP::get(Ty->getContext(), U); 2340 } 2341 break; 2342 } 2343 return nullptr; 2344 } 2345 2346 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) { 2347 switch (IntrinsicID) { 2348 case Intrinsic::bswap: 2349 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap()); 2350 case Intrinsic::ctpop: 2351 return ConstantInt::get(Ty, Op->getValue().countPopulation()); 2352 case Intrinsic::bitreverse: 2353 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits()); 2354 case Intrinsic::convert_from_fp16: { 2355 APFloat Val(APFloat::IEEEhalf(), Op->getValue()); 2356 2357 bool lost = false; 2358 APFloat::opStatus status = Val.convert( 2359 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost); 2360 2361 // Conversion is always precise. 2362 (void)status; 2363 assert(status == APFloat::opOK && !lost && 2364 "Precision lost during fp16 constfolding"); 2365 2366 return ConstantFP::get(Ty->getContext(), Val); 2367 } 2368 default: 2369 return nullptr; 2370 } 2371 } 2372 2373 switch (IntrinsicID) { 2374 default: break; 2375 case Intrinsic::vector_reduce_add: 2376 case Intrinsic::vector_reduce_mul: 2377 case Intrinsic::vector_reduce_and: 2378 case Intrinsic::vector_reduce_or: 2379 case Intrinsic::vector_reduce_xor: 2380 case Intrinsic::vector_reduce_smin: 2381 case Intrinsic::vector_reduce_smax: 2382 case Intrinsic::vector_reduce_umin: 2383 case Intrinsic::vector_reduce_umax: 2384 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0])) 2385 return C; 2386 break; 2387 } 2388 2389 // Support ConstantVector in case we have an Undef in the top. 
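  // (A vector constant containing undef elements is represented as a
  // ConstantVector rather than a ConstantDataVector, so both forms must be
  // checked.)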
2390 if (isa<ConstantVector>(Operands[0]) || 2391 isa<ConstantDataVector>(Operands[0])) { 2392 auto *Op = cast<Constant>(Operands[0]); 2393 switch (IntrinsicID) { 2394 default: break; 2395 case Intrinsic::x86_sse_cvtss2si: 2396 case Intrinsic::x86_sse_cvtss2si64: 2397 case Intrinsic::x86_sse2_cvtsd2si: 2398 case Intrinsic::x86_sse2_cvtsd2si64: 2399 if (ConstantFP *FPOp = 2400 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2401 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2402 /*roundTowardZero=*/false, Ty, 2403 /*IsSigned*/true); 2404 break; 2405 case Intrinsic::x86_sse_cvttss2si: 2406 case Intrinsic::x86_sse_cvttss2si64: 2407 case Intrinsic::x86_sse2_cvttsd2si: 2408 case Intrinsic::x86_sse2_cvttsd2si64: 2409 if (ConstantFP *FPOp = 2410 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2411 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2412 /*roundTowardZero=*/true, Ty, 2413 /*IsSigned*/true); 2414 break; 2415 } 2416 } 2417 2418 return nullptr; 2419 } 2420 2421 static Constant *evaluateCompare(const APFloat &Op1, const APFloat &Op2, 2422 const ConstrainedFPIntrinsic *Call) { 2423 APFloat::opStatus St = APFloat::opOK; 2424 auto *FCmp = cast<ConstrainedFPCmpIntrinsic>(Call); 2425 FCmpInst::Predicate Cond = FCmp->getPredicate(); 2426 if (FCmp->isSignaling()) { 2427 if (Op1.isNaN() || Op2.isNaN()) 2428 St = APFloat::opInvalidOp; 2429 } else { 2430 if (Op1.isSignaling() || Op2.isSignaling()) 2431 St = APFloat::opInvalidOp; 2432 } 2433 bool Result = FCmpInst::compare(Op1, Op2, Cond); 2434 if (mayFoldConstrained(const_cast<ConstrainedFPCmpIntrinsic *>(FCmp), St)) 2435 return ConstantInt::get(Call->getType()->getScalarType(), Result); 2436 return nullptr; 2437 } 2438 2439 static Constant *ConstantFoldScalarCall2(StringRef Name, 2440 Intrinsic::ID IntrinsicID, 2441 Type *Ty, 2442 ArrayRef<Constant *> Operands, 2443 const TargetLibraryInfo *TLI, 2444 const CallBase *Call) { 2445 assert(Operands.size() == 2 && "Wrong number of operands."); 2446 2447 if (Ty->isFloatingPointTy()) { 2448 // TODO: We should have undef handling for all of the FP intrinsics that 2449 // are attempted to be folded in this function. 2450 bool IsOp0Undef = isa<UndefValue>(Operands[0]); 2451 bool IsOp1Undef = isa<UndefValue>(Operands[1]); 2452 switch (IntrinsicID) { 2453 case Intrinsic::maxnum: 2454 case Intrinsic::minnum: 2455 case Intrinsic::maximum: 2456 case Intrinsic::minimum: 2457 // If one argument is undef, return the other argument. 
2458 if (IsOp0Undef) 2459 return Operands[1]; 2460 if (IsOp1Undef) 2461 return Operands[0]; 2462 break; 2463 } 2464 } 2465 2466 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 2467 const APFloat &Op1V = Op1->getValueAPF(); 2468 2469 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 2470 if (Op2->getType() != Op1->getType()) 2471 return nullptr; 2472 const APFloat &Op2V = Op2->getValueAPF(); 2473 2474 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) { 2475 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr); 2476 APFloat Res = Op1V; 2477 APFloat::opStatus St; 2478 switch (IntrinsicID) { 2479 default: 2480 return nullptr; 2481 case Intrinsic::experimental_constrained_fadd: 2482 St = Res.add(Op2V, RM); 2483 break; 2484 case Intrinsic::experimental_constrained_fsub: 2485 St = Res.subtract(Op2V, RM); 2486 break; 2487 case Intrinsic::experimental_constrained_fmul: 2488 St = Res.multiply(Op2V, RM); 2489 break; 2490 case Intrinsic::experimental_constrained_fdiv: 2491 St = Res.divide(Op2V, RM); 2492 break; 2493 case Intrinsic::experimental_constrained_frem: 2494 St = Res.mod(Op2V); 2495 break; 2496 case Intrinsic::experimental_constrained_fcmp: 2497 case Intrinsic::experimental_constrained_fcmps: 2498 return evaluateCompare(Op1V, Op2V, ConstrIntr); 2499 } 2500 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), 2501 St)) 2502 return ConstantFP::get(Ty->getContext(), Res); 2503 return nullptr; 2504 } 2505 2506 switch (IntrinsicID) { 2507 default: 2508 break; 2509 case Intrinsic::copysign: 2510 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V)); 2511 case Intrinsic::minnum: 2512 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V)); 2513 case Intrinsic::maxnum: 2514 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V)); 2515 case Intrinsic::minimum: 2516 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V)); 2517 case Intrinsic::maximum: 2518 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V)); 2519 } 2520 2521 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 2522 return nullptr; 2523 2524 switch (IntrinsicID) { 2525 default: 2526 break; 2527 case Intrinsic::pow: 2528 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 2529 case Intrinsic::amdgcn_fmul_legacy: 2530 // The legacy behaviour is that multiplying +/- 0.0 by anything, even 2531 // NaN or infinity, gives +0.0. 
2532 if (Op1V.isZero() || Op2V.isZero()) 2533 return ConstantFP::getNullValue(Ty); 2534 return ConstantFP::get(Ty->getContext(), Op1V * Op2V); 2535 } 2536 2537 if (!TLI) 2538 return nullptr; 2539 2540 LibFunc Func = NotLibFunc; 2541 if (!TLI->getLibFunc(Name, Func)) 2542 return nullptr; 2543 2544 switch (Func) { 2545 default: 2546 break; 2547 case LibFunc_pow: 2548 case LibFunc_powf: 2549 case LibFunc_pow_finite: 2550 case LibFunc_powf_finite: 2551 if (TLI->has(Func)) 2552 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); 2553 break; 2554 case LibFunc_fmod: 2555 case LibFunc_fmodf: 2556 if (TLI->has(Func)) { 2557 APFloat V = Op1->getValueAPF(); 2558 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF())) 2559 return ConstantFP::get(Ty->getContext(), V); 2560 } 2561 break; 2562 case LibFunc_remainder: 2563 case LibFunc_remainderf: 2564 if (TLI->has(Func)) { 2565 APFloat V = Op1->getValueAPF(); 2566 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF())) 2567 return ConstantFP::get(Ty->getContext(), V); 2568 } 2569 break; 2570 case LibFunc_atan2: 2571 case LibFunc_atan2f: 2572 case LibFunc_atan2_finite: 2573 case LibFunc_atan2f_finite: 2574 if (TLI->has(Func)) 2575 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); 2576 break; 2577 } 2578 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) { 2579 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) 2580 return nullptr; 2581 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) 2582 return ConstantFP::get( 2583 Ty->getContext(), 2584 APFloat((float)std::pow((float)Op1V.convertToDouble(), 2585 (int)Op2C->getZExtValue()))); 2586 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy()) 2587 return ConstantFP::get( 2588 Ty->getContext(), 2589 APFloat((float)std::pow((float)Op1V.convertToDouble(), 2590 (int)Op2C->getZExtValue()))); 2591 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy()) 2592 return ConstantFP::get( 2593 Ty->getContext(), 2594 APFloat((double)std::pow(Op1V.convertToDouble(), 2595 (int)Op2C->getZExtValue()))); 2596 2597 if (IntrinsicID == Intrinsic::amdgcn_ldexp) { 2598 // FIXME: Should flush denorms depending on FP mode, but that's ignored 2599 // everywhere else. 2600 2601 // scalbn is equivalent to ldexp with float radix 2 2602 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(), 2603 APFloat::rmNearestTiesToEven); 2604 return ConstantFP::get(Ty->getContext(), Result); 2605 } 2606 } 2607 return nullptr; 2608 } 2609 2610 if (Operands[0]->getType()->isIntegerTy() && 2611 Operands[1]->getType()->isIntegerTy()) { 2612 const APInt *C0, *C1; 2613 if (!getConstIntOrUndef(Operands[0], C0) || 2614 !getConstIntOrUndef(Operands[1], C1)) 2615 return nullptr; 2616 2617 switch (IntrinsicID) { 2618 default: break; 2619 case Intrinsic::smax: 2620 case Intrinsic::smin: 2621 case Intrinsic::umax: 2622 case Intrinsic::umin: 2623 // This is the same as for binary ops - poison propagates. 2624 // TODO: Poison handling should be consolidated. 2625 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) 2626 return PoisonValue::get(Ty); 2627 2628 if (!C0 && !C1) 2629 return UndefValue::get(Ty); 2630 if (!C0 || !C1) 2631 return MinMaxIntrinsic::getSaturationPoint(IntrinsicID, Ty); 2632 return ConstantInt::get( 2633 Ty, ICmpInst::compare(*C0, *C1, 2634 MinMaxIntrinsic::getPredicate(IntrinsicID)) 2635 ? 
*C0 2636 : *C1); 2637 2638 case Intrinsic::usub_with_overflow: 2639 case Intrinsic::ssub_with_overflow: 2640 // X - undef -> { 0, false } 2641 // undef - X -> { 0, false } 2642 if (!C0 || !C1) 2643 return Constant::getNullValue(Ty); 2644 LLVM_FALLTHROUGH; 2645 case Intrinsic::uadd_with_overflow: 2646 case Intrinsic::sadd_with_overflow: 2647 // X + undef -> { -1, false } 2648 // undef + x -> { -1, false } 2649 if (!C0 || !C1) { 2650 return ConstantStruct::get( 2651 cast<StructType>(Ty), 2652 {Constant::getAllOnesValue(Ty->getStructElementType(0)), 2653 Constant::getNullValue(Ty->getStructElementType(1))}); 2654 } 2655 LLVM_FALLTHROUGH; 2656 case Intrinsic::smul_with_overflow: 2657 case Intrinsic::umul_with_overflow: { 2658 // undef * X -> { 0, false } 2659 // X * undef -> { 0, false } 2660 if (!C0 || !C1) 2661 return Constant::getNullValue(Ty); 2662 2663 APInt Res; 2664 bool Overflow; 2665 switch (IntrinsicID) { 2666 default: llvm_unreachable("Invalid case"); 2667 case Intrinsic::sadd_with_overflow: 2668 Res = C0->sadd_ov(*C1, Overflow); 2669 break; 2670 case Intrinsic::uadd_with_overflow: 2671 Res = C0->uadd_ov(*C1, Overflow); 2672 break; 2673 case Intrinsic::ssub_with_overflow: 2674 Res = C0->ssub_ov(*C1, Overflow); 2675 break; 2676 case Intrinsic::usub_with_overflow: 2677 Res = C0->usub_ov(*C1, Overflow); 2678 break; 2679 case Intrinsic::smul_with_overflow: 2680 Res = C0->smul_ov(*C1, Overflow); 2681 break; 2682 case Intrinsic::umul_with_overflow: 2683 Res = C0->umul_ov(*C1, Overflow); 2684 break; 2685 } 2686 Constant *Ops[] = { 2687 ConstantInt::get(Ty->getContext(), Res), 2688 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow) 2689 }; 2690 return ConstantStruct::get(cast<StructType>(Ty), Ops); 2691 } 2692 case Intrinsic::uadd_sat: 2693 case Intrinsic::sadd_sat: 2694 // This is the same as for binary ops - poison propagates. 2695 // TODO: Poison handling should be consolidated. 2696 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) 2697 return PoisonValue::get(Ty); 2698 2699 if (!C0 && !C1) 2700 return UndefValue::get(Ty); 2701 if (!C0 || !C1) 2702 return Constant::getAllOnesValue(Ty); 2703 if (IntrinsicID == Intrinsic::uadd_sat) 2704 return ConstantInt::get(Ty, C0->uadd_sat(*C1)); 2705 else 2706 return ConstantInt::get(Ty, C0->sadd_sat(*C1)); 2707 case Intrinsic::usub_sat: 2708 case Intrinsic::ssub_sat: 2709 // This is the same as for binary ops - poison propagates. 2710 // TODO: Poison handling should be consolidated. 2711 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) 2712 return PoisonValue::get(Ty); 2713 2714 if (!C0 && !C1) 2715 return UndefValue::get(Ty); 2716 if (!C0 || !C1) 2717 return Constant::getNullValue(Ty); 2718 if (IntrinsicID == Intrinsic::usub_sat) 2719 return ConstantInt::get(Ty, C0->usub_sat(*C1)); 2720 else 2721 return ConstantInt::get(Ty, C0->ssub_sat(*C1)); 2722 case Intrinsic::cttz: 2723 case Intrinsic::ctlz: 2724 assert(C1 && "Must be constant int"); 2725 2726 // cttz(0, 1) and ctlz(0, 1) are poison. 
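      // (The second operand is the i1 flag that makes a zero input poison;
      // e.g. ctlz(i8 0, 0) still folds to 8 below, while ctlz(i8 0, 1) folds
      // to poison.)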
2727 if (C1->isOne() && (!C0 || C0->isZero())) 2728 return PoisonValue::get(Ty); 2729 if (!C0) 2730 return Constant::getNullValue(Ty); 2731 if (IntrinsicID == Intrinsic::cttz) 2732 return ConstantInt::get(Ty, C0->countTrailingZeros()); 2733 else 2734 return ConstantInt::get(Ty, C0->countLeadingZeros()); 2735 2736 case Intrinsic::abs: 2737 assert(C1 && "Must be constant int"); 2738 assert((C1->isOne() || C1->isZero()) && "Must be 0 or 1"); 2739 2740 // Undef or minimum val operand with poison min --> undef 2741 if (C1->isOne() && (!C0 || C0->isMinSignedValue())) 2742 return UndefValue::get(Ty); 2743 2744 // Undef operand with no poison min --> 0 (sign bit must be clear) 2745 if (!C0) 2746 return Constant::getNullValue(Ty); 2747 2748 return ConstantInt::get(Ty, C0->abs()); 2749 } 2750 2751 return nullptr; 2752 } 2753 2754 // Support ConstantVector in case we have an Undef in the top. 2755 if ((isa<ConstantVector>(Operands[0]) || 2756 isa<ConstantDataVector>(Operands[0])) && 2757 // Check for default rounding mode. 2758 // FIXME: Support other rounding modes? 2759 isa<ConstantInt>(Operands[1]) && 2760 cast<ConstantInt>(Operands[1])->getValue() == 4) { 2761 auto *Op = cast<Constant>(Operands[0]); 2762 switch (IntrinsicID) { 2763 default: break; 2764 case Intrinsic::x86_avx512_vcvtss2si32: 2765 case Intrinsic::x86_avx512_vcvtss2si64: 2766 case Intrinsic::x86_avx512_vcvtsd2si32: 2767 case Intrinsic::x86_avx512_vcvtsd2si64: 2768 if (ConstantFP *FPOp = 2769 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2770 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2771 /*roundTowardZero=*/false, Ty, 2772 /*IsSigned*/true); 2773 break; 2774 case Intrinsic::x86_avx512_vcvtss2usi32: 2775 case Intrinsic::x86_avx512_vcvtss2usi64: 2776 case Intrinsic::x86_avx512_vcvtsd2usi32: 2777 case Intrinsic::x86_avx512_vcvtsd2usi64: 2778 if (ConstantFP *FPOp = 2779 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2780 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2781 /*roundTowardZero=*/false, Ty, 2782 /*IsSigned*/false); 2783 break; 2784 case Intrinsic::x86_avx512_cvttss2si: 2785 case Intrinsic::x86_avx512_cvttss2si64: 2786 case Intrinsic::x86_avx512_cvttsd2si: 2787 case Intrinsic::x86_avx512_cvttsd2si64: 2788 if (ConstantFP *FPOp = 2789 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2790 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2791 /*roundTowardZero=*/true, Ty, 2792 /*IsSigned*/true); 2793 break; 2794 case Intrinsic::x86_avx512_cvttss2usi: 2795 case Intrinsic::x86_avx512_cvttss2usi64: 2796 case Intrinsic::x86_avx512_cvttsd2usi: 2797 case Intrinsic::x86_avx512_cvttsd2usi64: 2798 if (ConstantFP *FPOp = 2799 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U))) 2800 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(), 2801 /*roundTowardZero=*/true, Ty, 2802 /*IsSigned*/false); 2803 break; 2804 } 2805 } 2806 return nullptr; 2807 } 2808 2809 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID, 2810 const APFloat &S0, 2811 const APFloat &S1, 2812 const APFloat &S2) { 2813 unsigned ID; 2814 const fltSemantics &Sem = S0.getSemantics(); 2815 APFloat MA(Sem), SC(Sem), TC(Sem); 2816 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) { 2817 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) { 2818 // S2 < 0 2819 ID = 5; 2820 SC = -S0; 2821 } else { 2822 ID = 4; 2823 SC = S0; 2824 } 2825 MA = S2; 2826 TC = -S1; 2827 } else if (abs(S1) >= abs(S0)) { 2828 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) { 2829 // S1 < 0 2830 ID = 3; 2831 
TC = -S2; 2832 } else { 2833 ID = 2; 2834 TC = S2; 2835 } 2836 MA = S1; 2837 SC = S0; 2838 } else { 2839 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) { 2840 // S0 < 0 2841 ID = 1; 2842 SC = S2; 2843 } else { 2844 ID = 0; 2845 SC = -S2; 2846 } 2847 MA = S0; 2848 TC = -S1; 2849 } 2850 switch (IntrinsicID) { 2851 default: 2852 llvm_unreachable("unhandled amdgcn cube intrinsic"); 2853 case Intrinsic::amdgcn_cubeid: 2854 return APFloat(Sem, ID); 2855 case Intrinsic::amdgcn_cubema: 2856 return MA + MA; 2857 case Intrinsic::amdgcn_cubesc: 2858 return SC; 2859 case Intrinsic::amdgcn_cubetc: 2860 return TC; 2861 } 2862 } 2863 2864 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands, 2865 Type *Ty) { 2866 const APInt *C0, *C1, *C2; 2867 if (!getConstIntOrUndef(Operands[0], C0) || 2868 !getConstIntOrUndef(Operands[1], C1) || 2869 !getConstIntOrUndef(Operands[2], C2)) 2870 return nullptr; 2871 2872 if (!C2) 2873 return UndefValue::get(Ty); 2874 2875 APInt Val(32, 0); 2876 unsigned NumUndefBytes = 0; 2877 for (unsigned I = 0; I < 32; I += 8) { 2878 unsigned Sel = C2->extractBitsAsZExtValue(8, I); 2879 unsigned B = 0; 2880 2881 if (Sel >= 13) 2882 B = 0xff; 2883 else if (Sel == 12) 2884 B = 0x00; 2885 else { 2886 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1; 2887 if (!Src) 2888 ++NumUndefBytes; 2889 else if (Sel < 8) 2890 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8); 2891 else 2892 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff; 2893 } 2894 2895 Val.insertBits(B, I, 8); 2896 } 2897 2898 if (NumUndefBytes == 4) 2899 return UndefValue::get(Ty); 2900 2901 return ConstantInt::get(Ty, Val); 2902 } 2903 2904 static Constant *ConstantFoldScalarCall3(StringRef Name, 2905 Intrinsic::ID IntrinsicID, 2906 Type *Ty, 2907 ArrayRef<Constant *> Operands, 2908 const TargetLibraryInfo *TLI, 2909 const CallBase *Call) { 2910 assert(Operands.size() == 3 && "Wrong number of operands."); 2911 2912 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) { 2913 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) { 2914 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) { 2915 const APFloat &C1 = Op1->getValueAPF(); 2916 const APFloat &C2 = Op2->getValueAPF(); 2917 const APFloat &C3 = Op3->getValueAPF(); 2918 2919 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) { 2920 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr); 2921 APFloat Res = C1; 2922 APFloat::opStatus St; 2923 switch (IntrinsicID) { 2924 default: 2925 return nullptr; 2926 case Intrinsic::experimental_constrained_fma: 2927 case Intrinsic::experimental_constrained_fmuladd: 2928 St = Res.fusedMultiplyAdd(C2, C3, RM); 2929 break; 2930 } 2931 if (mayFoldConstrained( 2932 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St)) 2933 return ConstantFP::get(Ty->getContext(), Res); 2934 return nullptr; 2935 } 2936 2937 switch (IntrinsicID) { 2938 default: break; 2939 case Intrinsic::amdgcn_fma_legacy: { 2940 // The legacy behaviour is that multiplying +/- 0.0 by anything, even 2941 // NaN or infinity, gives +0.0. 2942 if (C1.isZero() || C2.isZero()) { 2943 // It's tempting to just return C3 here, but that would give the 2944 // wrong result if C3 was -0.0. 
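        // Adding +0.0 canonicalizes the sign: +0.0 + -0.0 is +0.0 under the
        // default rounding mode, while other finite values of C3 pass
        // through unchanged.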
2945 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3); 2946 } 2947 LLVM_FALLTHROUGH; 2948 } 2949 case Intrinsic::fma: 2950 case Intrinsic::fmuladd: { 2951 APFloat V = C1; 2952 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven); 2953 return ConstantFP::get(Ty->getContext(), V); 2954 } 2955 case Intrinsic::amdgcn_cubeid: 2956 case Intrinsic::amdgcn_cubema: 2957 case Intrinsic::amdgcn_cubesc: 2958 case Intrinsic::amdgcn_cubetc: { 2959 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3); 2960 return ConstantFP::get(Ty->getContext(), V); 2961 } 2962 } 2963 } 2964 } 2965 } 2966 2967 if (IntrinsicID == Intrinsic::smul_fix || 2968 IntrinsicID == Intrinsic::smul_fix_sat) { 2969 // poison * C -> poison 2970 // C * poison -> poison 2971 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1])) 2972 return PoisonValue::get(Ty); 2973 2974 const APInt *C0, *C1; 2975 if (!getConstIntOrUndef(Operands[0], C0) || 2976 !getConstIntOrUndef(Operands[1], C1)) 2977 return nullptr; 2978 2979 // undef * C -> 0 2980 // C * undef -> 0 2981 if (!C0 || !C1) 2982 return Constant::getNullValue(Ty); 2983 2984 // This code performs rounding towards negative infinity in case the result 2985 // cannot be represented exactly for the given scale. Targets that do care 2986 // about rounding should use a target hook for specifying how rounding 2987 // should be done, and provide their own folding to be consistent with 2988 // rounding. This is the same approach as used by 2989 // DAGTypeLegalizer::ExpandIntRes_MULFIX. 2990 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue(); 2991 unsigned Width = C0->getBitWidth(); 2992 assert(Scale < Width && "Illegal scale."); 2993 unsigned ExtendedWidth = Width * 2; 2994 APInt Product = 2995 (C0->sext(ExtendedWidth) * C1->sext(ExtendedWidth)).ashr(Scale); 2996 if (IntrinsicID == Intrinsic::smul_fix_sat) { 2997 APInt Max = APInt::getSignedMaxValue(Width).sext(ExtendedWidth); 2998 APInt Min = APInt::getSignedMinValue(Width).sext(ExtendedWidth); 2999 Product = APIntOps::smin(Product, Max); 3000 Product = APIntOps::smax(Product, Min); 3001 } 3002 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width)); 3003 } 3004 3005 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) { 3006 const APInt *C0, *C1, *C2; 3007 if (!getConstIntOrUndef(Operands[0], C0) || 3008 !getConstIntOrUndef(Operands[1], C1) || 3009 !getConstIntOrUndef(Operands[2], C2)) 3010 return nullptr; 3011 3012 bool IsRight = IntrinsicID == Intrinsic::fshr; 3013 if (!C2) 3014 return Operands[IsRight ? 1 : 0]; 3015 if (!C0 && !C1) 3016 return UndefValue::get(Ty); 3017 3018 // The shift amount is interpreted as modulo the bitwidth. If the shift 3019 // amount is effectively 0, avoid UB due to oversized inverse shift below. 3020 unsigned BitWidth = C2->getBitWidth(); 3021 unsigned ShAmt = C2->urem(BitWidth); 3022 if (!ShAmt) 3023 return Operands[IsRight ? 1 : 0]; 3024 3025 // (C0 << ShlAmt) | (C1 >> LshrAmt) 3026 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt; 3027 unsigned ShlAmt = !IsRight ? 
ShAmt : BitWidth - ShAmt; 3028 if (!C0) 3029 return ConstantInt::get(Ty, C1->lshr(LshrAmt)); 3030 if (!C1) 3031 return ConstantInt::get(Ty, C0->shl(ShlAmt)); 3032 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt)); 3033 } 3034 3035 if (IntrinsicID == Intrinsic::amdgcn_perm) 3036 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty); 3037 3038 return nullptr; 3039 } 3040 3041 static Constant *ConstantFoldScalarCall(StringRef Name, 3042 Intrinsic::ID IntrinsicID, 3043 Type *Ty, 3044 ArrayRef<Constant *> Operands, 3045 const TargetLibraryInfo *TLI, 3046 const CallBase *Call) { 3047 if (Operands.size() == 1) 3048 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call); 3049 3050 if (Operands.size() == 2) 3051 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call); 3052 3053 if (Operands.size() == 3) 3054 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call); 3055 3056 return nullptr; 3057 } 3058 3059 static Constant *ConstantFoldFixedVectorCall( 3060 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy, 3061 ArrayRef<Constant *> Operands, const DataLayout &DL, 3062 const TargetLibraryInfo *TLI, const CallBase *Call) { 3063 SmallVector<Constant *, 4> Result(FVTy->getNumElements()); 3064 SmallVector<Constant *, 4> Lane(Operands.size()); 3065 Type *Ty = FVTy->getElementType(); 3066 3067 switch (IntrinsicID) { 3068 case Intrinsic::masked_load: { 3069 auto *SrcPtr = Operands[0]; 3070 auto *Mask = Operands[2]; 3071 auto *Passthru = Operands[3]; 3072 3073 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL); 3074 3075 SmallVector<Constant *, 32> NewElements; 3076 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) { 3077 auto *MaskElt = Mask->getAggregateElement(I); 3078 if (!MaskElt) 3079 break; 3080 auto *PassthruElt = Passthru->getAggregateElement(I); 3081 auto *VecElt = VecData ? 
          VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // An undef mask bit may resolve either way, so either element is a
        // valid result for this lane. Having recorded one, move on to the
        // next lane instead of falling into the checks below, which would
        // reject the fold.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != FVTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }
  case Intrinsic::arm_mve_vctp8:
  case Intrinsic::arm_mve_vctp16:
  case Intrinsic::arm_mve_vctp32:
  case Intrinsic::arm_mve_vctp64: {
    if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Limit = Op->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  case Intrinsic::get_active_lane_mask: {
    auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
    auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
    if (Op0 && Op1) {
      unsigned Lanes = FVTy->getNumElements();
      uint64_t Base = Op0->getZExtValue();
      uint64_t Limit = Op1->getZExtValue();

      SmallVector<Constant *, 16> NCs;
      for (unsigned i = 0; i < Lanes; i++) {
        if (Base + i < Limit)
          NCs.push_back(ConstantInt::getTrue(Ty));
        else
          NCs.push_back(ConstantInt::getFalse(Ty));
      }
      return ConstantVector::get(NCs);
    }
    return nullptr;
  }
  default:
    break;
  }

  for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
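    // For example, ctpop(<2 x i8> <i8 1, i8 3>) folds lane by lane into
    // <2 x i8> <i8 1, i8 2>.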
3165 Constant *Folded = 3166 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call); 3167 if (!Folded) 3168 return nullptr; 3169 Result[I] = Folded; 3170 } 3171 3172 return ConstantVector::get(Result); 3173 } 3174 3175 static Constant *ConstantFoldScalableVectorCall( 3176 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy, 3177 ArrayRef<Constant *> Operands, const DataLayout &DL, 3178 const TargetLibraryInfo *TLI, const CallBase *Call) { 3179 switch (IntrinsicID) { 3180 case Intrinsic::aarch64_sve_convert_from_svbool: { 3181 auto *Src = dyn_cast<Constant>(Operands[0]); 3182 if (!Src || !Src->isNullValue()) 3183 break; 3184 3185 return ConstantInt::getFalse(SVTy); 3186 } 3187 default: 3188 break; 3189 } 3190 return nullptr; 3191 } 3192 3193 } // end anonymous namespace 3194 3195 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F, 3196 ArrayRef<Constant *> Operands, 3197 const TargetLibraryInfo *TLI) { 3198 if (Call->isNoBuiltin()) 3199 return nullptr; 3200 if (!F->hasName()) 3201 return nullptr; 3202 3203 // If this is not an intrinsic and not recognized as a library call, bail out. 3204 if (F->getIntrinsicID() == Intrinsic::not_intrinsic) { 3205 if (!TLI) 3206 return nullptr; 3207 LibFunc LibF; 3208 if (!TLI->getLibFunc(*F, LibF)) 3209 return nullptr; 3210 } 3211 3212 StringRef Name = F->getName(); 3213 Type *Ty = F->getReturnType(); 3214 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) 3215 return ConstantFoldFixedVectorCall( 3216 Name, F->getIntrinsicID(), FVTy, Operands, 3217 F->getParent()->getDataLayout(), TLI, Call); 3218 3219 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty)) 3220 return ConstantFoldScalableVectorCall( 3221 Name, F->getIntrinsicID(), SVTy, Operands, 3222 F->getParent()->getDataLayout(), TLI, Call); 3223 3224 // TODO: If this is a library function, we already discovered that above, 3225 // so we should pass the LibFunc, not the name (and it might be better 3226 // still to separate intrinsic handling from libcalls). 3227 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI, 3228 Call); 3229 } 3230 3231 bool llvm::isMathLibCallNoop(const CallBase *Call, 3232 const TargetLibraryInfo *TLI) { 3233 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap 3234 // (and to some extent ConstantFoldScalarCall). 3235 if (Call->isNoBuiltin() || Call->isStrictFP()) 3236 return false; 3237 Function *F = Call->getCalledFunction(); 3238 if (!F) 3239 return false; 3240 3241 LibFunc Func; 3242 if (!TLI || !TLI->getLibFunc(*F, Func)) 3243 return false; 3244 3245 if (Call->arg_size() == 1) { 3246 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) { 3247 const APFloat &Op = OpC->getValueAPF(); 3248 switch (Func) { 3249 case LibFunc_logl: 3250 case LibFunc_log: 3251 case LibFunc_logf: 3252 case LibFunc_log2l: 3253 case LibFunc_log2: 3254 case LibFunc_log2f: 3255 case LibFunc_log10l: 3256 case LibFunc_log10: 3257 case LibFunc_log10f: 3258 return Op.isNaN() || (!Op.isZero() && !Op.isNegative()); 3259 3260 case LibFunc_expl: 3261 case LibFunc_exp: 3262 case LibFunc_expf: 3263 // FIXME: These boundaries are slightly conservative. 3264 if (OpC->getType()->isDoubleTy()) 3265 return !(Op < APFloat(-745.0) || Op > APFloat(709.0)); 3266 if (OpC->getType()->isFloatTy()) 3267 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f)); 3268 break; 3269 3270 case LibFunc_exp2l: 3271 case LibFunc_exp2: 3272 case LibFunc_exp2f: 3273 // FIXME: These boundaries are slightly conservative. 
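        // exp2 on double is finite for arguments up to just below 1024 and
        // rounds to a nonzero value down to about -1075 (for float, just
        // below 128 and about -150), so these cutoffs give away only a
        // sliver at each end.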
3274 if (OpC->getType()->isDoubleTy()) 3275 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0)); 3276 if (OpC->getType()->isFloatTy()) 3277 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f)); 3278 break; 3279 3280 case LibFunc_sinl: 3281 case LibFunc_sin: 3282 case LibFunc_sinf: 3283 case LibFunc_cosl: 3284 case LibFunc_cos: 3285 case LibFunc_cosf: 3286 return !Op.isInfinity(); 3287 3288 case LibFunc_tanl: 3289 case LibFunc_tan: 3290 case LibFunc_tanf: { 3291 // FIXME: Stop using the host math library. 3292 // FIXME: The computation isn't done in the right precision. 3293 Type *Ty = OpC->getType(); 3294 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) 3295 return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr; 3296 break; 3297 } 3298 3299 case LibFunc_asinl: 3300 case LibFunc_asin: 3301 case LibFunc_asinf: 3302 case LibFunc_acosl: 3303 case LibFunc_acos: 3304 case LibFunc_acosf: 3305 return !(Op < APFloat(Op.getSemantics(), "-1") || 3306 Op > APFloat(Op.getSemantics(), "1")); 3307 3308 case LibFunc_sinh: 3309 case LibFunc_cosh: 3310 case LibFunc_sinhf: 3311 case LibFunc_coshf: 3312 case LibFunc_sinhl: 3313 case LibFunc_coshl: 3314 // FIXME: These boundaries are slightly conservative. 3315 if (OpC->getType()->isDoubleTy()) 3316 return !(Op < APFloat(-710.0) || Op > APFloat(710.0)); 3317 if (OpC->getType()->isFloatTy()) 3318 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f)); 3319 break; 3320 3321 case LibFunc_sqrtl: 3322 case LibFunc_sqrt: 3323 case LibFunc_sqrtf: 3324 return Op.isNaN() || Op.isZero() || !Op.isNegative(); 3325 3326 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p, 3327 // maybe others? 3328 default: 3329 break; 3330 } 3331 } 3332 } 3333 3334 if (Call->arg_size() == 2) { 3335 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0)); 3336 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1)); 3337 if (Op0C && Op1C) { 3338 const APFloat &Op0 = Op0C->getValueAPF(); 3339 const APFloat &Op1 = Op1C->getValueAPF(); 3340 3341 switch (Func) { 3342 case LibFunc_powl: 3343 case LibFunc_pow: 3344 case LibFunc_powf: { 3345 // FIXME: Stop using the host math library. 3346 // FIXME: The computation isn't done in the right precision. 3347 Type *Ty = Op0C->getType(); 3348 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) { 3349 if (Ty == Op1C->getType()) 3350 return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr; 3351 } 3352 break; 3353 } 3354 3355 case LibFunc_fmodl: 3356 case LibFunc_fmod: 3357 case LibFunc_fmodf: 3358 case LibFunc_remainderl: 3359 case LibFunc_remainder: 3360 case LibFunc_remainderf: 3361 return Op0.isNaN() || Op1.isNaN() || 3362 (!Op0.isInfinity() && !Op1.isZero()); 3363 3364 default: 3365 break; 3366 } 3367 } 3368 } 3369 3370 return false; 3371 } 3372 3373 void TargetFolder::anchor() {} 3374