//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
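  // For example, bitcast of <4 x i32> zeroinitializer to i128 folds to
  // i128 0, and bitcast of an all-ones <4 x i32> to i128 folds to i128 -1
  // (illustrative same-width casts, not an exhaustive list).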
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        Type *SrcIVTy =
            VectorType::get(IntegerType::get(C->getContext(), FPWidth),
                            NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
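  // For example, a <2 x double> source is first recast to <2 x i64> here, so
  // the element-count change below only ever has to deal with integer pieces.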
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(C->getType()->getVectorElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                            ConstantInt::get(Src->getType(),
                                                             ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
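      // For example, in { [0 x i32], float } the walk lands on the float
      // member, since the zero-sized array contributes no bits.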
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()) == 0);
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into, and BytesLeft is the number of bytes left in the
/// CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (auto *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(C->getContext(),
                                     DL.getTypeSizeInBits(LoadTy));
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ?
          DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType());

  // If the load ends at or before the start of the constant, we're reading
  // nothing but bytes outside it, so the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // Likewise, if the load begins at or past the end of the constant, the
  // result is undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace the load with an immediate integer if the result is an integer
    // or fp value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the NUL terminator at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the DataLayout DL for target-specific information.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0.Zero |= Known1.Zero;
    Known0.One &= Known1.One;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);
  Type *IntPtrScalarTy = IntPtrTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntPtrScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntPtrTy
                          : IntPtrTy->getScalarType();
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  if (Constant *Folded = ConstantFoldConstant(C, DL, TLI))
    C = Folded;

  return C;
}

/// Strip the pointer casts, but preserve the address space information.
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
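/// For example, on a target where i32 is 4 bytes, the over-indexed expression
///   getelementptr [5 x i32], [5 x i32]* @a, i32 0, i32 5
/// denotes byte offset 20 and is re-formed below as
///   getelementptr [5 x i32], [5 x i32]* @a, i32 1, i32 0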
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized())
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          if (auto *FoldedRes = ConstantFoldConstant(Res, DL, TLI))
            Res = FoldedRes;
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
        Ty = ATy->getElementType();
      } else {
        // We've reached some non-indexable type.
        break;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
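  // (Such a mismatch can remain when the descent loop above broke out early,
  // e.g. at a non-indexable type, even though the offset was fully consumed.)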
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return nullptr;

  SmallVector<Constant *, 8> Ops;
  for (const Use &NewU : C->operands()) {
    auto *NewC = cast<Constant>(&NewU);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(NewC) || isa<ConstantExpr>(NewC)) {
      auto It = FoldedOps.find(NewC);
      if (It == FoldedOps.end()) {
        if (auto *FoldedC =
                ConstantFoldConstantImpl(NewC, DL, TLI, FoldedOps)) {
          FoldedOps.insert({NewC, FoldedC});
          NewC = FoldedC;
        } else {
          FoldedOps.insert({NewC, NewC});
        }
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (auto *FoldedC = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps))
        C = FoldedC;
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    if (auto *FoldedOp = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps))
      Op = FoldedOp;

    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null -> icmp x, 0
  // fold: icmp null, (inttoptr x) -> icmp 0, x
  // fold: icmp (ptrtoint x), 0 -> icmp x, null
  // fold: icmp 0, (ptrtoint x) -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is the pointer-sized integer
      // type; otherwise there is a truncation or extension that we aren't
      // modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is the pointer-sized integer
        // type; otherwise there is a truncation or extension that we aren't
        // modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::round:
  case Intrinsic::masked_load:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::bitreverse:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2si32:
  case Intrinsic::x86_avx512_vcvtss2si64:
  case Intrinsic::x86_avx512_cvttss2si:
  case Intrinsic::x86_avx512_cvttss2si64:
  case Intrinsic::x86_avx512_vcvtsd2si32:
  case Intrinsic::x86_avx512_vcvtsd2si64:
  case Intrinsic::x86_avx512_cvttsd2si:
  case Intrinsic::x86_avx512_cvttsd2si64:
  case Intrinsic::x86_avx512_vcvtss2usi32:
  case Intrinsic::x86_avx512_vcvtss2usi64:
  case Intrinsic::x86_avx512_cvttss2usi:
  case Intrinsic::x86_avx512_cvttss2usi64:
  case Intrinsic::x86_avx512_vcvtsd2usi32:
  case Intrinsic::x86_avx512_vcvtsd2usi64:
  case Intrinsic::x86_avx512_cvttsd2usi:
  case Intrinsic::x86_avx512_cvttsd2usi64:
  case Intrinsic::is_constant:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah", which strcmp would compare
  // equal to "cos" but which has length 8.
  StringRef Name = F->getName();
  switch (Name[0]) {
  default:
    return false;
  case 'a':
    return Name == "acos" || Name == "acosf" ||
           Name == "asin" || Name == "asinf" ||
           Name == "atan" || Name == "atanf" ||
           Name == "atan2" || Name == "atan2f";
  case 'c':
    return Name == "ceil" || Name == "ceilf" ||
           Name == "cos" || Name == "cosf" ||
           Name == "cosh" || Name == "coshf";
  case 'e':
    return Name == "exp" || Name == "expf" ||
           Name == "exp2" || Name == "exp2f";
  case 'f':
    return Name == "fabs" || Name == "fabsf" ||
           Name == "floor" || Name == "floorf" ||
           Name == "fmod" || Name == "fmodf";
  case 'l':
    return Name == "log" || Name == "logf" ||
           Name == "log2" || Name == "log2f" ||
           Name == "log10" || Name == "log10f";
  case 'n':
    return Name == "nearbyint" || Name == "nearbyintf";
  case 'p':
    return Name == "pow" || Name == "powf";
  case 'r':
    return Name == "rint" || Name == "rintf" ||
           Name == "round" || Name == "roundf";
  case 's':
    return Name == "sin" || Name == "sinf" ||
           Name == "sinh" || Name == "sinhf" ||
           Name == "sqrt" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanf" ||
           Name == "tanh" || Name == "tanhf" ||
           Name == "trunc" || Name == "truncf";
  case '_':
    // Check for various function names that get used for the math functions
    // when the header files are preprocessed with the macro
    // __FINITE_MATH_ONLY__ enabled.
    // The '12' here is the length of the shortest name that can match.
    // We need to check the size before looking at Name[1] and Name[2]
    // so we may as well check a limit that will eliminate mismatches.
    if (Name.size() < 12 || Name[1] != '_')
      return false;
    switch (Name[2]) {
    default:
      return false;
    case 'a':
      return Name == "__acos_finite" || Name == "__acosf_finite" ||
             Name == "__asin_finite" || Name == "__asinf_finite" ||
             Name == "__atan2_finite" || Name == "__atan2f_finite";
    case 'c':
      return Name == "__cosh_finite" || Name == "__coshf_finite";
    case 'e':
      return Name == "__exp_finite" || Name == "__expf_finite" ||
             Name == "__exp2_finite" || Name == "__exp2f_finite";
    case 'l':
      return Name == "__log_finite" || Name == "__logf_finite" ||
             Name == "__log10_finite" || Name == "__log10f_finite";
    case 'p':
      return Name == "__pow_finite" || Name == "__powf_finite";
    case 's':
      return Name == "__sinh_finite" || Name == "__sinhf_finite";
    }
  }
}

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
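/// Both errno (ERANGE/EDOM) and, where <fenv.h> is available, the accrued
/// floating-point exception flags are consulted; FE_INEXACT alone is
/// deliberately ignored, since most library results are inexact.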

namespace {

Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy() || Ty->isFloatTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
                               double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
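// A concrete illustration of the guard pattern above: folding log(-1.0)
// through ConstantFoldFP makes the host libm raise EDOM/FE_INVALID, so the
// helper returns nullptr and the call stays unfolded, while log(10.0) raises
// at most FE_INEXACT (masked out above) and folds to a ConstantFP.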
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
                                      Type *Ty, bool IsSigned) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
                                               : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status =
      Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
                           IsSigned, mode, &isExact);
  if (status != APFloat::opOK &&
      (!roundTowardZero || status != APFloat::opInexact))
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, IsSigned);
}
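// Worked examples of the status check above: a truncating conversion
// (roundTowardZero == true) tolerates opInexact, so cvttss2si on 1.7 folds to
// 1; the non-truncating form rejects any status other than opOK, so cvtss2si
// folds 2.0 to 2 but leaves 2.5 (opInexact) alone.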
double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}

static bool isManifestConstant(const Constant *c) {
  if (isa<ConstantData>(c)) {
    return true;
  } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
    for (const Value *subc : c->operand_values()) {
      if (!isManifestConstant(cast<Constant>(subc)))
        return false;
    }
    return true;
  }
  return false;
}

static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
    C = &CI->getValue();
    return true;
  }
  if (isa<UndefValue>(Op)) {
    C = nullptr;
    return true;
  }
  return false;
}

static Constant *ConstantFoldScalarCall1(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 1 && "Wrong number of operands.");

  if (IntrinsicID == Intrinsic::is_constant) {
    // We know we have a "Constant" argument. But we want to only
    // return true for manifest constants, not those that depend on
    // constants with unknowable values, e.g. GlobalValue or BlockAddress.
    if (isManifestConstant(Operands[0]))
      return ConstantInt::getTrue(Ty->getContext());
    return nullptr;
  }
  if (isa<UndefValue>(Operands[0])) {
    // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
    // ctpop() is between 0 and bitwidth, so pick 0 for undef.
    if (IntrinsicID == Intrinsic::cos ||
        IntrinsicID == Intrinsic::ctpop)
      return Constant::getNullValue(Ty);
    if (IntrinsicID == Intrinsic::bswap ||
        IntrinsicID == Intrinsic::bitreverse ||
        IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group)
      return Operands[0];
  }

  if (isa<ConstantPointerNull>(Operands[0])) {
    // launder(null) == null == strip(null) iff in addrspace 0
    if (IntrinsicID == Intrinsic::launder_invariant_group ||
        IntrinsicID == Intrinsic::strip_invariant_group) {
      // If the instruction is not yet inserted in a basic block (e.g. when
      // cloning a function during inlining), Call's caller may not be
      // available. So check Call's BB first before querying Call->getCaller.
      const Function *Caller =
          Call->getParent() ? Call->getCaller() : nullptr;
      if (Caller &&
          !NullPointerIsDefined(
              Caller, Operands[0]->getType()->getPointerAddressSpace())) {
        return Operands[0];
      }
      return nullptr;
    }
  }

  if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
    if (IntrinsicID == Intrinsic::convert_to_fp16) {
      APFloat Val(Op->getValueAPF());

      bool lost = false;
      Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);

      return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
    }

    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;

    // Use internal versions of these intrinsics.
    APFloat U = Op->getValueAPF();

    if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
      U.roundToIntegral(APFloat::rmNearestTiesToEven);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::round) {
      U.roundToIntegral(APFloat::rmNearestTiesToAway);
      return ConstantFP::get(Ty->getContext(), U);
    }
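    // The two rounding modes just above differ only at half-way cases:
    // llvm.round(2.5) folds to 3.0 (ties away from zero), while llvm.rint(2.5)
    // and llvm.nearbyint(2.5) fold to 2.0 (ties to even).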
    if (IntrinsicID == Intrinsic::ceil) {
      U.roundToIntegral(APFloat::rmTowardPositive);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::floor) {
      U.roundToIntegral(APFloat::rmTowardNegative);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::trunc) {
      U.roundToIntegral(APFloat::rmTowardZero);
      return ConstantFP::get(Ty->getContext(), U);
    }

    if (IntrinsicID == Intrinsic::fabs) {
      U.clearSign();
      return ConstantFP::get(Ty->getContext(), U);
    }

    // We only fold functions with finite arguments. Folding NaN and inf is
    // likely to be aborted with an exception anyway, and some host libms
    // have known errors raising exceptions.
    if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
      return nullptr;

    // Currently APFloat versions of these functions do not exist, so we use
    // the host native double versions. Float versions are not called
    // directly but for all these it is true that
    // (float)(f((double)arg)) == f(arg). Long double is not supported yet.
    double V = getValueAsDouble(Op);

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::log:
      return ConstantFoldFP(log, V, Ty);
    case Intrinsic::log2:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(Log2, V, Ty);
    case Intrinsic::log10:
      // TODO: What about hosts that lack a C99 library?
      return ConstantFoldFP(log10, V, Ty);
    case Intrinsic::exp:
      return ConstantFoldFP(exp, V, Ty);
    case Intrinsic::exp2:
      // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
      return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
    case Intrinsic::sin:
      return ConstantFoldFP(sin, V, Ty);
    case Intrinsic::cos:
      return ConstantFoldFP(cos, V, Ty);
    case Intrinsic::sqrt:
      return ConstantFoldFP(sqrt, V, Ty);
    }

    if (!TLI)
      return nullptr;

    LibFunc Func = NotLibFunc;
    TLI->getLibFunc(Name, Func);
    switch (Func) {
    default:
      break;
    case LibFunc_acos:
    case LibFunc_acosf:
    case LibFunc_acos_finite:
    case LibFunc_acosf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(acos, V, Ty);
      break;
    case LibFunc_asin:
    case LibFunc_asinf:
    case LibFunc_asin_finite:
    case LibFunc_asinf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(asin, V, Ty);
      break;
    case LibFunc_atan:
    case LibFunc_atanf:
      if (TLI->has(Func))
        return ConstantFoldFP(atan, V, Ty);
      break;
    case LibFunc_ceil:
    case LibFunc_ceilf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardPositive);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_cos:
    case LibFunc_cosf:
      if (TLI->has(Func))
        return ConstantFoldFP(cos, V, Ty);
      break;
    case LibFunc_cosh:
    case LibFunc_coshf:
    case LibFunc_cosh_finite:
    case LibFunc_coshf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(cosh, V, Ty);
      break;
    case LibFunc_exp:
    case LibFunc_expf:
    case LibFunc_exp_finite:
    case LibFunc_expf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(exp, V, Ty);
      break;
    case LibFunc_exp2:
    case LibFunc_exp2f:
    case LibFunc_exp2_finite:
    case LibFunc_exp2f_finite:
      if (TLI->has(Func))
        // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
      break;
    case LibFunc_fabs:
    case LibFunc_fabsf:
      if (TLI->has(Func)) {
        U.clearSign();
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_floor:
    case LibFunc_floorf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardNegative);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_log:
    case LibFunc_logf:
    case LibFunc_log_finite:
    case LibFunc_logf_finite:
      if (V > 0.0 && TLI->has(Func))
        return ConstantFoldFP(log, V, Ty);
      break;
    case LibFunc_log2:
    case LibFunc_log2f:
    case LibFunc_log2_finite:
    case LibFunc_log2f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(Log2, V, Ty);
      break;
    case LibFunc_log10:
    case LibFunc_log10f:
    case LibFunc_log10_finite:
    case LibFunc_log10f_finite:
      if (V > 0.0 && TLI->has(Func))
        // TODO: What about hosts that lack a C99 library?
        return ConstantFoldFP(log10, V, Ty);
      break;
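    // Note the V > 0.0 guards on the log cases above: rather than relying on
    // the errno/fenv check to reject them, non-positive inputs are filtered
    // up front, so e.g. a call to logf(-1.0f) is simply left in place.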
    case LibFunc_nearbyint:
    case LibFunc_nearbyintf:
    case LibFunc_rint:
    case LibFunc_rintf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToEven);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_round:
    case LibFunc_roundf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    case LibFunc_sin:
    case LibFunc_sinf:
      if (TLI->has(Func))
        return ConstantFoldFP(sin, V, Ty);
      break;
    case LibFunc_sinh:
    case LibFunc_sinhf:
    case LibFunc_sinh_finite:
    case LibFunc_sinhf_finite:
      if (TLI->has(Func))
        return ConstantFoldFP(sinh, V, Ty);
      break;
    case LibFunc_sqrt:
    case LibFunc_sqrtf:
      if (V >= 0.0 && TLI->has(Func))
        return ConstantFoldFP(sqrt, V, Ty);
      break;
    case LibFunc_tan:
    case LibFunc_tanf:
      if (TLI->has(Func))
        return ConstantFoldFP(tan, V, Ty);
      break;
    case LibFunc_tanh:
    case LibFunc_tanhf:
      if (TLI->has(Func))
        return ConstantFoldFP(tanh, V, Ty);
      break;
    case LibFunc_trunc:
    case LibFunc_truncf:
      if (TLI->has(Func)) {
        U.roundToIntegral(APFloat::rmTowardZero);
        return ConstantFP::get(Ty->getContext(), U);
      }
      break;
    }
    return nullptr;
  }

  if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
    switch (IntrinsicID) {
    case Intrinsic::bswap:
      return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
    case Intrinsic::ctpop:
      return ConstantInt::get(Ty, Op->getValue().countPopulation());
    case Intrinsic::bitreverse:
      return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
    case Intrinsic::convert_from_fp16: {
      APFloat Val(APFloat::IEEEhalf(), Op->getValue());

      bool lost = false;
      APFloat::opStatus status = Val.convert(
          Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);

      // Conversion is always precise.
      (void)status;
      assert(status == APFloat::opOK && !lost &&
             "Precision lost during fp16 constfolding");

      return ConstantFP::get(Ty->getContext(), Val);
    }
    default:
      return nullptr;
    }
  }
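  // Examples of the integer folds above: @llvm.bswap.i32(i32 0x12345678)
  // folds to 0x78563412, @llvm.ctpop.i8(i8 7) folds to 3, and
  // @llvm.bitreverse.i8(i8 1) folds to 0x80.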
  // Also support ConstantVector, which is what we get when an element is
  // undef (such vectors are not ConstantDataVectors).
  if (isa<ConstantVector>(Operands[0]) ||
      isa<ConstantDataVector>(Operands[0])) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_sse_cvtss2si:
    case Intrinsic::x86_sse_cvtss2si64:
    case Intrinsic::x86_sse2_cvtsd2si:
    case Intrinsic::x86_sse2_cvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_sse_cvttss2si:
    case Intrinsic::x86_sse_cvttss2si64:
    case Intrinsic::x86_sse2_cvttsd2si:
    case Intrinsic::x86_sse2_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    }
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall2(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 2 && "Wrong number of operands.");

  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
      return nullptr;
    double Op1V = getValueAsDouble(Op1);

    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (Op2->getType() != Op1->getType())
        return nullptr;

      double Op2V = getValueAsDouble(Op2);
      if (IntrinsicID == Intrinsic::pow) {
        return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
      }
      if (IntrinsicID == Intrinsic::copysign) {
        APFloat V1 = Op1->getValueAPF();
        const APFloat &V2 = Op2->getValueAPF();
        V1.copySign(V2);
        return ConstantFP::get(Ty->getContext(), V1);
      }

      if (IntrinsicID == Intrinsic::minnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maxnum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::minimum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
      }

      if (IntrinsicID == Intrinsic::maximum) {
        const APFloat &C1 = Op1->getValueAPF();
        const APFloat &C2 = Op2->getValueAPF();
        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
      }

      if (!TLI)
        return nullptr;

      LibFunc Func = NotLibFunc;
      TLI->getLibFunc(Name, Func);
      switch (Func) {
      default:
        break;
      case LibFunc_pow:
      case LibFunc_powf:
      case LibFunc_pow_finite:
      case LibFunc_powf_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        break;
      case LibFunc_fmod:
      case LibFunc_fmodf:
        if (TLI->has(Func)) {
          APFloat V = Op1->getValueAPF();
          if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
            return ConstantFP::get(Ty->getContext(), V);
        }
        break;
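      // e.g. the fmod case above folds fmod(5.5, 2.0) to 1.5 via
      // APFloat::mod, with no host libm involvement.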
      case LibFunc_atan2:
      case LibFunc_atan2f:
      case LibFunc_atan2_finite:
      case LibFunc_atan2f_finite:
        if (TLI->has(Func))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
        break;
      }
    } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
      if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) {
        // Compute in float, then round the result back to half so the folded
        // constant has the type the call expects.
        APFloat Res((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()));
        bool unused;
        Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
                    &unused);
        return ConstantFP::get(Ty->getContext(), Res);
      }
      if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((float)std::pow((float)Op1V,
                                                       (int)Op2C->getZExtValue())));
      if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
        return ConstantFP::get(Ty->getContext(),
                               APFloat((double)std::pow((double)Op1V,
                                                        (int)Op2C->getZExtValue())));
    }
    return nullptr;
  }

  if (Operands[0]->getType()->isIntegerTy() &&
      Operands[1]->getType()->isIntegerTy()) {
    const APInt *C0, *C1;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1))
      return nullptr;

    switch (IntrinsicID) {
    default: break;
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
      // X - undef -> { undef, false }
      // undef - X -> { undef, false }
      // X + undef -> { undef, false }
      // undef + X -> { undef, false }
      if (!C0 || !C1) {
        return ConstantStruct::get(
            cast<StructType>(Ty),
            {UndefValue::get(Ty->getStructElementType(0)),
             Constant::getNullValue(Ty->getStructElementType(1))});
      }
      LLVM_FALLTHROUGH;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      // undef * X -> { 0, false }
      // X * undef -> { 0, false }
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);

      APInt Res;
      bool Overflow;
      switch (IntrinsicID) {
      default: llvm_unreachable("Invalid case");
      case Intrinsic::sadd_with_overflow:
        Res = C0->sadd_ov(*C1, Overflow);
        break;
      case Intrinsic::uadd_with_overflow:
        Res = C0->uadd_ov(*C1, Overflow);
        break;
      case Intrinsic::ssub_with_overflow:
        Res = C0->ssub_ov(*C1, Overflow);
        break;
      case Intrinsic::usub_with_overflow:
        Res = C0->usub_ov(*C1, Overflow);
        break;
      case Intrinsic::smul_with_overflow:
        Res = C0->smul_ov(*C1, Overflow);
        break;
      case Intrinsic::umul_with_overflow:
        Res = C0->umul_ov(*C1, Overflow);
        break;
      }
      Constant *Ops[] = {
        ConstantInt::get(Ty->getContext(), Res),
        ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
      };
      return ConstantStruct::get(cast<StructType>(Ty), Ops);
    }
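    // For example, the overflow cases above fold
    // @llvm.sadd.with.overflow.i8(i8 127, i8 1) to { i8 -128, i1 true } and
    // @llvm.umul.with.overflow.i8(i8 16, i8 16) to { i8 0, i1 true }.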
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getAllOnesValue(Ty);
      if (IntrinsicID == Intrinsic::uadd_sat)
        return ConstantInt::get(Ty, C0->uadd_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->sadd_sat(*C1));
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      if (!C0 && !C1)
        return UndefValue::get(Ty);
      if (!C0 || !C1)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::usub_sat)
        return ConstantInt::get(Ty, C0->usub_sat(*C1));
      else
        return ConstantInt::get(Ty, C0->ssub_sat(*C1));
    case Intrinsic::cttz:
    case Intrinsic::ctlz:
      assert(C1 && "Must be constant int");

      // cttz(0, 1) and ctlz(0, 1) are undef.
      if (C1->isOneValue() && (!C0 || C0->isNullValue()))
        return UndefValue::get(Ty);
      if (!C0)
        return Constant::getNullValue(Ty);
      if (IntrinsicID == Intrinsic::cttz)
        return ConstantInt::get(Ty, C0->countTrailingZeros());
      else
        return ConstantInt::get(Ty, C0->countLeadingZeros());
    }

    return nullptr;
  }

  // Also support ConstantVector, which is what we get when an element is
  // undef (such vectors are not ConstantDataVectors).
  if ((isa<ConstantVector>(Operands[0]) ||
       isa<ConstantDataVector>(Operands[0])) &&
      // Check for the default rounding mode: the immediate 4 encodes
      // _MM_FROUND_CUR_DIRECTION (use the current MXCSR rounding), which is
      // round-to-nearest-even unless the program changed it.
      // FIXME: Support other rounding modes?
      isa<ConstantInt>(Operands[1]) &&
      cast<ConstantInt>(Operands[1])->getValue() == 4) {
    auto *Op = cast<Constant>(Operands[0]);
    switch (IntrinsicID) {
    default: break;
    case Intrinsic::x86_avx512_vcvtss2si32:
    case Intrinsic::x86_avx512_vcvtss2si64:
    case Intrinsic::x86_avx512_vcvtsd2si32:
    case Intrinsic::x86_avx512_vcvtsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_vcvtss2usi32:
    case Intrinsic::x86_avx512_vcvtss2usi64:
    case Intrinsic::x86_avx512_vcvtsd2usi32:
    case Intrinsic::x86_avx512_vcvtsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/false, Ty,
                                           /*IsSigned*/false);
      break;
    case Intrinsic::x86_avx512_cvttss2si:
    case Intrinsic::x86_avx512_cvttss2si64:
    case Intrinsic::x86_avx512_cvttsd2si:
    case Intrinsic::x86_avx512_cvttsd2si64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/true);
      break;
    case Intrinsic::x86_avx512_cvttss2usi:
    case Intrinsic::x86_avx512_cvttss2usi64:
    case Intrinsic::x86_avx512_cvttsd2usi:
    case Intrinsic::x86_avx512_cvttsd2usi64:
      if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
        return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                           /*roundTowardZero=*/true, Ty,
                                           /*IsSigned*/false);
      break;
    }
  }
  return nullptr;
}

static Constant *ConstantFoldScalarCall3(StringRef Name,
                                         Intrinsic::ID IntrinsicID,
                                         Type *Ty,
                                         ArrayRef<Constant *> Operands,
                                         const TargetLibraryInfo *TLI,
                                         const CallBase *Call) {
  assert(Operands.size() == 3 && "Wrong number of operands.");

  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
                             APFloat::rmNearestTiesToEven);
          return ConstantFP::get(Ty->getContext(), V);
        }
        }
      }
    }
  }

  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::smul_fix:
        case Intrinsic::smul_fix_sat: {
          // This code performs rounding towards negative infinity in case the
          // result cannot be represented exactly for the given scale. Targets
          // that care about rounding should use a target hook for specifying
          // how rounding should be done, and provide their own folding to be
          // consistent with rounding. This is the same approach as used by
          // DAGTypeLegalizer::ExpandIntRes_MULFIX.
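          // For instance, smul.fix on i8 with scale 1 treats 3 as 1.5 and 5
          // as 2.5: the double-width product 15, shifted right by the scale,
          // gives 7, i.e. the exact 3.75 rounded down to 3.5.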
          APInt Lhs = Op1->getValue();
          APInt Rhs = Op2->getValue();
          unsigned Scale = Op3->getValue().getZExtValue();
          unsigned Width = Lhs.getBitWidth();
          assert(Scale < Width && "Illegal scale.");
          unsigned ExtendedWidth = Width * 2;
          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
          if (IntrinsicID == Intrinsic::smul_fix_sat) {
            APInt MaxValue =
                APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
            APInt MinValue =
                APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
            Product = APIntOps::smin(Product, MaxValue);
            Product = APIntOps::smax(Product, MinValue);
          }
          return ConstantInt::get(Ty->getContext(),
                                  Product.sextOrTrunc(Width));
        }
        }
      }
    }
  }

  if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
    const APInt *C0, *C1, *C2;
    if (!getConstIntOrUndef(Operands[0], C0) ||
        !getConstIntOrUndef(Operands[1], C1) ||
        !getConstIntOrUndef(Operands[2], C2))
      return nullptr;

    bool IsRight = IntrinsicID == Intrinsic::fshr;
    if (!C2)
      return Operands[IsRight ? 1 : 0];
    if (!C0 && !C1)
      return UndefValue::get(Ty);

    // The shift amount is interpreted as modulo the bitwidth. If the shift
    // amount is effectively 0, avoid UB due to oversized inverse shift below.
    unsigned BitWidth = C2->getBitWidth();
    unsigned ShAmt = C2->urem(BitWidth);
    if (!ShAmt)
      return Operands[IsRight ? 1 : 0];

    // (C0 << ShlAmt) | (C1 >> LshrAmt)
    unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
    unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
    if (!C0)
      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
    if (!C1)
      return ConstantInt::get(Ty, C0->shl(ShlAmt));
    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
  }

  return nullptr;
}

static Constant *ConstantFoldScalarCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        Type *Ty,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  if (Operands.size() == 1)
    return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 2)
    return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);

  if (Operands.size() == 3)
    return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);

  return nullptr;
}

static Constant *ConstantFoldVectorCall(StringRef Name,
                                        Intrinsic::ID IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const DataLayout &DL,
                                        const TargetLibraryInfo *TLI,
                                        const CallBase *Call) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

  if (IntrinsicID == Intrinsic::masked_load) {
    auto *SrcPtr = Operands[0];
    auto *Mask = Operands[2];
    auto *Passthru = Operands[3];

    Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, VTy, DL);

    SmallVector<Constant *, 32> NewElements;
    for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
      auto *MaskElt = Mask->getAggregateElement(I);
      if (!MaskElt)
        break;
      auto *PassthruElt = Passthru->getAggregateElement(I);
      auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
      if (isa<UndefValue>(MaskElt)) {
        if (PassthruElt)
          NewElements.push_back(PassthruElt);
        else if (VecElt)
          NewElements.push_back(VecElt);
        else
          return nullptr;
        // An undef mask element is neither null nor one; having picked a
        // value for it, skip the checks below rather than falling through.
        continue;
      }
      if (MaskElt->isNullValue()) {
        if (!PassthruElt)
          return nullptr;
        NewElements.push_back(PassthruElt);
      } else if (MaskElt->isOneValue()) {
        if (!VecElt)
          return nullptr;
        NewElements.push_back(VecElt);
      } else {
        return nullptr;
      }
    }
    if (NewElements.size() != VTy->getNumElements())
      return nullptr;
    return ConstantVector::get(NewElements);
  }

  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      // Some intrinsics use a scalar type for certain arguments.
      if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
        Lane[J] = Operands[J];
        continue;
      }

      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }
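    // e.g. @llvm.ctpop.v2i8(<2 x i8> <i8 7, i8 -1>) is handled by this loop
    // one lane at a time, folding to <2 x i8> <i8 3, i8 8>.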
    // Use the regular scalar folding to simplify this column.
    Constant *Folded =
        ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
                                 ArrayRef<Constant *> Operands,
                                 const TargetLibraryInfo *TLI) {
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return nullptr;
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
                                  F->getParent()->getDataLayout(), TLI, Call);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                Call);
}

bool llvm::isMathLibCallNoop(const CallBase *Call,
                             const TargetLibraryInfo *TLI) {
  // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
  // (and to some extent ConstantFoldScalarCall).
  if (Call->isNoBuiltin() || Call->isStrictFP())
    return false;
  Function *F = Call->getCalledFunction();
  if (!F)
    return false;

  LibFunc Func;
  if (!TLI || !TLI->getLibFunc(*F, Func))
    return false;

  if (Call->getNumArgOperands() == 1) {
    if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
      const APFloat &Op = OpC->getValueAPF();
      switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-745.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(709.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-103.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(88.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-1074.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(1023.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-149.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(127.0f)) != APFloat::cmpGreaterThan;
        break;
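      // These windows bracket where the result stays finite: exp(709.0) is
      // roughly 8.2e307 (finite) while exp(710.0) overflows to +inf and sets
      // ERANGE; similarly 2^1023 is the largest finite power of two and
      // 2^-1074 the smallest subnormal. Inside the window the call cannot set
      // errno, so it is a removable no-op when its result is unused.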
      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          double OpV = getValueAsDouble(OpC);
          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
        return Op.compare(APFloat(Op.getSemantics(), "-1")) !=
                   APFloat::cmpLessThan &&
               Op.compare(APFloat(Op.getSemantics(), "1")) !=
                   APFloat::cmpGreaterThan;

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return Op.compare(APFloat(-710.0)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(710.0)) != APFloat::cmpGreaterThan;
        if (OpC->getType()->isFloatTy())
          return Op.compare(APFloat(-89.0f)) != APFloat::cmpLessThan &&
                 Op.compare(APFloat(89.0f)) != APFloat::cmpGreaterThan;
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->getNumArgOperands() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType()) {
            double Op0V = getValueAsDouble(Op0C);
            double Op1V = getValueAsDouble(Op1C);
            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
          }
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}