1 //===- InstCombineCasts.cpp -----------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the visit functions for cast operations. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "InstCombineInternal.h" 14 #include "llvm/ADT/SetVector.h" 15 #include "llvm/Analysis/ConstantFolding.h" 16 #include "llvm/Analysis/TargetLibraryInfo.h" 17 #include "llvm/IR/DIBuilder.h" 18 #include "llvm/IR/DataLayout.h" 19 #include "llvm/IR/PatternMatch.h" 20 #include "llvm/Support/KnownBits.h" 21 #include "llvm/Transforms/InstCombine/InstCombiner.h" 22 #include <numeric> 23 using namespace llvm; 24 using namespace PatternMatch; 25 26 #define DEBUG_TYPE "instcombine" 27 28 /// Analyze 'Val', seeing if it is a simple linear expression. 29 /// If so, decompose it, returning some value X, such that Val is 30 /// X*Scale+Offset. 31 /// 32 static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale, 33 uint64_t &Offset) { 34 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { 35 Offset = CI->getZExtValue(); 36 Scale = 0; 37 return ConstantInt::get(Val->getType(), 0); 38 } 39 40 if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) { 41 // Cannot look past anything that might overflow. 42 OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val); 43 if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) { 44 Scale = 1; 45 Offset = 0; 46 return Val; 47 } 48 49 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) { 50 if (I->getOpcode() == Instruction::Shl) { 51 // This is a value scaled by '1 << the shift amt'. 52 Scale = UINT64_C(1) << RHS->getZExtValue(); 53 Offset = 0; 54 return I->getOperand(0); 55 } 56 57 if (I->getOpcode() == Instruction::Mul) { 58 // This value is scaled by 'RHS'. 59 Scale = RHS->getZExtValue(); 60 Offset = 0; 61 return I->getOperand(0); 62 } 63 64 if (I->getOpcode() == Instruction::Add) { 65 // We have X+C. Check to see if we really have (X*C2)+C1, 66 // where C1 is divisible by C2. 67 unsigned SubScale; 68 Value *SubVal = 69 decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset); 70 Offset += RHS->getZExtValue(); 71 Scale = SubScale; 72 return SubVal; 73 } 74 } 75 } 76 77 // Otherwise, we can't look past this. 78 Scale = 1; 79 Offset = 0; 80 return Val; 81 } 82 83 /// If we find a cast of an allocation instruction, try to eliminate the cast by 84 /// moving the type information into the alloc. 85 Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI, 86 AllocaInst &AI) { 87 PointerType *PTy = cast<PointerType>(CI.getType()); 88 89 IRBuilderBase::InsertPointGuard Guard(Builder); 90 Builder.SetInsertPoint(&AI); 91 92 // Get the type really allocated and the type casted to. 93 Type *AllocElTy = AI.getAllocatedType(); 94 Type *CastElTy = PTy->getElementType(); 95 if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr; 96 97 // This optimisation does not work for cases where the cast type 98 // is scalable and the allocated type is not. This because we need to 99 // know how many times the casted type fits into the allocated type. 100 // For the opposite case where the allocated type is scalable and the 101 // cast type is not this leads to poor code quality due to the 102 // introduction of 'vscale' into the calculations. It seems better to 103 // bail out for this case too until we've done a proper cost-benefit 104 // analysis. 105 bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy); 106 bool CastIsScalable = isa<ScalableVectorType>(CastElTy); 107 if (AllocIsScalable != CastIsScalable) return nullptr; 108 109 Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy); 110 Align CastElTyAlign = DL.getABITypeAlign(CastElTy); 111 if (CastElTyAlign < AllocElTyAlign) return nullptr; 112 113 // If the allocation has multiple uses, only promote it if we are strictly 114 // increasing the alignment of the resultant allocation. If we keep it the 115 // same, we open the door to infinite loops of various kinds. 116 if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr; 117 118 // The alloc and cast types should be either both fixed or both scalable. 119 uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize(); 120 uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize(); 121 if (CastElTySize == 0 || AllocElTySize == 0) return nullptr; 122 123 // If the allocation has multiple uses, only promote it if we're not 124 // shrinking the amount of memory being allocated. 125 uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize(); 126 uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize(); 127 if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr; 128 129 // See if we can satisfy the modulus by pulling a scale out of the array 130 // size argument. 131 unsigned ArraySizeScale; 132 uint64_t ArrayOffset; 133 Value *NumElements = // See if the array size is a decomposable linear expr. 134 decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset); 135 136 // If we can now satisfy the modulus, by using a non-1 scale, we really can 137 // do the xform. 138 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 || 139 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return nullptr; 140 141 // We don't currently support arrays of scalable types. 142 assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0)); 143 144 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize; 145 Value *Amt = nullptr; 146 if (Scale == 1) { 147 Amt = NumElements; 148 } else { 149 Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale); 150 // Insert before the alloca, not before the cast. 151 Amt = Builder.CreateMul(Amt, NumElements); 152 } 153 154 if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) { 155 Value *Off = ConstantInt::get(AI.getArraySize()->getType(), 156 Offset, true); 157 Amt = Builder.CreateAdd(Amt, Off); 158 } 159 160 AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt); 161 New->setAlignment(AI.getAlign()); 162 New->takeName(&AI); 163 New->setUsedWithInAlloca(AI.isUsedWithInAlloca()); 164 165 // If the allocation has multiple real uses, insert a cast and change all 166 // things that used it to use the new cast. This will also hack on CI, but it 167 // will die soon. 168 if (!AI.hasOneUse()) { 169 // New is the allocation instruction, pointer typed. AI is the original 170 // allocation instruction, also pointer typed. Thus, cast to use is BitCast. 171 Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast"); 172 replaceInstUsesWith(AI, NewCast); 173 eraseInstFromFunction(AI); 174 } 175 return replaceInstUsesWith(CI, New); 176 } 177 178 /// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns 179 /// true for, actually insert the code to evaluate the expression. 180 Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty, 181 bool isSigned) { 182 if (Constant *C = dyn_cast<Constant>(V)) { 183 C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/); 184 // If we got a constantexpr back, try to simplify it with DL info. 185 return ConstantFoldConstant(C, DL, &TLI); 186 } 187 188 // Otherwise, it must be an instruction. 189 Instruction *I = cast<Instruction>(V); 190 Instruction *Res = nullptr; 191 unsigned Opc = I->getOpcode(); 192 switch (Opc) { 193 case Instruction::Add: 194 case Instruction::Sub: 195 case Instruction::Mul: 196 case Instruction::And: 197 case Instruction::Or: 198 case Instruction::Xor: 199 case Instruction::AShr: 200 case Instruction::LShr: 201 case Instruction::Shl: 202 case Instruction::UDiv: 203 case Instruction::URem: { 204 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned); 205 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned); 206 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); 207 break; 208 } 209 case Instruction::Trunc: 210 case Instruction::ZExt: 211 case Instruction::SExt: 212 // If the source type of the cast is the type we're trying for then we can 213 // just return the source. There's no need to insert it because it is not 214 // new. 215 if (I->getOperand(0)->getType() == Ty) 216 return I->getOperand(0); 217 218 // Otherwise, must be the same type of cast, so just reinsert a new one. 219 // This also handles the case of zext(trunc(x)) -> zext(x). 220 Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty, 221 Opc == Instruction::SExt); 222 break; 223 case Instruction::Select: { 224 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned); 225 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned); 226 Res = SelectInst::Create(I->getOperand(0), True, False); 227 break; 228 } 229 case Instruction::PHI: { 230 PHINode *OPN = cast<PHINode>(I); 231 PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues()); 232 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) { 233 Value *V = 234 EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned); 235 NPN->addIncoming(V, OPN->getIncomingBlock(i)); 236 } 237 Res = NPN; 238 break; 239 } 240 default: 241 // TODO: Can handle more cases here. 242 llvm_unreachable("Unreachable!"); 243 } 244 245 Res->takeName(I); 246 return InsertNewInstWith(Res, *I); 247 } 248 249 Instruction::CastOps 250 InstCombinerImpl::isEliminableCastPair(const CastInst *CI1, 251 const CastInst *CI2) { 252 Type *SrcTy = CI1->getSrcTy(); 253 Type *MidTy = CI1->getDestTy(); 254 Type *DstTy = CI2->getDestTy(); 255 256 Instruction::CastOps firstOp = CI1->getOpcode(); 257 Instruction::CastOps secondOp = CI2->getOpcode(); 258 Type *SrcIntPtrTy = 259 SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr; 260 Type *MidIntPtrTy = 261 MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr; 262 Type *DstIntPtrTy = 263 DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr; 264 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, 265 DstTy, SrcIntPtrTy, MidIntPtrTy, 266 DstIntPtrTy); 267 268 // We don't want to form an inttoptr or ptrtoint that converts to an integer 269 // type that differs from the pointer size. 270 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) || 271 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy)) 272 Res = 0; 273 274 return Instruction::CastOps(Res); 275 } 276 277 /// Implement the transforms common to all CastInst visitors. 278 Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) { 279 Value *Src = CI.getOperand(0); 280 Type *Ty = CI.getType(); 281 282 // Try to eliminate a cast of a cast. 283 if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast 284 if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) { 285 // The first cast (CSrc) is eliminable so we need to fix up or replace 286 // the second cast (CI). CSrc will then have a good chance of being dead. 287 auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty); 288 // Point debug users of the dying cast to the new one. 289 if (CSrc->hasOneUse()) 290 replaceAllDbgUsesWith(*CSrc, *Res, CI, DT); 291 return Res; 292 } 293 } 294 295 if (auto *Sel = dyn_cast<SelectInst>(Src)) { 296 // We are casting a select. Try to fold the cast into the select if the 297 // select does not have a compare instruction with matching operand types 298 // or the select is likely better done in a narrow type. 299 // Creating a select with operands that are different sizes than its 300 // condition may inhibit other folds and lead to worse codegen. 301 auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition()); 302 if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() || 303 (CI.getOpcode() == Instruction::Trunc && 304 shouldChangeType(CI.getSrcTy(), CI.getType()))) { 305 if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) { 306 replaceAllDbgUsesWith(*Sel, *NV, CI, DT); 307 return NV; 308 } 309 } 310 } 311 312 // If we are casting a PHI, then fold the cast into the PHI. 313 if (auto *PN = dyn_cast<PHINode>(Src)) { 314 // Don't do this if it would create a PHI node with an illegal type from a 315 // legal type. 316 if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() || 317 shouldChangeType(CI.getSrcTy(), CI.getType())) 318 if (Instruction *NV = foldOpIntoPhi(CI, PN)) 319 return NV; 320 } 321 322 // Canonicalize a unary shuffle after the cast if neither operation changes 323 // the size or element size of the input vector. 324 // TODO: We could allow size-changing ops if that doesn't harm codegen. 325 // cast (shuffle X, Mask) --> shuffle (cast X), Mask 326 Value *X; 327 ArrayRef<int> Mask; 328 if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) { 329 // TODO: Allow scalable vectors? 330 auto *SrcTy = dyn_cast<FixedVectorType>(X->getType()); 331 auto *DestTy = dyn_cast<FixedVectorType>(Ty); 332 if (SrcTy && DestTy && 333 SrcTy->getNumElements() == DestTy->getNumElements() && 334 SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) { 335 Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy); 336 return new ShuffleVectorInst(CastX, UndefValue::get(DestTy), Mask); 337 } 338 } 339 340 return nullptr; 341 } 342 343 /// Constants and extensions/truncates from the destination type are always 344 /// free to be evaluated in that type. This is a helper for canEvaluate*. 345 static bool canAlwaysEvaluateInType(Value *V, Type *Ty) { 346 if (isa<Constant>(V)) 347 return true; 348 Value *X; 349 if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) && 350 X->getType() == Ty) 351 return true; 352 353 return false; 354 } 355 356 /// Filter out values that we can not evaluate in the destination type for free. 357 /// This is a helper for canEvaluate*. 358 static bool canNotEvaluateInType(Value *V, Type *Ty) { 359 assert(!isa<Constant>(V) && "Constant should already be handled."); 360 if (!isa<Instruction>(V)) 361 return true; 362 // We don't extend or shrink something that has multiple uses -- doing so 363 // would require duplicating the instruction which isn't profitable. 364 if (!V->hasOneUse()) 365 return true; 366 367 return false; 368 } 369 370 /// Return true if we can evaluate the specified expression tree as type Ty 371 /// instead of its larger type, and arrive with the same value. 372 /// This is used by code that tries to eliminate truncates. 373 /// 374 /// Ty will always be a type smaller than V. We should return true if trunc(V) 375 /// can be computed by computing V in the smaller type. If V is an instruction, 376 /// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only 377 /// makes sense if x and y can be efficiently truncated. 378 /// 379 /// This function works on both vectors and scalars. 380 /// 381 static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC, 382 Instruction *CxtI) { 383 if (canAlwaysEvaluateInType(V, Ty)) 384 return true; 385 if (canNotEvaluateInType(V, Ty)) 386 return false; 387 388 auto *I = cast<Instruction>(V); 389 Type *OrigTy = V->getType(); 390 switch (I->getOpcode()) { 391 case Instruction::Add: 392 case Instruction::Sub: 393 case Instruction::Mul: 394 case Instruction::And: 395 case Instruction::Or: 396 case Instruction::Xor: 397 // These operators can all arbitrarily be extended or truncated. 398 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 399 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 400 401 case Instruction::UDiv: 402 case Instruction::URem: { 403 // UDiv and URem can be truncated if all the truncated bits are zero. 404 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); 405 uint32_t BitWidth = Ty->getScalarSizeInBits(); 406 assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!"); 407 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth); 408 if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) && 409 IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) { 410 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 411 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 412 } 413 break; 414 } 415 case Instruction::Shl: { 416 // If we are truncating the result of this SHL, and if it's a shift of an 417 // inrange amount, we can always perform a SHL in a smaller type. 418 uint32_t BitWidth = Ty->getScalarSizeInBits(); 419 KnownBits AmtKnownBits = 420 llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout()); 421 if (AmtKnownBits.getMaxValue().ult(BitWidth)) 422 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 423 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 424 break; 425 } 426 case Instruction::LShr: { 427 // If this is a truncate of a logical shr, we can truncate it to a smaller 428 // lshr iff we know that the bits we would otherwise be shifting in are 429 // already zeros. 430 // TODO: It is enough to check that the bits we would be shifting in are 431 // zero - use AmtKnownBits.getMaxValue(). 432 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); 433 uint32_t BitWidth = Ty->getScalarSizeInBits(); 434 KnownBits AmtKnownBits = 435 llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout()); 436 APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth); 437 if (AmtKnownBits.getMaxValue().ult(BitWidth) && 438 IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) { 439 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 440 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 441 } 442 break; 443 } 444 case Instruction::AShr: { 445 // If this is a truncate of an arithmetic shr, we can truncate it to a 446 // smaller ashr iff we know that all the bits from the sign bit of the 447 // original type and the sign bit of the truncate type are similar. 448 // TODO: It is enough to check that the bits we would be shifting in are 449 // similar to sign bit of the truncate type. 450 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); 451 uint32_t BitWidth = Ty->getScalarSizeInBits(); 452 KnownBits AmtKnownBits = 453 llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout()); 454 unsigned ShiftedBits = OrigBitWidth - BitWidth; 455 if (AmtKnownBits.getMaxValue().ult(BitWidth) && 456 ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI)) 457 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) && 458 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI); 459 break; 460 } 461 case Instruction::Trunc: 462 // trunc(trunc(x)) -> trunc(x) 463 return true; 464 case Instruction::ZExt: 465 case Instruction::SExt: 466 // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest 467 // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest 468 return true; 469 case Instruction::Select: { 470 SelectInst *SI = cast<SelectInst>(I); 471 return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) && 472 canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI); 473 } 474 case Instruction::PHI: { 475 // We can change a phi if we can change all operands. Note that we never 476 // get into trouble with cyclic PHIs here because we only consider 477 // instructions with a single use. 478 PHINode *PN = cast<PHINode>(I); 479 for (Value *IncValue : PN->incoming_values()) 480 if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI)) 481 return false; 482 return true; 483 } 484 default: 485 // TODO: Can handle more cases here. 486 break; 487 } 488 489 return false; 490 } 491 492 /// Given a vector that is bitcast to an integer, optionally logically 493 /// right-shifted, and truncated, convert it to an extractelement. 494 /// Example (big endian): 495 /// trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32 496 /// ---> 497 /// extractelement <4 x i32> %X, 1 498 static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, 499 InstCombinerImpl &IC) { 500 Value *TruncOp = Trunc.getOperand(0); 501 Type *DestType = Trunc.getType(); 502 if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType)) 503 return nullptr; 504 505 Value *VecInput = nullptr; 506 ConstantInt *ShiftVal = nullptr; 507 if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)), 508 m_LShr(m_BitCast(m_Value(VecInput)), 509 m_ConstantInt(ShiftVal)))) || 510 !isa<VectorType>(VecInput->getType())) 511 return nullptr; 512 513 VectorType *VecType = cast<VectorType>(VecInput->getType()); 514 unsigned VecWidth = VecType->getPrimitiveSizeInBits(); 515 unsigned DestWidth = DestType->getPrimitiveSizeInBits(); 516 unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0; 517 518 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0)) 519 return nullptr; 520 521 // If the element type of the vector doesn't match the result type, 522 // bitcast it to a vector type that we can extract from. 523 unsigned NumVecElts = VecWidth / DestWidth; 524 if (VecType->getElementType() != DestType) { 525 VecType = FixedVectorType::get(DestType, NumVecElts); 526 VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc"); 527 } 528 529 unsigned Elt = ShiftAmount / DestWidth; 530 if (IC.getDataLayout().isBigEndian()) 531 Elt = NumVecElts - 1 - Elt; 532 533 return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt)); 534 } 535 536 /// Funnel/Rotate left/right may occur in a wider type than necessary because of 537 /// type promotion rules. Try to narrow the inputs and convert to funnel shift. 538 Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) { 539 assert((isa<VectorType>(Trunc.getSrcTy()) || 540 shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) && 541 "Don't narrow to an illegal scalar type"); 542 543 // Bail out on strange types. It is possible to handle some of these patterns 544 // even with non-power-of-2 sizes, but it is not a likely scenario. 545 Type *DestTy = Trunc.getType(); 546 unsigned NarrowWidth = DestTy->getScalarSizeInBits(); 547 unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits(); 548 if (!isPowerOf2_32(NarrowWidth)) 549 return nullptr; 550 551 // First, find an or'd pair of opposite shifts: 552 // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)) 553 BinaryOperator *Or0, *Or1; 554 if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1))))) 555 return nullptr; 556 557 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1; 558 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) || 559 !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) || 560 Or0->getOpcode() == Or1->getOpcode()) 561 return nullptr; 562 563 // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)). 564 if (Or0->getOpcode() == BinaryOperator::LShr) { 565 std::swap(Or0, Or1); 566 std::swap(ShVal0, ShVal1); 567 std::swap(ShAmt0, ShAmt1); 568 } 569 assert(Or0->getOpcode() == BinaryOperator::Shl && 570 Or1->getOpcode() == BinaryOperator::LShr && 571 "Illegal or(shift,shift) pair"); 572 573 // Match the shift amount operands for a funnel/rotate pattern. This always 574 // matches a subtraction on the R operand. 575 auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * { 576 // The shift amounts may add up to the narrow bit width: 577 // (shl ShVal0, L) | (lshr ShVal1, Width - L) 578 // If this is a funnel shift (different operands are shifted), then the 579 // shift amount can not over-shift (create poison) in the narrow type. 580 unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth); 581 APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth); 582 if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask)) 583 if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) 584 return L; 585 586 // The following patterns currently only work for rotation patterns. 587 // TODO: Add more general funnel-shift compatible patterns. 588 if (ShVal0 != ShVal1) 589 return nullptr; 590 591 // The shift amount may be masked with negation: 592 // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1))) 593 Value *X; 594 unsigned Mask = Width - 1; 595 if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) && 596 match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))) 597 return X; 598 599 // Same as above, but the shift amount may be extended after masking: 600 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && 601 match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))) 602 return X; 603 604 return nullptr; 605 }; 606 607 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth); 608 bool IsFshl = true; // Sub on LSHR. 609 if (!ShAmt) { 610 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth); 611 IsFshl = false; // Sub on SHL. 612 } 613 if (!ShAmt) 614 return nullptr; 615 616 // The right-shifted value must have high zeros in the wide type (for example 617 // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are 618 // truncated, so those do not matter. 619 APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth); 620 if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc)) 621 return nullptr; 622 623 // We have an unnecessarily wide rotate! 624 // trunc (or (shl ShVal0, ShAmt), (lshr ShVal1, BitWidth - ShAmt)) 625 // Narrow the inputs and convert to funnel shift intrinsic: 626 // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt)) 627 Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy); 628 Value *X, *Y; 629 X = Y = Builder.CreateTrunc(ShVal0, DestTy); 630 if (ShVal0 != ShVal1) 631 Y = Builder.CreateTrunc(ShVal1, DestTy); 632 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr; 633 Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy); 634 return CallInst::Create(F, {X, Y, NarrowShAmt}); 635 } 636 637 /// Try to narrow the width of math or bitwise logic instructions by pulling a 638 /// truncate ahead of binary operators. 639 /// TODO: Transforms for truncated shifts should be moved into here. 640 Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) { 641 Type *SrcTy = Trunc.getSrcTy(); 642 Type *DestTy = Trunc.getType(); 643 if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy)) 644 return nullptr; 645 646 BinaryOperator *BinOp; 647 if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp)))) 648 return nullptr; 649 650 Value *BinOp0 = BinOp->getOperand(0); 651 Value *BinOp1 = BinOp->getOperand(1); 652 switch (BinOp->getOpcode()) { 653 case Instruction::And: 654 case Instruction::Or: 655 case Instruction::Xor: 656 case Instruction::Add: 657 case Instruction::Sub: 658 case Instruction::Mul: { 659 Constant *C; 660 if (match(BinOp0, m_Constant(C))) { 661 // trunc (binop C, X) --> binop (trunc C', X) 662 Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy); 663 Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy); 664 return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX); 665 } 666 if (match(BinOp1, m_Constant(C))) { 667 // trunc (binop X, C) --> binop (trunc X, C') 668 Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy); 669 Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy); 670 return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC); 671 } 672 Value *X; 673 if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) { 674 // trunc (binop (ext X), Y) --> binop X, (trunc Y) 675 Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy); 676 return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1); 677 } 678 if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) { 679 // trunc (binop Y, (ext X)) --> binop (trunc Y), X 680 Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy); 681 return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X); 682 } 683 break; 684 } 685 686 default: break; 687 } 688 689 if (Instruction *NarrowOr = narrowFunnelShift(Trunc)) 690 return NarrowOr; 691 692 return nullptr; 693 } 694 695 /// Try to narrow the width of a splat shuffle. This could be generalized to any 696 /// shuffle with a constant operand, but we limit the transform to avoid 697 /// creating a shuffle type that targets may not be able to lower effectively. 698 static Instruction *shrinkSplatShuffle(TruncInst &Trunc, 699 InstCombiner::BuilderTy &Builder) { 700 auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0)); 701 if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) && 702 is_splat(Shuf->getShuffleMask()) && 703 Shuf->getType() == Shuf->getOperand(0)->getType()) { 704 // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask 705 Constant *NarrowUndef = UndefValue::get(Trunc.getType()); 706 Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType()); 707 return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getShuffleMask()); 708 } 709 710 return nullptr; 711 } 712 713 /// Try to narrow the width of an insert element. This could be generalized for 714 /// any vector constant, but we limit the transform to insertion into undef to 715 /// avoid potential backend problems from unsupported insertion widths. This 716 /// could also be extended to handle the case of inserting a scalar constant 717 /// into a vector variable. 718 static Instruction *shrinkInsertElt(CastInst &Trunc, 719 InstCombiner::BuilderTy &Builder) { 720 Instruction::CastOps Opcode = Trunc.getOpcode(); 721 assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) && 722 "Unexpected instruction for shrinking"); 723 724 auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0)); 725 if (!InsElt || !InsElt->hasOneUse()) 726 return nullptr; 727 728 Type *DestTy = Trunc.getType(); 729 Type *DestScalarTy = DestTy->getScalarType(); 730 Value *VecOp = InsElt->getOperand(0); 731 Value *ScalarOp = InsElt->getOperand(1); 732 Value *Index = InsElt->getOperand(2); 733 734 if (match(VecOp, m_Undef())) { 735 // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index 736 // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index 737 UndefValue *NarrowUndef = UndefValue::get(DestTy); 738 Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy); 739 return InsertElementInst::Create(NarrowUndef, NarrowOp, Index); 740 } 741 742 return nullptr; 743 } 744 745 Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) { 746 if (Instruction *Result = commonCastTransforms(Trunc)) 747 return Result; 748 749 Value *Src = Trunc.getOperand(0); 750 Type *DestTy = Trunc.getType(), *SrcTy = Src->getType(); 751 unsigned DestWidth = DestTy->getScalarSizeInBits(); 752 unsigned SrcWidth = SrcTy->getScalarSizeInBits(); 753 754 // Attempt to truncate the entire input expression tree to the destination 755 // type. Only do this if the dest type is a simple type, don't convert the 756 // expression tree to something weird like i93 unless the source is also 757 // strange. 758 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) && 759 canEvaluateTruncated(Src, DestTy, *this, &Trunc)) { 760 761 // If this cast is a truncate, evaluting in a different type always 762 // eliminates the cast, so it is always a win. 763 LLVM_DEBUG( 764 dbgs() << "ICE: EvaluateInDifferentType converting expression type" 765 " to avoid cast: " 766 << Trunc << '\n'); 767 Value *Res = EvaluateInDifferentType(Src, DestTy, false); 768 assert(Res->getType() == DestTy); 769 return replaceInstUsesWith(Trunc, Res); 770 } 771 772 // For integer types, check if we can shorten the entire input expression to 773 // DestWidth * 2, which won't allow removing the truncate, but reducing the 774 // width may enable further optimizations, e.g. allowing for larger 775 // vectorization factors. 776 if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) { 777 if (DestWidth * 2 < SrcWidth) { 778 auto *NewDestTy = DestITy->getExtendedType(); 779 if (shouldChangeType(SrcTy, NewDestTy) && 780 canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) { 781 LLVM_DEBUG( 782 dbgs() << "ICE: EvaluateInDifferentType converting expression type" 783 " to reduce the width of operand of" 784 << Trunc << '\n'); 785 Value *Res = EvaluateInDifferentType(Src, NewDestTy, false); 786 return new TruncInst(Res, DestTy); 787 } 788 } 789 } 790 791 // Test if the trunc is the user of a select which is part of a 792 // minimum or maximum operation. If so, don't do any more simplification. 793 // Even simplifying demanded bits can break the canonical form of a 794 // min/max. 795 Value *LHS, *RHS; 796 if (SelectInst *Sel = dyn_cast<SelectInst>(Src)) 797 if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN) 798 return nullptr; 799 800 // See if we can simplify any instructions used by the input whose sole 801 // purpose is to compute bits we don't care about. 802 if (SimplifyDemandedInstructionBits(Trunc)) 803 return &Trunc; 804 805 if (DestWidth == 1) { 806 Value *Zero = Constant::getNullValue(SrcTy); 807 if (DestTy->isIntegerTy()) { 808 // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only). 809 // TODO: We canonicalize to more instructions here because we are probably 810 // lacking equivalent analysis for trunc relative to icmp. There may also 811 // be codegen concerns. If those trunc limitations were removed, we could 812 // remove this transform. 813 Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1)); 814 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero); 815 } 816 817 // For vectors, we do not canonicalize all truncs to icmp, so optimize 818 // patterns that would be covered within visitICmpInst. 819 Value *X; 820 Constant *C; 821 if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) { 822 // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0 823 Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1)); 824 Constant *MaskC = ConstantExpr::getShl(One, C); 825 Value *And = Builder.CreateAnd(X, MaskC); 826 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero); 827 } 828 if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)), 829 m_Deferred(X))))) { 830 // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0 831 Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1)); 832 Constant *MaskC = ConstantExpr::getShl(One, C); 833 MaskC = ConstantExpr::getOr(MaskC, One); 834 Value *And = Builder.CreateAnd(X, MaskC); 835 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero); 836 } 837 } 838 839 Value *A, *B; 840 Constant *C; 841 if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) { 842 unsigned AWidth = A->getType()->getScalarSizeInBits(); 843 unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth); 844 auto *OldSh = cast<Instruction>(Src); 845 bool IsExact = OldSh->isExact(); 846 847 // If the shift is small enough, all zero bits created by the shift are 848 // removed by the trunc. 849 if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE, 850 APInt(SrcWidth, MaxShiftAmt)))) { 851 // trunc (lshr (sext A), C) --> ashr A, C 852 if (A->getType() == DestTy) { 853 Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false); 854 Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt); 855 ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType()); 856 ShAmt = Constant::mergeUndefsWith(ShAmt, C); 857 return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt) 858 : BinaryOperator::CreateAShr(A, ShAmt); 859 } 860 // The types are mismatched, so create a cast after shifting: 861 // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C) 862 if (Src->hasOneUse()) { 863 Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false); 864 Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt); 865 ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType()); 866 Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact); 867 return CastInst::CreateIntegerCast(Shift, DestTy, true); 868 } 869 } 870 // TODO: Mask high bits with 'and'. 871 } 872 873 // trunc (*shr (trunc A), C) --> trunc(*shr A, C) 874 if (match(Src, m_OneUse(m_Shr(m_Trunc(m_Value(A)), m_Constant(C))))) { 875 unsigned MaxShiftAmt = SrcWidth - DestWidth; 876 877 // If the shift is small enough, all zero/sign bits created by the shift are 878 // removed by the trunc. 879 if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE, 880 APInt(SrcWidth, MaxShiftAmt)))) { 881 auto *OldShift = cast<Instruction>(Src); 882 bool IsExact = OldShift->isExact(); 883 auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true); 884 ShAmt = Constant::mergeUndefsWith(ShAmt, C); 885 Value *Shift = 886 OldShift->getOpcode() == Instruction::AShr 887 ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact) 888 : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact); 889 return CastInst::CreateTruncOrBitCast(Shift, DestTy); 890 } 891 } 892 893 if (Instruction *I = narrowBinOp(Trunc)) 894 return I; 895 896 if (Instruction *I = shrinkSplatShuffle(Trunc, Builder)) 897 return I; 898 899 if (Instruction *I = shrinkInsertElt(Trunc, Builder)) 900 return I; 901 902 if (Src->hasOneUse() && 903 (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) { 904 // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the 905 // dest type is native and cst < dest size. 906 if (match(Src, m_Shl(m_Value(A), m_Constant(C))) && 907 !match(A, m_Shr(m_Value(), m_Constant()))) { 908 // Skip shifts of shift by constants. It undoes a combine in 909 // FoldShiftByConstant and is the extend in reg pattern. 910 APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth); 911 if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) { 912 Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr"); 913 return BinaryOperator::Create(Instruction::Shl, NewTrunc, 914 ConstantExpr::getTrunc(C, DestTy)); 915 } 916 } 917 } 918 919 if (Instruction *I = foldVecTruncToExtElt(Trunc, *this)) 920 return I; 921 922 // Whenever an element is extracted from a vector, and then truncated, 923 // canonicalize by converting it to a bitcast followed by an 924 // extractelement. 925 // 926 // Example (little endian): 927 // trunc (extractelement <4 x i64> %X, 0) to i32 928 // ---> 929 // extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0 930 Value *VecOp; 931 ConstantInt *Cst; 932 if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) { 933 auto *VecOpTy = cast<VectorType>(VecOp->getType()); 934 auto VecElts = VecOpTy->getElementCount(); 935 936 // A badly fit destination size would result in an invalid cast. 937 if (SrcWidth % DestWidth == 0) { 938 uint64_t TruncRatio = SrcWidth / DestWidth; 939 uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio; 940 uint64_t VecOpIdx = Cst->getZExtValue(); 941 uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1 942 : VecOpIdx * TruncRatio; 943 assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() && 944 "overflow 32-bits"); 945 946 auto *BitCastTo = 947 VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable()); 948 Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo); 949 return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx)); 950 } 951 } 952 953 // trunc (ctlz_i32(zext(A), B) --> add(ctlz_i16(A, B), C) 954 if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)), 955 m_Value(B))))) { 956 unsigned AWidth = A->getType()->getScalarSizeInBits(); 957 if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) { 958 Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth); 959 Value *NarrowCtlz = 960 Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B}); 961 return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff); 962 } 963 } 964 return nullptr; 965 } 966 967 /// Transform (zext icmp) to bitwise / integer operations in order to 968 /// eliminate it. If DoTransform is false, just test whether the given 969 /// (zext icmp) can be transformed. 970 Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext, 971 bool DoTransform) { 972 // If we are just checking for a icmp eq of a single bit and zext'ing it 973 // to an integer, then shift the bit to the appropriate place and then 974 // cast to integer to avoid the comparison. 975 const APInt *Op1CV; 976 if (match(Cmp->getOperand(1), m_APInt(Op1CV))) { 977 978 // zext (x <s 0) to i32 --> x>>u31 true if signbit set. 979 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear. 980 if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) || 981 (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) { 982 if (!DoTransform) return Cmp; 983 984 Value *In = Cmp->getOperand(0); 985 Value *Sh = ConstantInt::get(In->getType(), 986 In->getType()->getScalarSizeInBits() - 1); 987 In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit"); 988 if (In->getType() != Zext.getType()) 989 In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/); 990 991 if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) { 992 Constant *One = ConstantInt::get(In->getType(), 1); 993 In = Builder.CreateXor(In, One, In->getName() + ".not"); 994 } 995 996 return replaceInstUsesWith(Zext, In); 997 } 998 999 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set. 1000 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set. 1001 // zext (X == 1) to i32 --> X iff X has only the low bit set. 1002 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set. 1003 // zext (X != 0) to i32 --> X iff X has only the low bit set. 1004 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set. 1005 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set. 1006 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set. 1007 if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) && 1008 // This only works for EQ and NE 1009 Cmp->isEquality()) { 1010 // If Op1C some other power of two, convert: 1011 KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext); 1012 1013 APInt KnownZeroMask(~Known.Zero); 1014 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1? 1015 if (!DoTransform) return Cmp; 1016 1017 bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE; 1018 if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) { 1019 // (X&4) == 2 --> false 1020 // (X&4) != 2 --> true 1021 Constant *Res = ConstantInt::get(Zext.getType(), isNE); 1022 return replaceInstUsesWith(Zext, Res); 1023 } 1024 1025 uint32_t ShAmt = KnownZeroMask.logBase2(); 1026 Value *In = Cmp->getOperand(0); 1027 if (ShAmt) { 1028 // Perform a logical shr by shiftamt. 1029 // Insert the shift to put the result in the low bit. 1030 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt), 1031 In->getName() + ".lobit"); 1032 } 1033 1034 if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit. 1035 Constant *One = ConstantInt::get(In->getType(), 1); 1036 In = Builder.CreateXor(In, One); 1037 } 1038 1039 if (Zext.getType() == In->getType()) 1040 return replaceInstUsesWith(Zext, In); 1041 1042 Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false); 1043 return replaceInstUsesWith(Zext, IntCast); 1044 } 1045 } 1046 } 1047 1048 if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) { 1049 // Test if a bit is clear/set using a shifted-one mask: 1050 // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1 1051 // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1 1052 Value *X, *ShAmt; 1053 if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) && 1054 match(Cmp->getOperand(0), 1055 m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) { 1056 if (!DoTransform) 1057 return Cmp; 1058 1059 if (Cmp->getPredicate() == ICmpInst::ICMP_EQ) 1060 X = Builder.CreateNot(X); 1061 Value *Lshr = Builder.CreateLShr(X, ShAmt); 1062 Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1)); 1063 return replaceInstUsesWith(Zext, And1); 1064 } 1065 1066 // icmp ne A, B is equal to xor A, B when A and B only really have one bit. 1067 // It is also profitable to transform icmp eq into not(xor(A, B)) because 1068 // that may lead to additional simplifications. 1069 if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) { 1070 Value *LHS = Cmp->getOperand(0); 1071 Value *RHS = Cmp->getOperand(1); 1072 1073 KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext); 1074 KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext); 1075 1076 if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) { 1077 APInt KnownBits = KnownLHS.Zero | KnownLHS.One; 1078 APInt UnknownBit = ~KnownBits; 1079 if (UnknownBit.countPopulation() == 1) { 1080 if (!DoTransform) return Cmp; 1081 1082 Value *Result = Builder.CreateXor(LHS, RHS); 1083 1084 // Mask off any bits that are set and won't be shifted away. 1085 if (KnownLHS.One.uge(UnknownBit)) 1086 Result = Builder.CreateAnd(Result, 1087 ConstantInt::get(ITy, UnknownBit)); 1088 1089 // Shift the bit we're testing down to the lsb. 1090 Result = Builder.CreateLShr( 1091 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros())); 1092 1093 if (Cmp->getPredicate() == ICmpInst::ICMP_EQ) 1094 Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1)); 1095 Result->takeName(Cmp); 1096 return replaceInstUsesWith(Zext, Result); 1097 } 1098 } 1099 } 1100 } 1101 1102 return nullptr; 1103 } 1104 1105 /// Determine if the specified value can be computed in the specified wider type 1106 /// and produce the same low bits. If not, return false. 1107 /// 1108 /// If this function returns true, it can also return a non-zero number of bits 1109 /// (in BitsToClear) which indicates that the value it computes is correct for 1110 /// the zero extend, but that the additional BitsToClear bits need to be zero'd 1111 /// out. For example, to promote something like: 1112 /// 1113 /// %B = trunc i64 %A to i32 1114 /// %C = lshr i32 %B, 8 1115 /// %E = zext i32 %C to i64 1116 /// 1117 /// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be 1118 /// set to 8 to indicate that the promoted value needs to have bits 24-31 1119 /// cleared in addition to bits 32-63. Since an 'and' will be generated to 1120 /// clear the top bits anyway, doing this has no extra cost. 1121 /// 1122 /// This function works on both vectors and scalars. 1123 static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, 1124 InstCombinerImpl &IC, Instruction *CxtI) { 1125 BitsToClear = 0; 1126 if (canAlwaysEvaluateInType(V, Ty)) 1127 return true; 1128 if (canNotEvaluateInType(V, Ty)) 1129 return false; 1130 1131 auto *I = cast<Instruction>(V); 1132 unsigned Tmp; 1133 switch (I->getOpcode()) { 1134 case Instruction::ZExt: // zext(zext(x)) -> zext(x). 1135 case Instruction::SExt: // zext(sext(x)) -> sext(x). 1136 case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x) 1137 return true; 1138 case Instruction::And: 1139 case Instruction::Or: 1140 case Instruction::Xor: 1141 case Instruction::Add: 1142 case Instruction::Sub: 1143 case Instruction::Mul: 1144 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) || 1145 !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI)) 1146 return false; 1147 // These can all be promoted if neither operand has 'bits to clear'. 1148 if (BitsToClear == 0 && Tmp == 0) 1149 return true; 1150 1151 // If the operation is an AND/OR/XOR and the bits to clear are zero in the 1152 // other side, BitsToClear is ok. 1153 if (Tmp == 0 && I->isBitwiseLogicOp()) { 1154 // We use MaskedValueIsZero here for generality, but the case we care 1155 // about the most is constant RHS. 1156 unsigned VSize = V->getType()->getScalarSizeInBits(); 1157 if (IC.MaskedValueIsZero(I->getOperand(1), 1158 APInt::getHighBitsSet(VSize, BitsToClear), 1159 0, CxtI)) { 1160 // If this is an And instruction and all of the BitsToClear are 1161 // known to be zero we can reset BitsToClear. 1162 if (I->getOpcode() == Instruction::And) 1163 BitsToClear = 0; 1164 return true; 1165 } 1166 } 1167 1168 // Otherwise, we don't know how to analyze this BitsToClear case yet. 1169 return false; 1170 1171 case Instruction::Shl: { 1172 // We can promote shl(x, cst) if we can promote x. Since shl overwrites the 1173 // upper bits we can reduce BitsToClear by the shift amount. 1174 const APInt *Amt; 1175 if (match(I->getOperand(1), m_APInt(Amt))) { 1176 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI)) 1177 return false; 1178 uint64_t ShiftAmt = Amt->getZExtValue(); 1179 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0; 1180 return true; 1181 } 1182 return false; 1183 } 1184 case Instruction::LShr: { 1185 // We can promote lshr(x, cst) if we can promote x. This requires the 1186 // ultimate 'and' to clear out the high zero bits we're clearing out though. 1187 const APInt *Amt; 1188 if (match(I->getOperand(1), m_APInt(Amt))) { 1189 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI)) 1190 return false; 1191 BitsToClear += Amt->getZExtValue(); 1192 if (BitsToClear > V->getType()->getScalarSizeInBits()) 1193 BitsToClear = V->getType()->getScalarSizeInBits(); 1194 return true; 1195 } 1196 // Cannot promote variable LSHR. 1197 return false; 1198 } 1199 case Instruction::Select: 1200 if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) || 1201 !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) || 1202 // TODO: If important, we could handle the case when the BitsToClear are 1203 // known zero in the disagreeing side. 1204 Tmp != BitsToClear) 1205 return false; 1206 return true; 1207 1208 case Instruction::PHI: { 1209 // We can change a phi if we can change all operands. Note that we never 1210 // get into trouble with cyclic PHIs here because we only consider 1211 // instructions with a single use. 1212 PHINode *PN = cast<PHINode>(I); 1213 if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI)) 1214 return false; 1215 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) 1216 if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) || 1217 // TODO: If important, we could handle the case when the BitsToClear 1218 // are known zero in the disagreeing input. 1219 Tmp != BitsToClear) 1220 return false; 1221 return true; 1222 } 1223 default: 1224 // TODO: Can handle more cases here. 1225 return false; 1226 } 1227 } 1228 1229 Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) { 1230 // If this zero extend is only used by a truncate, let the truncate be 1231 // eliminated before we try to optimize this zext. 1232 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back())) 1233 return nullptr; 1234 1235 // If one of the common conversion will work, do it. 1236 if (Instruction *Result = commonCastTransforms(CI)) 1237 return Result; 1238 1239 Value *Src = CI.getOperand(0); 1240 Type *SrcTy = Src->getType(), *DestTy = CI.getType(); 1241 1242 // Try to extend the entire expression tree to the wide destination type. 1243 unsigned BitsToClear; 1244 if (shouldChangeType(SrcTy, DestTy) && 1245 canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) { 1246 assert(BitsToClear <= SrcTy->getScalarSizeInBits() && 1247 "Can't clear more bits than in SrcTy"); 1248 1249 // Okay, we can transform this! Insert the new expression now. 1250 LLVM_DEBUG( 1251 dbgs() << "ICE: EvaluateInDifferentType converting expression type" 1252 " to avoid zero extend: " 1253 << CI << '\n'); 1254 Value *Res = EvaluateInDifferentType(Src, DestTy, false); 1255 assert(Res->getType() == DestTy); 1256 1257 // Preserve debug values referring to Src if the zext is its last use. 1258 if (auto *SrcOp = dyn_cast<Instruction>(Src)) 1259 if (SrcOp->hasOneUse()) 1260 replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT); 1261 1262 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear; 1263 uint32_t DestBitSize = DestTy->getScalarSizeInBits(); 1264 1265 // If the high bits are already filled with zeros, just replace this 1266 // cast with the result. 1267 if (MaskedValueIsZero(Res, 1268 APInt::getHighBitsSet(DestBitSize, 1269 DestBitSize-SrcBitsKept), 1270 0, &CI)) 1271 return replaceInstUsesWith(CI, Res); 1272 1273 // We need to emit an AND to clear the high bits. 1274 Constant *C = ConstantInt::get(Res->getType(), 1275 APInt::getLowBitsSet(DestBitSize, SrcBitsKept)); 1276 return BinaryOperator::CreateAnd(Res, C); 1277 } 1278 1279 // If this is a TRUNC followed by a ZEXT then we are dealing with integral 1280 // types and if the sizes are just right we can convert this into a logical 1281 // 'and' which will be much cheaper than the pair of casts. 1282 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast 1283 // TODO: Subsume this into EvaluateInDifferentType. 1284 1285 // Get the sizes of the types involved. We know that the intermediate type 1286 // will be smaller than A or C, but don't know the relation between A and C. 1287 Value *A = CSrc->getOperand(0); 1288 unsigned SrcSize = A->getType()->getScalarSizeInBits(); 1289 unsigned MidSize = CSrc->getType()->getScalarSizeInBits(); 1290 unsigned DstSize = CI.getType()->getScalarSizeInBits(); 1291 // If we're actually extending zero bits, then if 1292 // SrcSize < DstSize: zext(a & mask) 1293 // SrcSize == DstSize: a & mask 1294 // SrcSize > DstSize: trunc(a) & mask 1295 if (SrcSize < DstSize) { 1296 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); 1297 Constant *AndConst = ConstantInt::get(A->getType(), AndValue); 1298 Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask"); 1299 return new ZExtInst(And, CI.getType()); 1300 } 1301 1302 if (SrcSize == DstSize) { 1303 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); 1304 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(), 1305 AndValue)); 1306 } 1307 if (SrcSize > DstSize) { 1308 Value *Trunc = Builder.CreateTrunc(A, CI.getType()); 1309 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize)); 1310 return BinaryOperator::CreateAnd(Trunc, 1311 ConstantInt::get(Trunc->getType(), 1312 AndValue)); 1313 } 1314 } 1315 1316 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src)) 1317 return transformZExtICmp(Cmp, CI); 1318 1319 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src); 1320 if (SrcI && SrcI->getOpcode() == Instruction::Or) { 1321 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one 1322 // of the (zext icmp) can be eliminated. If so, immediately perform the 1323 // according elimination. 1324 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0)); 1325 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1)); 1326 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() && 1327 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType() && 1328 (transformZExtICmp(LHS, CI, false) || 1329 transformZExtICmp(RHS, CI, false))) { 1330 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) 1331 Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName()); 1332 Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName()); 1333 Value *Or = Builder.CreateOr(LCast, RCast, CI.getName()); 1334 if (auto *OrInst = dyn_cast<Instruction>(Or)) 1335 Builder.SetInsertPoint(OrInst); 1336 1337 // Perform the elimination. 1338 if (auto *LZExt = dyn_cast<ZExtInst>(LCast)) 1339 transformZExtICmp(LHS, *LZExt); 1340 if (auto *RZExt = dyn_cast<ZExtInst>(RCast)) 1341 transformZExtICmp(RHS, *RZExt); 1342 1343 return replaceInstUsesWith(CI, Or); 1344 } 1345 } 1346 1347 // zext(trunc(X) & C) -> (X & zext(C)). 1348 Constant *C; 1349 Value *X; 1350 if (SrcI && 1351 match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) && 1352 X->getType() == CI.getType()) 1353 return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType())); 1354 1355 // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)). 1356 Value *And; 1357 if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) && 1358 match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) && 1359 X->getType() == CI.getType()) { 1360 Constant *ZC = ConstantExpr::getZExt(C, CI.getType()); 1361 return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC); 1362 } 1363 1364 return nullptr; 1365 } 1366 1367 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp. 1368 Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI, 1369 Instruction &CI) { 1370 Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1); 1371 ICmpInst::Predicate Pred = ICI->getPredicate(); 1372 1373 // Don't bother if Op1 isn't of vector or integer type. 1374 if (!Op1->getType()->isIntOrIntVectorTy()) 1375 return nullptr; 1376 1377 if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) || 1378 (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) { 1379 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative 1380 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive 1381 Value *Sh = ConstantInt::get(Op0->getType(), 1382 Op0->getType()->getScalarSizeInBits() - 1); 1383 Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit"); 1384 if (In->getType() != CI.getType()) 1385 In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/); 1386 1387 if (Pred == ICmpInst::ICMP_SGT) 1388 In = Builder.CreateNot(In, In->getName() + ".not"); 1389 return replaceInstUsesWith(CI, In); 1390 } 1391 1392 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) { 1393 // If we know that only one bit of the LHS of the icmp can be set and we 1394 // have an equality comparison with zero or a power of 2, we can transform 1395 // the icmp and sext into bitwise/integer operations. 1396 if (ICI->hasOneUse() && 1397 ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){ 1398 KnownBits Known = computeKnownBits(Op0, 0, &CI); 1399 1400 APInt KnownZeroMask(~Known.Zero); 1401 if (KnownZeroMask.isPowerOf2()) { 1402 Value *In = ICI->getOperand(0); 1403 1404 // If the icmp tests for a known zero bit we can constant fold it. 1405 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) { 1406 Value *V = Pred == ICmpInst::ICMP_NE ? 1407 ConstantInt::getAllOnesValue(CI.getType()) : 1408 ConstantInt::getNullValue(CI.getType()); 1409 return replaceInstUsesWith(CI, V); 1410 } 1411 1412 if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) { 1413 // sext ((x & 2^n) == 0) -> (x >> n) - 1 1414 // sext ((x & 2^n) != 2^n) -> (x >> n) - 1 1415 unsigned ShiftAmt = KnownZeroMask.countTrailingZeros(); 1416 // Perform a right shift to place the desired bit in the LSB. 1417 if (ShiftAmt) 1418 In = Builder.CreateLShr(In, 1419 ConstantInt::get(In->getType(), ShiftAmt)); 1420 1421 // At this point "In" is either 1 or 0. Subtract 1 to turn 1422 // {1, 0} -> {0, -1}. 1423 In = Builder.CreateAdd(In, 1424 ConstantInt::getAllOnesValue(In->getType()), 1425 "sext"); 1426 } else { 1427 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1 1428 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1 1429 unsigned ShiftAmt = KnownZeroMask.countLeadingZeros(); 1430 // Perform a left shift to place the desired bit in the MSB. 1431 if (ShiftAmt) 1432 In = Builder.CreateShl(In, 1433 ConstantInt::get(In->getType(), ShiftAmt)); 1434 1435 // Distribute the bit over the whole bit width. 1436 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(), 1437 KnownZeroMask.getBitWidth() - 1), "sext"); 1438 } 1439 1440 if (CI.getType() == In->getType()) 1441 return replaceInstUsesWith(CI, In); 1442 return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/); 1443 } 1444 } 1445 } 1446 1447 return nullptr; 1448 } 1449 1450 /// Return true if we can take the specified value and return it as type Ty 1451 /// without inserting any new casts and without changing the value of the common 1452 /// low bits. This is used by code that tries to promote integer operations to 1453 /// a wider types will allow us to eliminate the extension. 1454 /// 1455 /// This function works on both vectors and scalars. 1456 /// 1457 static bool canEvaluateSExtd(Value *V, Type *Ty) { 1458 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() && 1459 "Can't sign extend type to a smaller type"); 1460 if (canAlwaysEvaluateInType(V, Ty)) 1461 return true; 1462 if (canNotEvaluateInType(V, Ty)) 1463 return false; 1464 1465 auto *I = cast<Instruction>(V); 1466 switch (I->getOpcode()) { 1467 case Instruction::SExt: // sext(sext(x)) -> sext(x) 1468 case Instruction::ZExt: // sext(zext(x)) -> zext(x) 1469 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x) 1470 return true; 1471 case Instruction::And: 1472 case Instruction::Or: 1473 case Instruction::Xor: 1474 case Instruction::Add: 1475 case Instruction::Sub: 1476 case Instruction::Mul: 1477 // These operators can all arbitrarily be extended if their inputs can. 1478 return canEvaluateSExtd(I->getOperand(0), Ty) && 1479 canEvaluateSExtd(I->getOperand(1), Ty); 1480 1481 //case Instruction::Shl: TODO 1482 //case Instruction::LShr: TODO 1483 1484 case Instruction::Select: 1485 return canEvaluateSExtd(I->getOperand(1), Ty) && 1486 canEvaluateSExtd(I->getOperand(2), Ty); 1487 1488 case Instruction::PHI: { 1489 // We can change a phi if we can change all operands. Note that we never 1490 // get into trouble with cyclic PHIs here because we only consider 1491 // instructions with a single use. 1492 PHINode *PN = cast<PHINode>(I); 1493 for (Value *IncValue : PN->incoming_values()) 1494 if (!canEvaluateSExtd(IncValue, Ty)) return false; 1495 return true; 1496 } 1497 default: 1498 // TODO: Can handle more cases here. 1499 break; 1500 } 1501 1502 return false; 1503 } 1504 1505 Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) { 1506 // If this sign extend is only used by a truncate, let the truncate be 1507 // eliminated before we try to optimize this sext. 1508 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back())) 1509 return nullptr; 1510 1511 if (Instruction *I = commonCastTransforms(CI)) 1512 return I; 1513 1514 Value *Src = CI.getOperand(0); 1515 Type *SrcTy = Src->getType(), *DestTy = CI.getType(); 1516 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 1517 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 1518 1519 // If we know that the value being extended is positive, we can use a zext 1520 // instead. 1521 KnownBits Known = computeKnownBits(Src, 0, &CI); 1522 if (Known.isNonNegative()) 1523 return CastInst::Create(Instruction::ZExt, Src, DestTy); 1524 1525 // Try to extend the entire expression tree to the wide destination type. 1526 if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) { 1527 // Okay, we can transform this! Insert the new expression now. 1528 LLVM_DEBUG( 1529 dbgs() << "ICE: EvaluateInDifferentType converting expression type" 1530 " to avoid sign extend: " 1531 << CI << '\n'); 1532 Value *Res = EvaluateInDifferentType(Src, DestTy, true); 1533 assert(Res->getType() == DestTy); 1534 1535 // If the high bits are already filled with sign bit, just replace this 1536 // cast with the result. 1537 if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize) 1538 return replaceInstUsesWith(CI, Res); 1539 1540 // We need to emit a shl + ashr to do the sign extend. 1541 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize); 1542 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"), 1543 ShAmt); 1544 } 1545 1546 Value *X; 1547 if (match(Src, m_Trunc(m_Value(X)))) { 1548 // If the input has more sign bits than bits truncated, then convert 1549 // directly to final type. 1550 unsigned XBitSize = X->getType()->getScalarSizeInBits(); 1551 if (ComputeNumSignBits(X, 0, &CI) > XBitSize - SrcBitSize) 1552 return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true); 1553 1554 // If input is a trunc from the destination type, then convert into shifts. 1555 if (Src->hasOneUse() && X->getType() == DestTy) { 1556 // sext (trunc X) --> ashr (shl X, C), C 1557 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize); 1558 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt); 1559 } 1560 1561 // If we are replacing shifted-in high zero bits with sign bits, convert 1562 // the logic shift to arithmetic shift and eliminate the cast to 1563 // intermediate type: 1564 // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C) 1565 Value *Y; 1566 if (Src->hasOneUse() && 1567 match(X, m_LShr(m_Value(Y), 1568 m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) { 1569 Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize); 1570 return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true); 1571 } 1572 } 1573 1574 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src)) 1575 return transformSExtICmp(ICI, CI); 1576 1577 // If the input is a shl/ashr pair of a same constant, then this is a sign 1578 // extension from a smaller value. If we could trust arbitrary bitwidth 1579 // integers, we could turn this into a truncate to the smaller bit and then 1580 // use a sext for the whole extension. Since we don't, look deeper and check 1581 // for a truncate. If the source and dest are the same type, eliminate the 1582 // trunc and extend and just do shifts. For example, turn: 1583 // %a = trunc i32 %i to i8 1584 // %b = shl i8 %a, C 1585 // %c = ashr i8 %b, C 1586 // %d = sext i8 %c to i32 1587 // into: 1588 // %a = shl i32 %i, 32-(8-C) 1589 // %d = ashr i32 %a, 32-(8-C) 1590 Value *A = nullptr; 1591 // TODO: Eventually this could be subsumed by EvaluateInDifferentType. 1592 Constant *BA = nullptr, *CA = nullptr; 1593 if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)), 1594 m_Constant(CA))) && 1595 BA->isElementWiseEqual(CA) && A->getType() == DestTy) { 1596 Constant *WideCurrShAmt = ConstantExpr::getSExt(CA, DestTy); 1597 Constant *NumLowbitsLeft = ConstantExpr::getSub( 1598 ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt); 1599 Constant *NewShAmt = ConstantExpr::getSub( 1600 ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()), 1601 NumLowbitsLeft); 1602 NewShAmt = 1603 Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA); 1604 A = Builder.CreateShl(A, NewShAmt, CI.getName()); 1605 return BinaryOperator::CreateAShr(A, NewShAmt); 1606 } 1607 1608 return nullptr; 1609 } 1610 1611 /// Return a Constant* for the specified floating-point constant if it fits 1612 /// in the specified FP type without changing its value. 1613 static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) { 1614 bool losesInfo; 1615 APFloat F = CFP->getValueAPF(); 1616 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo); 1617 return !losesInfo; 1618 } 1619 1620 static Type *shrinkFPConstant(ConstantFP *CFP) { 1621 if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext())) 1622 return nullptr; // No constant folding of this. 1623 // See if the value can be truncated to half and then reextended. 1624 if (fitsInFPType(CFP, APFloat::IEEEhalf())) 1625 return Type::getHalfTy(CFP->getContext()); 1626 // See if the value can be truncated to float and then reextended. 1627 if (fitsInFPType(CFP, APFloat::IEEEsingle())) 1628 return Type::getFloatTy(CFP->getContext()); 1629 if (CFP->getType()->isDoubleTy()) 1630 return nullptr; // Won't shrink. 1631 if (fitsInFPType(CFP, APFloat::IEEEdouble())) 1632 return Type::getDoubleTy(CFP->getContext()); 1633 // Don't try to shrink to various long double types. 1634 return nullptr; 1635 } 1636 1637 // Determine if this is a vector of ConstantFPs and if so, return the minimal 1638 // type we can safely truncate all elements to. 1639 // TODO: Make these support undef elements. 1640 static Type *shrinkFPConstantVector(Value *V) { 1641 auto *CV = dyn_cast<Constant>(V); 1642 auto *CVVTy = dyn_cast<FixedVectorType>(V->getType()); 1643 if (!CV || !CVVTy) 1644 return nullptr; 1645 1646 Type *MinType = nullptr; 1647 1648 unsigned NumElts = CVVTy->getNumElements(); 1649 1650 // For fixed-width vectors we find the minimal type by looking 1651 // through the constant values of the vector. 1652 for (unsigned i = 0; i != NumElts; ++i) { 1653 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 1654 if (!CFP) 1655 return nullptr; 1656 1657 Type *T = shrinkFPConstant(CFP); 1658 if (!T) 1659 return nullptr; 1660 1661 // If we haven't found a type yet or this type has a larger mantissa than 1662 // our previous type, this is our new minimal type. 1663 if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth()) 1664 MinType = T; 1665 } 1666 1667 // Make a vector type from the minimal type. 1668 return FixedVectorType::get(MinType, NumElts); 1669 } 1670 1671 /// Find the minimum FP type we can safely truncate to. 1672 static Type *getMinimumFPType(Value *V) { 1673 if (auto *FPExt = dyn_cast<FPExtInst>(V)) 1674 return FPExt->getOperand(0)->getType(); 1675 1676 // If this value is a constant, return the constant in the smallest FP type 1677 // that can accurately represent it. This allows us to turn 1678 // (float)((double)X+2.0) into x+2.0f. 1679 if (auto *CFP = dyn_cast<ConstantFP>(V)) 1680 if (Type *T = shrinkFPConstant(CFP)) 1681 return T; 1682 1683 // We can only correctly find a minimum type for a scalable vector when it is 1684 // a splat. For splats of constant values the fpext is wrapped up as a 1685 // ConstantExpr. 1686 if (auto *FPCExt = dyn_cast<ConstantExpr>(V)) 1687 if (FPCExt->getOpcode() == Instruction::FPExt) 1688 return FPCExt->getOperand(0)->getType(); 1689 1690 // Try to shrink a vector of FP constants. This returns nullptr on scalable 1691 // vectors 1692 if (Type *T = shrinkFPConstantVector(V)) 1693 return T; 1694 1695 return V->getType(); 1696 } 1697 1698 /// Return true if the cast from integer to FP can be proven to be exact for all 1699 /// possible inputs (the conversion does not lose any precision). 1700 static bool isKnownExactCastIntToFP(CastInst &I) { 1701 CastInst::CastOps Opcode = I.getOpcode(); 1702 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) && 1703 "Unexpected cast"); 1704 Value *Src = I.getOperand(0); 1705 Type *SrcTy = Src->getType(); 1706 Type *FPTy = I.getType(); 1707 bool IsSigned = Opcode == Instruction::SIToFP; 1708 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned; 1709 1710 // Easy case - if the source integer type has less bits than the FP mantissa, 1711 // then the cast must be exact. 1712 int DestNumSigBits = FPTy->getFPMantissaWidth(); 1713 if (SrcSize <= DestNumSigBits) 1714 return true; 1715 1716 // Cast from FP to integer and back to FP is independent of the intermediate 1717 // integer width because of poison on overflow. 1718 Value *F; 1719 if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) { 1720 // If this is uitofp (fptosi F), the source needs an extra bit to avoid 1721 // potential rounding of negative FP input values. 1722 int SrcNumSigBits = F->getType()->getFPMantissaWidth(); 1723 if (!IsSigned && match(Src, m_FPToSI(m_Value()))) 1724 SrcNumSigBits++; 1725 1726 // [su]itofp (fpto[su]i F) --> exact if the source type has less or equal 1727 // significant bits than the destination (and make sure neither type is 1728 // weird -- ppc_fp128). 1729 if (SrcNumSigBits > 0 && DestNumSigBits > 0 && 1730 SrcNumSigBits <= DestNumSigBits) 1731 return true; 1732 } 1733 1734 // TODO: 1735 // Try harder to find if the source integer type has less significant bits. 1736 // For example, compute number of sign bits or compute low bit mask. 1737 return false; 1738 } 1739 1740 Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) { 1741 if (Instruction *I = commonCastTransforms(FPT)) 1742 return I; 1743 1744 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to 1745 // simplify this expression to avoid one or more of the trunc/extend 1746 // operations if we can do so without changing the numerical results. 1747 // 1748 // The exact manner in which the widths of the operands interact to limit 1749 // what we can and cannot do safely varies from operation to operation, and 1750 // is explained below in the various case statements. 1751 Type *Ty = FPT.getType(); 1752 auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0)); 1753 if (BO && BO->hasOneUse()) { 1754 Type *LHSMinType = getMinimumFPType(BO->getOperand(0)); 1755 Type *RHSMinType = getMinimumFPType(BO->getOperand(1)); 1756 unsigned OpWidth = BO->getType()->getFPMantissaWidth(); 1757 unsigned LHSWidth = LHSMinType->getFPMantissaWidth(); 1758 unsigned RHSWidth = RHSMinType->getFPMantissaWidth(); 1759 unsigned SrcWidth = std::max(LHSWidth, RHSWidth); 1760 unsigned DstWidth = Ty->getFPMantissaWidth(); 1761 switch (BO->getOpcode()) { 1762 default: break; 1763 case Instruction::FAdd: 1764 case Instruction::FSub: 1765 // For addition and subtraction, the infinitely precise result can 1766 // essentially be arbitrarily wide; proving that double rounding 1767 // will not occur because the result of OpI is exact (as we will for 1768 // FMul, for example) is hopeless. However, we *can* nonetheless 1769 // frequently know that double rounding cannot occur (or that it is 1770 // innocuous) by taking advantage of the specific structure of 1771 // infinitely-precise results that admit double rounding. 1772 // 1773 // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficient 1774 // to represent both sources, we can guarantee that the double 1775 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis, 1776 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..." 1777 // for proof of this fact). 1778 // 1779 // Note: Figueroa does not consider the case where DstFormat != 1780 // SrcFormat. It's possible (likely even!) that this analysis 1781 // could be tightened for those cases, but they are rare (the main 1782 // case of interest here is (float)((double)float + float)). 1783 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) { 1784 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty); 1785 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty); 1786 Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS); 1787 RI->copyFastMathFlags(BO); 1788 return RI; 1789 } 1790 break; 1791 case Instruction::FMul: 1792 // For multiplication, the infinitely precise result has at most 1793 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient 1794 // that such a value can be exactly represented, then no double 1795 // rounding can possibly occur; we can safely perform the operation 1796 // in the destination format if it can represent both sources. 1797 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) { 1798 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty); 1799 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty); 1800 return BinaryOperator::CreateFMulFMF(LHS, RHS, BO); 1801 } 1802 break; 1803 case Instruction::FDiv: 1804 // For division, we use again use the bound from Figueroa's 1805 // dissertation. I am entirely certain that this bound can be 1806 // tightened in the unbalanced operand case by an analysis based on 1807 // the diophantine rational approximation bound, but the well-known 1808 // condition used here is a good conservative first pass. 1809 // TODO: Tighten bound via rigorous analysis of the unbalanced case. 1810 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) { 1811 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty); 1812 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty); 1813 return BinaryOperator::CreateFDivFMF(LHS, RHS, BO); 1814 } 1815 break; 1816 case Instruction::FRem: { 1817 // Remainder is straightforward. Remainder is always exact, so the 1818 // type of OpI doesn't enter into things at all. We simply evaluate 1819 // in whichever source type is larger, then convert to the 1820 // destination type. 1821 if (SrcWidth == OpWidth) 1822 break; 1823 Value *LHS, *RHS; 1824 if (LHSWidth == SrcWidth) { 1825 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType); 1826 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType); 1827 } else { 1828 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType); 1829 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType); 1830 } 1831 1832 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO); 1833 return CastInst::CreateFPCast(ExactResult, Ty); 1834 } 1835 } 1836 } 1837 1838 // (fptrunc (fneg x)) -> (fneg (fptrunc x)) 1839 Value *X; 1840 Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0)); 1841 if (Op && Op->hasOneUse()) { 1842 // FIXME: The FMF should propagate from the fptrunc, not the source op. 1843 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 1844 if (isa<FPMathOperator>(Op)) 1845 Builder.setFastMathFlags(Op->getFastMathFlags()); 1846 1847 if (match(Op, m_FNeg(m_Value(X)))) { 1848 Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty); 1849 1850 return UnaryOperator::CreateFNegFMF(InnerTrunc, Op); 1851 } 1852 1853 // If we are truncating a select that has an extended operand, we can 1854 // narrow the other operand and do the select as a narrow op. 1855 Value *Cond, *X, *Y; 1856 if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) && 1857 X->getType() == Ty) { 1858 // fptrunc (select Cond, (fpext X), Y --> select Cond, X, (fptrunc Y) 1859 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty); 1860 Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op); 1861 return replaceInstUsesWith(FPT, Sel); 1862 } 1863 if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) && 1864 X->getType() == Ty) { 1865 // fptrunc (select Cond, Y, (fpext X) --> select Cond, (fptrunc Y), X 1866 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty); 1867 Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op); 1868 return replaceInstUsesWith(FPT, Sel); 1869 } 1870 } 1871 1872 if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) { 1873 switch (II->getIntrinsicID()) { 1874 default: break; 1875 case Intrinsic::ceil: 1876 case Intrinsic::fabs: 1877 case Intrinsic::floor: 1878 case Intrinsic::nearbyint: 1879 case Intrinsic::rint: 1880 case Intrinsic::round: 1881 case Intrinsic::roundeven: 1882 case Intrinsic::trunc: { 1883 Value *Src = II->getArgOperand(0); 1884 if (!Src->hasOneUse()) 1885 break; 1886 1887 // Except for fabs, this transformation requires the input of the unary FP 1888 // operation to be itself an fpext from the type to which we're 1889 // truncating. 1890 if (II->getIntrinsicID() != Intrinsic::fabs) { 1891 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src); 1892 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty) 1893 break; 1894 } 1895 1896 // Do unary FP operation on smaller type. 1897 // (fptrunc (fabs x)) -> (fabs (fptrunc x)) 1898 Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty); 1899 Function *Overload = Intrinsic::getDeclaration(FPT.getModule(), 1900 II->getIntrinsicID(), Ty); 1901 SmallVector<OperandBundleDef, 1> OpBundles; 1902 II->getOperandBundlesAsDefs(OpBundles); 1903 CallInst *NewCI = 1904 CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName()); 1905 NewCI->copyFastMathFlags(II); 1906 return NewCI; 1907 } 1908 } 1909 } 1910 1911 if (Instruction *I = shrinkInsertElt(FPT, Builder)) 1912 return I; 1913 1914 Value *Src = FPT.getOperand(0); 1915 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) { 1916 auto *FPCast = cast<CastInst>(Src); 1917 if (isKnownExactCastIntToFP(*FPCast)) 1918 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty); 1919 } 1920 1921 return nullptr; 1922 } 1923 1924 Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) { 1925 // If the source operand is a cast from integer to FP and known exact, then 1926 // cast the integer operand directly to the destination type. 1927 Type *Ty = FPExt.getType(); 1928 Value *Src = FPExt.getOperand(0); 1929 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) { 1930 auto *FPCast = cast<CastInst>(Src); 1931 if (isKnownExactCastIntToFP(*FPCast)) 1932 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty); 1933 } 1934 1935 return commonCastTransforms(FPExt); 1936 } 1937 1938 /// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X) 1939 /// This is safe if the intermediate type has enough bits in its mantissa to 1940 /// accurately represent all values of X. For example, this won't work with 1941 /// i64 -> float -> i64. 1942 Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) { 1943 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0))) 1944 return nullptr; 1945 1946 auto *OpI = cast<CastInst>(FI.getOperand(0)); 1947 Value *X = OpI->getOperand(0); 1948 Type *XType = X->getType(); 1949 Type *DestType = FI.getType(); 1950 bool IsOutputSigned = isa<FPToSIInst>(FI); 1951 1952 // Since we can assume the conversion won't overflow, our decision as to 1953 // whether the input will fit in the float should depend on the minimum 1954 // of the input range and output range. 1955 1956 // This means this is also safe for a signed input and unsigned output, since 1957 // a negative input would lead to undefined behavior. 1958 if (!isKnownExactCastIntToFP(*OpI)) { 1959 // The first cast may not round exactly based on the source integer width 1960 // and FP width, but the overflow UB rules can still allow this to fold. 1961 // If the destination type is narrow, that means the intermediate FP value 1962 // must be large enough to hold the source value exactly. 1963 // For example, (uint8_t)((float)(uint32_t 16777217) is undefined behavior. 1964 int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned; 1965 if (OutputSize > OpI->getType()->getFPMantissaWidth()) 1966 return nullptr; 1967 } 1968 1969 if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) { 1970 bool IsInputSigned = isa<SIToFPInst>(OpI); 1971 if (IsInputSigned && IsOutputSigned) 1972 return new SExtInst(X, DestType); 1973 return new ZExtInst(X, DestType); 1974 } 1975 if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits()) 1976 return new TruncInst(X, DestType); 1977 1978 assert(XType == DestType && "Unexpected types for int to FP to int casts"); 1979 return replaceInstUsesWith(FI, X); 1980 } 1981 1982 Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) { 1983 if (Instruction *I = foldItoFPtoI(FI)) 1984 return I; 1985 1986 return commonCastTransforms(FI); 1987 } 1988 1989 Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) { 1990 if (Instruction *I = foldItoFPtoI(FI)) 1991 return I; 1992 1993 return commonCastTransforms(FI); 1994 } 1995 1996 Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) { 1997 return commonCastTransforms(CI); 1998 } 1999 2000 Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) { 2001 return commonCastTransforms(CI); 2002 } 2003 2004 Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) { 2005 // If the source integer type is not the intptr_t type for this target, do a 2006 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the 2007 // cast to be exposed to other transforms. 2008 unsigned AS = CI.getAddressSpace(); 2009 if (CI.getOperand(0)->getType()->getScalarSizeInBits() != 2010 DL.getPointerSizeInBits(AS)) { 2011 Type *Ty = CI.getOperand(0)->getType()->getWithNewType( 2012 DL.getIntPtrType(CI.getContext(), AS)); 2013 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty); 2014 return new IntToPtrInst(P, CI.getType()); 2015 } 2016 2017 if (Instruction *I = commonCastTransforms(CI)) 2018 return I; 2019 2020 return nullptr; 2021 } 2022 2023 /// Implement the transforms for cast of pointer (bitcast/ptrtoint) 2024 Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) { 2025 Value *Src = CI.getOperand(0); 2026 2027 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) { 2028 // If casting the result of a getelementptr instruction with no offset, turn 2029 // this into a cast of the original pointer! 2030 if (GEP->hasAllZeroIndices() && 2031 // If CI is an addrspacecast and GEP changes the poiner type, merging 2032 // GEP into CI would undo canonicalizing addrspacecast with different 2033 // pointer types, causing infinite loops. 2034 (!isa<AddrSpaceCastInst>(CI) || 2035 GEP->getType() == GEP->getPointerOperandType())) { 2036 // Changing the cast operand is usually not a good idea but it is safe 2037 // here because the pointer operand is being replaced with another 2038 // pointer operand so the opcode doesn't need to change. 2039 return replaceOperand(CI, 0, GEP->getOperand(0)); 2040 } 2041 } 2042 2043 return commonCastTransforms(CI); 2044 } 2045 2046 Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) { 2047 // If the destination integer type is not the intptr_t type for this target, 2048 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast 2049 // to be exposed to other transforms. 2050 Value *SrcOp = CI.getPointerOperand(); 2051 Type *SrcTy = SrcOp->getType(); 2052 Type *Ty = CI.getType(); 2053 unsigned AS = CI.getPointerAddressSpace(); 2054 unsigned TySize = Ty->getScalarSizeInBits(); 2055 unsigned PtrSize = DL.getPointerSizeInBits(AS); 2056 if (TySize != PtrSize) { 2057 Type *IntPtrTy = 2058 SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS)); 2059 Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy); 2060 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false); 2061 } 2062 2063 Value *Vec, *Scalar, *Index; 2064 if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)), 2065 m_Value(Scalar), m_Value(Index)))) && 2066 Vec->getType() == Ty) { 2067 assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type"); 2068 // Convert the scalar to int followed by insert to eliminate one cast: 2069 // p2i (ins (i2p Vec), Scalar, Index --> ins Vec, (p2i Scalar), Index 2070 Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType()); 2071 return InsertElementInst::Create(Vec, NewCast, Index); 2072 } 2073 2074 return commonPointerCastTransforms(CI); 2075 } 2076 2077 /// This input value (which is known to have vector type) is being zero extended 2078 /// or truncated to the specified vector type. Since the zext/trunc is done 2079 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern, 2080 /// endianness will impact which end of the vector that is extended or 2081 /// truncated. 2082 /// 2083 /// A vector is always stored with index 0 at the lowest address, which 2084 /// corresponds to the most significant bits for a big endian stored integer and 2085 /// the least significant bits for little endian. A trunc/zext of an integer 2086 /// impacts the big end of the integer. Thus, we need to add/remove elements at 2087 /// the front of the vector for big endian targets, and the back of the vector 2088 /// for little endian targets. 2089 /// 2090 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible. 2091 /// 2092 /// The source and destination vector types may have different element types. 2093 static Instruction * 2094 optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, 2095 InstCombinerImpl &IC) { 2096 // We can only do this optimization if the output is a multiple of the input 2097 // element size, or the input is a multiple of the output element size. 2098 // Convert the input type to have the same element type as the output. 2099 VectorType *SrcTy = cast<VectorType>(InVal->getType()); 2100 2101 if (SrcTy->getElementType() != DestTy->getElementType()) { 2102 // The input types don't need to be identical, but for now they must be the 2103 // same size. There is no specific reason we couldn't handle things like 2104 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten 2105 // there yet. 2106 if (SrcTy->getElementType()->getPrimitiveSizeInBits() != 2107 DestTy->getElementType()->getPrimitiveSizeInBits()) 2108 return nullptr; 2109 2110 SrcTy = 2111 FixedVectorType::get(DestTy->getElementType(), 2112 cast<FixedVectorType>(SrcTy)->getNumElements()); 2113 InVal = IC.Builder.CreateBitCast(InVal, SrcTy); 2114 } 2115 2116 bool IsBigEndian = IC.getDataLayout().isBigEndian(); 2117 unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements(); 2118 unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements(); 2119 2120 assert(SrcElts != DestElts && "Element counts should be different."); 2121 2122 // Now that the element types match, get the shuffle mask and RHS of the 2123 // shuffle to use, which depends on whether we're increasing or decreasing the 2124 // size of the input. 2125 SmallVector<int, 16> ShuffleMaskStorage; 2126 ArrayRef<int> ShuffleMask; 2127 Value *V2; 2128 2129 // Produce an identify shuffle mask for the src vector. 2130 ShuffleMaskStorage.resize(SrcElts); 2131 std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0); 2132 2133 if (SrcElts > DestElts) { 2134 // If we're shrinking the number of elements (rewriting an integer 2135 // truncate), just shuffle in the elements corresponding to the least 2136 // significant bits from the input and use undef as the second shuffle 2137 // input. 2138 V2 = UndefValue::get(SrcTy); 2139 // Make sure the shuffle mask selects the "least significant bits" by 2140 // keeping elements from back of the src vector for big endian, and from the 2141 // front for little endian. 2142 ShuffleMask = ShuffleMaskStorage; 2143 if (IsBigEndian) 2144 ShuffleMask = ShuffleMask.take_back(DestElts); 2145 else 2146 ShuffleMask = ShuffleMask.take_front(DestElts); 2147 } else { 2148 // If we're increasing the number of elements (rewriting an integer zext), 2149 // shuffle in all of the elements from InVal. Fill the rest of the result 2150 // elements with zeros from a constant zero. 2151 V2 = Constant::getNullValue(SrcTy); 2152 // Use first elt from V2 when indicating zero in the shuffle mask. 2153 uint32_t NullElt = SrcElts; 2154 // Extend with null values in the "most significant bits" by adding elements 2155 // in front of the src vector for big endian, and at the back for little 2156 // endian. 2157 unsigned DeltaElts = DestElts - SrcElts; 2158 if (IsBigEndian) 2159 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt); 2160 else 2161 ShuffleMaskStorage.append(DeltaElts, NullElt); 2162 ShuffleMask = ShuffleMaskStorage; 2163 } 2164 2165 return new ShuffleVectorInst(InVal, V2, ShuffleMask); 2166 } 2167 2168 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) { 2169 return Value % Ty->getPrimitiveSizeInBits() == 0; 2170 } 2171 2172 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) { 2173 return Value / Ty->getPrimitiveSizeInBits(); 2174 } 2175 2176 /// V is a value which is inserted into a vector of VecEltTy. 2177 /// Look through the value to see if we can decompose it into 2178 /// insertions into the vector. See the example in the comment for 2179 /// OptimizeIntegerToVectorInsertions for the pattern this handles. 2180 /// The type of V is always a non-zero multiple of VecEltTy's size. 2181 /// Shift is the number of bits between the lsb of V and the lsb of 2182 /// the vector. 2183 /// 2184 /// This returns false if the pattern can't be matched or true if it can, 2185 /// filling in Elements with the elements found here. 2186 static bool collectInsertionElements(Value *V, unsigned Shift, 2187 SmallVectorImpl<Value *> &Elements, 2188 Type *VecEltTy, bool isBigEndian) { 2189 assert(isMultipleOfTypeSize(Shift, VecEltTy) && 2190 "Shift should be a multiple of the element type size"); 2191 2192 // Undef values never contribute useful bits to the result. 2193 if (isa<UndefValue>(V)) return true; 2194 2195 // If we got down to a value of the right type, we win, try inserting into the 2196 // right element. 2197 if (V->getType() == VecEltTy) { 2198 // Inserting null doesn't actually insert any elements. 2199 if (Constant *C = dyn_cast<Constant>(V)) 2200 if (C->isNullValue()) 2201 return true; 2202 2203 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy); 2204 if (isBigEndian) 2205 ElementIndex = Elements.size() - ElementIndex - 1; 2206 2207 // Fail if multiple elements are inserted into this slot. 2208 if (Elements[ElementIndex]) 2209 return false; 2210 2211 Elements[ElementIndex] = V; 2212 return true; 2213 } 2214 2215 if (Constant *C = dyn_cast<Constant>(V)) { 2216 // Figure out the # elements this provides, and bitcast it or slice it up 2217 // as required. 2218 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(), 2219 VecEltTy); 2220 // If the constant is the size of a vector element, we just need to bitcast 2221 // it to the right type so it gets properly inserted. 2222 if (NumElts == 1) 2223 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy), 2224 Shift, Elements, VecEltTy, isBigEndian); 2225 2226 // Okay, this is a constant that covers multiple elements. Slice it up into 2227 // pieces and insert each element-sized piece into the vector. 2228 if (!isa<IntegerType>(C->getType())) 2229 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(), 2230 C->getType()->getPrimitiveSizeInBits())); 2231 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits(); 2232 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize); 2233 2234 for (unsigned i = 0; i != NumElts; ++i) { 2235 unsigned ShiftI = Shift+i*ElementSize; 2236 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(), 2237 ShiftI)); 2238 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy); 2239 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy, 2240 isBigEndian)) 2241 return false; 2242 } 2243 return true; 2244 } 2245 2246 if (!V->hasOneUse()) return false; 2247 2248 Instruction *I = dyn_cast<Instruction>(V); 2249 if (!I) return false; 2250 switch (I->getOpcode()) { 2251 default: return false; // Unhandled case. 2252 case Instruction::BitCast: 2253 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2254 isBigEndian); 2255 case Instruction::ZExt: 2256 if (!isMultipleOfTypeSize( 2257 I->getOperand(0)->getType()->getPrimitiveSizeInBits(), 2258 VecEltTy)) 2259 return false; 2260 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2261 isBigEndian); 2262 case Instruction::Or: 2263 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2264 isBigEndian) && 2265 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy, 2266 isBigEndian); 2267 case Instruction::Shl: { 2268 // Must be shifting by a constant that is a multiple of the element size. 2269 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1)); 2270 if (!CI) return false; 2271 Shift += CI->getZExtValue(); 2272 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false; 2273 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2274 isBigEndian); 2275 } 2276 2277 } 2278 } 2279 2280 2281 /// If the input is an 'or' instruction, we may be doing shifts and ors to 2282 /// assemble the elements of the vector manually. 2283 /// Try to rip the code out and replace it with insertelements. This is to 2284 /// optimize code like this: 2285 /// 2286 /// %tmp37 = bitcast float %inc to i32 2287 /// %tmp38 = zext i32 %tmp37 to i64 2288 /// %tmp31 = bitcast float %inc5 to i32 2289 /// %tmp32 = zext i32 %tmp31 to i64 2290 /// %tmp33 = shl i64 %tmp32, 32 2291 /// %ins35 = or i64 %tmp33, %tmp38 2292 /// %tmp43 = bitcast i64 %ins35 to <2 x float> 2293 /// 2294 /// Into two insertelements that do "buildvector{%inc, %inc5}". 2295 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI, 2296 InstCombinerImpl &IC) { 2297 auto *DestVecTy = cast<FixedVectorType>(CI.getType()); 2298 Value *IntInput = CI.getOperand(0); 2299 2300 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements()); 2301 if (!collectInsertionElements(IntInput, 0, Elements, 2302 DestVecTy->getElementType(), 2303 IC.getDataLayout().isBigEndian())) 2304 return nullptr; 2305 2306 // If we succeeded, we know that all of the element are specified by Elements 2307 // or are zero if Elements has a null entry. Recast this as a set of 2308 // insertions. 2309 Value *Result = Constant::getNullValue(CI.getType()); 2310 for (unsigned i = 0, e = Elements.size(); i != e; ++i) { 2311 if (!Elements[i]) continue; // Unset element. 2312 2313 Result = IC.Builder.CreateInsertElement(Result, Elements[i], 2314 IC.Builder.getInt32(i)); 2315 } 2316 2317 return Result; 2318 } 2319 2320 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the 2321 /// vector followed by extract element. The backend tends to handle bitcasts of 2322 /// vectors better than bitcasts of scalars because vector registers are 2323 /// usually not type-specific like scalar integer or scalar floating-point. 2324 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast, 2325 InstCombinerImpl &IC) { 2326 // TODO: Create and use a pattern matcher for ExtractElementInst. 2327 auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0)); 2328 if (!ExtElt || !ExtElt->hasOneUse()) 2329 return nullptr; 2330 2331 // The bitcast must be to a vectorizable type, otherwise we can't make a new 2332 // type to extract from. 2333 Type *DestType = BitCast.getType(); 2334 if (!VectorType::isValidElementType(DestType)) 2335 return nullptr; 2336 2337 auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType()); 2338 auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(), 2339 NewVecType, "bc"); 2340 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand()); 2341 } 2342 2343 /// Change the type of a bitwise logic operation if we can eliminate a bitcast. 2344 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast, 2345 InstCombiner::BuilderTy &Builder) { 2346 Type *DestTy = BitCast.getType(); 2347 BinaryOperator *BO; 2348 if (!DestTy->isIntOrIntVectorTy() || 2349 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) || 2350 !BO->isBitwiseLogicOp()) 2351 return nullptr; 2352 2353 // FIXME: This transform is restricted to vector types to avoid backend 2354 // problems caused by creating potentially illegal operations. If a fix-up is 2355 // added to handle that situation, we can remove this check. 2356 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy()) 2357 return nullptr; 2358 2359 Value *X; 2360 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) && 2361 X->getType() == DestTy && !isa<Constant>(X)) { 2362 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y)) 2363 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy); 2364 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1); 2365 } 2366 2367 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) && 2368 X->getType() == DestTy && !isa<Constant>(X)) { 2369 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X) 2370 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy); 2371 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X); 2372 } 2373 2374 // Canonicalize vector bitcasts to come before vector bitwise logic with a 2375 // constant. This eases recognition of special constants for later ops. 2376 // Example: 2377 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 2378 Constant *C; 2379 if (match(BO->getOperand(1), m_Constant(C))) { 2380 // bitcast (logic X, C) --> logic (bitcast X, C') 2381 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy); 2382 Value *CastedC = Builder.CreateBitCast(C, DestTy); 2383 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC); 2384 } 2385 2386 return nullptr; 2387 } 2388 2389 /// Change the type of a select if we can eliminate a bitcast. 2390 static Instruction *foldBitCastSelect(BitCastInst &BitCast, 2391 InstCombiner::BuilderTy &Builder) { 2392 Value *Cond, *TVal, *FVal; 2393 if (!match(BitCast.getOperand(0), 2394 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 2395 return nullptr; 2396 2397 // A vector select must maintain the same number of elements in its operands. 2398 Type *CondTy = Cond->getType(); 2399 Type *DestTy = BitCast.getType(); 2400 if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) 2401 if (!DestTy->isVectorTy() || 2402 CondVTy->getElementCount() != 2403 cast<VectorType>(DestTy)->getElementCount()) 2404 return nullptr; 2405 2406 // FIXME: This transform is restricted from changing the select between 2407 // scalars and vectors to avoid backend problems caused by creating 2408 // potentially illegal operations. If a fix-up is added to handle that 2409 // situation, we can remove this check. 2410 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy()) 2411 return nullptr; 2412 2413 auto *Sel = cast<Instruction>(BitCast.getOperand(0)); 2414 Value *X; 2415 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy && 2416 !isa<Constant>(X)) { 2417 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y)) 2418 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy); 2419 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel); 2420 } 2421 2422 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy && 2423 !isa<Constant>(X)) { 2424 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X) 2425 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy); 2426 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel); 2427 } 2428 2429 return nullptr; 2430 } 2431 2432 /// Check if all users of CI are StoreInsts. 2433 static bool hasStoreUsersOnly(CastInst &CI) { 2434 for (User *U : CI.users()) { 2435 if (!isa<StoreInst>(U)) 2436 return false; 2437 } 2438 return true; 2439 } 2440 2441 /// This function handles following case 2442 /// 2443 /// A -> B cast 2444 /// PHI 2445 /// B -> A cast 2446 /// 2447 /// All the related PHI nodes can be replaced by new PHI nodes with type A. 2448 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN. 2449 Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI, 2450 PHINode *PN) { 2451 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp. 2452 if (hasStoreUsersOnly(CI)) 2453 return nullptr; 2454 2455 Value *Src = CI.getOperand(0); 2456 Type *SrcTy = Src->getType(); // Type B 2457 Type *DestTy = CI.getType(); // Type A 2458 2459 SmallVector<PHINode *, 4> PhiWorklist; 2460 SmallSetVector<PHINode *, 4> OldPhiNodes; 2461 2462 // Find all of the A->B casts and PHI nodes. 2463 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so 2464 // OldPhiNodes is used to track all known PHI nodes, before adding a new 2465 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first. 2466 PhiWorklist.push_back(PN); 2467 OldPhiNodes.insert(PN); 2468 while (!PhiWorklist.empty()) { 2469 auto *OldPN = PhiWorklist.pop_back_val(); 2470 for (Value *IncValue : OldPN->incoming_values()) { 2471 if (isa<Constant>(IncValue)) 2472 continue; 2473 2474 if (auto *LI = dyn_cast<LoadInst>(IncValue)) { 2475 // If there is a sequence of one or more load instructions, each loaded 2476 // value is used as address of later load instruction, bitcast is 2477 // necessary to change the value type, don't optimize it. For 2478 // simplicity we give up if the load address comes from another load. 2479 Value *Addr = LI->getOperand(0); 2480 if (Addr == &CI || isa<LoadInst>(Addr)) 2481 return nullptr; 2482 // Don't tranform "load <256 x i32>, <256 x i32>*" to 2483 // "load x86_amx, x86_amx*", because x86_amx* is invalid. 2484 // TODO: Remove this check when bitcast between vector and x86_amx 2485 // is replaced with a specific intrinsic. 2486 if (DestTy->isX86_AMXTy()) 2487 return nullptr; 2488 if (LI->hasOneUse() && LI->isSimple()) 2489 continue; 2490 // If a LoadInst has more than one use, changing the type of loaded 2491 // value may create another bitcast. 2492 return nullptr; 2493 } 2494 2495 if (auto *PNode = dyn_cast<PHINode>(IncValue)) { 2496 if (OldPhiNodes.insert(PNode)) 2497 PhiWorklist.push_back(PNode); 2498 continue; 2499 } 2500 2501 auto *BCI = dyn_cast<BitCastInst>(IncValue); 2502 // We can't handle other instructions. 2503 if (!BCI) 2504 return nullptr; 2505 2506 // Verify it's a A->B cast. 2507 Type *TyA = BCI->getOperand(0)->getType(); 2508 Type *TyB = BCI->getType(); 2509 if (TyA != DestTy || TyB != SrcTy) 2510 return nullptr; 2511 } 2512 } 2513 2514 // Check that each user of each old PHI node is something that we can 2515 // rewrite, so that all of the old PHI nodes can be cleaned up afterwards. 2516 for (auto *OldPN : OldPhiNodes) { 2517 for (User *V : OldPN->users()) { 2518 if (auto *SI = dyn_cast<StoreInst>(V)) { 2519 if (!SI->isSimple() || SI->getOperand(0) != OldPN) 2520 return nullptr; 2521 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) { 2522 // Verify it's a B->A cast. 2523 Type *TyB = BCI->getOperand(0)->getType(); 2524 Type *TyA = BCI->getType(); 2525 if (TyA != DestTy || TyB != SrcTy) 2526 return nullptr; 2527 } else if (auto *PHI = dyn_cast<PHINode>(V)) { 2528 // As long as the user is another old PHI node, then even if we don't 2529 // rewrite it, the PHI web we're considering won't have any users 2530 // outside itself, so it'll be dead. 2531 if (OldPhiNodes.count(PHI) == 0) 2532 return nullptr; 2533 } else { 2534 return nullptr; 2535 } 2536 } 2537 } 2538 2539 // For each old PHI node, create a corresponding new PHI node with a type A. 2540 SmallDenseMap<PHINode *, PHINode *> NewPNodes; 2541 for (auto *OldPN : OldPhiNodes) { 2542 Builder.SetInsertPoint(OldPN); 2543 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands()); 2544 NewPNodes[OldPN] = NewPN; 2545 } 2546 2547 // Fill in the operands of new PHI nodes. 2548 for (auto *OldPN : OldPhiNodes) { 2549 PHINode *NewPN = NewPNodes[OldPN]; 2550 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) { 2551 Value *V = OldPN->getOperand(j); 2552 Value *NewV = nullptr; 2553 if (auto *C = dyn_cast<Constant>(V)) { 2554 NewV = ConstantExpr::getBitCast(C, DestTy); 2555 } else if (auto *LI = dyn_cast<LoadInst>(V)) { 2556 // Explicitly perform load combine to make sure no opposing transform 2557 // can remove the bitcast in the meantime and trigger an infinite loop. 2558 Builder.SetInsertPoint(LI); 2559 NewV = combineLoadToNewType(*LI, DestTy); 2560 // Remove the old load and its use in the old phi, which itself becomes 2561 // dead once the whole transform finishes. 2562 replaceInstUsesWith(*LI, PoisonValue::get(LI->getType())); 2563 eraseInstFromFunction(*LI); 2564 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) { 2565 NewV = BCI->getOperand(0); 2566 } else if (auto *PrevPN = dyn_cast<PHINode>(V)) { 2567 NewV = NewPNodes[PrevPN]; 2568 } 2569 assert(NewV); 2570 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j)); 2571 } 2572 } 2573 2574 // Traverse all accumulated PHI nodes and process its users, 2575 // which are Stores and BitcCasts. Without this processing 2576 // NewPHI nodes could be replicated and could lead to extra 2577 // moves generated after DeSSA. 2578 // If there is a store with type B, change it to type A. 2579 2580 2581 // Replace users of BitCast B->A with NewPHI. These will help 2582 // later to get rid off a closure formed by OldPHI nodes. 2583 Instruction *RetVal = nullptr; 2584 for (auto *OldPN : OldPhiNodes) { 2585 PHINode *NewPN = NewPNodes[OldPN]; 2586 for (User *V : make_early_inc_range(OldPN->users())) { 2587 if (auto *SI = dyn_cast<StoreInst>(V)) { 2588 assert(SI->isSimple() && SI->getOperand(0) == OldPN); 2589 Builder.SetInsertPoint(SI); 2590 auto *NewBC = 2591 cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy)); 2592 SI->setOperand(0, NewBC); 2593 Worklist.push(SI); 2594 assert(hasStoreUsersOnly(*NewBC)); 2595 } 2596 else if (auto *BCI = dyn_cast<BitCastInst>(V)) { 2597 Type *TyB = BCI->getOperand(0)->getType(); 2598 Type *TyA = BCI->getType(); 2599 assert(TyA == DestTy && TyB == SrcTy); 2600 (void) TyA; 2601 (void) TyB; 2602 Instruction *I = replaceInstUsesWith(*BCI, NewPN); 2603 if (BCI == &CI) 2604 RetVal = I; 2605 } else if (auto *PHI = dyn_cast<PHINode>(V)) { 2606 assert(OldPhiNodes.contains(PHI)); 2607 (void) PHI; 2608 } else { 2609 llvm_unreachable("all uses should be handled"); 2610 } 2611 } 2612 } 2613 2614 return RetVal; 2615 } 2616 2617 static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder, 2618 const DataLayout &DL) { 2619 Value *Src = CI.getOperand(0); 2620 PointerType *SrcPTy = cast<PointerType>(Src->getType()); 2621 PointerType *DstPTy = cast<PointerType>(CI.getType()); 2622 2623 // Bitcasts involving opaque pointers cannot be converted into a GEP. 2624 if (SrcPTy->isOpaque() || DstPTy->isOpaque()) 2625 return nullptr; 2626 2627 Type *DstElTy = DstPTy->getElementType(); 2628 Type *SrcElTy = SrcPTy->getElementType(); 2629 2630 // When the type pointed to is not sized the cast cannot be 2631 // turned into a gep. 2632 if (!SrcElTy->isSized()) 2633 return nullptr; 2634 2635 // If the source and destination are pointers, and this cast is equivalent 2636 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep. 2637 // This can enhance SROA and other transforms that want type-safe pointers. 2638 unsigned NumZeros = 0; 2639 while (SrcElTy && SrcElTy != DstElTy) { 2640 SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0); 2641 ++NumZeros; 2642 } 2643 2644 // If we found a path from the src to dest, create the getelementptr now. 2645 if (SrcElTy == DstElTy) { 2646 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0)); 2647 GetElementPtrInst *GEP = 2648 GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs); 2649 2650 // If the source pointer is dereferenceable, then assume it points to an 2651 // allocated object and apply "inbounds" to the GEP. 2652 bool CanBeNull, CanBeFreed; 2653 if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) { 2654 // In a non-default address space (not 0), a null pointer can not be 2655 // assumed inbounds, so ignore that case (dereferenceable_or_null). 2656 // The reason is that 'null' is not treated differently in these address 2657 // spaces, and we consequently ignore the 'gep inbounds' special case 2658 // for 'null' which allows 'inbounds' on 'null' if the indices are 2659 // zeros. 2660 if (SrcPTy->getAddressSpace() == 0 || !CanBeNull) 2661 GEP->setIsInBounds(); 2662 } 2663 return GEP; 2664 } 2665 return nullptr; 2666 } 2667 2668 Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) { 2669 // If the operands are integer typed then apply the integer transforms, 2670 // otherwise just apply the common ones. 2671 Value *Src = CI.getOperand(0); 2672 Type *SrcTy = Src->getType(); 2673 Type *DestTy = CI.getType(); 2674 2675 // Get rid of casts from one type to the same type. These are useless and can 2676 // be replaced by the operand. 2677 if (DestTy == Src->getType()) 2678 return replaceInstUsesWith(CI, Src); 2679 2680 if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) { 2681 // If we are casting a alloca to a pointer to a type of the same 2682 // size, rewrite the allocation instruction to allocate the "right" type. 2683 // There is no need to modify malloc calls because it is their bitcast that 2684 // needs to be cleaned up. 2685 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src)) 2686 if (Instruction *V = PromoteCastOfAllocation(CI, *AI)) 2687 return V; 2688 2689 if (Instruction *I = convertBitCastToGEP(CI, Builder, DL)) 2690 return I; 2691 } 2692 2693 if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) { 2694 // Beware: messing with this target-specific oddity may cause trouble. 2695 if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) { 2696 Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType()); 2697 return InsertElementInst::Create(PoisonValue::get(DestTy), Elem, 2698 Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); 2699 } 2700 2701 if (isa<IntegerType>(SrcTy)) { 2702 // If this is a cast from an integer to vector, check to see if the input 2703 // is a trunc or zext of a bitcast from vector. If so, we can replace all 2704 // the casts with a shuffle and (potentially) a bitcast. 2705 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) { 2706 CastInst *SrcCast = cast<CastInst>(Src); 2707 if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0))) 2708 if (isa<VectorType>(BCIn->getOperand(0)->getType())) 2709 if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts( 2710 BCIn->getOperand(0), cast<VectorType>(DestTy), *this)) 2711 return I; 2712 } 2713 2714 // If the input is an 'or' instruction, we may be doing shifts and ors to 2715 // assemble the elements of the vector manually. Try to rip the code out 2716 // and replace it with insertelements. 2717 if (Value *V = optimizeIntegerToVectorInsertions(CI, *this)) 2718 return replaceInstUsesWith(CI, V); 2719 } 2720 } 2721 2722 if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) { 2723 if (SrcVTy->getNumElements() == 1) { 2724 // If our destination is not a vector, then make this a straight 2725 // scalar-scalar cast. 2726 if (!DestTy->isVectorTy()) { 2727 Value *Elem = 2728 Builder.CreateExtractElement(Src, 2729 Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); 2730 return CastInst::Create(Instruction::BitCast, Elem, DestTy); 2731 } 2732 2733 // Otherwise, see if our source is an insert. If so, then use the scalar 2734 // component directly: 2735 // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m> 2736 if (auto *InsElt = dyn_cast<InsertElementInst>(Src)) 2737 return new BitCastInst(InsElt->getOperand(1), DestTy); 2738 } 2739 } 2740 2741 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) { 2742 // Okay, we have (bitcast (shuffle ..)). Check to see if this is 2743 // a bitcast to a vector with the same # elts. 2744 Value *ShufOp0 = Shuf->getOperand(0); 2745 Value *ShufOp1 = Shuf->getOperand(1); 2746 auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount(); 2747 auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount(); 2748 if (Shuf->hasOneUse() && DestTy->isVectorTy() && 2749 cast<VectorType>(DestTy)->getElementCount() == ShufElts && 2750 ShufElts == SrcVecElts) { 2751 BitCastInst *Tmp; 2752 // If either of the operands is a cast from CI.getType(), then 2753 // evaluating the shuffle in the casted destination's type will allow 2754 // us to eliminate at least one cast. 2755 if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) && 2756 Tmp->getOperand(0)->getType() == DestTy) || 2757 ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) && 2758 Tmp->getOperand(0)->getType() == DestTy)) { 2759 Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy); 2760 Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy); 2761 // Return a new shuffle vector. Use the same element ID's, as we 2762 // know the vector types match #elts. 2763 return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask()); 2764 } 2765 } 2766 2767 // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as 2768 // a byte-swap: 2769 // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X) 2770 // TODO: We should match the related pattern for bitreverse. 2771 if (DestTy->isIntegerTy() && 2772 DL.isLegalInteger(DestTy->getScalarSizeInBits()) && 2773 SrcTy->getScalarSizeInBits() == 8 && 2774 ShufElts.getKnownMinValue() % 2 == 0 && Shuf->hasOneUse() && 2775 Shuf->isReverse()) { 2776 assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask"); 2777 assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op"); 2778 Function *Bswap = 2779 Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy); 2780 Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy); 2781 return CallInst::Create(Bswap, { ScalarX }); 2782 } 2783 } 2784 2785 // Handle the A->B->A cast, and there is an intervening PHI node. 2786 if (PHINode *PN = dyn_cast<PHINode>(Src)) 2787 if (Instruction *I = optimizeBitCastFromPhi(CI, PN)) 2788 return I; 2789 2790 if (Instruction *I = canonicalizeBitCastExtElt(CI, *this)) 2791 return I; 2792 2793 if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder)) 2794 return I; 2795 2796 if (Instruction *I = foldBitCastSelect(CI, Builder)) 2797 return I; 2798 2799 if (SrcTy->isPointerTy()) 2800 return commonPointerCastTransforms(CI); 2801 return commonCastTransforms(CI); 2802 } 2803 2804 Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) { 2805 // If the destination pointer element type is not the same as the source's 2806 // first do a bitcast to the destination type, and then the addrspacecast. 2807 // This allows the cast to be exposed to other transforms. 2808 Value *Src = CI.getOperand(0); 2809 PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType()); 2810 PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType()); 2811 2812 if (!SrcTy->hasSameElementTypeAs(DestTy)) { 2813 Type *MidTy = 2814 PointerType::getWithSamePointeeType(DestTy, SrcTy->getAddressSpace()); 2815 // Handle vectors of pointers. 2816 if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) 2817 MidTy = VectorType::get(MidTy, VT->getElementCount()); 2818 2819 Value *NewBitCast = Builder.CreateBitCast(Src, MidTy); 2820 return new AddrSpaceCastInst(NewBitCast, CI.getType()); 2821 } 2822 2823 return commonPointerCastTransforms(CI); 2824 } 2825