//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <optional>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    // We specifically require nuw because we store the Scale in an unsigned
    // and perform an unsigned divide on it.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C. Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());
  // Opaque pointers don't have an element type we could replace with.
  if (PTy->isOpaque())
    return nullptr;

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getNonOpaquePointerElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation. If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinValue();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinValue();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize =
      DL.getTypeStoreSize(AllocElTy).getKnownMinValue();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinValue();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, AI.getAddressSpace(), Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
  New->setMetadata(LLVMContext::MD_DIAssignID,
                   AI.getMetadata(LLVMContext::MD_DIAssignID));

  replaceAllDbgUsesWith(AI, *New, *New, DT);

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast. This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, the cast to use is a
    // BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source. There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    Res = CastInst::Create(
        static_cast<Instruction::CastOps>(Opc), I->getOperand(0), Ty);
    break;
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);
  Type *Ty = CI.getType();

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  // Canonicalize a unary shuffle after the cast if neither operation changes
  // the size or element size of the input vector.
  // TODO: We could allow size-changing ops if that doesn't harm codegen.
  // cast (shuffle X, Mask) --> shuffle (cast X), Mask
  Value *X;
  ArrayRef<int> Mask;
  if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) {
    // TODO: Allow scalable vectors?
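    // Illustrative sketch (hypothetical values): a reversing shuffle and an
    // element-size-preserving cast commute, so the cast is hoisted:
    //   %s = shufflevector <4 x i32> %X, <4 x i32> poison,
    //                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>
    //   %c = sitofp <4 x i32> %s to <4 x float>
    // --->
    //   %c = sitofp <4 x i32> %X to <4 x float>
    //   %s = shufflevector <4 x float> %c, <4 x float> poison,
    //                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>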
    auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
    auto *DestTy = dyn_cast<FixedVectorType>(Ty);
    if (SrcTy && DestTy &&
        SrcTy->getNumElements() == DestTy->getNumElements() &&
        SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
      Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy);
      return new ShuffleVectorInst(CastX, Mask);
    }
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V. We should return true if trunc(V)
/// can be computed by computing V in the smaller type. If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // in-range amount, we can always perform a SHL in a smaller type.
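    // Illustrative sketch (hypothetical values): with a shift amount known to
    // be less than the narrow width, the truncate commutes with the shl:
    //   trunc (shl i32 %x, 4) to i16 --> shl i16 (trunc i32 %x to i16), 4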
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type down to the sign bit of the truncated type are identical.
    // TODO: It is enough to check that the bits we would be shifting in are
    // copies of the sign bit of the truncated type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  case Instruction::FPToUI:
  case Instruction::FPToSI: {
    // If the integer type can hold the max FP value, it is safe to cast
    // directly to that type. Otherwise, we may create poison via overflow
    // that did not exist in the original code.
    //
    // The max FP value is pow(2, MaxExponent) * (1 + MaxFraction), so we need
    // at least one more bit than the MaxExponent to hold the max FP value.
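    // Illustrative numbers (assuming IEEE half): MaxExponent is 15 and the
    // largest finite value is 65504, so 'fptoui half %h to i64' can be
    // evaluated as an fptoui to i16 (16 > 15), while fptosi needs i17 or
    // wider to leave room for the sign bit.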
    Type *InputTy = I->getOperand(0)->getType()->getScalarType();
    const fltSemantics &Semantics = InputTy->getFltSemantics();
    uint32_t MinBitWidth = APFloatBase::semanticsMaxExponent(Semantics);
    // Extra sign bit needed.
    if (I->getOpcode() == Instruction::FPToSI)
      ++MinBitWidth;
    return Ty->getScalarSizeInBits() > MinBitWidth;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because of
/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
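  // The overall transform, sketched with hypothetical values: an 8-bit rotate
  // performed in a promoted 32-bit type, where %x has its high 24 bits known
  // zero:
  //   %x   = zext i8 %v to i32
  //   %sub = sub i32 8, %amt
  //   %shl = shl i32 %x, %amt
  //   %shr = lshr i32 %x, %sub
  //   %or  = or i32 %shl, %shr
  //   %r   = trunc i32 %or to i8
  // --->
  //   %t = trunc i32 %x to i8
  //   %a = trunc i32 %amt to i8
  //   %r = call i8 @llvm.fshl.i8(i8 %t, i8 %t, i8 %a)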
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    // If this is a funnel shift (different operands are shifted), then the
    // shift amount cannot over-shift (create poison) in the narrow type.
    unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
    APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
    if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask))
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
        return L;

    // The following patterns currently only work for rotation patterns.
    // TODO: Add more general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The right-shifted value must have high zeros in the wide type (for example
  // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
  // truncated, so those do not matter.
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (shl ShVal0, ShAmt), (lshr ShVal1, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X, *Y;
  X = Y = Builder.CreateTrunc(ShVal0, DestTy);
  if (ShVal0 != ShVal1)
    Y = Builder.CreateTrunc(ShVal1, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return CallInst::Create(F, {X, Y, NarrowShAmt});
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();
  unsigned DestWidth = DestTy->getScalarSizeInBits();

  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }
  case Instruction::LShr:
  case Instruction::AShr: {
    // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
    Value *A;
    Constant *C;
    if (match(BinOp0, m_Trunc(m_Value(A))) && match(BinOp1, m_Constant(C))) {
      unsigned MaxShiftAmt = SrcWidth - DestWidth;
      // If the shift is small enough, all zero/sign bits created by the shift
      // are removed by the trunc.
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                      APInt(SrcWidth, MaxShiftAmt)))) {
        auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
        bool IsExact = OldShift->isExact();
        auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        Value *Shift =
            OldShift->getOpcode() == Instruction::AShr
                ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
                : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
        return CastInst::CreateTruncOrBitCast(Shift, DestTy);
      }
    }
    break;
  }
  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
      all_equal(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Poison, SplatMask
    // trunc (shuf X, Poison, SplatMask) --> shuf (trunc X), Poison, SplatMask
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (match(VecOp, m_Undef())) {
    // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
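  // Illustrative sketch (hypothetical values): when every node in the tree is
  // free to recompute in the narrow type, the trunc disappears entirely:
  //   %a = add i64 %x, %y
  //   %t = trunc i64 %a to i32
  // --->
  //   %t = add i32 (trunc i64 %x to i32), (trunc i64 %y to i32)
  // (profitable when %x and %y are constants, single-use instructions, or
  // casts from i32, so the new truncs fold away).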
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
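    // Illustrative sketch (hypothetical values) of the first pattern below:
    //   %s = lshr <2 x i8> %X, <i8 3, i8 3>
    //   %t = trunc <2 x i8> %s to <2 x i1>
    // --->
    //   %a = and <2 x i8> %X, <i8 8, i8 8>
    //   %t = icmp ne <2 x i8> %a, zeroinitializer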
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A, *B;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of a shift by a constant. It undoes a combine in
      // FoldShiftByConstant and is the extend-in-reg pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<VectorType>(VecOp->getType());
    auto VecElts = VecOpTy->getElementCount();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo =
          VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable());
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  // trunc (ctlz_i32 (zext(A), B)) --> add (ctlz_i16 (A, B)), C
  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
                                                       m_Value(B))))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
      Value *NarrowCtlz =
          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
    }
  }

  if (match(Src, m_VScale(DL))) {
    if (Trunc.getFunction() &&
        Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Trunc.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < DestWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Trunc, VScale);
        }
      }
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp,
                                                 ZExtInst &Zext) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.

  // FIXME: This set of transforms does not check for extra uses and/or creates
  //        an extra instruction (an optional final cast is not included
  //        in the transform comments). We may also want to favor icmp over
  //        shifts in cases of equal instructions because icmp has better
  //        analysis in general (invert the transform).

  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s 0) to i32 --> x>>u31    true if signbit set.
    if (Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) {
      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    if (Op1CV->isZero() && Cmp->isEquality() &&
        (Cmp->getOperand(0)->getType() == Zext.getType() ||
         Cmp->getPredicate() == ICmpInst::ICMP_NE)) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      // Exactly 1 possible 1? But not the high-bit because that is
      // canonicalized to this form.
      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2() &&
          (Zext.getType()->getScalarSizeInBits() !=
           KnownZeroMask.logBase2() + 1)) {
        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        // Toggle the low bit for "X == 0".
        if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
          In = Builder.CreateXor(In, ConstantInt::get(In->getType(), 1));

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    // Test if a bit is clear/set using a shifted-one mask:
    // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1
    // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1
    Value *X, *ShAmt;
    if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) &&
        match(Cmp->getOperand(0),
              m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) {
      if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
        X = Builder.CreateNot(X);
      Value *Lshr = Builder.CreateLShr(X, ShAmt);
      Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
      return replaceInstUsesWith(Zext, And1);
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out. For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63. Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x. The shifted-in high
    // bits are not known to be zero in the wider type, so they must be added
    // to BitsToClear for the final 'and' to remove.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear
        // are known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &Zext) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (Zext.hasOneUse() && isa<TruncInst>(Zext.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(Zext))
    return Result;

  Value *Src = Zext.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = Zext.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &Zext)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << Zext << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, Zext, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &Zext))
      return replaceInstUsesWith(Zext, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize,
                                                        SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (auto *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved. We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
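    // Illustrative sketch (hypothetical values) of the SrcSize > DstSize case
    // handled below, with A: i32, intermediate: i8, C: i16:
    //   %m = trunc i32 %a to i8
    //   %z = zext i8 %m to i16
    // --->
    //   %t = trunc i32 %a to i16
    //   %z = and i16 %t, 255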
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = DestTy->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, DestTy);
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, DestTy);
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (auto *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, Zext);

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (match(Src, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == DestTy)
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, DestTy));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (match(Src, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == DestTy) {
    Constant *ZC = ConstantExpr::getZExt(C, DestTy);
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  // If we are truncating, masking, and then zexting back to the original type,
  // that's just a mask. This is not handled by canEvaluateZextd if the
  // intermediate values have extra uses. This could be generalized further for
  // a non-constant mask operand.
  // zext (and (trunc X), C) --> and X, (zext C)
  if (match(Src, m_And(m_Trunc(m_Value(X)), m_Constant(C))) &&
      X->getType() == DestTy) {
    Constant *ZextC = ConstantExpr::getZExt(C, DestTy);
    return BinaryOperator::CreateAnd(X, ZextC);
  }

  if (match(Src, m_VScale(DL))) {
    if (Zext.getFunction() &&
        Zext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Zext.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
        if (Log2_32(*MaxVScale) < TypeWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Zext, VScale);
        }
      }
    }
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *Cmp,
                                                 SExtInst &Sext) {
  Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
  ICmpInst::Predicate Pred = Cmp->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) {
    // sext (x <s 0) --> ashr x, 31 (all ones if negative)
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != Sext.getType())
      In = Builder.CreateIntCast(In, Sext.getType(), true /*SExt*/);

    return replaceInstUsesWith(Sext, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (Cmp->hasOneUse() &&
        Cmp->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
      KnownBits Known = computeKnownBits(Op0, 0, &Sext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = Cmp->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(Sext.getType()) :
                       ConstantInt::getNullValue(Sext.getType());
          return replaceInstUsesWith(Sext, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (Sext.getType() == In->getType())
          return replaceInstUsesWith(Sext, In);
        return CastInst::CreateIntegerCast(In, Sext.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the common
/// low bits. This is used by code that tries to promote integer operations to
/// a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
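///
/// Illustrative sketch (hypothetical values):
///   sext (add i8 %x, %y) to i32
/// can instead be computed as
///   add i32 (sext i8 %x to i32), (sext i8 %y to i32)
/// whenever both operands are free to sign-extend.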
1446 /// 1447 static bool canEvaluateSExtd(Value *V, Type *Ty) { 1448 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() && 1449 "Can't sign extend type to a smaller type"); 1450 if (canAlwaysEvaluateInType(V, Ty)) 1451 return true; 1452 if (canNotEvaluateInType(V, Ty)) 1453 return false; 1454 1455 auto *I = cast<Instruction>(V); 1456 switch (I->getOpcode()) { 1457 case Instruction::SExt: // sext(sext(x)) -> sext(x) 1458 case Instruction::ZExt: // sext(zext(x)) -> zext(x) 1459 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x) 1460 return true; 1461 case Instruction::And: 1462 case Instruction::Or: 1463 case Instruction::Xor: 1464 case Instruction::Add: 1465 case Instruction::Sub: 1466 case Instruction::Mul: 1467 // These operators can all arbitrarily be extended if their inputs can. 1468 return canEvaluateSExtd(I->getOperand(0), Ty) && 1469 canEvaluateSExtd(I->getOperand(1), Ty); 1470 1471 //case Instruction::Shl: TODO 1472 //case Instruction::LShr: TODO 1473 1474 case Instruction::Select: 1475 return canEvaluateSExtd(I->getOperand(1), Ty) && 1476 canEvaluateSExtd(I->getOperand(2), Ty); 1477 1478 case Instruction::PHI: { 1479 // We can change a phi if we can change all operands. Note that we never 1480 // get into trouble with cyclic PHIs here because we only consider 1481 // instructions with a single use. 1482 PHINode *PN = cast<PHINode>(I); 1483 for (Value *IncValue : PN->incoming_values()) 1484 if (!canEvaluateSExtd(IncValue, Ty)) return false; 1485 return true; 1486 } 1487 default: 1488 // TODO: Can handle more cases here. 1489 break; 1490 } 1491 1492 return false; 1493 } 1494 1495 Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) { 1496 // If this sign extend is only used by a truncate, let the truncate be 1497 // eliminated before we try to optimize this sext. 1498 if (Sext.hasOneUse() && isa<TruncInst>(Sext.user_back())) 1499 return nullptr; 1500 1501 if (Instruction *I = commonCastTransforms(Sext)) 1502 return I; 1503 1504 Value *Src = Sext.getOperand(0); 1505 Type *SrcTy = Src->getType(), *DestTy = Sext.getType(); 1506 unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); 1507 unsigned DestBitSize = DestTy->getScalarSizeInBits(); 1508 1509 // If the value being extended is zero or positive, use a zext instead. 1510 if (isKnownNonNegative(Src, DL, 0, &AC, &Sext, &DT)) 1511 return CastInst::Create(Instruction::ZExt, Src, DestTy); 1512 1513 // Try to extend the entire expression tree to the wide destination type. 1514 if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) { 1515 // Okay, we can transform this! Insert the new expression now. 1516 LLVM_DEBUG( 1517 dbgs() << "ICE: EvaluateInDifferentType converting expression type" 1518 " to avoid sign extend: " 1519 << Sext << '\n'); 1520 Value *Res = EvaluateInDifferentType(Src, DestTy, true); 1521 assert(Res->getType() == DestTy); 1522 1523 // If the high bits are already filled with sign bit, just replace this 1524 // cast with the result. 1525 if (ComputeNumSignBits(Res, 0, &Sext) > DestBitSize - SrcBitSize) 1526 return replaceInstUsesWith(Sext, Res); 1527 1528 // We need to emit a shl + ashr to do the sign extend. 1529 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize); 1530 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"), 1531 ShAmt); 1532 } 1533 1534 Value *X; 1535 if (match(Src, m_Trunc(m_Value(X)))) { 1536 // If the input has more sign bits than bits truncated, then convert 1537 // directly to final type. 
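    // e.g. (illustrative) if i32 %x has at least 17 sign bits, then
    // sext (trunc %x to i16) to i64 is just sext %x to i64, because the
    // 16 truncated bits are all copies of the sign bit.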
1538     unsigned XBitSize = X->getType()->getScalarSizeInBits();
1539     if (ComputeNumSignBits(X, 0, &Sext) > XBitSize - SrcBitSize)
1540       return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
1541
1542     // If input is a trunc from the destination type, then convert into shifts.
1543     if (Src->hasOneUse() && X->getType() == DestTy) {
1544       // sext (trunc X) --> ashr (shl X, C), C
1545       Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1546       return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1547     }
1548
1549     // If we are replacing shifted-in high zero bits with sign bits, convert
1550     // the logic shift to arithmetic shift and eliminate the cast to
1551     // intermediate type:
1552     // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C)
1553     Value *Y;
1554     if (Src->hasOneUse() &&
1555         match(X, m_LShr(m_Value(Y),
1556                         m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
1557       Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
1558       return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
1559     }
1560   }
1561
1562   if (auto *Cmp = dyn_cast<ICmpInst>(Src))
1563     return transformSExtICmp(Cmp, Sext);
1564
1565   // If the input is a shl/ashr pair with the same constant, then this is a
1566   // sign extension from a smaller value. If we could trust arbitrary bitwidth
1567   // integers, we could turn this into a truncate to the smaller bit width and
1568   // then use a sext for the whole extension. Since we don't, look deeper and
1569   // check for a truncate. If the source and dest are the same type, eliminate
1570   // the trunc and extend and just do shifts. For example, turn:
1571   //   %a = trunc i32 %i to i8
1572   //   %b = shl i8 %a, C
1573   //   %c = ashr i8 %b, C
1574   //   %d = sext i8 %c to i32
1575   // into:
1576   //   %a = shl i32 %i, 32-(8-C)
1577   //   %d = ashr i32 %a, 32-(8-C)
1578   Value *A = nullptr;
1579   // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
1580   Constant *BA = nullptr, *CA = nullptr;
1581   if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
1582                         m_Constant(CA))) &&
1583       BA->isElementWiseEqual(CA) && A->getType() == DestTy) {
1584     Constant *WideCurrShAmt = ConstantExpr::getSExt(CA, DestTy);
1585     Constant *NumLowbitsLeft = ConstantExpr::getSub(
1586         ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt);
1587     Constant *NewShAmt = ConstantExpr::getSub(
1588         ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()),
1589         NumLowbitsLeft);
1590     NewShAmt =
1591         Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA);
1592     A = Builder.CreateShl(A, NewShAmt, Sext.getName());
1593     return BinaryOperator::CreateAShr(A, NewShAmt);
1594   }
1595
1596   // Splatting a bit of constant-index across a value:
1597   // sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
1598   // If the dest type is different, use a cast (adjust use check).
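  // Worked instance (illustrative), with N = 32 and M = 8:
  //   sext (ashr (trunc i32 %x to i8), 7) to i32
  // becomes
  //   ashr (shl %x, 24), 31
  // which splats bit 7 of %x across all 32 bits.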
1599   if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
1600                                  m_SpecificInt(SrcBitSize - 1))))) {
1601     Type *XTy = X->getType();
1602     unsigned XBitSize = XTy->getScalarSizeInBits();
1603     Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
1604     Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
1605     if (XTy == DestTy)
1606       return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShlAmtC),
1607                                         AshrAmtC);
1608     if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
1609       Value *Ashr = Builder.CreateAShr(Builder.CreateShl(X, ShlAmtC), AshrAmtC);
1610       return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
1611     }
1612   }
1613
1614   if (match(Src, m_VScale(DL))) {
1615     if (Sext.getFunction() &&
1616         Sext.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
1617       Attribute Attr =
1618           Sext.getFunction()->getFnAttribute(Attribute::VScaleRange);
1619       if (std::optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
1620         if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
1621           Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
1622           return replaceInstUsesWith(Sext, VScale);
1623         }
1624       }
1625     }
1626   }
1627
1628   return nullptr;
1629 }
1630
1631 /// Return true if the specified floating-point constant fits in the
1632 /// specified FP type without changing its value.
1633 static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
1634   bool losesInfo;
1635   APFloat F = CFP->getValueAPF();
1636   (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
1637   return !losesInfo;
1638 }
1639
1640 static Type *shrinkFPConstant(ConstantFP *CFP) {
1641   if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
1642     return nullptr; // No constant folding of this.
1643   // See if the value can be truncated to half and then reextended.
1644   if (fitsInFPType(CFP, APFloat::IEEEhalf()))
1645     return Type::getHalfTy(CFP->getContext());
1646   // See if the value can be truncated to float and then reextended.
1647   if (fitsInFPType(CFP, APFloat::IEEEsingle()))
1648     return Type::getFloatTy(CFP->getContext());
1649   if (CFP->getType()->isDoubleTy())
1650     return nullptr; // Won't shrink.
1651   if (fitsInFPType(CFP, APFloat::IEEEdouble()))
1652     return Type::getDoubleTy(CFP->getContext());
1653   // Don't try to shrink to various long double types.
1654   return nullptr;
1655 }
1656
1657 // Determine if this is a vector of ConstantFPs and if so, return the minimal
1658 // type we can safely truncate all elements to.
1659 static Type *shrinkFPConstantVector(Value *V) {
1660   auto *CV = dyn_cast<Constant>(V);
1661   auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
1662   if (!CV || !CVVTy)
1663     return nullptr;
1664
1665   Type *MinType = nullptr;
1666
1667   unsigned NumElts = CVVTy->getNumElements();
1668
1669   // For fixed-width vectors we find the minimal type by looking
1670   // through the constant values of the vector.
1671   for (unsigned i = 0; i != NumElts; ++i) {
1672     if (isa<UndefValue>(CV->getAggregateElement(i)))
1673       continue;
1674
1675     auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1676     if (!CFP)
1677       return nullptr;
1678
1679     Type *T = shrinkFPConstant(CFP);
1680     if (!T)
1681       return nullptr;
1682
1683     // If we haven't found a type yet or this type has a larger mantissa than
1684     // our previous type, this is our new minimal type.
1685     if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1686       MinType = T;
1687   }
1688
1689   // Make a vector type from the minimal type.
1690   return MinType ?
FixedVectorType::get(MinType, NumElts) : nullptr; 1691 } 1692 1693 /// Find the minimum FP type we can safely truncate to. 1694 static Type *getMinimumFPType(Value *V) { 1695 if (auto *FPExt = dyn_cast<FPExtInst>(V)) 1696 return FPExt->getOperand(0)->getType(); 1697 1698 // If this value is a constant, return the constant in the smallest FP type 1699 // that can accurately represent it. This allows us to turn 1700 // (float)((double)X+2.0) into x+2.0f. 1701 if (auto *CFP = dyn_cast<ConstantFP>(V)) 1702 if (Type *T = shrinkFPConstant(CFP)) 1703 return T; 1704 1705 // We can only correctly find a minimum type for a scalable vector when it is 1706 // a splat. For splats of constant values the fpext is wrapped up as a 1707 // ConstantExpr. 1708 if (auto *FPCExt = dyn_cast<ConstantExpr>(V)) 1709 if (FPCExt->getOpcode() == Instruction::FPExt) 1710 return FPCExt->getOperand(0)->getType(); 1711 1712 // Try to shrink a vector of FP constants. This returns nullptr on scalable 1713 // vectors 1714 if (Type *T = shrinkFPConstantVector(V)) 1715 return T; 1716 1717 return V->getType(); 1718 } 1719 1720 /// Return true if the cast from integer to FP can be proven to be exact for all 1721 /// possible inputs (the conversion does not lose any precision). 1722 static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC) { 1723 CastInst::CastOps Opcode = I.getOpcode(); 1724 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) && 1725 "Unexpected cast"); 1726 Value *Src = I.getOperand(0); 1727 Type *SrcTy = Src->getType(); 1728 Type *FPTy = I.getType(); 1729 bool IsSigned = Opcode == Instruction::SIToFP; 1730 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned; 1731 1732 // Easy case - if the source integer type has less bits than the FP mantissa, 1733 // then the cast must be exact. 1734 int DestNumSigBits = FPTy->getFPMantissaWidth(); 1735 if (SrcSize <= DestNumSigBits) 1736 return true; 1737 1738 // Cast from FP to integer and back to FP is independent of the intermediate 1739 // integer width because of poison on overflow. 1740 Value *F; 1741 if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) { 1742 // If this is uitofp (fptosi F), the source needs an extra bit to avoid 1743 // potential rounding of negative FP input values. 1744 int SrcNumSigBits = F->getType()->getFPMantissaWidth(); 1745 if (!IsSigned && match(Src, m_FPToSI(m_Value()))) 1746 SrcNumSigBits++; 1747 1748 // [su]itofp (fpto[su]i F) --> exact if the source type has less or equal 1749 // significant bits than the destination (and make sure neither type is 1750 // weird -- ppc_fp128). 1751 if (SrcNumSigBits > 0 && DestNumSigBits > 0 && 1752 SrcNumSigBits <= DestNumSigBits) 1753 return true; 1754 } 1755 1756 // TODO: 1757 // Try harder to find if the source integer type has less significant bits. 1758 // For example, compute number of sign bits. 1759 KnownBits SrcKnown = IC.computeKnownBits(Src, 0, &I); 1760 int SigBits = (int)SrcTy->getScalarSizeInBits() - 1761 SrcKnown.countMinLeadingZeros() - 1762 SrcKnown.countMinTrailingZeros(); 1763 if (SigBits <= DestNumSigBits) 1764 return true; 1765 1766 return false; 1767 } 1768 1769 Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) { 1770 if (Instruction *I = commonCastTransforms(FPT)) 1771 return I; 1772 1773 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to 1774 // simplify this expression to avoid one or more of the trunc/extend 1775 // operations if we can do so without changing the numerical results. 
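  // For instance (illustrative): fptrunc (fadd (fpext float %a to double),
  // (fpext float %b to double)) to float can be evaluated directly as
  // fadd float %a, %b, since double's 53-bit significand meets the
  // OpWidth >= 2*DstWidth+1 requirement (53 >= 49) described below.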
1776   //
1777   // The exact manner in which the widths of the operands interact to limit
1778   // what we can and cannot do safely varies from operation to operation, and
1779   // is explained below in the various case statements.
1780   Type *Ty = FPT.getType();
1781   auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1782   if (BO && BO->hasOneUse()) {
1783     Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1784     Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1785     unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1786     unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1787     unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1788     unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1789     unsigned DstWidth = Ty->getFPMantissaWidth();
1790     switch (BO->getOpcode()) {
1791     default: break;
1792     case Instruction::FAdd:
1793     case Instruction::FSub:
1794       // For addition and subtraction, the infinitely precise result can
1795       // essentially be arbitrarily wide; proving that double rounding
1796       // will not occur because the result of OpI is exact (as we will for
1797       // FMul, for example) is hopeless. However, we *can* nonetheless
1798       // frequently know that double rounding cannot occur (or that it is
1799       // innocuous) by taking advantage of the specific structure of
1800       // infinitely-precise results that admit double rounding.
1801       //
1802       // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1803       // to represent both sources, we can guarantee that the double
1804       // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1805       // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1806       // for proof of this fact).
1807       //
1808       // Note: Figueroa does not consider the case where DstFormat !=
1809       // SrcFormat. It's possible (likely even!) that this analysis
1810       // could be tightened for those cases, but they are rare (the main
1811       // case of interest here is (float)((double)float + float)).
1812       if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1813         Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1814         Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1815         Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1816         RI->copyFastMathFlags(BO);
1817         return RI;
1818       }
1819       break;
1820     case Instruction::FMul:
1821       // For multiplication, the infinitely precise result has at most
1822       // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1823       // that such a value can be exactly represented, then no double
1824       // rounding can possibly occur; we can safely perform the operation
1825       // in the destination format if it can represent both sources.
1826       if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1827         Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1828         Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1829         return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1830       }
1831       break;
1832     case Instruction::FDiv:
1833       // For division, we again use the bound from Figueroa's
1834       // dissertation. I am entirely certain that this bound can be
1835       // tightened in the unbalanced operand case by an analysis based on
1836       // the diophantine rational approximation bound, but the well-known
1837       // condition used here is a good conservative first pass.
1838       // TODO: Tighten bound via rigorous analysis of the unbalanced case.
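      // Illustrative instance (assumed IR): fptrunc (fdiv (fpext float %a
      // to double), (fpext float %b to double)) to float passes the check
      // below: OpWidth = 53 >= 2*DstWidth = 48, and DstWidth = 24 >= SrcWidth.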
1839 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) { 1840 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty); 1841 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty); 1842 return BinaryOperator::CreateFDivFMF(LHS, RHS, BO); 1843 } 1844 break; 1845 case Instruction::FRem: { 1846 // Remainder is straightforward. Remainder is always exact, so the 1847 // type of OpI doesn't enter into things at all. We simply evaluate 1848 // in whichever source type is larger, then convert to the 1849 // destination type. 1850 if (SrcWidth == OpWidth) 1851 break; 1852 Value *LHS, *RHS; 1853 if (LHSWidth == SrcWidth) { 1854 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType); 1855 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType); 1856 } else { 1857 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType); 1858 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType); 1859 } 1860 1861 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO); 1862 return CastInst::CreateFPCast(ExactResult, Ty); 1863 } 1864 } 1865 } 1866 1867 // (fptrunc (fneg x)) -> (fneg (fptrunc x)) 1868 Value *X; 1869 Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0)); 1870 if (Op && Op->hasOneUse()) { 1871 // FIXME: The FMF should propagate from the fptrunc, not the source op. 1872 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 1873 if (isa<FPMathOperator>(Op)) 1874 Builder.setFastMathFlags(Op->getFastMathFlags()); 1875 1876 if (match(Op, m_FNeg(m_Value(X)))) { 1877 Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty); 1878 1879 return UnaryOperator::CreateFNegFMF(InnerTrunc, Op); 1880 } 1881 1882 // If we are truncating a select that has an extended operand, we can 1883 // narrow the other operand and do the select as a narrow op. 1884 Value *Cond, *X, *Y; 1885 if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) && 1886 X->getType() == Ty) { 1887 // fptrunc (select Cond, (fpext X), Y --> select Cond, X, (fptrunc Y) 1888 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty); 1889 Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op); 1890 return replaceInstUsesWith(FPT, Sel); 1891 } 1892 if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) && 1893 X->getType() == Ty) { 1894 // fptrunc (select Cond, Y, (fpext X) --> select Cond, (fptrunc Y), X 1895 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty); 1896 Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op); 1897 return replaceInstUsesWith(FPT, Sel); 1898 } 1899 } 1900 1901 if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) { 1902 switch (II->getIntrinsicID()) { 1903 default: break; 1904 case Intrinsic::ceil: 1905 case Intrinsic::fabs: 1906 case Intrinsic::floor: 1907 case Intrinsic::nearbyint: 1908 case Intrinsic::rint: 1909 case Intrinsic::round: 1910 case Intrinsic::roundeven: 1911 case Intrinsic::trunc: { 1912 Value *Src = II->getArgOperand(0); 1913 if (!Src->hasOneUse()) 1914 break; 1915 1916 // Except for fabs, this transformation requires the input of the unary FP 1917 // operation to be itself an fpext from the type to which we're 1918 // truncating. 1919 if (II->getIntrinsicID() != Intrinsic::fabs) { 1920 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src); 1921 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty) 1922 break; 1923 } 1924 1925 // Do unary FP operation on smaller type. 
1926     // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1927     Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1928     Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1929                                                    II->getIntrinsicID(), Ty);
1930     SmallVector<OperandBundleDef, 1> OpBundles;
1931     II->getOperandBundlesAsDefs(OpBundles);
1932     CallInst *NewCI =
1933         CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1934     NewCI->copyFastMathFlags(II);
1935     return NewCI;
1936   }
1937   }
1938 }
1939
1940   if (Instruction *I = shrinkInsertElt(FPT, Builder))
1941     return I;
1942
1943   Value *Src = FPT.getOperand(0);
1944   if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1945     auto *FPCast = cast<CastInst>(Src);
1946     if (isKnownExactCastIntToFP(*FPCast, *this))
1947       return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1948   }
1949
1950   return nullptr;
1951 }
1952
1953 Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
1954   // If the source operand is a cast from integer to FP and known exact, then
1955   // cast the integer operand directly to the destination type.
1956   Type *Ty = FPExt.getType();
1957   Value *Src = FPExt.getOperand(0);
1958   if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1959     auto *FPCast = cast<CastInst>(Src);
1960     if (isKnownExactCastIntToFP(*FPCast, *this))
1961       return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1962   }
1963
1964   return commonCastTransforms(FPExt);
1965 }
1966
1967 /// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1968 /// This is safe if the intermediate type has enough bits in its mantissa to
1969 /// accurately represent all values of X. For example, this won't work with
1970 /// i64 -> float -> i64.
1971 Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
1972   if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1973     return nullptr;
1974
1975   auto *OpI = cast<CastInst>(FI.getOperand(0));
1976   Value *X = OpI->getOperand(0);
1977   Type *XType = X->getType();
1978   Type *DestType = FI.getType();
1979   bool IsOutputSigned = isa<FPToSIInst>(FI);
1980
1981   // Since we can assume the conversion won't overflow, our decision as to
1982   // whether the input will fit in the float should depend on the minimum
1983   // of the input range and output range.
1984
1985   // This means this is also safe for a signed input and unsigned output, since
1986   // a negative input would lead to undefined behavior.
1987   if (!isKnownExactCastIntToFP(*OpI, *this)) {
1988     // The first cast may not round exactly based on the source integer width
1989     // and FP width, but the overflow UB rules can still allow this to fold.
1990     // If the destination type is narrow, that means the intermediate FP value
1991     // must be large enough to hold the source value exactly.
1992     // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
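    // Illustrative arithmetic (assuming a float intermediate, whose mantissa
    // width is 24): an i8 destination passes the check below (8 <= 24), while
    // an i64 destination fails (64 > 24) and we give up.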
1993 int OutputSize = (int)DestType->getScalarSizeInBits(); 1994 if (OutputSize > OpI->getType()->getFPMantissaWidth()) 1995 return nullptr; 1996 } 1997 1998 if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) { 1999 bool IsInputSigned = isa<SIToFPInst>(OpI); 2000 if (IsInputSigned && IsOutputSigned) 2001 return new SExtInst(X, DestType); 2002 return new ZExtInst(X, DestType); 2003 } 2004 if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits()) 2005 return new TruncInst(X, DestType); 2006 2007 assert(XType == DestType && "Unexpected types for int to FP to int casts"); 2008 return replaceInstUsesWith(FI, X); 2009 } 2010 2011 Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) { 2012 if (Instruction *I = foldItoFPtoI(FI)) 2013 return I; 2014 2015 return commonCastTransforms(FI); 2016 } 2017 2018 Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) { 2019 if (Instruction *I = foldItoFPtoI(FI)) 2020 return I; 2021 2022 return commonCastTransforms(FI); 2023 } 2024 2025 Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) { 2026 return commonCastTransforms(CI); 2027 } 2028 2029 Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) { 2030 return commonCastTransforms(CI); 2031 } 2032 2033 Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) { 2034 // If the source integer type is not the intptr_t type for this target, do a 2035 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the 2036 // cast to be exposed to other transforms. 2037 unsigned AS = CI.getAddressSpace(); 2038 if (CI.getOperand(0)->getType()->getScalarSizeInBits() != 2039 DL.getPointerSizeInBits(AS)) { 2040 Type *Ty = CI.getOperand(0)->getType()->getWithNewType( 2041 DL.getIntPtrType(CI.getContext(), AS)); 2042 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty); 2043 return new IntToPtrInst(P, CI.getType()); 2044 } 2045 2046 if (Instruction *I = commonCastTransforms(CI)) 2047 return I; 2048 2049 return nullptr; 2050 } 2051 2052 /// Implement the transforms for cast of pointer (bitcast/ptrtoint) 2053 Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) { 2054 Value *Src = CI.getOperand(0); 2055 2056 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) { 2057 // If casting the result of a getelementptr instruction with no offset, turn 2058 // this into a cast of the original pointer! 2059 if (GEP->hasAllZeroIndices() && 2060 // If CI is an addrspacecast and GEP changes the poiner type, merging 2061 // GEP into CI would undo canonicalizing addrspacecast with different 2062 // pointer types, causing infinite loops. 2063 (!isa<AddrSpaceCastInst>(CI) || 2064 GEP->getType() == GEP->getPointerOperandType())) { 2065 // Changing the cast operand is usually not a good idea but it is safe 2066 // here because the pointer operand is being replaced with another 2067 // pointer operand so the opcode doesn't need to change. 2068 return replaceOperand(CI, 0, GEP->getOperand(0)); 2069 } 2070 } 2071 2072 return commonCastTransforms(CI); 2073 } 2074 2075 Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) { 2076 // If the destination integer type is not the intptr_t type for this target, 2077 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast 2078 // to be exposed to other transforms. 
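  // e.g. (illustrative, assuming 64-bit pointers in this address space):
  //   %i = ptrtoint ptr %p to i32
  // becomes
  //   %w = ptrtoint ptr %p to i64
  //   %i = trunc i64 %w to i32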
2079 Value *SrcOp = CI.getPointerOperand(); 2080 Type *SrcTy = SrcOp->getType(); 2081 Type *Ty = CI.getType(); 2082 unsigned AS = CI.getPointerAddressSpace(); 2083 unsigned TySize = Ty->getScalarSizeInBits(); 2084 unsigned PtrSize = DL.getPointerSizeInBits(AS); 2085 if (TySize != PtrSize) { 2086 Type *IntPtrTy = 2087 SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS)); 2088 Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy); 2089 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false); 2090 } 2091 2092 if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) { 2093 // Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one use. 2094 // While this can increase the number of instructions it doesn't actually 2095 // increase the overall complexity since the arithmetic is just part of 2096 // the GEP otherwise. 2097 if (GEP->hasOneUse() && 2098 isa<ConstantPointerNull>(GEP->getPointerOperand())) { 2099 return replaceInstUsesWith(CI, 2100 Builder.CreateIntCast(EmitGEPOffset(GEP), Ty, 2101 /*isSigned=*/false)); 2102 } 2103 } 2104 2105 Value *Vec, *Scalar, *Index; 2106 if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)), 2107 m_Value(Scalar), m_Value(Index)))) && 2108 Vec->getType() == Ty) { 2109 assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type"); 2110 // Convert the scalar to int followed by insert to eliminate one cast: 2111 // p2i (ins (i2p Vec), Scalar, Index --> ins Vec, (p2i Scalar), Index 2112 Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType()); 2113 return InsertElementInst::Create(Vec, NewCast, Index); 2114 } 2115 2116 return commonPointerCastTransforms(CI); 2117 } 2118 2119 /// This input value (which is known to have vector type) is being zero extended 2120 /// or truncated to the specified vector type. Since the zext/trunc is done 2121 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern, 2122 /// endianness will impact which end of the vector that is extended or 2123 /// truncated. 2124 /// 2125 /// A vector is always stored with index 0 at the lowest address, which 2126 /// corresponds to the most significant bits for a big endian stored integer and 2127 /// the least significant bits for little endian. A trunc/zext of an integer 2128 /// impacts the big end of the integer. Thus, we need to add/remove elements at 2129 /// the front of the vector for big endian targets, and the back of the vector 2130 /// for little endian targets. 2131 /// 2132 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible. 2133 /// 2134 /// The source and destination vector types may have different element types. 2135 static Instruction * 2136 optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, 2137 InstCombinerImpl &IC) { 2138 // We can only do this optimization if the output is a multiple of the input 2139 // element size, or the input is a multiple of the output element size. 2140 // Convert the input type to have the same element type as the output. 2141 VectorType *SrcTy = cast<VectorType>(InVal->getType()); 2142 2143 if (SrcTy->getElementType() != DestTy->getElementType()) { 2144 // The input types don't need to be identical, but for now they must be the 2145 // same size. There is no specific reason we couldn't handle things like 2146 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten 2147 // there yet. 
2148 if (SrcTy->getElementType()->getPrimitiveSizeInBits() != 2149 DestTy->getElementType()->getPrimitiveSizeInBits()) 2150 return nullptr; 2151 2152 SrcTy = 2153 FixedVectorType::get(DestTy->getElementType(), 2154 cast<FixedVectorType>(SrcTy)->getNumElements()); 2155 InVal = IC.Builder.CreateBitCast(InVal, SrcTy); 2156 } 2157 2158 bool IsBigEndian = IC.getDataLayout().isBigEndian(); 2159 unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements(); 2160 unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements(); 2161 2162 assert(SrcElts != DestElts && "Element counts should be different."); 2163 2164 // Now that the element types match, get the shuffle mask and RHS of the 2165 // shuffle to use, which depends on whether we're increasing or decreasing the 2166 // size of the input. 2167 auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts)); 2168 ArrayRef<int> ShuffleMask; 2169 Value *V2; 2170 2171 if (SrcElts > DestElts) { 2172 // If we're shrinking the number of elements (rewriting an integer 2173 // truncate), just shuffle in the elements corresponding to the least 2174 // significant bits from the input and use poison as the second shuffle 2175 // input. 2176 V2 = PoisonValue::get(SrcTy); 2177 // Make sure the shuffle mask selects the "least significant bits" by 2178 // keeping elements from back of the src vector for big endian, and from the 2179 // front for little endian. 2180 ShuffleMask = ShuffleMaskStorage; 2181 if (IsBigEndian) 2182 ShuffleMask = ShuffleMask.take_back(DestElts); 2183 else 2184 ShuffleMask = ShuffleMask.take_front(DestElts); 2185 } else { 2186 // If we're increasing the number of elements (rewriting an integer zext), 2187 // shuffle in all of the elements from InVal. Fill the rest of the result 2188 // elements with zeros from a constant zero. 2189 V2 = Constant::getNullValue(SrcTy); 2190 // Use first elt from V2 when indicating zero in the shuffle mask. 2191 uint32_t NullElt = SrcElts; 2192 // Extend with null values in the "most significant bits" by adding elements 2193 // in front of the src vector for big endian, and at the back for little 2194 // endian. 2195 unsigned DeltaElts = DestElts - SrcElts; 2196 if (IsBigEndian) 2197 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt); 2198 else 2199 ShuffleMaskStorage.append(DeltaElts, NullElt); 2200 ShuffleMask = ShuffleMaskStorage; 2201 } 2202 2203 return new ShuffleVectorInst(InVal, V2, ShuffleMask); 2204 } 2205 2206 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) { 2207 return Value % Ty->getPrimitiveSizeInBits() == 0; 2208 } 2209 2210 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) { 2211 return Value / Ty->getPrimitiveSizeInBits(); 2212 } 2213 2214 /// V is a value which is inserted into a vector of VecEltTy. 2215 /// Look through the value to see if we can decompose it into 2216 /// insertions into the vector. See the example in the comment for 2217 /// OptimizeIntegerToVectorInsertions for the pattern this handles. 2218 /// The type of V is always a non-zero multiple of VecEltTy's size. 2219 /// Shift is the number of bits between the lsb of V and the lsb of 2220 /// the vector. 2221 /// 2222 /// This returns false if the pattern can't be matched or true if it can, 2223 /// filling in Elements with the elements found here. 
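/// For example (illustrative), with VecEltTy = i32 on a little-endian target,
///   V = or i64 (zext i32 %a to i64), (shl (zext i32 %b to i64), 32)
/// fills in Elements = {%a, %b}.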
2224 static bool collectInsertionElements(Value *V, unsigned Shift, 2225 SmallVectorImpl<Value *> &Elements, 2226 Type *VecEltTy, bool isBigEndian) { 2227 assert(isMultipleOfTypeSize(Shift, VecEltTy) && 2228 "Shift should be a multiple of the element type size"); 2229 2230 // Undef values never contribute useful bits to the result. 2231 if (isa<UndefValue>(V)) return true; 2232 2233 // If we got down to a value of the right type, we win, try inserting into the 2234 // right element. 2235 if (V->getType() == VecEltTy) { 2236 // Inserting null doesn't actually insert any elements. 2237 if (Constant *C = dyn_cast<Constant>(V)) 2238 if (C->isNullValue()) 2239 return true; 2240 2241 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy); 2242 if (isBigEndian) 2243 ElementIndex = Elements.size() - ElementIndex - 1; 2244 2245 // Fail if multiple elements are inserted into this slot. 2246 if (Elements[ElementIndex]) 2247 return false; 2248 2249 Elements[ElementIndex] = V; 2250 return true; 2251 } 2252 2253 if (Constant *C = dyn_cast<Constant>(V)) { 2254 // Figure out the # elements this provides, and bitcast it or slice it up 2255 // as required. 2256 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(), 2257 VecEltTy); 2258 // If the constant is the size of a vector element, we just need to bitcast 2259 // it to the right type so it gets properly inserted. 2260 if (NumElts == 1) 2261 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy), 2262 Shift, Elements, VecEltTy, isBigEndian); 2263 2264 // Okay, this is a constant that covers multiple elements. Slice it up into 2265 // pieces and insert each element-sized piece into the vector. 2266 if (!isa<IntegerType>(C->getType())) 2267 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(), 2268 C->getType()->getPrimitiveSizeInBits())); 2269 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits(); 2270 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize); 2271 2272 for (unsigned i = 0; i != NumElts; ++i) { 2273 unsigned ShiftI = Shift+i*ElementSize; 2274 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(), 2275 ShiftI)); 2276 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy); 2277 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy, 2278 isBigEndian)) 2279 return false; 2280 } 2281 return true; 2282 } 2283 2284 if (!V->hasOneUse()) return false; 2285 2286 Instruction *I = dyn_cast<Instruction>(V); 2287 if (!I) return false; 2288 switch (I->getOpcode()) { 2289 default: return false; // Unhandled case. 2290 case Instruction::BitCast: 2291 if (I->getOperand(0)->getType()->isVectorTy()) 2292 return false; 2293 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2294 isBigEndian); 2295 case Instruction::ZExt: 2296 if (!isMultipleOfTypeSize( 2297 I->getOperand(0)->getType()->getPrimitiveSizeInBits(), 2298 VecEltTy)) 2299 return false; 2300 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2301 isBigEndian); 2302 case Instruction::Or: 2303 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy, 2304 isBigEndian) && 2305 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy, 2306 isBigEndian); 2307 case Instruction::Shl: { 2308 // Must be shifting by a constant that is a multiple of the element size. 
2309     ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2310     if (!CI) return false;
2311     Shift += CI->getZExtValue();
2312     if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2313     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2314                                     isBigEndian);
2315   }
2316
2317   }
2318 }
2319
2320
2321 /// If the input is an 'or' instruction, we may be doing shifts and ors to
2322 /// assemble the elements of the vector manually.
2323 /// Try to rip the code out and replace it with insertelements. This is to
2324 /// optimize code like this:
2325 ///
2326 ///    %tmp37 = bitcast float %inc to i32
2327 ///    %tmp38 = zext i32 %tmp37 to i64
2328 ///    %tmp31 = bitcast float %inc5 to i32
2329 ///    %tmp32 = zext i32 %tmp31 to i64
2330 ///    %tmp33 = shl i64 %tmp32, 32
2331 ///    %ins35 = or i64 %tmp33, %tmp38
2332 ///    %tmp43 = bitcast i64 %ins35 to <2 x float>
2333 ///
2334 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2335 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2336                                                 InstCombinerImpl &IC) {
2337   auto *DestVecTy = cast<FixedVectorType>(CI.getType());
2338   Value *IntInput = CI.getOperand(0);
2339
2340   SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2341   if (!collectInsertionElements(IntInput, 0, Elements,
2342                                 DestVecTy->getElementType(),
2343                                 IC.getDataLayout().isBigEndian()))
2344     return nullptr;
2345
2346   // If we succeeded, we know that all of the elements are specified by
2347   // Elements or are zero if Elements has a null entry. Recast this as a set of
2348   // insertions.
2349   Value *Result = Constant::getNullValue(CI.getType());
2350   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2351     if (!Elements[i]) continue;  // Unset element.
2352
2353     Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2354                                             IC.Builder.getInt32(i));
2355   }
2356
2357   return Result;
2358 }
2359
2360 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2361 /// vector followed by extract element. The backend tends to handle bitcasts of
2362 /// vectors better than bitcasts of scalars because vector registers are
2363 /// usually not type-specific like scalar integer or scalar floating-point.
2364 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2365                                               InstCombinerImpl &IC) {
2366   Value *VecOp, *Index;
2367   if (!match(BitCast.getOperand(0),
2368              m_OneUse(m_ExtractElt(m_Value(VecOp), m_Value(Index)))))
2369     return nullptr;
2370
2371   // The bitcast must be to a vectorizable type, otherwise we can't make a new
2372   // type to extract from.
2373   Type *DestType = BitCast.getType();
2374   VectorType *VecType = cast<VectorType>(VecOp->getType());
2375   if (VectorType::isValidElementType(DestType)) {
2376     auto *NewVecType = VectorType::get(DestType, VecType);
2377     auto *NewBC = IC.Builder.CreateBitCast(VecOp, NewVecType, "bc");
2378     return ExtractElementInst::Create(NewBC, Index);
2379   }
2380
2381   // Only handle a vector DestType to avoid the inverse transform in visitBitCast.
2382   // bitcast (extractelement <1 x elt>, dest) -> bitcast(<1 x elt>, dest)
2383   auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
2384   if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
2385     return CastInst::Create(Instruction::BitCast, VecOp, DestType);
2386
2387   return nullptr;
2388 }
2389
2390 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
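/// For example (illustrative):
///   %b = bitcast <4 x i32> %x to <2 x i64>
///   %l = and <2 x i64> %b, %y
///   %r = bitcast <2 x i64> %l to <4 x i32>
/// becomes
///   %r = and <4 x i32> %x, (bitcast <2 x i64> %y to <4 x i32>)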
2391 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast, 2392 InstCombiner::BuilderTy &Builder) { 2393 Type *DestTy = BitCast.getType(); 2394 BinaryOperator *BO; 2395 2396 if (!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) || 2397 !BO->isBitwiseLogicOp()) 2398 return nullptr; 2399 2400 // FIXME: This transform is restricted to vector types to avoid backend 2401 // problems caused by creating potentially illegal operations. If a fix-up is 2402 // added to handle that situation, we can remove this check. 2403 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy()) 2404 return nullptr; 2405 2406 if (DestTy->isFPOrFPVectorTy()) { 2407 Value *X, *Y; 2408 // bitcast(logic(bitcast(X), bitcast(Y))) -> bitcast'(logic(bitcast'(X), Y)) 2409 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) && 2410 match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(Y))))) { 2411 if (X->getType()->isFPOrFPVectorTy() && 2412 Y->getType()->isIntOrIntVectorTy()) { 2413 Value *CastedOp = 2414 Builder.CreateBitCast(BO->getOperand(0), Y->getType()); 2415 Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, Y); 2416 return CastInst::CreateBitOrPointerCast(NewBO, DestTy); 2417 } 2418 if (X->getType()->isIntOrIntVectorTy() && 2419 Y->getType()->isFPOrFPVectorTy()) { 2420 Value *CastedOp = 2421 Builder.CreateBitCast(BO->getOperand(1), X->getType()); 2422 Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, X); 2423 return CastInst::CreateBitOrPointerCast(NewBO, DestTy); 2424 } 2425 } 2426 return nullptr; 2427 } 2428 2429 if (!DestTy->isIntOrIntVectorTy()) 2430 return nullptr; 2431 2432 Value *X; 2433 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) && 2434 X->getType() == DestTy && !isa<Constant>(X)) { 2435 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y)) 2436 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy); 2437 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1); 2438 } 2439 2440 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) && 2441 X->getType() == DestTy && !isa<Constant>(X)) { 2442 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X) 2443 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy); 2444 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X); 2445 } 2446 2447 // Canonicalize vector bitcasts to come before vector bitwise logic with a 2448 // constant. This eases recognition of special constants for later ops. 2449 // Example: 2450 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 2451 Constant *C; 2452 if (match(BO->getOperand(1), m_Constant(C))) { 2453 // bitcast (logic X, C) --> logic (bitcast X, C') 2454 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy); 2455 Value *CastedC = Builder.CreateBitCast(C, DestTy); 2456 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC); 2457 } 2458 2459 return nullptr; 2460 } 2461 2462 /// Change the type of a select if we can eliminate a bitcast. 2463 static Instruction *foldBitCastSelect(BitCastInst &BitCast, 2464 InstCombiner::BuilderTy &Builder) { 2465 Value *Cond, *TVal, *FVal; 2466 if (!match(BitCast.getOperand(0), 2467 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) 2468 return nullptr; 2469 2470 // A vector select must maintain the same number of elements in its operands. 
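  // (Illustrative overall fold for this helper, assuming legal types:
  //    %s = select i1 %c, <4 x i32> (bitcast <2 x i64> %x to <4 x i32>), %y
  //    %r = bitcast <4 x i32> %s to <2 x i64>
  //  becomes
  //    %r = select i1 %c, <2 x i64> %x, (bitcast <4 x i32> %y to <2 x i64>).)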
2471   Type *CondTy = Cond->getType();
2472   Type *DestTy = BitCast.getType();
2473   if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
2474     if (!DestTy->isVectorTy() ||
2475         CondVTy->getElementCount() !=
2476             cast<VectorType>(DestTy)->getElementCount())
2477       return nullptr;
2478
2479   // FIXME: This transform is restricted from changing the select between
2480   // scalars and vectors to avoid backend problems caused by creating
2481   // potentially illegal operations. If a fix-up is added to handle that
2482   // situation, we can remove this check.
2483   if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2484     return nullptr;
2485
2486   auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2487   Value *X;
2488   if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2489       !isa<Constant>(X)) {
2490     // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2491     Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2492     return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2493   }
2494
2495   if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2496       !isa<Constant>(X)) {
2497     // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2498     Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2499     return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2500   }
2501
2502   return nullptr;
2503 }
2504
2505 /// Check if all users of CI are StoreInsts.
2506 static bool hasStoreUsersOnly(CastInst &CI) {
2507   for (User *U : CI.users()) {
2508     if (!isa<StoreInst>(U))
2509       return false;
2510   }
2511   return true;
2512 }
2513
2514 /// This function handles the following case:
2515 ///
2516 /// A -> B cast
2517 /// PHI
2518 /// B -> A cast
2519 ///
2520 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2521 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
2522 Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
2523                                                       PHINode *PN) {
2524   // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2525   if (hasStoreUsersOnly(CI))
2526     return nullptr;
2527
2528   Value *Src = CI.getOperand(0);
2529   Type *SrcTy = Src->getType();   // Type B
2530   Type *DestTy = CI.getType();    // Type A
2531
2532   SmallVector<PHINode *, 4> PhiWorklist;
2533   SmallSetVector<PHINode *, 4> OldPhiNodes;
2534
2535   // Find all of the A->B casts and PHI nodes.
2536   // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2537   // OldPhiNodes is used to track all known PHI nodes; before adding a new
2538   // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2539   PhiWorklist.push_back(PN);
2540   OldPhiNodes.insert(PN);
2541   while (!PhiWorklist.empty()) {
2542     auto *OldPN = PhiWorklist.pop_back_val();
2543     for (Value *IncValue : OldPN->incoming_values()) {
2544       if (isa<Constant>(IncValue))
2545         continue;
2546
2547       if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2548         // If there is a sequence of one or more load instructions, where each
2549         // loaded value is used as the address of a later load, a bitcast is
2550         // necessary to change the value type; don't optimize it. For
2551         // simplicity we give up if the load address comes from another load.
2552         Value *Addr = LI->getOperand(0);
2553         if (Addr == &CI || isa<LoadInst>(Addr))
2554           return nullptr;
2555         // Don't transform "load <256 x i32>, <256 x i32>*" to
2556         // "load x86_amx, x86_amx*", because x86_amx* is invalid.
2557         // TODO: Remove this check when bitcast between vector and x86_amx
2558         // is replaced with a specific intrinsic.
2559         if (DestTy->isX86_AMXTy())
2560           return nullptr;
2561         if (LI->hasOneUse() && LI->isSimple())
2562           continue;
2563         // If a LoadInst has more than one use, changing the type of loaded
2564         // value may create another bitcast.
2565         return nullptr;
2566       }
2567
2568       if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2569         if (OldPhiNodes.insert(PNode))
2570           PhiWorklist.push_back(PNode);
2571         continue;
2572       }
2573
2574       auto *BCI = dyn_cast<BitCastInst>(IncValue);
2575       // We can't handle other instructions.
2576       if (!BCI)
2577         return nullptr;
2578
2579       // Verify it's an A->B cast.
2580       Type *TyA = BCI->getOperand(0)->getType();
2581       Type *TyB = BCI->getType();
2582       if (TyA != DestTy || TyB != SrcTy)
2583         return nullptr;
2584     }
2585   }
2586
2587   // Check that each user of each old PHI node is something that we can
2588   // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
2589   for (auto *OldPN : OldPhiNodes) {
2590     for (User *V : OldPN->users()) {
2591       if (auto *SI = dyn_cast<StoreInst>(V)) {
2592         if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2593           return nullptr;
2594       } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2595         // Verify it's a B->A cast.
2596         Type *TyB = BCI->getOperand(0)->getType();
2597         Type *TyA = BCI->getType();
2598         if (TyA != DestTy || TyB != SrcTy)
2599           return nullptr;
2600       } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2601         // As long as the user is another old PHI node, then even if we don't
2602         // rewrite it, the PHI web we're considering won't have any users
2603         // outside itself, so it'll be dead.
2604         if (!OldPhiNodes.contains(PHI))
2605           return nullptr;
2606       } else {
2607         return nullptr;
2608       }
2609     }
2610   }
2611
2612   // For each old PHI node, create a corresponding new PHI node with type A.
2613   SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2614   for (auto *OldPN : OldPhiNodes) {
2615     Builder.SetInsertPoint(OldPN);
2616     PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2617     NewPNodes[OldPN] = NewPN;
2618   }
2619
2620   // Fill in the operands of new PHI nodes.
2621   for (auto *OldPN : OldPhiNodes) {
2622     PHINode *NewPN = NewPNodes[OldPN];
2623     for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2624       Value *V = OldPN->getOperand(j);
2625       Value *NewV = nullptr;
2626       if (auto *C = dyn_cast<Constant>(V)) {
2627         NewV = ConstantExpr::getBitCast(C, DestTy);
2628       } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2629         // Explicitly perform load combine to make sure no opposing transform
2630         // can remove the bitcast in the meantime and trigger an infinite loop.
2631         Builder.SetInsertPoint(LI);
2632         NewV = combineLoadToNewType(*LI, DestTy);
2633         // Remove the old load and its use in the old phi, which itself becomes
2634         // dead once the whole transform finishes.
2635         replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
2636         eraseInstFromFunction(*LI);
2637       } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2638         NewV = BCI->getOperand(0);
2639       } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2640         NewV = NewPNodes[PrevPN];
2641       }
2642       assert(NewV);
2643       NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2644     }
2645   }
2646
2647   // Traverse all accumulated PHI nodes and process their users,
2648   // which are Stores and BitCasts. Without this processing
2649   // NewPHI nodes could be replicated and could lead to extra
2650   // moves generated after DeSSA.
2651 // If there is a store with type B, change it to type A. 2652 2653 2654 // Replace users of BitCast B->A with NewPHI. These will help 2655 // later to get rid off a closure formed by OldPHI nodes. 2656 Instruction *RetVal = nullptr; 2657 for (auto *OldPN : OldPhiNodes) { 2658 PHINode *NewPN = NewPNodes[OldPN]; 2659 for (User *V : make_early_inc_range(OldPN->users())) { 2660 if (auto *SI = dyn_cast<StoreInst>(V)) { 2661 assert(SI->isSimple() && SI->getOperand(0) == OldPN); 2662 Builder.SetInsertPoint(SI); 2663 auto *NewBC = 2664 cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy)); 2665 SI->setOperand(0, NewBC); 2666 Worklist.push(SI); 2667 assert(hasStoreUsersOnly(*NewBC)); 2668 } 2669 else if (auto *BCI = dyn_cast<BitCastInst>(V)) { 2670 Type *TyB = BCI->getOperand(0)->getType(); 2671 Type *TyA = BCI->getType(); 2672 assert(TyA == DestTy && TyB == SrcTy); 2673 (void) TyA; 2674 (void) TyB; 2675 Instruction *I = replaceInstUsesWith(*BCI, NewPN); 2676 if (BCI == &CI) 2677 RetVal = I; 2678 } else if (auto *PHI = dyn_cast<PHINode>(V)) { 2679 assert(OldPhiNodes.contains(PHI)); 2680 (void) PHI; 2681 } else { 2682 llvm_unreachable("all uses should be handled"); 2683 } 2684 } 2685 } 2686 2687 return RetVal; 2688 } 2689 2690 static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder, 2691 const DataLayout &DL) { 2692 Value *Src = CI.getOperand(0); 2693 PointerType *SrcPTy = cast<PointerType>(Src->getType()); 2694 PointerType *DstPTy = cast<PointerType>(CI.getType()); 2695 2696 // Bitcasts involving opaque pointers cannot be converted into a GEP. 2697 if (SrcPTy->isOpaque() || DstPTy->isOpaque()) 2698 return nullptr; 2699 2700 Type *DstElTy = DstPTy->getNonOpaquePointerElementType(); 2701 Type *SrcElTy = SrcPTy->getNonOpaquePointerElementType(); 2702 2703 // When the type pointed to is not sized the cast cannot be 2704 // turned into a gep. 2705 if (!SrcElTy->isSized()) 2706 return nullptr; 2707 2708 // If the source and destination are pointers, and this cast is equivalent 2709 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep. 2710 // This can enhance SROA and other transforms that want type-safe pointers. 2711 unsigned NumZeros = 0; 2712 while (SrcElTy && SrcElTy != DstElTy) { 2713 SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0); 2714 ++NumZeros; 2715 } 2716 2717 // If we found a path from the src to dest, create the getelementptr now. 2718 if (SrcElTy == DstElTy) { 2719 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0)); 2720 GetElementPtrInst *GEP = GetElementPtrInst::Create( 2721 SrcPTy->getNonOpaquePointerElementType(), Src, Idxs); 2722 2723 // If the source pointer is dereferenceable, then assume it points to an 2724 // allocated object and apply "inbounds" to the GEP. 2725 bool CanBeNull, CanBeFreed; 2726 if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) { 2727 // In a non-default address space (not 0), a null pointer can not be 2728 // assumed inbounds, so ignore that case (dereferenceable_or_null). 2729 // The reason is that 'null' is not treated differently in these address 2730 // spaces, and we consequently ignore the 'gep inbounds' special case 2731 // for 'null' which allows 'inbounds' on 'null' if the indices are 2732 // zeros. 
2733 if (SrcPTy->getAddressSpace() == 0 || !CanBeNull) 2734 GEP->setIsInBounds(); 2735 } 2736 return GEP; 2737 } 2738 return nullptr; 2739 } 2740 2741 Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) { 2742 // If the operands are integer typed then apply the integer transforms, 2743 // otherwise just apply the common ones. 2744 Value *Src = CI.getOperand(0); 2745 Type *SrcTy = Src->getType(); 2746 Type *DestTy = CI.getType(); 2747 2748 // Get rid of casts from one type to the same type. These are useless and can 2749 // be replaced by the operand. 2750 if (DestTy == Src->getType()) 2751 return replaceInstUsesWith(CI, Src); 2752 2753 if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) { 2754 // If we are casting a alloca to a pointer to a type of the same 2755 // size, rewrite the allocation instruction to allocate the "right" type. 2756 // There is no need to modify malloc calls because it is their bitcast that 2757 // needs to be cleaned up. 2758 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src)) 2759 if (Instruction *V = PromoteCastOfAllocation(CI, *AI)) 2760 return V; 2761 2762 if (Instruction *I = convertBitCastToGEP(CI, Builder, DL)) 2763 return I; 2764 } 2765 2766 if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) { 2767 // Beware: messing with this target-specific oddity may cause trouble. 2768 if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) { 2769 Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType()); 2770 return InsertElementInst::Create(PoisonValue::get(DestTy), Elem, 2771 Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); 2772 } 2773 2774 if (isa<IntegerType>(SrcTy)) { 2775 // If this is a cast from an integer to vector, check to see if the input 2776 // is a trunc or zext of a bitcast from vector. If so, we can replace all 2777 // the casts with a shuffle and (potentially) a bitcast. 2778 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) { 2779 CastInst *SrcCast = cast<CastInst>(Src); 2780 if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0))) 2781 if (isa<VectorType>(BCIn->getOperand(0)->getType())) 2782 if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts( 2783 BCIn->getOperand(0), cast<VectorType>(DestTy), *this)) 2784 return I; 2785 } 2786 2787 // If the input is an 'or' instruction, we may be doing shifts and ors to 2788 // assemble the elements of the vector manually. Try to rip the code out 2789 // and replace it with insertelements. 2790 if (Value *V = optimizeIntegerToVectorInsertions(CI, *this)) 2791 return replaceInstUsesWith(CI, V); 2792 } 2793 } 2794 2795 if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) { 2796 if (SrcVTy->getNumElements() == 1) { 2797 // If our destination is not a vector, then make this a straight 2798 // scalar-scalar cast. 2799 if (!DestTy->isVectorTy()) { 2800 Value *Elem = 2801 Builder.CreateExtractElement(Src, 2802 Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); 2803 return CastInst::Create(Instruction::BitCast, Elem, DestTy); 2804 } 2805 2806 // Otherwise, see if our source is an insert. If so, then use the scalar 2807 // component directly: 2808 // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m> 2809 if (auto *InsElt = dyn_cast<InsertElementInst>(Src)) 2810 return new BitCastInst(InsElt->getOperand(1), DestTy); 2811 } 2812 2813 // Convert an artificial vector insert into more analyzable bitwise logic. 
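    // e.g. (illustrative, little-endian):
    //   %v = bitcast i32 %x to <2 x i16>
    //   %i = insertelement <2 x i16> %v, i16 %y, i64 0
    //   %r = bitcast <2 x i16> %i to i32
    // becomes
    //   %r = or (and %x, 0xFFFF0000), (zext i16 %y to i32)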
    unsigned BitWidth = DestTy->getScalarSizeInBits();
    Value *X, *Y;
    uint64_t IndexC;
    if (match(Src, m_OneUse(m_InsertElt(m_OneUse(m_BitCast(m_Value(X))),
                                        m_Value(Y), m_ConstantInt(IndexC)))) &&
        DestTy->isIntegerTy() && X->getType() == DestTy &&
        Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
      // Adjust for big endian - the LSBs are at the high index.
      if (DL.isBigEndian())
        IndexC = SrcVTy->getNumElements() - 1 - IndexC;

      // We only handle (endian-normalized) insert to index 0. Any other insert
      // would require a left-shift, so that is an extra instruction.
      if (IndexC == 0) {
        // bitcast (inselt (bitcast X), Y, 0) --> or (and X, MaskC), (zext Y)
        unsigned EltWidth = Y->getType()->getScalarSizeInBits();
        APInt MaskC = APInt::getHighBitsSet(BitWidth, BitWidth - EltWidth);
        Value *AndX = Builder.CreateAnd(X, MaskC);
        Value *ZextY = Builder.CreateZExt(Y, DestTy);
        return BinaryOperator::CreateOr(AndX, ZextY);
      }
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
    auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
        ShufElts == SrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types have matching element counts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte/bit reversing shuffle is better
    // recognized as a byte/bit swap:
    // bitcast <N x i8> (shuf X, undef, <N-1, N-2,...0>) -> bswap (bitcast X)
    // bitcast <N x i1> (shuf X, undef, <N-1, N-2,...0>) -> bitreverse (bitcast X)
    if (DestTy->isIntegerTy() && ShufElts.getKnownMinValue() % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      unsigned IntrinsicNum = 0;
      if (DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
          SrcTy->getScalarSizeInBits() == 8) {
        IntrinsicNum = Intrinsic::bswap;
      } else if (SrcTy->getScalarSizeInBits() == 1) {
        IntrinsicNum = Intrinsic::bitreverse;
      }
      if (IntrinsicNum != 0) {
        assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
        assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
        Function *BswapOrBitreverse =
            Intrinsic::getDeclaration(CI.getModule(), IntrinsicNum, DestTy);
        Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
        return CallInst::Create(BswapOrBitreverse, {ScalarX});
      }
    }
  }

  // Handle the A->B->A cast pattern where there is an intervening PHI node.
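  // For example (an illustrative sketch; i64/double chosen arbitrarily):
  //   %b   = bitcast i64 %a to double                    ; A->B
  //   %phi = phi double [ %b, %bb1 ], [ %b2, %bb2 ]
  //   %res = bitcast double %phi to i64                  ; B->A
  // optimizeBitCastFromPhi tries to rebuild the PHI in the A (i64) type so
  // that both layers of bitcasts become removable.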
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination element type, and then the
  // addrspacecast. This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  if (!SrcTy->hasSameElementTypeAs(DestTy)) {
    Type *MidTy =
        PointerType::getWithSamePointeeType(DestTy, SrcTy->getAddressSpace());
    // Handle vectors of pointers.
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType()))
      MidTy = VectorType::get(MidTy, VT->getElementCount());

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}