//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore the following attributes because they don't affect the
  // call sequence.
  AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef})
    CallerAttrs.removeAttribute(Attr);

  if (CallerAttrs.hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.contains(Attribute::ZExt) ||
      CallerAttrs.contains(Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
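    // (clobbersPhysReg returns true when the mask does *not* preserve Reg,
    // i.e. Reg is not callee-saved, so such registers are skipped here.)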
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    // for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() == ISD::AssertZext)
      Value = Value.getOperand(0);
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamStackAlign(ArgIdx);
  IndirectType = nullptr;
  assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 &&
         "multiple ABI attributes?");
  if (IsByVal) {
    IndirectType = Call->getParamByValType(ArgIdx);
    if (!Alignment)
      Alignment = Call->getParamAlign(ArgIdx);
  }
  if (IsPreallocated)
    IndirectType = Call->getParamPreallocatedType(ArgIdx);
  if (IsInAlloca)
    IndirectType = Call->getParamInAllocaType(ArgIdx);
  if (IsSRet)
    IndirectType = Call->getParamStructRetType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
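///
/// A minimal usage sketch (hypothetical; assumes LHS and RHS are legalized
/// f128 operands):
///   SDValue Ops[2] = {LHS, RHS};
///   TargetLowering::MakeLibCallOptions CallOptions;
///   std::pair<SDValue, SDValue> Res =
///       makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl);
///   // Res.first is the call result, Res.second is the output chain.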
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions, const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt =
        shouldSignExtendTypeInLibCall(NewOp.getValueType(), CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Limit != ~unsigned(0) && Op.isMemcpyWithFixedDstAlign() &&
      Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS, SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting them. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons.
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = {OldLHS.getValueType(), OldRHS.getValueType()};
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
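  // (For example, targets whose MCAsmInfo provides a GP-relative directive,
  // such as MIPS's .gpword, can emit GP-relative 32-bit jump table entries.)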
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C || Op1C->isOpaque())
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified =
      SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                                          const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO,
                              Depth, AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // Pretend we don't know anything about scalable vectors for now.
  // TODO: We can probably do more work on simplifying the operations for
  // scalable vectors, but for now we just bail out.
  if (VT.isScalableVector())
    return SDValue();

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(VT);

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
        unsigned BitOffset = EltOffset * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset);
        if (!Sub.isZero()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
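      // (With 0/-1 booleans and only the sign bit demanded, the sign bit of X
      // already encodes the result of X < 0.)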
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (IsLE && DemandedElts == 1 &&
        DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // If we don't demand the inserted subvector, return the base vector.
    if (DemandedSubElts == 0)
      return Vec;
    // If this simply widens the lowest subvector, see if we can do it earlier.
    if (Idx == 0 && Vec.isUndef()) {
      if (SDValue NewSub = SimplifyMultipleUseDemandedBits(
              Sub, DemandedBits, DemandedSubElts, DAG, Depth + 1))
        return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(),
                           Op.getOperand(0), NewSub, Op.getOperand(2));
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
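    // e.g. a v4i32 shuffle with mask <0,1,2,3> takes every demanded lane from
    // the LHS in place, so the LHS itself can be returned.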
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();

  // Pretend we don't know anything about scalable vectors for now.
  // TODO: We can probably do more work on simplifying the operations for
  // scalable vectors, but for now we just bail out.
  if (VT.isScalableVector())
    return SDValue();

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

// Attempt to form ext(avgfloor(A, B)) from shr(add(ext(A), ext(B)), 1),
// or to form ext(avgceil(A, B)) from shr(add(ext(A), ext(B), 1), 1).
static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
                                 const TargetLowering &TLI,
                                 const APInt &DemandedBits,
                                 const APInt &DemandedElts,
                                 unsigned Depth) {
  assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
         "SRL or SRA node is required here!");
  // Is the right shift using an immediate value of 1?
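  // A shift amount of one is what turns the widened add into an average:
  // e.g. (srl (add (zext A), (zext B)), 1) is the overflow-safe unsigned
  // floor average of A and B, i.e. ISD::AVGFLOORU.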
  ConstantSDNode *N1C = isConstOrConstSplat(Op.getOperand(1), DemandedElts);
  if (!N1C || !N1C->isOne())
    return SDValue();

  // We are looking for an avgfloor
  // add(ext, ext)
  // or one of these as an avgceil
  // add(add(ext, ext), 1)
  // add(add(ext, 1), ext)
  // add(ext, add(ext, 1))
  SDValue Add = Op.getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ExtOpA = Add.getOperand(0);
  SDValue ExtOpB = Add.getOperand(1);
  auto MatchOperands = [&](SDValue Op1, SDValue Op2, SDValue Op3) {
    ConstantSDNode *ConstOp;
    if ((ConstOp = isConstOrConstSplat(Op1, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op2;
      ExtOpB = Op3;
      return true;
    }
    if ((ConstOp = isConstOrConstSplat(Op2, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op3;
      return true;
    }
    if ((ConstOp = isConstOrConstSplat(Op3, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op2;
      return true;
    }
    return false;
  };
  bool IsCeil =
      (ExtOpA.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpA.getOperand(0), ExtOpA.getOperand(1), ExtOpB)) ||
      (ExtOpB.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpB.getOperand(0), ExtOpB.getOperand(1), ExtOpA));

  // If the shift is signed (sra):
  //  - Needs >= 2 sign bits for both operands.
  //  - Needs >= 2 zero bits.
  // If the shift is unsigned (srl):
  //  - Needs >= 1 zero bit for both operands.
  //  - Needs 1 demanded bit zero and >= 2 sign bits.
  unsigned ShiftOpc = Op.getOpcode();
  bool IsSigned = false;
  unsigned KnownBits;
  unsigned NumSignedA = DAG.ComputeNumSignBits(ExtOpA, DemandedElts, Depth);
  unsigned NumSignedB = DAG.ComputeNumSignBits(ExtOpB, DemandedElts, Depth);
  unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
  unsigned NumZeroA =
      DAG.computeKnownBits(ExtOpA, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZeroB =
      DAG.computeKnownBits(ExtOpB, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZero = std::min(NumZeroA, NumZeroB);

  switch (ShiftOpc) {
  default:
    llvm_unreachable("Unexpected ShiftOpc in combineShiftToAVG");
  case ISD::SRA: {
    if (NumZero >= 2 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  case ISD::SRL: {
    if (NumZero >= 1 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1 && DemandedBits.isSignBitClear()) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  }

  unsigned AVGOpc = IsCeil ? (IsSigned ? ISD::AVGCEILS : ISD::AVGCEILU)
                           : (IsSigned ? ISD::AVGFLOORS : ISD::AVGFLOORU);

  // Find the smallest power-2 type that is legal for this vector size and
  // operation, given the original type size and the number of known sign/zero
  // bits.
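  // e.g. an i32 input with 24 known leading zero bits gives
  // MinWidth = max(32 - 24, 8) = 8, so we try an i8 (or vector-of-i8) AVG node.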
  EVT VT = Op.getValueType();
  unsigned MinWidth =
      std::max<unsigned>(VT.getScalarSizeInBits() - KnownBits, 8);
  EVT NVT = EVT::getIntegerVT(*DAG.getContext(), PowerOf2Ceil(MinWidth));
  if (VT.isVector())
    NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
  if (!TLI.isOperationLegalOrCustom(AVGOpc, NVT))
    return SDValue();

  SDLoc DL(Op);
  SDValue ResultAVG =
      DAG.getNode(AVGOpc, DL, NVT, DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpA),
                  DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpB));
  return DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT,
                     ResultAVG);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of
/// the result of Op are ever used downstream. If we can use this information
/// to simplify Op, create a new simplified DAG node and return true, returning
/// the original and new nodes in Old and New. Otherwise, analyze the
/// expression and return a mask of Known bits for the expression (used to
/// simplify the caller). The Known bits may only be accurate for those bits
/// in the OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  EVT VT = Op.getValueType();
  if (VT.isScalableVector())
    return false;

  bool IsLE = TLO.DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!VT.isVector() || NumElts == VT.getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnes(BitWidth);
    DemandedElts = APInt::getAllOnes(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
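    // Nothing is observed, so any value (undef included) is as good as Op.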
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zext(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    auto *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = KnownBits::commonBits(Known, KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
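    // e.g. inserting a v2i32 subvector at index 4 of a v8i32: DemandedSubElts
    // is DemandedElts[5:4] and DemandedSrcElts is DemandedElts with those two
    // lanes cleared.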
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = KnownBits::commonBits(Known, KnownSub);
    if (!!DemandedSrcElts)
      Known = KnownBits::commonBits(Known, KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
        !DemandedSrcElts.isAllOnes()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
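    // Mask entries below NumElts select LHS lanes, entries at or above NumElts
    // select RHS lane (M - NumElts), and negative entries are undef.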
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = KnownBits::commonBits(Known, Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    // AND(INSERT_SUBVECTOR(C,X,I),M) -> INSERT_SUBVECTOR(AND(C,M),X,I)
    // iff 'C' is Undef/Constant and AND(X,M) == X (for DemandedBits).
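    // Folding the mask into the insert's base operand lets AND(C,M) constant
    // fold, leaving only the insert itself.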
    if (Op0.getOpcode() == ISD::INSERT_SUBVECTOR &&
        (Op0.getOperand(0).isUndef() ||
         ISD::isBuildVectorOfConstantSDNodes(Op0.getOperand(0).getNode())) &&
        Op0->hasOneUse()) {
      unsigned NumSubElts =
          Op0.getOperand(1).getValueType().getVectorNumElements();
      unsigned SubIdx = Op0.getConstantOperandVal(2);
      APInt DemandedSub =
          APInt::getBitsSet(NumElts, SubIdx, SubIdx + NumSubElts);
      KnownBits KnownSubMask =
          TLO.DAG.computeKnownBits(Op1, DemandedSub & DemandedElts, Depth + 1);
      if (DemandedBits.isSubsetOf(KnownSubMask.One)) {
        SDValue NewAnd =
            TLO.DAG.getNode(ISD::AND, dl, VT, Op0.getOperand(0), Op1);
        SDValue NewInsert =
            TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, NewAnd,
                            Op0.getOperand(1), Op0.getOperand(2));
        return TLO.CombineTo(Op, NewInsert);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
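    // e.g. if every demanded bit of Op1 is known zero, (or Op0, Op1) behaves
    // as Op0 for all bits any user will look at.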
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // (or (and X, C1), (and (or X, Y), C2)) -> (or (and X, C1|C2), (and Y, C2))
    // TODO: Use SimplifyMultipleUseDemandedBits to peek through masks.
    if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::AND &&
        Op0->hasOneUse() && Op1->hasOneUse()) {
      // Attempt to match all commutations - m_c_Or would've been useful!
      for (int I = 0; I != 2; ++I) {
        SDValue X = Op.getOperand(I).getOperand(0);
        SDValue C1 = Op.getOperand(I).getOperand(1);
        SDValue Alt = Op.getOperand(1 - I).getOperand(0);
        SDValue C2 = Op.getOperand(1 - I).getOperand(1);
        if (Alt.getOpcode() == ISD::OR) {
          for (int J = 0; J != 2; ++J) {
            if (X == Alt.getOperand(J)) {
              SDValue Y = Alt.getOperand(1 - J);
              if (SDValue C12 = TLO.DAG.FoldConstantArithmetic(ISD::OR, dl, VT,
                                                               {C1, C2})) {
                SDValue MaskX = TLO.DAG.getNode(ISD::AND, dl, VT, X, C12);
                SDValue MaskY = TLO.DAG.getNode(ISD::AND, dl, VT, Y, C2);
                return TLO.CombineTo(
                    Op, TLO.DAG.getNode(ISD::OR, dl, VT, MaskX, MaskY));
              }
            }
          }
        }
      }
    }

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1) ^ (B & C2) -> (A & C1) | (B & C2) iff C1 & C2 == 0
(A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 1541 if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero)) 1542 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1)); 1543 1544 ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts); 1545 if (C) { 1546 // If one side is a constant, and all of the set bits in the constant are 1547 // also known set on the other side, turn this into an AND, as we know 1548 // the bits will be cleared. 1549 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 1550 // NB: it is okay if more bits are known than are requested 1551 if (C->getAPIntValue() == Known2.One) { 1552 SDValue ANDC = 1553 TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT); 1554 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC)); 1555 } 1556 1557 // If the RHS is a constant, see if we can change it. Don't alter a -1 1558 // constant because that's a 'not' op, and that is better for combining 1559 // and codegen. 1560 if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) { 1561 // We're flipping all demanded bits. Flip the undemanded bits too. 1562 SDValue New = TLO.DAG.getNOT(dl, Op0, VT); 1563 return TLO.CombineTo(Op, New); 1564 } 1565 1566 unsigned Op0Opcode = Op0.getOpcode(); 1567 if ((Op0Opcode == ISD::SRL || Op0Opcode == ISD::SHL) && Op0.hasOneUse()) { 1568 if (ConstantSDNode *ShiftC = 1569 isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) { 1570 // Don't crash on an oversized shift. We cannot guarantee that a 1571 // bogus shift has been simplified to undef. 1572 if (ShiftC->getAPIntValue().ult(BitWidth)) { 1573 uint64_t ShiftAmt = ShiftC->getZExtValue(); 1574 APInt Ones = APInt::getAllOnes(BitWidth); 1575 Ones = Op0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) 1576 : Ones.lshr(ShiftAmt); 1577 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo(); 1578 if ((DemandedBits & C->getAPIntValue()) == (DemandedBits & Ones) && 1579 TLI.isDesirableToCommuteXorWithShift(Op.getNode())) { 1580 // If the xor constant is a demanded mask, do a 'not' before the 1581 // shift: 1582 // xor (X << ShiftC), XorC --> (not X) << ShiftC 1583 // xor (X >> ShiftC), XorC --> (not X) >> ShiftC 1584 SDValue Not = TLO.DAG.getNOT(dl, Op0.getOperand(0), VT); 1585 return TLO.CombineTo(Op, TLO.DAG.getNode(Op0Opcode, dl, VT, Not, 1586 Op0.getOperand(1))); 1587 } 1588 } 1589 } 1590 } 1591 } 1592 1593 // If we can't turn this into a 'not', try to shrink the constant. 1594 if (!C || !C->isAllOnes()) 1595 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1596 return true; 1597 1598 // Attempt to avoid multi-use ops if we don't need anything from them. 1599 if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) { 1600 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1601 Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1602 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 1603 Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1); 1604 if (DemandedOp0 || DemandedOp1) { 1605 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 1606 Op1 = DemandedOp1 ?
DemandedOp1 : Op1; 1607 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1); 1608 return TLO.CombineTo(Op, NewOp); 1609 } 1610 } 1611 1612 Known ^= Known2; 1613 break; 1614 } 1615 case ISD::SELECT: 1616 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO, 1617 Depth + 1)) 1618 return true; 1619 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO, 1620 Depth + 1)) 1621 return true; 1622 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1623 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1624 1625 // If the operands are constants, see if we can simplify them. 1626 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1627 return true; 1628 1629 // Only known if known in both the LHS and RHS. 1630 Known = KnownBits::commonBits(Known, Known2); 1631 break; 1632 case ISD::VSELECT: 1633 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts, 1634 Known, TLO, Depth + 1)) 1635 return true; 1636 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts, 1637 Known2, TLO, Depth + 1)) 1638 return true; 1639 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1640 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1641 1642 // Only known if known in both the LHS and RHS. 1643 Known = KnownBits::commonBits(Known, Known2); 1644 break; 1645 case ISD::SELECT_CC: 1646 if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO, 1647 Depth + 1)) 1648 return true; 1649 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO, 1650 Depth + 1)) 1651 return true; 1652 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1653 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1654 1655 // If the operands are constants, see if we can simplify them. 1656 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1657 return true; 1658 1659 // Only known if known in both the LHS and RHS. 1660 Known = KnownBits::commonBits(Known, Known2); 1661 break; 1662 case ISD::SETCC: { 1663 SDValue Op0 = Op.getOperand(0); 1664 SDValue Op1 = Op.getOperand(1); 1665 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1666 // If (1) we only need the sign-bit, (2) the setcc operands are the same 1667 // width as the setcc result, and (3) the result of a setcc conforms to 0 or 1668 // -1, we may be able to bypass the setcc. 1669 if (DemandedBits.isSignMask() && 1670 Op0.getScalarValueSizeInBits() == BitWidth && 1671 getBooleanContents(Op0.getValueType()) == 1672 BooleanContent::ZeroOrNegativeOneBooleanContent) { 1673 // If we're testing X < 0, then this compare isn't needed - just use X! 1674 // FIXME: We're limiting to integer types here, but this should also work 1675 // if we don't care about FP signed-zero. The use of SETLT with FP means 1676 // that we don't care about NaNs. 1677 if (CC == ISD::SETLT && Op1.getValueType().isInteger() && 1678 (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode()))) 1679 return TLO.CombineTo(Op, Op0); 1680 1681 // TODO: Should we check for other forms of sign-bit comparisons? 
1682 // Examples: X <= -1, X >= 0 1683 } 1684 if (getBooleanContents(Op0.getValueType()) == 1685 TargetLowering::ZeroOrOneBooleanContent && 1686 BitWidth > 1) 1687 Known.Zero.setBitsFrom(1); 1688 break; 1689 } 1690 case ISD::SHL: { 1691 SDValue Op0 = Op.getOperand(0); 1692 SDValue Op1 = Op.getOperand(1); 1693 EVT ShiftVT = Op1.getValueType(); 1694 1695 if (const APInt *SA = 1696 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1697 unsigned ShAmt = SA->getZExtValue(); 1698 if (ShAmt == 0) 1699 return TLO.CombineTo(Op, Op0); 1700 1701 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1702 // single shift. We can do this if the bottom bits (which are shifted 1703 // out) are never demanded. 1704 // TODO - support non-uniform vector amounts. 1705 if (Op0.getOpcode() == ISD::SRL) { 1706 if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) { 1707 if (const APInt *SA2 = 1708 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1709 unsigned C1 = SA2->getZExtValue(); 1710 unsigned Opc = ISD::SHL; 1711 int Diff = ShAmt - C1; 1712 if (Diff < 0) { 1713 Diff = -Diff; 1714 Opc = ISD::SRL; 1715 } 1716 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1717 return TLO.CombineTo( 1718 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1719 } 1720 } 1721 } 1722 1723 // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits 1724 // are not demanded. This will likely allow the anyext to be folded away. 1725 // TODO - support non-uniform vector amounts. 1726 if (Op0.getOpcode() == ISD::ANY_EXTEND) { 1727 SDValue InnerOp = Op0.getOperand(0); 1728 EVT InnerVT = InnerOp.getValueType(); 1729 unsigned InnerBits = InnerVT.getScalarSizeInBits(); 1730 if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits && 1731 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1732 EVT ShTy = getShiftAmountTy(InnerVT, DL); 1733 if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits())) 1734 ShTy = InnerVT; 1735 SDValue NarrowShl = 1736 TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp, 1737 TLO.DAG.getConstant(ShAmt, dl, ShTy)); 1738 return TLO.CombineTo( 1739 Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl)); 1740 } 1741 1742 // Repeat the SHL optimization above in cases where an extension 1743 // intervenes: (shl (anyext (shr x, c1)), c2) to 1744 // (shl (anyext x), c2-c1). This requires that the bottom c1 bits 1745 // aren't demanded (as above) and that the shifted upper c1 bits of 1746 // x aren't demanded. 1747 // TODO - support non-uniform vector amounts. 
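// Illustrative example with hypothetical types: for X : i16, the node
// (i32 shl (i32 anyext (i16 srl X, 4)), 12) puts X[4..15] at bits 12..23.
// If bits 0..11 are not demanded and nothing above bit 23 is demanded, the
// code below rebuilds it as (i32 shl (i32 anyext X), 8), which produces the
// same demanded bits and lets the inner srl fold away.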
1748 if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL && 1749 InnerOp.hasOneUse()) { 1750 if (const APInt *SA2 = 1751 TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) { 1752 unsigned InnerShAmt = SA2->getZExtValue(); 1753 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits && 1754 DemandedBits.getActiveBits() <= 1755 (InnerBits - InnerShAmt + ShAmt) && 1756 DemandedBits.countTrailingZeros() >= ShAmt) { 1757 SDValue NewSA = 1758 TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT); 1759 SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, 1760 InnerOp.getOperand(0)); 1761 return TLO.CombineTo( 1762 Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA)); 1763 } 1764 } 1765 } 1766 } 1767 1768 APInt InDemandedMask = DemandedBits.lshr(ShAmt); 1769 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1770 Depth + 1)) 1771 return true; 1772 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1773 Known.Zero <<= ShAmt; 1774 Known.One <<= ShAmt; 1775 // low bits known zero. 1776 Known.Zero.setLowBits(ShAmt); 1777 1778 // Attempt to avoid multi-use ops if we don't need anything from them. 1779 if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 1780 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1781 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1782 if (DemandedOp0) { 1783 SDValue NewOp = TLO.DAG.getNode(ISD::SHL, dl, VT, DemandedOp0, Op1); 1784 return TLO.CombineTo(Op, NewOp); 1785 } 1786 } 1787 1788 // Try shrinking the operation as long as the shift amount will still be 1789 // in range. 1790 if ((ShAmt < DemandedBits.getActiveBits()) && 1791 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1792 return true; 1793 } else { 1794 // This is a variable shift, so we can't shift the demand mask by a known 1795 // amount. But if we are not demanding high bits, then we are not 1796 // demanding those bits from the pre-shifted operand either. 1797 if (unsigned CTLZ = DemandedBits.countLeadingZeros()) { 1798 APInt DemandedFromOp(APInt::getLowBitsSet(BitWidth, BitWidth - CTLZ)); 1799 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO, 1800 Depth + 1)) { 1801 SDNodeFlags Flags = Op.getNode()->getFlags(); 1802 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 1803 // Disable the nsw and nuw flags. We can no longer guarantee that we 1804 // won't wrap after simplification. 1805 Flags.setNoSignedWrap(false); 1806 Flags.setNoUnsignedWrap(false); 1807 Op->setFlags(Flags); 1808 } 1809 return true; 1810 } 1811 Known.resetAll(); 1812 } 1813 } 1814 1815 // If we are only demanding sign bits then we can use the shift source 1816 // directly. 1817 if (const APInt *MaxSA = 1818 TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 1819 unsigned ShAmt = MaxSA->getZExtValue(); 1820 unsigned NumSignBits = 1821 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1822 unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1823 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 1824 return TLO.CombineTo(Op, Op0); 1825 } 1826 break; 1827 } 1828 case ISD::SRL: { 1829 SDValue Op0 = Op.getOperand(0); 1830 SDValue Op1 = Op.getOperand(1); 1831 EVT ShiftVT = Op1.getValueType(); 1832 1833 // Try to match AVG patterns. 
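// e.g. something like (srl (add (zext x), (zext y)), 1) may be recognized
// as an unsigned average and rebuilt as an ISD::AVGFLOORU node; the exact
// shapes accepted are decided inside combineShiftToAVG and by target
// legality, so this shape is only illustrative.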
1834 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1835 DemandedElts, Depth + 1)) 1836 return TLO.CombineTo(Op, AVG); 1837 1838 if (const APInt *SA = 1839 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1840 unsigned ShAmt = SA->getZExtValue(); 1841 if (ShAmt == 0) 1842 return TLO.CombineTo(Op, Op0); 1843 1844 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1845 // single shift. We can do this if the top bits (which are shifted out) 1846 // are never demanded. 1847 // TODO - support non-uniform vector amounts. 1848 if (Op0.getOpcode() == ISD::SHL) { 1849 if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) { 1850 if (const APInt *SA2 = 1851 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1852 unsigned C1 = SA2->getZExtValue(); 1853 unsigned Opc = ISD::SRL; 1854 int Diff = ShAmt - C1; 1855 if (Diff < 0) { 1856 Diff = -Diff; 1857 Opc = ISD::SHL; 1858 } 1859 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1860 return TLO.CombineTo( 1861 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1862 } 1863 } 1864 } 1865 1866 APInt InDemandedMask = (DemandedBits << ShAmt); 1867 1868 // If the shift is exact, then it does demand the low bits (and knows that 1869 // they are zero). 1870 if (Op->getFlags().hasExact()) 1871 InDemandedMask.setLowBits(ShAmt); 1872 1873 // Compute the new bits that are at the top now. 1874 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1875 Depth + 1)) 1876 return true; 1877 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1878 Known.Zero.lshrInPlace(ShAmt); 1879 Known.One.lshrInPlace(ShAmt); 1880 // High bits known zero. 1881 Known.Zero.setHighBits(ShAmt); 1882 } 1883 break; 1884 } 1885 case ISD::SRA: { 1886 SDValue Op0 = Op.getOperand(0); 1887 SDValue Op1 = Op.getOperand(1); 1888 EVT ShiftVT = Op1.getValueType(); 1889 1890 // If we only want bits that already match the signbit then we don't need 1891 // to shift. 1892 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1893 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1894 NumHiDemandedBits) 1895 return TLO.CombineTo(Op, Op0); 1896 1897 // If this is an arithmetic shift right and only the low-bit is set, we can 1898 // always convert this into a logical shr, even if the shift amount is 1899 // variable. The low bit of the shift cannot be an input sign bit unless 1900 // the shift amount is >= the size of the datatype, which is undefined. 1901 if (DemandedBits.isOne()) 1902 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1903 1904 // Try to match AVG patterns. 1905 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1906 DemandedElts, Depth + 1)) 1907 return TLO.CombineTo(Op, AVG); 1908 1909 if (const APInt *SA = 1910 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1911 unsigned ShAmt = SA->getZExtValue(); 1912 if (ShAmt == 0) 1913 return TLO.CombineTo(Op, Op0); 1914 1915 APInt InDemandedMask = (DemandedBits << ShAmt); 1916 1917 // If the shift is exact, then it does demand the low bits (and knows that 1918 // they are zero). 1919 if (Op->getFlags().hasExact()) 1920 InDemandedMask.setLowBits(ShAmt); 1921 1922 // If any of the demanded bits are produced by the sign extension, we also 1923 // demand the input sign bit. 
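// Illustrative case (hypothetical i8 values): for (sra X, 4) with
// DemandedBits = 0xF0, result bits 4-7 are all copies of X's sign bit, so
// bit 7 of X must be added to the mask passed down below.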
1924 if (DemandedBits.countLeadingZeros() < ShAmt) 1925 InDemandedMask.setSignBit(); 1926 1927 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1928 Depth + 1)) 1929 return true; 1930 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1931 Known.Zero.lshrInPlace(ShAmt); 1932 Known.One.lshrInPlace(ShAmt); 1933 1934 // If the input sign bit is known to be zero, or if none of the top bits 1935 // are demanded, turn this into an unsigned shift right. 1936 if (Known.Zero[BitWidth - ShAmt - 1] || 1937 DemandedBits.countLeadingZeros() >= ShAmt) { 1938 SDNodeFlags Flags; 1939 Flags.setExact(Op->getFlags().hasExact()); 1940 return TLO.CombineTo( 1941 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1942 } 1943 1944 int Log2 = DemandedBits.exactLogBase2(); 1945 if (Log2 >= 0) { 1946 // The bit must come from the sign. 1947 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1948 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1949 } 1950 1951 if (Known.One[BitWidth - ShAmt - 1]) 1952 // New bits are known one. 1953 Known.One.setHighBits(ShAmt); 1954 1955 // Attempt to avoid multi-use ops if we don't need anything from them. 1956 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1957 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1958 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1959 if (DemandedOp0) { 1960 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1); 1961 return TLO.CombineTo(Op, NewOp); 1962 } 1963 } 1964 } 1965 break; 1966 } 1967 case ISD::FSHL: 1968 case ISD::FSHR: { 1969 SDValue Op0 = Op.getOperand(0); 1970 SDValue Op1 = Op.getOperand(1); 1971 SDValue Op2 = Op.getOperand(2); 1972 bool IsFSHL = (Op.getOpcode() == ISD::FSHL); 1973 1974 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) { 1975 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 1976 1977 // For fshl, 0-shift returns the 1st arg. 1978 // For fshr, 0-shift returns the 2nd arg. 1979 if (Amt == 0) { 1980 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts, 1981 Known, TLO, Depth + 1)) 1982 return true; 1983 break; 1984 } 1985 1986 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt)) 1987 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt) 1988 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt)); 1989 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt); 1990 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 1991 Depth + 1)) 1992 return true; 1993 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO, 1994 Depth + 1)) 1995 return true; 1996 1997 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1998 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt)); 1999 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 2000 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 2001 Known.One |= Known2.One; 2002 Known.Zero |= Known2.Zero; 2003 2004 // Attempt to avoid multi-use ops if we don't need anything from them. 2005 if (!Demanded0.isAllOnes() || !Demanded1.isAllOnes() || 2006 !DemandedElts.isAllOnes()) { 2007 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2008 Op0, Demanded0, DemandedElts, TLO.DAG, Depth + 1); 2009 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2010 Op1, Demanded1, DemandedElts, TLO.DAG, Depth + 1); 2011 if (DemandedOp0 || DemandedOp1) { 2012 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0; 2013 DemandedOp1 = DemandedOp1 ? 
DemandedOp1 : Op1; 2014 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedOp0, 2015 DemandedOp1, Op2); 2016 return TLO.CombineTo(Op, NewOp); 2017 } 2018 } 2019 } 2020 2021 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 2022 if (isPowerOf2_32(BitWidth)) { 2023 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1); 2024 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts, 2025 Known2, TLO, Depth + 1)) 2026 return true; 2027 } 2028 break; 2029 } 2030 case ISD::ROTL: 2031 case ISD::ROTR: { 2032 SDValue Op0 = Op.getOperand(0); 2033 SDValue Op1 = Op.getOperand(1); 2034 bool IsROTL = (Op.getOpcode() == ISD::ROTL); 2035 2036 // If we're rotating a 0/-1 value, then it stays a 0/-1 value. 2037 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1)) 2038 return TLO.CombineTo(Op, Op0); 2039 2040 if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) { 2041 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 2042 unsigned RevAmt = BitWidth - Amt; 2043 2044 // rotl: (Op0 << Amt) | (Op0 >> (BW - Amt)) 2045 // rotr: (Op0 << (BW - Amt)) | (Op0 >> Amt) 2046 APInt Demanded0 = DemandedBits.rotr(IsROTL ? Amt : RevAmt); 2047 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 2048 Depth + 1)) 2049 return true; 2050 2051 // rot*(x, 0) --> x 2052 if (Amt == 0) 2053 return TLO.CombineTo(Op, Op0); 2054 2055 // See if we don't demand either half of the rotated bits. 2056 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SHL, VT)) && 2057 DemandedBits.countTrailingZeros() >= (IsROTL ? Amt : RevAmt)) { 2058 Op1 = TLO.DAG.getConstant(IsROTL ? Amt : RevAmt, dl, Op1.getValueType()); 2059 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, Op1)); 2060 } 2061 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT)) && 2062 DemandedBits.countLeadingZeros() >= (IsROTL ? RevAmt : Amt)) { 2063 Op1 = TLO.DAG.getConstant(IsROTL ? RevAmt : Amt, dl, Op1.getValueType()); 2064 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 2065 } 2066 } 2067 2068 // For pow-2 bitwidths we only demand the bottom modulo amt bits. 2069 if (isPowerOf2_32(BitWidth)) { 2070 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 2071 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 2072 Depth + 1)) 2073 return true; 2074 } 2075 break; 2076 } 2077 case ISD::UMIN: { 2078 // Check if one arg is always less than (or equal) to the other arg. 2079 SDValue Op0 = Op.getOperand(0); 2080 SDValue Op1 = Op.getOperand(1); 2081 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 2082 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 2083 Known = KnownBits::umin(Known0, Known1); 2084 if (Optional<bool> IsULE = KnownBits::ule(Known0, Known1)) 2085 return TLO.CombineTo(Op, IsULE.value() ? Op0 : Op1); 2086 if (Optional<bool> IsULT = KnownBits::ult(Known0, Known1)) 2087 return TLO.CombineTo(Op, IsULT.value() ? Op0 : Op1); 2088 break; 2089 } 2090 case ISD::UMAX: { 2091 // Check if one arg is always greater than (or equal) to the other arg. 2092 SDValue Op0 = Op.getOperand(0); 2093 SDValue Op1 = Op.getOperand(1); 2094 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 2095 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 2096 Known = KnownBits::umax(Known0, Known1); 2097 if (Optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 2098 return TLO.CombineTo(Op, IsUGE.value() ?
Op0 : Op1); 2099 if (Optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 2100 return TLO.CombineTo(Op, IsUGT.value() ? Op0 : Op1); 2101 break; 2102 } 2103 case ISD::BITREVERSE: { 2104 SDValue Src = Op.getOperand(0); 2105 APInt DemandedSrcBits = DemandedBits.reverseBits(); 2106 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2107 Depth + 1)) 2108 return true; 2109 Known.One = Known2.One.reverseBits(); 2110 Known.Zero = Known2.Zero.reverseBits(); 2111 break; 2112 } 2113 case ISD::BSWAP: { 2114 SDValue Src = Op.getOperand(0); 2115 2116 // If the only bits demanded come from one byte of the bswap result, 2117 // just shift the input byte into position to eliminate the bswap. 2118 unsigned NLZ = DemandedBits.countLeadingZeros(); 2119 unsigned NTZ = DemandedBits.countTrailingZeros(); 2120 2121 // Round NTZ down to the next byte. If we have 11 trailing zeros, then 2122 // we need all the bits down to bit 8. Likewise, round NLZ. If we 2123 // have 14 leading zeros, round to 8. 2124 NLZ = alignDown(NLZ, 8); 2125 NTZ = alignDown(NTZ, 8); 2126 // If we need exactly one byte, we can do this transformation. 2127 if (BitWidth - NLZ - NTZ == 8) { 2128 // Replace this with either a left or right shift to get the byte into 2129 // the right place. 2130 unsigned ShiftOpcode = NLZ > NTZ ? ISD::SRL : ISD::SHL; 2131 if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) { 2132 EVT ShiftAmtTy = getShiftAmountTy(VT, DL); 2133 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ; 2134 SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy); 2135 SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt); 2136 return TLO.CombineTo(Op, NewOp); 2137 } 2138 } 2139 2140 APInt DemandedSrcBits = DemandedBits.byteSwap(); 2141 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2142 Depth + 1)) 2143 return true; 2144 Known.One = Known2.One.byteSwap(); 2145 Known.Zero = Known2.Zero.byteSwap(); 2146 break; 2147 } 2148 case ISD::CTPOP: { 2149 // If only 1 bit is demanded, replace with PARITY as long as we're before 2150 // op legalization. 2151 // FIXME: Limit to scalars for now. 2152 if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector()) 2153 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 2154 Op.getOperand(0))); 2155 2156 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2157 break; 2158 } 2159 case ISD::SIGN_EXTEND_INREG: { 2160 SDValue Op0 = Op.getOperand(0); 2161 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2162 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 2163 2164 // If we only care about the highest bit, don't bother shifting right. 2165 if (DemandedBits.isSignMask()) { 2166 unsigned MinSignedBits = 2167 TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1); 2168 bool AlreadySignExtended = ExVTBits >= MinSignedBits; 2169 // However if the input is already sign extended we expect the sign 2170 // extension to be dropped altogether later and do not simplify. 2171 if (!AlreadySignExtended) { 2172 // Compute the correct shift amount type, which must be getShiftAmountTy 2173 // for scalar types after legalization. 2174 SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl, 2175 getShiftAmountTy(VT, DL)); 2176 return TLO.CombineTo(Op, 2177 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 2178 } 2179 } 2180 2181 // If none of the extended bits are demanded, eliminate the sextinreg. 
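// e.g. (i32 sext_inreg X, i8) only rewrites bits 8-31, so if DemandedBits
// fits entirely within bits 0-7 the node is invisible to its users and X
// can be used directly (types here are illustrative).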
2182 if (DemandedBits.getActiveBits() <= ExVTBits) 2183 return TLO.CombineTo(Op, Op0); 2184 2185 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 2186 2187 // Since the sign extended bits are demanded, we know that the sign 2188 // bit is demanded. 2189 InputDemandedBits.setBit(ExVTBits - 1); 2190 2191 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO, 2192 Depth + 1)) 2193 return true; 2194 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2195 2196 // If the sign bit of the input is known set or clear, then we know the 2197 // top bits of the result. 2198 2199 // If the input sign bit is known zero, convert this into a zero extension. 2200 if (Known.Zero[ExVTBits - 1]) 2201 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 2202 2203 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 2204 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 2205 Known.One.setBitsFrom(ExVTBits); 2206 Known.Zero &= Mask; 2207 } else { // Input sign bit unknown 2208 Known.Zero &= Mask; 2209 Known.One &= Mask; 2210 } 2211 break; 2212 } 2213 case ISD::BUILD_PAIR: { 2214 EVT HalfVT = Op.getOperand(0).getValueType(); 2215 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 2216 2217 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 2218 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 2219 2220 KnownBits KnownLo, KnownHi; 2221 2222 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 2223 return true; 2224 2225 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 2226 return true; 2227 2228 Known.Zero = KnownLo.Zero.zext(BitWidth) | 2229 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 2230 2231 Known.One = KnownLo.One.zext(BitWidth) | 2232 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 2233 break; 2234 } 2235 case ISD::ZERO_EXTEND: 2236 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2237 SDValue Src = Op.getOperand(0); 2238 EVT SrcVT = Src.getValueType(); 2239 unsigned InBits = SrcVT.getScalarSizeInBits(); 2240 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2241 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 2242 2243 // If none of the top bits are demanded, convert this into an any_extend. 2244 if (DemandedBits.getActiveBits() <= InBits) { 2245 // If we only need the non-extended bits of the bottom element 2246 // then we can just bitcast to the result. 2247 if (IsLE && IsVecInReg && DemandedElts == 1 && 2248 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2249 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2250 2251 unsigned Opc = 2252 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2253 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2254 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2255 } 2256 2257 APInt InDemandedBits = DemandedBits.trunc(InBits); 2258 APInt InDemandedElts = DemandedElts.zext(InElts); 2259 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2260 Depth + 1)) 2261 return true; 2262 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2263 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2264 Known = Known.zext(BitWidth); 2265 2266 // Attempt to avoid multi-use ops if we don't need anything from them. 
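// e.g. (illustrative shape) if Src is a multi-use (and X, 0xFF) and only
// bits 0-7 are demanded, the mask does not change those bits, so the call
// below may return X for this extend alone while other uses keep the 'and'.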
2267 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2268 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2269 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2270 break; 2271 } 2272 case ISD::SIGN_EXTEND: 2273 case ISD::SIGN_EXTEND_VECTOR_INREG: { 2274 SDValue Src = Op.getOperand(0); 2275 EVT SrcVT = Src.getValueType(); 2276 unsigned InBits = SrcVT.getScalarSizeInBits(); 2277 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2278 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 2279 2280 // If none of the top bits are demanded, convert this into an any_extend. 2281 if (DemandedBits.getActiveBits() <= InBits) { 2282 // If we only need the non-extended bits of the bottom element 2283 // then we can just bitcast to the result. 2284 if (IsLE && IsVecInReg && DemandedElts == 1 && 2285 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2286 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2287 2288 unsigned Opc = 2289 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2290 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2291 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2292 } 2293 2294 APInt InDemandedBits = DemandedBits.trunc(InBits); 2295 APInt InDemandedElts = DemandedElts.zext(InElts); 2296 2297 // Since some of the sign extended bits are demanded, we know that the sign 2298 // bit is demanded. 2299 InDemandedBits.setBit(InBits - 1); 2300 2301 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2302 Depth + 1)) 2303 return true; 2304 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2305 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2306 2307 // If the sign bit is known one, the top bits match. 2308 Known = Known.sext(BitWidth); 2309 2310 // If the sign bit is known zero, convert this to a zero extend. 2311 if (Known.isNonNegative()) { 2312 unsigned Opc = 2313 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 2314 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2315 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2316 } 2317 2318 // Attempt to avoid multi-use ops if we don't need anything from them. 2319 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2320 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2321 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2322 break; 2323 } 2324 case ISD::ANY_EXTEND: 2325 case ISD::ANY_EXTEND_VECTOR_INREG: { 2326 SDValue Src = Op.getOperand(0); 2327 EVT SrcVT = Src.getValueType(); 2328 unsigned InBits = SrcVT.getScalarSizeInBits(); 2329 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2330 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 2331 2332 // If we only need the bottom element then we can just bitcast. 2333 // TODO: Handle ANY_EXTEND? 
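// e.g. (v2i64 any_extend_vector_inreg (v4i32 X)) with only element 0
// demanded: on little-endian, element 0 of (v2i64 bitcast X) holds X[0] in
// its low half, matching the any-extend, and the extended high half is
// unspecified anyway. Types here are illustrative.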
2334 if (IsLE && IsVecInReg && DemandedElts == 1 && 2335 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2336 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2337 2338 APInt InDemandedBits = DemandedBits.trunc(InBits); 2339 APInt InDemandedElts = DemandedElts.zext(InElts); 2340 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2341 Depth + 1)) 2342 return true; 2343 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2344 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2345 Known = Known.anyext(BitWidth); 2346 2347 // Attempt to avoid multi-use ops if we don't need anything from them. 2348 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2349 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2350 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2351 break; 2352 } 2353 case ISD::TRUNCATE: { 2354 SDValue Src = Op.getOperand(0); 2355 2356 // Simplify the input, using demanded bit information, and compute the known 2357 // zero/one bits live out. 2358 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 2359 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 2360 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 2361 Depth + 1)) 2362 return true; 2363 Known = Known.trunc(BitWidth); 2364 2365 // Attempt to avoid multi-use ops if we don't need anything from them. 2366 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2367 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 2368 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 2369 2370 // If the input is only used by this truncate, see if we can shrink it based 2371 // on the known demanded bits. 2372 if (Src.getNode()->hasOneUse()) { 2373 switch (Src.getOpcode()) { 2374 default: 2375 break; 2376 case ISD::SRL: 2377 // Shrink SRL by a constant if none of the high bits shifted in are 2378 // demanded. 2379 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 2380 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 2381 // undesirable. 2382 break; 2383 2384 const APInt *ShAmtC = 2385 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts); 2386 if (!ShAmtC || ShAmtC->uge(BitWidth)) 2387 break; 2388 uint64_t ShVal = ShAmtC->getZExtValue(); 2389 2390 APInt HighBits = 2391 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 2392 HighBits.lshrInPlace(ShVal); 2393 HighBits = HighBits.trunc(BitWidth); 2394 2395 if (!(HighBits & DemandedBits)) { 2396 // None of the shifted in bits are needed. Add a truncate of the 2397 // shift input, then shift it. 2398 SDValue NewShAmt = TLO.DAG.getConstant( 2399 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes())); 2400 SDValue NewTrunc = 2401 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 2402 return TLO.CombineTo( 2403 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt)); 2404 } 2405 break; 2406 } 2407 } 2408 2409 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2410 break; 2411 } 2412 case ISD::AssertZext: { 2413 // AssertZext demands all of the high bits, plus any of the low bits 2414 // demanded by its users. 
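// e.g. (illustrative) for (i32 AssertZext X, i8), bits 8-31 are asserted
// zero, so the call below demands those 24 asserted high bits from X
// together with whichever of bits 0-7 the users of this node actually need.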
2415 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2416 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2417 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2418 TLO, Depth + 1)) 2419 return true; 2420 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2421 2422 Known.Zero |= ~InMask; 2423 break; 2424 } 2425 case ISD::EXTRACT_VECTOR_ELT: { 2426 SDValue Src = Op.getOperand(0); 2427 SDValue Idx = Op.getOperand(1); 2428 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount(); 2429 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2430 2431 if (SrcEltCnt.isScalable()) 2432 return false; 2433 2434 // Demand the bits from every vector element without a constant index. 2435 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2436 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); 2437 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2438 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2439 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2440 2441 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2442 // anything about the extended bits. 2443 APInt DemandedSrcBits = DemandedBits; 2444 if (BitWidth > EltBitWidth) 2445 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2446 2447 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2448 Depth + 1)) 2449 return true; 2450 2451 // Attempt to avoid multi-use ops if we don't need anything from them. 2452 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2453 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2454 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2455 SDValue NewOp = 2456 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2457 return TLO.CombineTo(Op, NewOp); 2458 } 2459 } 2460 2461 Known = Known2; 2462 if (BitWidth > EltBitWidth) 2463 Known = Known.anyext(BitWidth); 2464 break; 2465 } 2466 case ISD::BITCAST: { 2467 SDValue Src = Op.getOperand(0); 2468 EVT SrcVT = Src.getValueType(); 2469 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2470 2471 // If this is an FP->Int bitcast and if the sign bit is the only 2472 // thing demanded, turn this into a FGETSIGN. 2473 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2474 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2475 SrcVT.isFloatingPoint()) { 2476 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2477 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2478 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2479 SrcVT != MVT::f128) { 2480 // Cannot eliminate/lower SHL for f128 yet. 2481 EVT Ty = OpVTLegal ? VT : MVT::i32; 2482 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2483 // place. We expect the SHL to be eliminated by other optimizations. 2484 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2485 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2486 if (!OpVTLegal && OpVTSizeInBits > 32) 2487 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2488 unsigned ShVal = Op.getValueSizeInBits() - 1; 2489 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2490 return TLO.CombineTo(Op, 2491 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2492 } 2493 } 2494 2495 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2496 // Demand the elt/bit if any of the original elts/bits are demanded. 
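// Illustrative mapping with hypothetical types: for little-endian
// (v4i32 bitcast (v8i16 X)), Scale = 2, and demanding bits 0-15 of i32
// element j demands i16 element 2*j, while bits 16-31 demand element 2*j+1.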
2497 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0) { 2498 unsigned Scale = BitWidth / NumSrcEltBits; 2499 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2500 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2501 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2502 for (unsigned i = 0; i != Scale; ++i) { 2503 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 2504 unsigned BitOffset = EltOffset * NumSrcEltBits; 2505 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 2506 if (!Sub.isZero()) { 2507 DemandedSrcBits |= Sub; 2508 for (unsigned j = 0; j != NumElts; ++j) 2509 if (DemandedElts[j]) 2510 DemandedSrcElts.setBit((j * Scale) + i); 2511 } 2512 } 2513 2514 APInt KnownSrcUndef, KnownSrcZero; 2515 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2516 KnownSrcZero, TLO, Depth + 1)) 2517 return true; 2518 2519 KnownBits KnownSrcBits; 2520 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2521 KnownSrcBits, TLO, Depth + 1)) 2522 return true; 2523 } else if (IsLE && (NumSrcEltBits % BitWidth) == 0) { 2524 // TODO - bigendian once we have test coverage. 2525 unsigned Scale = NumSrcEltBits / BitWidth; 2526 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2527 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2528 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2529 for (unsigned i = 0; i != NumElts; ++i) 2530 if (DemandedElts[i]) { 2531 unsigned Offset = (i % Scale) * BitWidth; 2532 DemandedSrcBits.insertBits(DemandedBits, Offset); 2533 DemandedSrcElts.setBit(i / Scale); 2534 } 2535 2536 if (SrcVT.isVector()) { 2537 APInt KnownSrcUndef, KnownSrcZero; 2538 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2539 KnownSrcZero, TLO, Depth + 1)) 2540 return true; 2541 } 2542 2543 KnownBits KnownSrcBits; 2544 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2545 KnownSrcBits, TLO, Depth + 1)) 2546 return true; 2547 } 2548 2549 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2550 // recursive call where Known may be useful to the caller. 2551 if (Depth > 0) { 2552 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2553 return false; 2554 } 2555 break; 2556 } 2557 case ISD::MUL: 2558 if (DemandedBits.isPowerOf2()) { 2559 // The LSB of X*Y is set only if (X & 1) == 1 and (Y & 1) == 1. 2560 // If we demand exactly one bit N and we have "X * (C' << N)" where C' is 2561 // odd (has LSB set), then the left-shifted low bit of X is the answer. 2562 unsigned CTZ = DemandedBits.countTrailingZeros(); 2563 ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1), DemandedElts); 2564 if (C && C->getAPIntValue().countTrailingZeros() == CTZ) { 2565 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2566 SDValue AmtC = TLO.DAG.getConstant(CTZ, dl, ShiftAmtTy); 2567 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, Op.getOperand(0), AmtC); 2568 return TLO.CombineTo(Op, Shl); 2569 } 2570 } 2571 // For a squared value "X * X", the bottom 2 bits are 0 and X[0] because: 2572 // X * X is odd iff X is odd. 
2573 // 'Quadratic Reciprocity': X * X -> 0 for bit[1] 2574 if (Op.getOperand(0) == Op.getOperand(1) && DemandedBits.ult(4)) { 2575 SDValue One = TLO.DAG.getConstant(1, dl, VT); 2576 SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One); 2577 return TLO.CombineTo(Op, And1); 2578 } 2579 LLVM_FALLTHROUGH; 2580 case ISD::ADD: 2581 case ISD::SUB: { 2582 // Add, Sub, and Mul don't demand any bits in positions beyond that 2583 // of the highest bit demanded of them. 2584 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2585 SDNodeFlags Flags = Op.getNode()->getFlags(); 2586 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2587 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2588 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2589 Depth + 1) || 2590 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2591 Depth + 1) || 2592 // See if the operation should be performed at a smaller bit width. 2593 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2594 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2595 // Disable the nsw and nuw flags. We can no longer guarantee that we 2596 // won't wrap after simplification. 2597 Flags.setNoSignedWrap(false); 2598 Flags.setNoUnsignedWrap(false); 2599 Op->setFlags(Flags); 2600 } 2601 return true; 2602 } 2603 2604 // Attempt to avoid multi-use ops if we don't need anything from them. 2605 if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) { 2606 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2607 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2608 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2609 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2610 if (DemandedOp0 || DemandedOp1) { 2611 Flags.setNoSignedWrap(false); 2612 Flags.setNoUnsignedWrap(false); 2613 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2614 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2615 SDValue NewOp = 2616 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2617 return TLO.CombineTo(Op, NewOp); 2618 } 2619 } 2620 2621 // If we have a constant operand, we may be able to turn it into -1 if we 2622 // do not demand the high bits. This can make the constant smaller to 2623 // encode, allow more general folding, or match specialized instruction 2624 // patterns (e.g., 'blsr' on x86). Don't bother changing 1 to -1 because that 2625 // is probably not useful (and could be detrimental). 2626 ConstantSDNode *C = isConstOrConstSplat(Op1); 2627 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2628 if (C && !C->isAllOnes() && !C->isOne() && 2629 (C->getAPIntValue() | HighMask).isAllOnes()) { 2630 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2631 // Disable the nsw and nuw flags. We can no longer guarantee that we 2632 // won't wrap after simplification. 2633 Flags.setNoSignedWrap(false); 2634 Flags.setNoUnsignedWrap(false); 2635 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2636 return TLO.CombineTo(Op, NewOp); 2637 } 2638 2639 // Match a multiply with a disguised negated-power-of-2 and convert to 2640 // an equivalent shift-left amount. 2641 // Example: (X * MulC) + Op1 --> Op1 - (X << log2(-MulC)) 2642 auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned { 2643 if (Mul.getOpcode() != ISD::MUL || !Mul.hasOneUse()) 2644 return 0; 2645 2646 // Don't touch opaque constants. Also, ignore zero and power-of-2 2647 // multiplies. Those will get folded later.
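// e.g. (hypothetical constant) MulC = -32 is not a power of two, but
// UnmaskedC = -32 is a negated power of two, so a shift amount of 5 is
// returned and the ADD/SUB callers below turn (X * -32) + Y into
// Y - (X << 5).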
2648 ConstantSDNode *MulC = isConstOrConstSplat(Mul.getOperand(1)); 2649 if (MulC && !MulC->isOpaque() && !MulC->isZero() && 2650 !MulC->getAPIntValue().isPowerOf2()) { 2651 APInt UnmaskedC = MulC->getAPIntValue() | HighMask; 2652 if (UnmaskedC.isNegatedPowerOf2()) 2653 return (-UnmaskedC).logBase2(); 2654 } 2655 return 0; 2656 }; 2657 2658 auto foldMul = [&](ISD::NodeType NT, SDValue X, SDValue Y, unsigned ShlAmt) { 2659 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2660 SDValue ShlAmtC = TLO.DAG.getConstant(ShlAmt, dl, ShiftAmtTy); 2661 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, X, ShlAmtC); 2662 SDValue Res = TLO.DAG.getNode(NT, dl, VT, Y, Shl); 2663 return TLO.CombineTo(Op, Res); 2664 }; 2665 2666 if (isOperationLegalOrCustom(ISD::SHL, VT)) { 2667 if (Op.getOpcode() == ISD::ADD) { 2668 // (X * MulC) + Op1 --> Op1 - (X << log2(-MulC)) 2669 if (unsigned ShAmt = getShiftLeftAmt(Op0)) 2670 return foldMul(ISD::SUB, Op0.getOperand(0), Op1, ShAmt); 2671 // Op0 + (X * MulC) --> Op0 - (X << log2(-MulC)) 2672 if (unsigned ShAmt = getShiftLeftAmt(Op1)) 2673 return foldMul(ISD::SUB, Op1.getOperand(0), Op0, ShAmt); 2674 } 2675 if (Op.getOpcode() == ISD::SUB) { 2676 // Op0 - (X * MulC) --> Op0 + (X << log2(-MulC)) 2677 if (unsigned ShAmt = getShiftLeftAmt(Op1)) 2678 return foldMul(ISD::ADD, Op1.getOperand(0), Op0, ShAmt); 2679 } 2680 } 2681 2682 LLVM_FALLTHROUGH; 2683 } 2684 default: 2685 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2686 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2687 Known, TLO, Depth)) 2688 return true; 2689 break; 2690 } 2691 2692 // Just use computeKnownBits to compute output bits. 2693 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2694 break; 2695 } 2696 2697 // If we know the value of all of the demanded bits, return this as a 2698 // constant. 2699 if (!isTargetCanonicalConstantNode(Op) && 2700 DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2701 // Avoid folding to a constant if any OpaqueConstant is involved. 2702 const SDNode *N = Op.getNode(); 2703 for (SDNode *Op : 2704 llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) { 2705 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2706 if (C->isOpaque()) 2707 return false; 2708 } 2709 if (VT.isInteger()) 2710 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2711 if (VT.isFloatingPoint()) 2712 return TLO.CombineTo( 2713 Op, 2714 TLO.DAG.getConstantFP( 2715 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2716 } 2717 2718 return false; 2719 } 2720 2721 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2722 const APInt &DemandedElts, 2723 DAGCombinerInfo &DCI) const { 2724 SelectionDAG &DAG = DCI.DAG; 2725 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2726 !DCI.isBeforeLegalizeOps()); 2727 2728 APInt KnownUndef, KnownZero; 2729 bool Simplified = 2730 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2731 if (Simplified) { 2732 DCI.AddToWorklist(Op.getNode()); 2733 DCI.CommitTargetLoweringOpt(TLO); 2734 } 2735 2736 return Simplified; 2737 } 2738 2739 /// Given a vector binary operation and known undefined elements for each input 2740 /// operand, compute whether each element of the output is undefined. 
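/// e.g. (illustrative lane values) for (add X, Y) where lane 2 of X is
/// undef and lane 2 of Y is a non-opaque constant, the scalar fold below
/// yields undef for that lane, so bit 2 is set in the returned mask.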
2741 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2742 const APInt &UndefOp0, 2743 const APInt &UndefOp1) { 2744 EVT VT = BO.getValueType(); 2745 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2746 "Vector binop only"); 2747 2748 EVT EltVT = VT.getVectorElementType(); 2749 unsigned NumElts = VT.getVectorNumElements(); 2750 assert(UndefOp0.getBitWidth() == NumElts && 2751 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2752 2753 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2754 const APInt &UndefVals) { 2755 if (UndefVals[Index]) 2756 return DAG.getUNDEF(EltVT); 2757 2758 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2759 // Try hard to make sure that the getNode() call is not creating temporary 2760 // nodes. Ignore opaque integers because they do not constant fold. 2761 SDValue Elt = BV->getOperand(Index); 2762 auto *C = dyn_cast<ConstantSDNode>(Elt); 2763 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2764 return Elt; 2765 } 2766 2767 return SDValue(); 2768 }; 2769 2770 APInt KnownUndef = APInt::getZero(NumElts); 2771 for (unsigned i = 0; i != NumElts; ++i) { 2772 // If both inputs for this element are either constant or undef and match 2773 // the element type, compute the constant/undef result for this element of 2774 // the vector. 2775 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2776 // not handle FP constants. The code within getNode() should be refactored 2777 // to avoid the danger of creating a bogus temporary node here. 2778 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2779 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2780 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2781 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2782 KnownUndef.setBit(i); 2783 } 2784 return KnownUndef; 2785 } 2786 2787 bool TargetLowering::SimplifyDemandedVectorElts( 2788 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2789 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2790 bool AssumeSingleUse) const { 2791 EVT VT = Op.getValueType(); 2792 unsigned Opcode = Op.getOpcode(); 2793 APInt DemandedElts = OriginalDemandedElts; 2794 unsigned NumElts = DemandedElts.getBitWidth(); 2795 assert(VT.isVector() && "Expected vector op"); 2796 2797 KnownUndef = KnownZero = APInt::getZero(NumElts); 2798 2799 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo(); 2800 if (!TLI.shouldSimplifyDemandedVectorElts(Op, TLO)) 2801 return false; 2802 2803 // TODO: For now we assume we know nothing about scalable vectors. 2804 if (VT.isScalableVector()) 2805 return false; 2806 2807 assert(VT.getVectorNumElements() == NumElts && 2808 "Mask size mismatches value type element count!"); 2809 2810 // Undef operand. 2811 if (Op.isUndef()) { 2812 KnownUndef.setAllBits(); 2813 return false; 2814 } 2815 2816 // If Op has other users, assume that all elements are needed. 2817 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2818 DemandedElts.setAllBits(); 2819 2820 // Not demanding any elements from Op. 2821 if (DemandedElts == 0) { 2822 KnownUndef.setAllBits(); 2823 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2824 } 2825 2826 // Limit search depth. 
2827 if (Depth >= SelectionDAG::MaxRecursionDepth) 2828 return false; 2829 2830 SDLoc DL(Op); 2831 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2832 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 2833 2834 // Helper for demanding the specified elements and all the bits of both binary 2835 // operands. 2836 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2837 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2838 TLO.DAG, Depth + 1); 2839 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2840 TLO.DAG, Depth + 1); 2841 if (NewOp0 || NewOp1) { 2842 SDValue NewOp = TLO.DAG.getNode( 2843 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2844 return TLO.CombineTo(Op, NewOp); 2845 } 2846 return false; 2847 }; 2848 2849 switch (Opcode) { 2850 case ISD::SCALAR_TO_VECTOR: { 2851 if (!DemandedElts[0]) { 2852 KnownUndef.setAllBits(); 2853 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2854 } 2855 SDValue ScalarSrc = Op.getOperand(0); 2856 if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 2857 SDValue Src = ScalarSrc.getOperand(0); 2858 SDValue Idx = ScalarSrc.getOperand(1); 2859 EVT SrcVT = Src.getValueType(); 2860 2861 ElementCount SrcEltCnt = SrcVT.getVectorElementCount(); 2862 2863 if (SrcEltCnt.isScalable()) 2864 return false; 2865 2866 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2867 if (isNullConstant(Idx)) { 2868 APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0); 2869 APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts); 2870 APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts); 2871 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2872 TLO, Depth + 1)) 2873 return true; 2874 } 2875 } 2876 KnownUndef.setHighBits(NumElts - 1); 2877 break; 2878 } 2879 case ISD::BITCAST: { 2880 SDValue Src = Op.getOperand(0); 2881 EVT SrcVT = Src.getValueType(); 2882 2883 // We only handle vectors here. 2884 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2885 if (!SrcVT.isVector()) 2886 break; 2887 2888 // Fast handling of 'identity' bitcasts. 2889 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2890 if (NumSrcElts == NumElts) 2891 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2892 KnownZero, TLO, Depth + 1); 2893 2894 APInt SrcDemandedElts, SrcZero, SrcUndef; 2895 2896 // Bitcast from 'large element' src vector to 'small element' vector, we 2897 // must demand a source element if any DemandedElt maps to it. 2898 if ((NumElts % NumSrcElts) == 0) { 2899 unsigned Scale = NumElts / NumSrcElts; 2900 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2901 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2902 TLO, Depth + 1)) 2903 return true; 2904 2905 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2906 // of the large element. 2907 // TODO - bigendian once we have test coverage. 2908 if (IsLE) { 2909 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2910 APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits); 2911 for (unsigned i = 0; i != NumElts; ++i) 2912 if (DemandedElts[i]) { 2913 unsigned Ofs = (i % Scale) * EltSizeInBits; 2914 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2915 } 2916 2917 KnownBits Known; 2918 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2919 TLO, Depth + 1)) 2920 return true; 2921 2922 // The bitcast has split each wide element into a number of 2923 // narrow subelements. 
We have just computed the Known bits 2924 // for wide elements. See if element splitting results in 2925 // some subelements being zero. Only for demanded elements! 2926 for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) { 2927 if (!Known.Zero.extractBits(EltSizeInBits, SubElt * EltSizeInBits) 2928 .isAllOnes()) 2929 continue; 2930 for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) { 2931 unsigned Elt = Scale * SrcElt + SubElt; 2932 if (DemandedElts[Elt]) 2933 KnownZero.setBit(Elt); 2934 } 2935 } 2936 } 2937 2938 // If the src element is zero/undef then all the output elements will be - 2939 // only demanded elements are guaranteed to be correct. 2940 for (unsigned i = 0; i != NumSrcElts; ++i) { 2941 if (SrcDemandedElts[i]) { 2942 if (SrcZero[i]) 2943 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2944 if (SrcUndef[i]) 2945 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2946 } 2947 } 2948 } 2949 2950 // Bitcast from 'small element' src vector to 'large element' vector, we 2951 // demand all smaller source elements covered by the larger demanded element 2952 // of this vector. 2953 if ((NumSrcElts % NumElts) == 0) { 2954 unsigned Scale = NumSrcElts / NumElts; 2955 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2956 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2957 TLO, Depth + 1)) 2958 return true; 2959 2960 // If all the src elements covering an output element are zero/undef, then 2961 // the output element will be as well, assuming it was demanded. 2962 for (unsigned i = 0; i != NumElts; ++i) { 2963 if (DemandedElts[i]) { 2964 if (SrcZero.extractBits(Scale, i * Scale).isAllOnes()) 2965 KnownZero.setBit(i); 2966 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes()) 2967 KnownUndef.setBit(i); 2968 } 2969 } 2970 } 2971 break; 2972 } 2973 case ISD::BUILD_VECTOR: { 2974 // Check all elements and simplify any unused elements with UNDEF. 2975 if (!DemandedElts.isAllOnes()) { 2976 // Don't simplify BROADCASTS. 2977 if (llvm::any_of(Op->op_values(), 2978 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 2979 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 2980 bool Updated = false; 2981 for (unsigned i = 0; i != NumElts; ++i) { 2982 if (!DemandedElts[i] && !Ops[i].isUndef()) { 2983 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 2984 KnownUndef.setBit(i); 2985 Updated = true; 2986 } 2987 } 2988 if (Updated) 2989 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 2990 } 2991 } 2992 for (unsigned i = 0; i != NumElts; ++i) { 2993 SDValue SrcOp = Op.getOperand(i); 2994 if (SrcOp.isUndef()) { 2995 KnownUndef.setBit(i); 2996 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 2997 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 2998 KnownZero.setBit(i); 2999 } 3000 } 3001 break; 3002 } 3003 case ISD::CONCAT_VECTORS: { 3004 EVT SubVT = Op.getOperand(0).getValueType(); 3005 unsigned NumSubVecs = Op.getNumOperands(); 3006 unsigned NumSubElts = SubVT.getVectorNumElements(); 3007 for (unsigned i = 0; i != NumSubVecs; ++i) { 3008 SDValue SubOp = Op.getOperand(i); 3009 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 3010 APInt SubUndef, SubZero; 3011 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 3012 Depth + 1)) 3013 return true; 3014 KnownUndef.insertBits(SubUndef, i * NumSubElts); 3015 KnownZero.insertBits(SubZero, i * NumSubElts); 3016 } 3017 3018 // Attempt to avoid multi-use ops if we don't need anything from them. 
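// e.g. for (concat_vectors A, B) where only A's lanes are demanded, a
// multi-use B may be replaced by a simpler value for this concat alone;
// A and B are illustrative stand-ins for the operands.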
3019 if (!DemandedElts.isAllOnes()) { 3020 bool FoundNewSub = false; 3021 SmallVector<SDValue, 2> DemandedSubOps; 3022 for (unsigned i = 0; i != NumSubVecs; ++i) { 3023 SDValue SubOp = Op.getOperand(i); 3024 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 3025 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts( 3026 SubOp, SubElts, TLO.DAG, Depth + 1); 3027 DemandedSubOps.push_back(NewSubOp ? NewSubOp : SubOp); 3028 FoundNewSub = NewSubOp ? true : FoundNewSub; 3029 } 3030 if (FoundNewSub) { 3031 SDValue NewOp = 3032 TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, DemandedSubOps); 3033 return TLO.CombineTo(Op, NewOp); 3034 } 3035 } 3036 break; 3037 } 3038 case ISD::INSERT_SUBVECTOR: { 3039 // Demand any elements from the subvector and the remainder from the src it 3040 // is inserted into. 3041 SDValue Src = Op.getOperand(0); 3042 SDValue Sub = Op.getOperand(1); 3043 uint64_t Idx = Op.getConstantOperandVal(2); 3044 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 3045 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 3046 APInt DemandedSrcElts = DemandedElts; 3047 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); 3048 3049 APInt SubUndef, SubZero; 3050 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO, 3051 Depth + 1)) 3052 return true; 3053 3054 // If none of the src operand elements are demanded, replace it with undef. 3055 if (!DemandedSrcElts && !Src.isUndef()) 3056 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 3057 TLO.DAG.getUNDEF(VT), Sub, 3058 Op.getOperand(2))); 3059 3060 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 3061 TLO, Depth + 1)) 3062 return true; 3063 KnownUndef.insertBits(SubUndef, Idx); 3064 KnownZero.insertBits(SubZero, Idx); 3065 3066 // Attempt to avoid multi-use ops if we don't need anything from them. 3067 if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) { 3068 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 3069 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 3070 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 3071 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 3072 if (NewSrc || NewSub) { 3073 NewSrc = NewSrc ? NewSrc : Src; 3074 NewSub = NewSub ? NewSub : Sub; 3075 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 3076 NewSub, Op.getOperand(2)); 3077 return TLO.CombineTo(Op, NewOp); 3078 } 3079 } 3080 break; 3081 } 3082 case ISD::EXTRACT_SUBVECTOR: { 3083 // Offset the demanded elts by the subvector index. 3084 SDValue Src = Op.getOperand(0); 3085 if (Src.getValueType().isScalableVector()) 3086 break; 3087 uint64_t Idx = Op.getConstantOperandVal(1); 3088 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3089 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); 3090 3091 APInt SrcUndef, SrcZero; 3092 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3093 Depth + 1)) 3094 return true; 3095 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 3096 KnownZero = SrcZero.extractBits(NumElts, Idx); 3097 3098 // Attempt to avoid multi-use ops if we don't need anything from them.
3099 if (!DemandedElts.isAllOnes()) { 3100 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 3101 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 3102 if (NewSrc) { 3103 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 3104 Op.getOperand(1)); 3105 return TLO.CombineTo(Op, NewOp); 3106 } 3107 } 3108 break; 3109 } 3110 case ISD::INSERT_VECTOR_ELT: { 3111 SDValue Vec = Op.getOperand(0); 3112 SDValue Scl = Op.getOperand(1); 3113 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 3114 3115 // For a legal, constant insertion index, if we don't need this insertion 3116 // then strip it, else remove it from the demanded elts. 3117 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 3118 unsigned Idx = CIdx->getZExtValue(); 3119 if (!DemandedElts[Idx]) 3120 return TLO.CombineTo(Op, Vec); 3121 3122 APInt DemandedVecElts(DemandedElts); 3123 DemandedVecElts.clearBit(Idx); 3124 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 3125 KnownZero, TLO, Depth + 1)) 3126 return true; 3127 3128 KnownUndef.setBitVal(Idx, Scl.isUndef()); 3129 3130 KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl)); 3131 break; 3132 } 3133 3134 APInt VecUndef, VecZero; 3135 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 3136 Depth + 1)) 3137 return true; 3138 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 3139 break; 3140 } 3141 case ISD::VSELECT: { 3142 SDValue Sel = Op.getOperand(0); 3143 SDValue LHS = Op.getOperand(1); 3144 SDValue RHS = Op.getOperand(2); 3145 3146 // Try to transform the select condition based on the current demanded 3147 // elements. 3148 APInt UndefSel, UndefZero; 3149 if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, UndefZero, TLO, 3150 Depth + 1)) 3151 return true; 3152 3153 // See if we can simplify either vselect operand. 3154 APInt DemandedLHS(DemandedElts); 3155 APInt DemandedRHS(DemandedElts); 3156 APInt UndefLHS, ZeroLHS; 3157 APInt UndefRHS, ZeroRHS; 3158 if (SimplifyDemandedVectorElts(LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO, 3159 Depth + 1)) 3160 return true; 3161 if (SimplifyDemandedVectorElts(RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO, 3162 Depth + 1)) 3163 return true; 3164 3165 KnownUndef = UndefLHS & UndefRHS; 3166 KnownZero = ZeroLHS & ZeroRHS; 3167 3168 // If we know that the selected element is always zero, we don't need the 3169 // select value element. 3170 APInt DemandedSel = DemandedElts & ~KnownZero; 3171 if (DemandedSel != DemandedElts) 3172 if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, UndefZero, TLO, 3173 Depth + 1)) 3174 return true; 3175 3176 break; 3177 } 3178 case ISD::VECTOR_SHUFFLE: { 3179 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 3180 3181 // Collect demanded elements from shuffle operands. 3182 APInt DemandedLHS(NumElts, 0); 3183 APInt DemandedRHS(NumElts, 0); 3184 for (unsigned i = 0; i != NumElts; ++i) { 3185 int M = ShuffleMask[i]; 3186 if (M < 0 || !DemandedElts[i]) 3187 continue; 3188 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 3189 if (M < (int)NumElts) 3190 DemandedLHS.setBit(M); 3191 else 3192 DemandedRHS.setBit(M - NumElts); 3193 } 3194 3195 // See if we can simplify either shuffle operand.
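// Each operand only needs the lanes that the shuffle mask actually reads from it (the DemandedLHS/DemandedRHS sets collected above).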
3196 APInt UndefLHS, ZeroLHS; 3197 APInt UndefRHS, ZeroRHS; 3198 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS, 3199 ZeroLHS, TLO, Depth + 1)) 3200 return true; 3201 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS, 3202 ZeroRHS, TLO, Depth + 1)) 3203 return true; 3204 3205 // Simplify mask using undef elements from LHS/RHS. 3206 bool Updated = false; 3207 bool IdentityLHS = true, IdentityRHS = true; 3208 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end()); 3209 for (unsigned i = 0; i != NumElts; ++i) { 3210 int &M = NewMask[i]; 3211 if (M < 0) 3212 continue; 3213 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 3214 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 3215 Updated = true; 3216 M = -1; 3217 } 3218 IdentityLHS &= (M < 0) || (M == (int)i); 3219 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 3220 } 3221 3222 // Update legal shuffle masks based on demanded elements if doing so won't 3223 // reduce to an identity mask, which can cause premature removal of the shuffle. 3224 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 3225 SDValue LegalShuffle = 3226 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1), 3227 NewMask, TLO.DAG); 3228 if (LegalShuffle) 3229 return TLO.CombineTo(Op, LegalShuffle); 3230 } 3231 3232 // Propagate undef/zero elements from LHS/RHS. 3233 for (unsigned i = 0; i != NumElts; ++i) { 3234 int M = ShuffleMask[i]; 3235 if (M < 0) { 3236 KnownUndef.setBit(i); 3237 } else if (M < (int)NumElts) { 3238 if (UndefLHS[M]) 3239 KnownUndef.setBit(i); 3240 if (ZeroLHS[M]) 3241 KnownZero.setBit(i); 3242 } else { 3243 if (UndefRHS[M - NumElts]) 3244 KnownUndef.setBit(i); 3245 if (ZeroRHS[M - NumElts]) 3246 KnownZero.setBit(i); 3247 } 3248 } 3249 break; 3250 } 3251 case ISD::ANY_EXTEND_VECTOR_INREG: 3252 case ISD::SIGN_EXTEND_VECTOR_INREG: 3253 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3254 APInt SrcUndef, SrcZero; 3255 SDValue Src = Op.getOperand(0); 3256 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3257 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts); 3258 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3259 Depth + 1)) 3260 return true; 3261 KnownZero = SrcZero.zextOrTrunc(NumElts); 3262 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 3263 3264 if (IsLE && Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 3265 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 3266 DemandedSrcElts == 1) { 3267 // aext - if we just need the bottom element then we can bitcast. 3268 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 3269 } 3270 3271 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 3272 // zext(undef) upper bits are guaranteed to be zero. 3273 if (DemandedElts.isSubsetOf(KnownUndef)) 3274 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3275 KnownUndef.clearAllBits(); 3276 3277 // zext - if we just need the bottom element then we can mask: 3278 // zext(and(x,c)) -> and(x,c') iff the zext is the only user of the and.
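// (c' is c with every lane but the bottom source element zeroed out, so the masked value also supplies the zero upper lanes that the extension requires.)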
3279 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND && 3280 Op->isOnlyUserOf(Src.getNode()) && 3281 Op.getValueSizeInBits() == Src.getValueSizeInBits()) { 3282 SDLoc DL(Op); 3283 EVT SrcVT = Src.getValueType(); 3284 EVT SrcSVT = SrcVT.getScalarType(); 3285 SmallVector<SDValue> MaskElts; 3286 MaskElts.push_back(TLO.DAG.getAllOnesConstant(DL, SrcSVT)); 3287 MaskElts.append(NumSrcElts - 1, TLO.DAG.getConstant(0, DL, SrcSVT)); 3288 SDValue Mask = TLO.DAG.getBuildVector(SrcVT, DL, MaskElts); 3289 if (SDValue Fold = TLO.DAG.FoldConstantArithmetic( 3290 ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) { 3291 Fold = TLO.DAG.getNode(ISD::AND, DL, SrcVT, Src.getOperand(0), Fold); 3292 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Fold)); 3293 } 3294 } 3295 } 3296 break; 3297 } 3298 3299 // TODO: There are more binop opcodes that could be handled here - MIN, 3300 // MAX, saturated math, etc. 3301 case ISD::ADD: { 3302 SDValue Op0 = Op.getOperand(0); 3303 SDValue Op1 = Op.getOperand(1); 3304 if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) { 3305 APInt UndefLHS, ZeroLHS; 3306 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3307 Depth + 1, /*AssumeSingleUse*/ true)) 3308 return true; 3309 } 3310 LLVM_FALLTHROUGH; 3311 } 3312 case ISD::OR: 3313 case ISD::XOR: 3314 case ISD::SUB: 3315 case ISD::FADD: 3316 case ISD::FSUB: 3317 case ISD::FMUL: 3318 case ISD::FDIV: 3319 case ISD::FREM: { 3320 SDValue Op0 = Op.getOperand(0); 3321 SDValue Op1 = Op.getOperand(1); 3322 3323 APInt UndefRHS, ZeroRHS; 3324 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3325 Depth + 1)) 3326 return true; 3327 APInt UndefLHS, ZeroLHS; 3328 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3329 Depth + 1)) 3330 return true; 3331 3332 KnownZero = ZeroLHS & ZeroRHS; 3333 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 3334 3335 // Attempt to avoid multi-use ops if we don't need anything from them. 3336 // TODO - use KnownUndef to relax the demandedelts? 3337 if (!DemandedElts.isAllOnes()) 3338 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3339 return true; 3340 break; 3341 } 3342 case ISD::SHL: 3343 case ISD::SRL: 3344 case ISD::SRA: 3345 case ISD::ROTL: 3346 case ISD::ROTR: { 3347 SDValue Op0 = Op.getOperand(0); 3348 SDValue Op1 = Op.getOperand(1); 3349 3350 APInt UndefRHS, ZeroRHS; 3351 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 3352 Depth + 1)) 3353 return true; 3354 APInt UndefLHS, ZeroLHS; 3355 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 3356 Depth + 1)) 3357 return true; 3358 3359 KnownZero = ZeroLHS; 3360 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 3361 3362 // Attempt to avoid multi-use ops if we don't need anything from them. 3363 // TODO - use KnownUndef to relax the demandedelts? 3364 if (!DemandedElts.isAllOnes()) 3365 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3366 return true; 3367 break; 3368 } 3369 case ISD::MUL: 3370 case ISD::AND: { 3371 SDValue Op0 = Op.getOperand(0); 3372 SDValue Op1 = Op.getOperand(1); 3373 3374 APInt SrcUndef, SrcZero; 3375 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 3376 Depth + 1)) 3377 return true; 3378 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 3379 TLO, Depth + 1)) 3380 return true; 3381 3382 // If every element pair has a zero/undef then just fold to zero. 
3383 // fold (and x, undef) -> 0 / (and x, 0) -> 0 3384 // fold (mul x, undef) -> 0 / (mul x, 0) -> 0 3385 if (DemandedElts.isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef)) 3386 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3387 3388 // If either side has a zero element, then the result element is zero, even 3389 // if the other is an UNDEF. 3390 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 3391 // and then handle 'and' nodes with the rest of the binop opcodes. 3392 KnownZero |= SrcZero; 3393 KnownUndef &= SrcUndef; 3394 KnownUndef &= ~KnownZero; 3395 3396 // Attempt to avoid multi-use ops if we don't need anything from them. 3397 if (!DemandedElts.isAllOnes()) 3398 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3399 return true; 3400 break; 3401 } 3402 case ISD::TRUNCATE: 3403 case ISD::SIGN_EXTEND: 3404 case ISD::ZERO_EXTEND: 3405 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 3406 KnownZero, TLO, Depth + 1)) 3407 return true; 3408 3409 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 3410 // zext(undef) upper bits are guaranteed to be zero. 3411 if (DemandedElts.isSubsetOf(KnownUndef)) 3412 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3413 KnownUndef.clearAllBits(); 3414 } 3415 break; 3416 default: { 3417 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 3418 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 3419 KnownZero, TLO, Depth)) 3420 return true; 3421 } else { 3422 KnownBits Known; 3423 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits); 3424 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 3425 TLO, Depth, AssumeSingleUse)) 3426 return true; 3427 } 3428 break; 3429 } 3430 } 3431 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 3432 3433 // Constant fold all undef cases. 3434 // TODO: Handle zero cases as well. 3435 if (DemandedElts.isSubsetOf(KnownUndef)) 3436 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 3437 3438 return false; 3439 } 3440 3441 /// Determine which of the bits specified in Mask are known to be either zero or 3442 /// one and return them in the Known. 3443 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3444 KnownBits &Known, 3445 const APInt &DemandedElts, 3446 const SelectionDAG &DAG, 3447 unsigned Depth) const { 3448 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3449 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3450 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3451 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3452 "Should use MaskedValueIsZero if you don't know whether Op" 3453 " is a target node!"); 3454 Known.resetAll(); 3455 } 3456 3457 void TargetLowering::computeKnownBitsForTargetInstr( 3458 GISelKnownBits &Analysis, Register R, KnownBits &Known, 3459 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 3460 unsigned Depth) const { 3461 Known.resetAll(); 3462 } 3463 3464 void TargetLowering::computeKnownBitsForFrameIndex( 3465 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 3466 // The low bits are known zero if the pointer is aligned. 
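// e.g. a frame object with 16-byte alignment has Log2(16) = 4 low bits known to be zero.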
3467 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 3468 } 3469 3470 Align TargetLowering::computeKnownAlignForTargetInstr( 3471 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 3472 unsigned Depth) const { 3473 return Align(1); 3474 } 3475 3476 /// This method can be implemented by targets that want to expose additional 3477 /// information about sign bits to the DAG Combiner. 3478 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 3479 const APInt &, 3480 const SelectionDAG &, 3481 unsigned Depth) const { 3482 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3483 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3484 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3485 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3486 "Should use ComputeNumSignBits if you don't know whether Op" 3487 " is a target node!"); 3488 return 1; 3489 } 3490 3491 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 3492 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 3493 const MachineRegisterInfo &MRI, unsigned Depth) const { 3494 return 1; 3495 } 3496 3497 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 3498 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 3499 TargetLoweringOpt &TLO, unsigned Depth) const { 3500 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3501 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3502 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3503 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3504 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 3505 " is a target node!"); 3506 return false; 3507 } 3508 3509 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 3510 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3511 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 3512 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3513 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3514 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3515 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3516 "Should use SimplifyDemandedBits if you don't know whether Op" 3517 " is a target node!"); 3518 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 3519 return false; 3520 } 3521 3522 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 3523 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3524 SelectionDAG &DAG, unsigned Depth) const { 3525 assert( 3526 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3527 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3528 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3529 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3530 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3531 " is a target node!"); 3532 return SDValue(); 3533 } 3534 3535 SDValue 3536 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3537 SDValue N1, MutableArrayRef<int> Mask, 3538 SelectionDAG &DAG) const { 3539 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3540 if (!LegalMask) { 3541 std::swap(N0, N1); 3542 ShuffleVectorSDNode::commuteMask(Mask); 3543 LegalMask = isShuffleMaskLegal(Mask, VT); 3544 } 3545 3546 if (!LegalMask) 3547 return SDValue(); 3548 3549 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3550 } 3551 3552 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3553 return nullptr; 3554 } 3555 3556 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3557 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 
3558 bool PoisonOnly, unsigned Depth) const { 3559 assert( 3560 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3561 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3562 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3563 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3564 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op" 3565 " is a target node!"); 3566 return false; 3567 } 3568 3569 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3570 const SelectionDAG &DAG, 3571 bool SNaN, 3572 unsigned Depth) const { 3573 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3574 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3575 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3576 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3577 "Should use isKnownNeverNaN if you don't know whether Op" 3578 " is a target node!"); 3579 return false; 3580 } 3581 3582 bool TargetLowering::isSplatValueForTargetNode(SDValue Op, 3583 const APInt &DemandedElts, 3584 APInt &UndefElts, 3585 unsigned Depth) const { 3586 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3587 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3588 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3589 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3590 "Should use isSplatValue if you don't know whether Op" 3591 " is a target node!"); 3592 return false; 3593 } 3594 3595 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 3596 // work with truncating build vectors and vectors with elements of less than 3597 // 8 bits. 3598 bool TargetLowering::isConstTrueVal(SDValue N) const { 3599 if (!N) 3600 return false; 3601 3602 unsigned EltWidth; 3603 APInt CVal; 3604 if (ConstantSDNode *CN = isConstOrConstSplat(N, /*AllowUndefs=*/false, 3605 /*AllowTruncation=*/true)) { 3606 CVal = CN->getAPIntValue(); 3607 EltWidth = N.getValueType().getScalarSizeInBits(); 3608 } else 3609 return false; 3610 3611 // If this is a truncating splat, truncate the splat value. 3612 // Otherwise, we may fail to match the expected values below. 3613 if (EltWidth < CVal.getBitWidth()) 3614 CVal = CVal.trunc(EltWidth); 3615 3616 switch (getBooleanContents(N.getValueType())) { 3617 case UndefinedBooleanContent: 3618 return CVal[0]; 3619 case ZeroOrOneBooleanContent: 3620 return CVal.isOne(); 3621 case ZeroOrNegativeOneBooleanContent: 3622 return CVal.isAllOnes(); 3623 } 3624 3625 llvm_unreachable("Invalid boolean contents"); 3626 } 3627 3628 bool TargetLowering::isConstFalseVal(SDValue N) const { 3629 if (!N) 3630 return false; 3631 3632 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N); 3633 if (!CN) { 3634 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N); 3635 if (!BV) 3636 return false; 3637 3638 // Only interested in constant splats; we don't care about undef 3639 // elements when identifying boolean constants, and getConstantSplatNode 3640 // returns null if all ops are undef. 3641 CN = BV->getConstantSplatNode(); 3642 if (!CN) 3643 return false; 3644 } 3645 3646 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent) 3647 return !CN->getAPIntValue()[0]; 3648 3649 return CN->isZero(); 3650 } 3651 3652 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT, 3653 bool SExt) const { 3654 if (VT == MVT::i1) 3655 return N->isOne(); 3656 3657 TargetLowering::BooleanContent Cnt = getBooleanContents(VT); 3658 switch (Cnt) { 3659 case TargetLowering::ZeroOrOneBooleanContent: 3660 // An extended value of 1 is always true, unless its original type is i1, 3661 // in which case it will be sign extended to -1.
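// e.g. sign-extending an i1 true gives -1 rather than 1, while a zero-extension (or a sign-extension from a wider boolean type) gives 1.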
3662 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1)); 3663 case TargetLowering::UndefinedBooleanContent: 3664 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3665 return N->isAllOnes() && SExt; 3666 } 3667 llvm_unreachable("Unexpected enumeration."); 3668 } 3669 3670 /// This helper function of SimplifySetCC tries to optimize the comparison when 3671 /// either operand of the SetCC node is a bitwise-and instruction. 3672 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3673 ISD::CondCode Cond, const SDLoc &DL, 3674 DAGCombinerInfo &DCI) const { 3675 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3676 std::swap(N0, N1); 3677 3678 SelectionDAG &DAG = DCI.DAG; 3679 EVT OpVT = N0.getValueType(); 3680 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3681 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3682 return SDValue(); 3683 3684 // (X & Y) != 0 --> zextOrTrunc(X & Y) 3685 // iff everything but LSB is known zero: 3686 if (Cond == ISD::SETNE && isNullConstant(N1) && 3687 (getBooleanContents(OpVT) == TargetLowering::UndefinedBooleanContent || 3688 getBooleanContents(OpVT) == TargetLowering::ZeroOrOneBooleanContent)) { 3689 unsigned NumEltBits = OpVT.getScalarSizeInBits(); 3690 APInt UpperBits = APInt::getHighBitsSet(NumEltBits, NumEltBits - 1); 3691 if (DAG.MaskedValueIsZero(N0, UpperBits)) 3692 return DAG.getBoolExtOrTrunc(N0, DL, VT, OpVT); 3693 } 3694 3695 // Match these patterns in any of their permutations: 3696 // (X & Y) == Y 3697 // (X & Y) != Y 3698 SDValue X, Y; 3699 if (N0.getOperand(0) == N1) { 3700 X = N0.getOperand(1); 3701 Y = N0.getOperand(0); 3702 } else if (N0.getOperand(1) == N1) { 3703 X = N0.getOperand(0); 3704 Y = N0.getOperand(1); 3705 } else { 3706 return SDValue(); 3707 } 3708 3709 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3710 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3711 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3712 // Note that where Y is variable and is known to have at most one bit set 3713 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3714 // equivalent when Y == 0. 3715 assert(OpVT.isInteger()); 3716 Cond = ISD::getSetCCInverse(Cond, OpVT); 3717 if (DCI.isBeforeLegalizeOps() || 3718 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3719 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3720 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3721 // If the target supports an 'and-not' or 'and-complement' logic operation, 3722 // try to use that to make a comparison operation more efficient. 3723 // But don't do this transform if the mask is a single bit because there are 3724 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3725 // 'rlwinm' on PPC). 3726 3727 // Bail out if the compare operand that we want to turn into a zero is 3728 // already a zero (otherwise, infinite loop). 3729 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3730 if (YConst && YConst->isZero()) 3731 return SDValue(); 3732 3733 // Transform this into: ~X & Y == 0. 3734 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3735 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3736 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3737 } 3738 3739 return SDValue(); 3740 } 3741 3742 /// There are multiple IR patterns that could be checking whether certain 3743 /// truncation of a signed number would be lossy or not. The pattern that is 3744 /// best at the IR level may not lower optimally. Thus, we want to unfold it.
3745 /// We are looking for the following pattern: (KeptBits is a constant) 3746 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3747 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3748 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3749 /// We will unfold it into the natural trunc+sext pattern: 3750 /// ((%x << C) a>> C) dstcond %x 3751 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3752 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3753 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3754 const SDLoc &DL) const { 3755 // We must be comparing with a constant. 3756 ConstantSDNode *C1; 3757 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3758 return SDValue(); 3759 3760 // N0 should be: add %x, (1 << (KeptBits-1)) 3761 if (N0->getOpcode() != ISD::ADD) 3762 return SDValue(); 3763 3764 // And we must be 'add'ing a constant. 3765 ConstantSDNode *C01; 3766 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3767 return SDValue(); 3768 3769 SDValue X = N0->getOperand(0); 3770 EVT XVT = X.getValueType(); 3771 3772 // Validate constants ... 3773 3774 APInt I1 = C1->getAPIntValue(); 3775 3776 ISD::CondCode NewCond; 3777 if (Cond == ISD::CondCode::SETULT) { 3778 NewCond = ISD::CondCode::SETEQ; 3779 } else if (Cond == ISD::CondCode::SETULE) { 3780 NewCond = ISD::CondCode::SETEQ; 3781 // But need to 'canonicalize' the constant. 3782 I1 += 1; 3783 } else if (Cond == ISD::CondCode::SETUGT) { 3784 NewCond = ISD::CondCode::SETNE; 3785 // But need to 'canonicalize' the constant. 3786 I1 += 1; 3787 } else if (Cond == ISD::CondCode::SETUGE) { 3788 NewCond = ISD::CondCode::SETNE; 3789 } else 3790 return SDValue(); 3791 3792 APInt I01 = C01->getAPIntValue(); 3793 3794 auto checkConstants = [&I1, &I01]() -> bool { 3795 // Both of them must be power-of-two, and the constant from setcc is bigger. 3796 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3797 }; 3798 3799 if (checkConstants()) { 3800 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3801 } else { 3802 // What if we invert constants? (and the target predicate) 3803 I1.negate(); 3804 I01.negate(); 3805 assert(XVT.isInteger()); 3806 NewCond = getSetCCInverse(NewCond, XVT); 3807 if (!checkConstants()) 3808 return SDValue(); 3809 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3810 } 3811 3812 // They are power-of-two, so which bit is set? 3813 const unsigned KeptBits = I1.logBase2(); 3814 const unsigned KeptBitsMinusOne = I01.logBase2(); 3815 3816 // Magic! 3817 if (KeptBits != (KeptBitsMinusOne + 1)) 3818 return SDValue(); 3819 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3820 3821 // We don't want to do this in every single case. 3822 SelectionDAG &DAG = DCI.DAG; 3823 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3824 XVT, KeptBits)) 3825 return SDValue(); 3826 3827 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3828 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3829 3830 // Unfold into: ((%x << C) a>> C) cond %x 3831 // Where 'cond' will be either 'eq' or 'ne'. 
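// e.g. for the i16 example above (KeptBits == 8): C = 16 - 8 = 8, giving ((%x << 8) a>> 8) ==/!= %x.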
3832 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3833 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3834 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3835 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3836 3837 return T2; 3838 } 3839 3840 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3841 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3842 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3843 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3844 assert(isConstOrConstSplat(N1C) && 3845 isConstOrConstSplat(N1C)->getAPIntValue().isZero() && 3846 "Should be a comparison with 0."); 3847 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3848 "Valid only for [in]equality comparisons."); 3849 3850 unsigned NewShiftOpcode; 3851 SDValue X, C, Y; 3852 3853 SelectionDAG &DAG = DCI.DAG; 3854 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3855 3856 // Look for '(C l>>/<< Y)'. 3857 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3858 // The shift should be one-use. 3859 if (!V.hasOneUse()) 3860 return false; 3861 unsigned OldShiftOpcode = V.getOpcode(); 3862 switch (OldShiftOpcode) { 3863 case ISD::SHL: 3864 NewShiftOpcode = ISD::SRL; 3865 break; 3866 case ISD::SRL: 3867 NewShiftOpcode = ISD::SHL; 3868 break; 3869 default: 3870 return false; // must be a logical shift. 3871 } 3872 // We should be shifting a constant. 3873 // FIXME: best to use isConstantOrConstantVector(). 3874 C = V.getOperand(0); 3875 ConstantSDNode *CC = 3876 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3877 if (!CC) 3878 return false; 3879 Y = V.getOperand(1); 3880 3881 ConstantSDNode *XC = 3882 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3883 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 3884 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG); 3885 }; 3886 3887 // LHS of comparison should be a one-use 'and'. 3888 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 3889 return SDValue(); 3890 3891 X = N0.getOperand(0); 3892 SDValue Mask = N0.getOperand(1); 3893 3894 // 'and' is commutative! 3895 if (!Match(Mask)) { 3896 std::swap(X, Mask); 3897 if (!Match(Mask)) 3898 return SDValue(); 3899 } 3900 3901 EVT VT = X.getValueType(); 3902 3903 // Produce: 3904 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0 3905 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y); 3906 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C); 3907 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond); 3908 return T2; 3909 } 3910 3911 /// Try to fold an equality comparison with an {add/sub/xor} binary operation as 3912 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to 3913 /// handle the commuted versions of these patterns.
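/// e.g. with N0 = (add X, Y) and N1 = X, the compare folds to Y ==/!= 0.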
3914 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3915 ISD::CondCode Cond, const SDLoc &DL, 3916 DAGCombinerInfo &DCI) const { 3917 unsigned BOpcode = N0.getOpcode(); 3918 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3919 "Unexpected binop"); 3920 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3921 3922 // (X + Y) == X --> Y == 0 3923 // (X - Y) == X --> Y == 0 3924 // (X ^ Y) == X --> Y == 0 3925 SelectionDAG &DAG = DCI.DAG; 3926 EVT OpVT = N0.getValueType(); 3927 SDValue X = N0.getOperand(0); 3928 SDValue Y = N0.getOperand(1); 3929 if (X == N1) 3930 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3931 3932 if (Y != N1) 3933 return SDValue(); 3934 3935 // (X + Y) == Y --> X == 0 3936 // (X ^ Y) == Y --> X == 0 3937 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3938 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3939 3940 // The shift would not be valid if the operands are boolean (i1). 3941 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3942 return SDValue(); 3943 3944 // (X - Y) == Y --> X == Y << 1 3945 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3946 !DCI.isBeforeLegalize()); 3947 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3948 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3949 if (!DCI.isCalledByLegalizer()) 3950 DCI.AddToWorklist(YShl1.getNode()); 3951 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3952 } 3953 3954 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT, 3955 SDValue N0, const APInt &C1, 3956 ISD::CondCode Cond, const SDLoc &dl, 3957 SelectionDAG &DAG) { 3958 // Look through truncs that don't change the value of a ctpop. 3959 // FIXME: Add vector support? Need to be careful with setcc result type below. 3960 SDValue CTPOP = N0; 3961 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() && 3962 N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits())) 3963 CTPOP = N0.getOperand(0); 3964 3965 if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse()) 3966 return SDValue(); 3967 3968 EVT CTVT = CTPOP.getValueType(); 3969 SDValue CTOp = CTPOP.getOperand(0); 3970 3971 // If this is a vector CTPOP, keep the CTPOP if it is legal. 3972 // TODO: Should we check if CTPOP is legal(or custom) for scalars? 3973 if (VT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT)) 3974 return SDValue(); 3975 3976 // (ctpop x) u< 2 -> (x & x-1) == 0 3977 // (ctpop x) u> 1 -> (x & x-1) != 0 3978 if (Cond == ISD::SETULT || Cond == ISD::SETUGT) { 3979 unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond); 3980 if (C1.ugt(CostLimit + (Cond == ISD::SETULT))) 3981 return SDValue(); 3982 if (C1 == 0 && (Cond == ISD::SETULT)) 3983 return SDValue(); // This is handled elsewhere. 3984 3985 unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT); 3986 3987 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3988 SDValue Result = CTOp; 3989 for (unsigned i = 0; i < Passes; i++) { 3990 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne); 3991 Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add); 3992 } 3993 ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE; 3994 return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC); 3995 } 3996 3997 // If ctpop is not supported, expand a power-of-2 comparison based on it. 
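// (x & (x - 1)) clears the lowest set bit of x, so it is zero iff x has at most one bit set; the extra x ==/!= 0 check below distinguishes the x == 0 case.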
3998 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) { 3999 // For scalars, keep CTPOP if it is legal or custom. 4000 if (!VT.isVector() && TLI.isOperationLegalOrCustom(ISD::CTPOP, CTVT)) 4001 return SDValue(); 4002 // This is based on X86's custom lowering for CTPOP which produces more 4003 // instructions than the expansion here. 4004 4005 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 4006 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 4007 SDValue Zero = DAG.getConstant(0, dl, CTVT); 4008 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 4009 assert(CTVT.isInteger()); 4010 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 4011 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 4012 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 4013 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 4014 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 4015 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 4016 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 4017 } 4018 4019 return SDValue(); 4020 } 4021 4022 static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, 4023 ISD::CondCode Cond, const SDLoc &dl, 4024 SelectionDAG &DAG) { 4025 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 4026 return SDValue(); 4027 4028 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 4029 if (!C1 || !(C1->isZero() || C1->isAllOnes())) 4030 return SDValue(); 4031 4032 auto getRotateSource = [](SDValue X) { 4033 if (X.getOpcode() == ISD::ROTL || X.getOpcode() == ISD::ROTR) 4034 return X.getOperand(0); 4035 return SDValue(); 4036 }; 4037 4038 // Peek through a rotated value compared against 0 or -1: 4039 // (rot X, Y) == 0/-1 --> X == 0/-1 4040 // (rot X, Y) != 0/-1 --> X != 0/-1 4041 if (SDValue R = getRotateSource(N0)) 4042 return DAG.getSetCC(dl, VT, R, N1, Cond); 4043 4044 // Peek through an 'or' of a rotated value compared against 0: 4045 // or (rot X, Y), Z ==/!= 0 --> (or X, Z) ==/!= 0 4046 // or Z, (rot X, Y) ==/!= 0 --> (or X, Z) ==/!= 0 4047 // 4048 // TODO: Add the 'and' with -1 sibling. 4049 // TODO: Recurse through a series of 'or' ops to find the rotate. 4050 EVT OpVT = N0.getValueType(); 4051 if (N0.hasOneUse() && N0.getOpcode() == ISD::OR && C1->isZero()) { 4052 if (SDValue R = getRotateSource(N0.getOperand(0))) { 4053 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(1)); 4054 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4055 } 4056 if (SDValue R = getRotateSource(N0.getOperand(1))) { 4057 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(0)); 4058 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4059 } 4060 } 4061 4062 return SDValue(); 4063 } 4064 4065 static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, 4066 ISD::CondCode Cond, const SDLoc &dl, 4067 SelectionDAG &DAG) { 4068 // If we are testing for all-bits-clear, we might be able to do that with 4069 // less shifting since bit-order does not matter. 
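// A funnel shift only repositions bits from its operands, so an all-bits-clear test can sometimes be rewritten with a single shift and 'or' instead (see the folds below).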
4070 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 4071 return SDValue(); 4072 4073 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 4074 if (!C1 || !C1->isZero()) 4075 return SDValue(); 4076 4077 if (!N0.hasOneUse() || 4078 (N0.getOpcode() != ISD::FSHL && N0.getOpcode() != ISD::FSHR)) 4079 return SDValue(); 4080 4081 unsigned BitWidth = N0.getScalarValueSizeInBits(); 4082 auto *ShAmtC = isConstOrConstSplat(N0.getOperand(2)); 4083 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 4084 return SDValue(); 4085 4086 // Canonicalize fshr as fshl to reduce pattern-matching. 4087 unsigned ShAmt = ShAmtC->getZExtValue(); 4088 if (N0.getOpcode() == ISD::FSHR) 4089 ShAmt = BitWidth - ShAmt; 4090 4091 // Match an 'or' with a specific operand 'Other' in either commuted variant. 4092 SDValue X, Y; 4093 auto matchOr = [&X, &Y](SDValue Or, SDValue Other) { 4094 if (Or.getOpcode() != ISD::OR || !Or.hasOneUse()) 4095 return false; 4096 if (Or.getOperand(0) == Other) { 4097 X = Or.getOperand(0); 4098 Y = Or.getOperand(1); 4099 return true; 4100 } 4101 if (Or.getOperand(1) == Other) { 4102 X = Or.getOperand(1); 4103 Y = Or.getOperand(0); 4104 return true; 4105 } 4106 return false; 4107 }; 4108 4109 EVT OpVT = N0.getValueType(); 4110 EVT ShAmtVT = N0.getOperand(2).getValueType(); 4111 SDValue F0 = N0.getOperand(0); 4112 SDValue F1 = N0.getOperand(1); 4113 if (matchOr(F0, F1)) { 4114 // fshl (or X, Y), X, C ==/!= 0 --> or (shl Y, C), X ==/!= 0 4115 SDValue NewShAmt = DAG.getConstant(ShAmt, dl, ShAmtVT); 4116 SDValue Shift = DAG.getNode(ISD::SHL, dl, OpVT, Y, NewShAmt); 4117 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4118 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4119 } 4120 if (matchOr(F1, F0)) { 4121 // fshl X, (or X, Y), C ==/!= 0 --> or (srl Y, BW-C), X ==/!= 0 4122 SDValue NewShAmt = DAG.getConstant(BitWidth - ShAmt, dl, ShAmtVT); 4123 SDValue Shift = DAG.getNode(ISD::SRL, dl, OpVT, Y, NewShAmt); 4124 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4125 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4126 } 4127 4128 return SDValue(); 4129 } 4130 4131 /// Try to simplify a setcc built with the specified operands and cc. If it is 4132 /// unable to simplify it, return a null SDValue. 4133 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 4134 ISD::CondCode Cond, bool foldBooleans, 4135 DAGCombinerInfo &DCI, 4136 const SDLoc &dl) const { 4137 SelectionDAG &DAG = DCI.DAG; 4138 const DataLayout &Layout = DAG.getDataLayout(); 4139 EVT OpVT = N0.getValueType(); 4140 4141 // Constant fold or commute setcc. 4142 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 4143 return Fold; 4144 4145 bool N0ConstOrSplat = 4146 isConstOrConstSplat(N0, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4147 bool N1ConstOrSplat = 4148 isConstOrConstSplat(N1, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4149 4150 // Ensure that the constant occurs on the RHS and fold constant comparisons. 4151 // TODO: Handle non-splat vector constants. All undef causes trouble. 4152 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 4153 // infinite loop here when we encounter one. 
4154 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 4155 if (N0ConstOrSplat && (!OpVT.isScalableVector() || !N1ConstOrSplat) && 4156 (DCI.isBeforeLegalizeOps() || 4157 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 4158 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4159 4160 // If we have a subtract with the same 2 non-constant operands as this setcc 4161 // -- but in reverse order -- then try to commute the operands of this setcc 4162 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 4163 // instruction on some targets. 4164 if (!N0ConstOrSplat && !N1ConstOrSplat && 4165 (DCI.isBeforeLegalizeOps() || 4166 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 4167 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 4168 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 4169 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4170 4171 if (SDValue V = foldSetCCWithRotate(VT, N0, N1, Cond, dl, DAG)) 4172 return V; 4173 4174 if (SDValue V = foldSetCCWithFunnelShift(VT, N0, N1, Cond, dl, DAG)) 4175 return V; 4176 4177 if (auto *N1C = isConstOrConstSplat(N1)) { 4178 const APInt &C1 = N1C->getAPIntValue(); 4179 4180 // Optimize some CTPOP cases. 4181 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 4182 return V; 4183 4184 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 4185 // equality comparison, then we're just comparing whether X itself is 4186 // zero. 4187 if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) && 4188 N0.getOperand(0).getOpcode() == ISD::CTLZ && 4189 isPowerOf2_32(N0.getScalarValueSizeInBits())) { 4190 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 4191 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4192 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 4193 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 4194 // (srl (ctlz x), 5) == 0 -> X != 0 4195 // (srl (ctlz x), 5) != 1 -> X != 0 4196 Cond = ISD::SETNE; 4197 } else { 4198 // (srl (ctlz x), 5) != 0 -> X == 0 4199 // (srl (ctlz x), 5) == 1 -> X == 0 4200 Cond = ISD::SETEQ; 4201 } 4202 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 4203 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 4204 Cond); 4205 } 4206 } 4207 } 4208 } 4209 4210 // FIXME: Support vectors. 
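// e.g. (zext i8 %x to i32) ==/!= C can be checked as an i8 comparison whenever C's active bits fit in 8 bits; the code below shrinks the operands accordingly.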
4211 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4212 const APInt &C1 = N1C->getAPIntValue(); 4213 4214 // (zext x) == C --> x == (trunc C) 4215 // (sext x) == C --> x == (trunc C) 4216 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4217 DCI.isBeforeLegalize() && N0->hasOneUse()) { 4218 unsigned MinBits = N0.getValueSizeInBits(); 4219 SDValue PreExt; 4220 bool Signed = false; 4221 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 4222 // ZExt 4223 MinBits = N0->getOperand(0).getValueSizeInBits(); 4224 PreExt = N0->getOperand(0); 4225 } else if (N0->getOpcode() == ISD::AND) { 4226 // DAGCombine turns costly ZExts into ANDs 4227 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 4228 if ((C->getAPIntValue()+1).isPowerOf2()) { 4229 MinBits = C->getAPIntValue().countTrailingOnes(); 4230 PreExt = N0->getOperand(0); 4231 } 4232 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 4233 // SExt 4234 MinBits = N0->getOperand(0).getValueSizeInBits(); 4235 PreExt = N0->getOperand(0); 4236 Signed = true; 4237 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 4238 // ZEXTLOAD / SEXTLOAD 4239 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 4240 MinBits = LN0->getMemoryVT().getSizeInBits(); 4241 PreExt = N0; 4242 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 4243 Signed = true; 4244 MinBits = LN0->getMemoryVT().getSizeInBits(); 4245 PreExt = N0; 4246 } 4247 } 4248 4249 // Figure out how many bits we need to preserve this constant. 4250 unsigned ReqdBits = Signed ? C1.getMinSignedBits() : C1.getActiveBits(); 4251 4252 // Make sure we're not losing bits from the constant. 4253 if (MinBits > 0 && 4254 MinBits < C1.getBitWidth() && 4255 MinBits >= ReqdBits) { 4256 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 4257 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 4258 // Will get folded away. 4259 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 4260 if (MinBits == 1 && C1 == 1) 4261 // Invert the condition. 4262 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 4263 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 4264 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 4265 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 4266 } 4267 4268 // If truncating the setcc operands is not desirable, we can still 4269 // simplify the expression in some cases: 4270 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 4271 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 4272 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 4273 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 4274 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 4275 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 4276 SDValue TopSetCC = N0->getOperand(0); 4277 unsigned N0Opc = N0->getOpcode(); 4278 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 4279 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 4280 TopSetCC.getOpcode() == ISD::SETCC && 4281 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 4282 (isConstFalseVal(N1) || 4283 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 4284 4285 bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) || 4286 (!N1C->isZero() && Cond == ISD::SETNE); 4287 4288 if (!Inverse) 4289 return TopSetCC; 4290 4291 ISD::CondCode InvCond = ISD::getSetCCInverse( 4292 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 4293 TopSetCC.getOperand(0).getValueType()); 4294 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 4295 TopSetCC.getOperand(1), 4296 InvCond); 4297 } 4298 } 4299 } 4300 4301 // If the LHS is '(and load, const)', the RHS is 0, the test is for 4302 // equality or unsigned, and all 1 bits of the const are in the same 4303 // partial word, see if we can shorten the load. 4304 if (DCI.isBeforeLegalize() && 4305 !ISD::isSignedIntSetCC(Cond) && 4306 N0.getOpcode() == ISD::AND && C1 == 0 && 4307 N0.getNode()->hasOneUse() && 4308 isa<LoadSDNode>(N0.getOperand(0)) && 4309 N0.getOperand(0).getNode()->hasOneUse() && 4310 isa<ConstantSDNode>(N0.getOperand(1))) { 4311 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 4312 APInt bestMask; 4313 unsigned bestWidth = 0, bestOffset = 0; 4314 if (Lod->isSimple() && Lod->isUnindexed()) { 4315 unsigned origWidth = N0.getValueSizeInBits(); 4316 unsigned maskWidth = origWidth; 4317 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 4318 // 8 bits, but have to be careful... 
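// e.g. on a little-endian target, (and (load i32 %p), 0xFF00) == 0 only tests byte 1, so it can use an i8 load from offset 1, provided shouldReduceLoadWidth allows the narrower load.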
4319 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 4320 origWidth = Lod->getMemoryVT().getSizeInBits(); 4321 const APInt &Mask = N0.getConstantOperandAPInt(1); 4322 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 4323 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 4324 for (unsigned offset=0; offset<origWidth/width; offset++) { 4325 if (Mask.isSubsetOf(newMask)) { 4326 if (Layout.isLittleEndian()) 4327 bestOffset = (uint64_t)offset * (width/8); 4328 else 4329 bestOffset = (origWidth/width - offset - 1) * (width/8); 4330 bestMask = Mask.lshr(offset * (width/8) * 8); 4331 bestWidth = width; 4332 break; 4333 } 4334 newMask <<= width; 4335 } 4336 } 4337 } 4338 if (bestWidth) { 4339 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 4340 if (newVT.isRound() && 4341 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 4342 SDValue Ptr = Lod->getBasePtr(); 4343 if (bestOffset != 0) 4344 Ptr = 4345 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 4346 SDValue NewLoad = 4347 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 4348 Lod->getPointerInfo().getWithOffset(bestOffset), 4349 Lod->getOriginalAlign()); 4350 return DAG.getSetCC(dl, VT, 4351 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 4352 DAG.getConstant(bestMask.trunc(bestWidth), 4353 dl, newVT)), 4354 DAG.getConstant(0LL, dl, newVT), Cond); 4355 } 4356 } 4357 } 4358 4359 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 4360 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 4361 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 4362 4363 // If the comparison constant has bits in the upper part, the 4364 // zero-extended value could never match. 4365 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 4366 C1.getBitWidth() - InSize))) { 4367 switch (Cond) { 4368 case ISD::SETUGT: 4369 case ISD::SETUGE: 4370 case ISD::SETEQ: 4371 return DAG.getConstant(0, dl, VT); 4372 case ISD::SETULT: 4373 case ISD::SETULE: 4374 case ISD::SETNE: 4375 return DAG.getConstant(1, dl, VT); 4376 case ISD::SETGT: 4377 case ISD::SETGE: 4378 // True if the sign bit of C1 is set. 4379 return DAG.getConstant(C1.isNegative(), dl, VT); 4380 case ISD::SETLT: 4381 case ISD::SETLE: 4382 // True if the sign bit of C1 isn't set. 4383 return DAG.getConstant(C1.isNonNegative(), dl, VT); 4384 default: 4385 break; 4386 } 4387 } 4388 4389 // Otherwise, we can perform the comparison with the low bits. 
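// e.g. setult (zext i8 %x to i32), 42 --> setult %x, 42 on the narrower type.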
4390 switch (Cond) { 4391 case ISD::SETEQ: 4392 case ISD::SETNE: 4393 case ISD::SETUGT: 4394 case ISD::SETUGE: 4395 case ISD::SETULT: 4396 case ISD::SETULE: { 4397 EVT newVT = N0.getOperand(0).getValueType(); 4398 if (DCI.isBeforeLegalizeOps() || 4399 (isOperationLegal(ISD::SETCC, newVT) && 4400 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 4401 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 4402 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 4403 4404 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 4405 NewConst, Cond); 4406 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 4407 } 4408 break; 4409 } 4410 default: 4411 break; // todo, be more careful with signed comparisons 4412 } 4413 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 4414 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4415 !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(), 4416 OpVT)) { 4417 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 4418 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 4419 EVT ExtDstTy = N0.getValueType(); 4420 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 4421 4422 // If the constant doesn't fit into the number of bits for the source of 4423 // the sign extension, it is impossible for both sides to be equal. 4424 if (C1.getMinSignedBits() > ExtSrcTyBits) 4425 return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT); 4426 4427 assert(ExtDstTy == N0.getOperand(0).getValueType() && 4428 ExtDstTy != ExtSrcTy && "Unexpected types!"); 4429 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 4430 SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0), 4431 DAG.getConstant(Imm, dl, ExtDstTy)); 4432 if (!DCI.isCalledByLegalizer()) 4433 DCI.AddToWorklist(ZextOp.getNode()); 4434 // Otherwise, make this a use of a zext. 4435 return DAG.getSetCC(dl, VT, ZextOp, 4436 DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond); 4437 } else if ((N1C->isZero() || N1C->isOne()) && 4438 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4439 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 4440 if (N0.getOpcode() == ISD::SETCC && 4441 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 4442 (N0.getValueType() == MVT::i1 || 4443 getBooleanContents(N0.getOperand(0).getValueType()) == 4444 ZeroOrOneBooleanContent)) { 4445 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 4446 if (TrueWhenTrue) 4447 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 4448 // Invert the condition. 4449 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 4450 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 4451 if (DCI.isBeforeLegalizeOps() || 4452 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 4453 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 4454 } 4455 4456 if ((N0.getOpcode() == ISD::XOR || 4457 (N0.getOpcode() == ISD::AND && 4458 N0.getOperand(0).getOpcode() == ISD::XOR && 4459 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 4460 isOneConstant(N0.getOperand(1))) { 4461 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 4462 // can only do this if the top bits are known zero. 4463 unsigned BitWidth = N0.getValueSizeInBits(); 4464 if (DAG.MaskedValueIsZero(N0, 4465 APInt::getHighBitsSet(BitWidth, 4466 BitWidth-1))) { 4467 // Okay, get the un-inverted input value. 
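// For plain (xor X, 1) the un-inverted value is X itself; for the (and (xor X, 1), 1) form it is rebuilt as (and X, 1) below.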
4468 SDValue Val; 4469 if (N0.getOpcode() == ISD::XOR) { 4470 Val = N0.getOperand(0); 4471 } else { 4472 assert(N0.getOpcode() == ISD::AND && 4473 N0.getOperand(0).getOpcode() == ISD::XOR); 4474 // ((X^1)&1)^1 -> X & 1 4475 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 4476 N0.getOperand(0).getOperand(0), 4477 N0.getOperand(1)); 4478 } 4479 4480 return DAG.getSetCC(dl, VT, Val, N1, 4481 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4482 } 4483 } else if (N1C->isOne()) { 4484 SDValue Op0 = N0; 4485 if (Op0.getOpcode() == ISD::TRUNCATE) 4486 Op0 = Op0.getOperand(0); 4487 4488 if ((Op0.getOpcode() == ISD::XOR) && 4489 Op0.getOperand(0).getOpcode() == ISD::SETCC && 4490 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 4491 SDValue XorLHS = Op0.getOperand(0); 4492 SDValue XorRHS = Op0.getOperand(1); 4493 // Ensure that the input setccs return an i1 type or 0/1 value. 4494 if (Op0.getValueType() == MVT::i1 || 4495 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 4496 ZeroOrOneBooleanContent && 4497 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 4498 ZeroOrOneBooleanContent)) { 4499 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 4500 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 4501 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 4502 } 4503 } 4504 if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) { 4505 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 4506 if (Op0.getValueType().bitsGT(VT)) 4507 Op0 = DAG.getNode(ISD::AND, dl, VT, 4508 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 4509 DAG.getConstant(1, dl, VT)); 4510 else if (Op0.getValueType().bitsLT(VT)) 4511 Op0 = DAG.getNode(ISD::AND, dl, VT, 4512 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 4513 DAG.getConstant(1, dl, VT)); 4514 4515 return DAG.getSetCC(dl, VT, Op0, 4516 DAG.getConstant(0, dl, Op0.getValueType()), 4517 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4518 } 4519 if (Op0.getOpcode() == ISD::AssertZext && 4520 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 4521 return DAG.getSetCC(dl, VT, Op0, 4522 DAG.getConstant(0, dl, Op0.getValueType()), 4523 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4524 } 4525 } 4526 4527 // Given: 4528 // icmp eq/ne (urem %x, %y), 0 4529 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 4530 // icmp eq/ne %x, 0 4531 if (N0.getOpcode() == ISD::UREM && N1C->isZero() && 4532 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4533 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 4534 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 4535 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 4536 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 4537 } 4538 4539 // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0 4540 // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0 4541 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4542 N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) && 4543 N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 && 4544 N1C && N1C->isAllOnes()) { 4545 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4546 DAG.getConstant(0, dl, OpVT), 4547 Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE); 4548 } 4549 4550 if (SDValue V = 4551 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 4552 return V; 4553 } 4554 4555 // These simplifications apply to splat vectors as well. 4556 // TODO: Handle more splat vector cases. 
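// Computing the type's MIN/MAX lets boundary comparisons fold outright (e.g. X u>= 0 --> true) and lets GE/LE be canonicalized to GT/LT below.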
4557 if (auto *N1C = isConstOrConstSplat(N1)) { 4558 const APInt &C1 = N1C->getAPIntValue(); 4559 4560 APInt MinVal, MaxVal; 4561 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 4562 if (ISD::isSignedIntSetCC(Cond)) { 4563 MinVal = APInt::getSignedMinValue(OperandBitSize); 4564 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 4565 } else { 4566 MinVal = APInt::getMinValue(OperandBitSize); 4567 MaxVal = APInt::getMaxValue(OperandBitSize); 4568 } 4569 4570 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 4571 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 4572 // X >= MIN --> true 4573 if (C1 == MinVal) 4574 return DAG.getBoolConstant(true, dl, VT, OpVT); 4575 4576 if (!VT.isVector()) { // TODO: Support this for vectors. 4577 // X >= C0 --> X > (C0 - 1) 4578 APInt C = C1 - 1; 4579 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 4580 if ((DCI.isBeforeLegalizeOps() || 4581 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4582 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4583 isLegalICmpImmediate(C.getSExtValue())))) { 4584 return DAG.getSetCC(dl, VT, N0, 4585 DAG.getConstant(C, dl, N1.getValueType()), 4586 NewCC); 4587 } 4588 } 4589 } 4590 4591 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 4592 // X <= MAX --> true 4593 if (C1 == MaxVal) 4594 return DAG.getBoolConstant(true, dl, VT, OpVT); 4595 4596 // X <= C0 --> X < (C0 + 1) 4597 if (!VT.isVector()) { // TODO: Support this for vectors. 4598 APInt C = C1 + 1; 4599 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 4600 if ((DCI.isBeforeLegalizeOps() || 4601 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4602 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4603 isLegalICmpImmediate(C.getSExtValue())))) { 4604 return DAG.getSetCC(dl, VT, N0, 4605 DAG.getConstant(C, dl, N1.getValueType()), 4606 NewCC); 4607 } 4608 } 4609 } 4610 4611 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 4612 if (C1 == MinVal) 4613 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 4614 4615 // TODO: Support this for vectors after legalize ops. 4616 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4617 // Canonicalize setlt X, Max --> setne X, Max 4618 if (C1 == MaxVal) 4619 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4620 4621 // If we have setult X, 1, turn it into seteq X, 0 4622 if (C1 == MinVal+1) 4623 return DAG.getSetCC(dl, VT, N0, 4624 DAG.getConstant(MinVal, dl, N0.getValueType()), 4625 ISD::SETEQ); 4626 } 4627 } 4628 4629 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 4630 if (C1 == MaxVal) 4631 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 4632 4633 // TODO: Support this for vectors after legalize ops. 4634 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4635 // Canonicalize setgt X, Min --> setne X, Min 4636 if (C1 == MinVal) 4637 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4638 4639 // If we have setugt X, Max-1, turn it into seteq X, Max 4640 if (C1 == MaxVal-1) 4641 return DAG.getSetCC(dl, VT, N0, 4642 DAG.getConstant(MaxVal, dl, N0.getValueType()), 4643 ISD::SETEQ); 4644 } 4645 } 4646 4647 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 4648 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4649 if (C1.isZero()) 4650 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 4651 VT, N0, N1, Cond, DCI, dl)) 4652 return CC; 4653 4654 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
4655 // For example, when high 32-bits of i64 X are known clear: 4656 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0 4657 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1 4658 bool CmpZero = N1C->getAPIntValue().isZero(); 4659 bool CmpNegOne = N1C->getAPIntValue().isAllOnes(); 4660 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) { 4661 // Match or(lo,shl(hi,bw/2)) pattern. 4662 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) { 4663 unsigned EltBits = V.getScalarValueSizeInBits(); 4664 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0) 4665 return false; 4666 SDValue LHS = V.getOperand(0); 4667 SDValue RHS = V.getOperand(1); 4668 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2); 4669 // Unshifted element must have zero upper bits. 4670 if (RHS.getOpcode() == ISD::SHL && 4671 isa<ConstantSDNode>(RHS.getOperand(1)) && 4672 RHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4673 DAG.MaskedValueIsZero(LHS, HiBits)) { 4674 Lo = LHS; 4675 Hi = RHS.getOperand(0); 4676 return true; 4677 } 4678 if (LHS.getOpcode() == ISD::SHL && 4679 isa<ConstantSDNode>(LHS.getOperand(1)) && 4680 LHS.getConstantOperandAPInt(1) == (EltBits / 2) && 4681 DAG.MaskedValueIsZero(RHS, HiBits)) { 4682 Lo = RHS; 4683 Hi = LHS.getOperand(0); 4684 return true; 4685 } 4686 return false; 4687 }; 4688 4689 auto MergeConcat = [&](SDValue Lo, SDValue Hi) { 4690 unsigned EltBits = N0.getScalarValueSizeInBits(); 4691 unsigned HalfBits = EltBits / 2; 4692 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits); 4693 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT); 4694 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits); 4695 SDValue NewN0 = 4696 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask); 4697 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits; 4698 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond); 4699 }; 4700 4701 SDValue Lo, Hi; 4702 if (IsConcat(N0, Lo, Hi)) 4703 return MergeConcat(Lo, Hi); 4704 4705 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) { 4706 SDValue Lo0, Lo1, Hi0, Hi1; 4707 if (IsConcat(N0.getOperand(0), Lo0, Hi0) && 4708 IsConcat(N0.getOperand(1), Lo1, Hi1)) { 4709 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1), 4710 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1)); 4711 } 4712 } 4713 } 4714 } 4715 4716 // If we have "setcc X, C0", check to see if we can shrink the immediate 4717 // by changing cc. 4718 // TODO: Support this for vectors after legalize ops. 4719 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4720 // SETUGT X, SINTMAX -> SETLT X, 0 4721 // SETUGE X, SINTMIN -> SETLT X, 0 4722 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) || 4723 (Cond == ISD::SETUGE && C1.isMinSignedValue())) 4724 return DAG.getSetCC(dl, VT, N0, 4725 DAG.getConstant(0, dl, N1.getValueType()), 4726 ISD::SETLT); 4727 4728 // SETULT X, SINTMIN -> SETGT X, -1 4729 // SETULE X, SINTMAX -> SETGT X, -1 4730 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) || 4731 (Cond == ISD::SETULE && C1.isMaxSignedValue())) 4732 return DAG.getSetCC(dl, VT, N0, 4733 DAG.getAllOnesConstant(dl, N1.getValueType()), 4734 ISD::SETGT); 4735 } 4736 } 4737 4738 // Back to non-vector simplifications. 4739 // TODO: Can we do these for vector splats? 4740 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4741 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4742 const APInt &C1 = N1C->getAPIntValue(); 4743 EVT ShValTy = N0.getValueType(); 4744 4745 // Fold bit comparisons when we can.
This will result in an 4746 // incorrect value when boolean false is negative one, unless 4747 // the bitsize is 1 in which case the false value is the same 4748 // in practice regardless of the representation. 4749 if ((VT.getSizeInBits() == 1 || 4750 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) && 4751 (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4752 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 4753 N0.getOpcode() == ISD::AND) { 4754 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4755 EVT ShiftTy = 4756 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4757 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 4758 // Perform the xform if the AND RHS is a single bit. 4759 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 4760 if (AndRHS->getAPIntValue().isPowerOf2() && 4761 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4762 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4763 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4764 DAG.getConstant(ShCt, dl, ShiftTy))); 4765 } 4766 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 4767 // (X & 8) == 8 --> (X & 8) >> 3 4768 // Perform the xform if C1 is a single bit. 4769 unsigned ShCt = C1.logBase2(); 4770 if (C1.isPowerOf2() && 4771 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 4772 return DAG.getNode(ISD::TRUNCATE, dl, VT, 4773 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4774 DAG.getConstant(ShCt, dl, ShiftTy))); 4775 } 4776 } 4777 } 4778 } 4779 4780 if (C1.getMinSignedBits() <= 64 && 4781 !isLegalICmpImmediate(C1.getSExtValue())) { 4782 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 4783 // (X & -256) == 256 -> (X >> 8) == 1 4784 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4785 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 4786 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4787 const APInt &AndRHSC = AndRHS->getAPIntValue(); 4788 if (AndRHSC.isNegatedPowerOf2() && (AndRHSC & C1) == C1) { 4789 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 4790 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4791 SDValue Shift = 4792 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 4793 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4794 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 4795 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 4796 } 4797 } 4798 } 4799 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 4800 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 4801 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 4802 // X < 0x100000000 -> (X >> 32) < 1 4803 // X >= 0x100000000 -> (X >> 32) >= 1 4804 // X <= 0x0ffffffff -> (X >> 32) < 1 4805 // X > 0x0ffffffff -> (X >> 32) >= 1 4806 unsigned ShiftBits; 4807 APInt NewC = C1; 4808 ISD::CondCode NewCond = Cond; 4809 if (AdjOne) { 4810 ShiftBits = C1.countTrailingOnes(); 4811 NewC = NewC + 1; 4812 NewCond = (Cond == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 4813 } else { 4814 ShiftBits = C1.countTrailingZeros(); 4815 } 4816 NewC.lshrInPlace(ShiftBits); 4817 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 4818 isLegalICmpImmediate(NewC.getSExtValue()) && 4819 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4820 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4821 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4822 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4823 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4824 } 4825 } 4826 } 4827 } 4828 4829 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4830 auto *CFP = cast<ConstantFPSDNode>(N1); 4831 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4832 4833 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4834 // constant if knowing that the operand is non-nan is enough. We prefer to 4835 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4836 // materialize 0.0. 4837 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4838 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4839 4840 // setcc (fneg x), C -> setcc swap(pred) x, -C 4841 if (N0.getOpcode() == ISD::FNEG) { 4842 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4843 if (DCI.isBeforeLegalizeOps() || 4844 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4845 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4846 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4847 } 4848 } 4849 4850 // If the condition is not legal, see if we can find an equivalent one 4851 // which is legal. 4852 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4853 // If the comparison was an awkward floating-point == or != and one of 4854 // the comparison operands is infinity or negative infinity, convert the 4855 // condition to a less-awkward <= or >=. 4856 if (CFP->getValueAPF().isInfinity()) { 4857 bool IsNegInf = CFP->getValueAPF().isNegative(); 4858 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4859 switch (Cond) { 4860 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4861 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4862 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4863 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4864 default: break; 4865 } 4866 if (NewCond != ISD::SETCC_INVALID && 4867 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4868 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4869 } 4870 } 4871 } 4872 4873 if (N0 == N1) { 4874 // The sext(setcc()) => setcc() optimization relies on the appropriate 4875 // constant being emitted. 4876 assert(!N0.getValueType().isInteger() && 4877 "Integer types should be handled by FoldSetCC"); 4878 4879 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4880 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4881 if (UOF == 2) // FP operators that are undefined on NaNs. 4882 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4883 if (UOF == unsigned(EqTrue)) 4884 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4885 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4886 // if it is not already. 4887 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 4888 if (NewCond != Cond && 4889 (DCI.isBeforeLegalizeOps() || 4890 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4891 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4892 } 4893 4894 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4895 N0.getValueType().isInteger()) { 4896 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4897 N0.getOpcode() == ISD::XOR) { 4898 // Simplify (X+Y) == (X+Z) --> Y == Z 4899 if (N0.getOpcode() == N1.getOpcode()) { 4900 if (N0.getOperand(0) == N1.getOperand(0)) 4901 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4902 if (N0.getOperand(1) == N1.getOperand(1)) 4903 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4904 if (isCommutativeBinOp(N0.getOpcode())) { 4905 // If X op Y == Y op X, try other combinations. 4906 if (N0.getOperand(0) == N1.getOperand(1)) 4907 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4908 Cond); 4909 if (N0.getOperand(1) == N1.getOperand(0)) 4910 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4911 Cond); 4912 } 4913 } 4914 4915 // If RHS is a legal immediate value for a compare instruction, we need 4916 // to be careful about increasing register pressure needlessly. 4917 bool LegalRHSImm = false; 4918 4919 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4920 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4921 // Turn (X+C1) == C2 --> X == C2-C1 4922 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) 4923 return DAG.getSetCC( 4924 dl, VT, N0.getOperand(0), 4925 DAG.getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(), 4926 dl, N0.getValueType()), 4927 Cond); 4928 4929 // Turn (X^C1) == C2 --> X == C1^C2 4930 if (N0.getOpcode() == ISD::XOR && N0.getNode()->hasOneUse()) 4931 return DAG.getSetCC( 4932 dl, VT, N0.getOperand(0), 4933 DAG.getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(), 4934 dl, N0.getValueType()), 4935 Cond); 4936 } 4937 4938 // Turn (C1-X) == C2 --> X == C1-C2 4939 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) 4940 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) 4941 return DAG.getSetCC( 4942 dl, VT, N0.getOperand(1), 4943 DAG.getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(), 4944 dl, N0.getValueType()), 4945 Cond); 4946 4947 // Could RHSC fold directly into a compare? 4948 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4949 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4950 } 4951 4952 // (X+Y) == X --> Y == 0 and similar folds. 4953 // Don't do this if X is an immediate that can fold into a cmp 4954 // instruction and X+Y has other uses. It could be an induction variable 4955 // chain, and the transform would increase register pressure. 4956 if (!LegalRHSImm || N0.hasOneUse()) 4957 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4958 return V; 4959 } 4960 4961 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4962 N1.getOpcode() == ISD::XOR) 4963 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4964 return V; 4965 4966 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4967 return V; 4968 } 4969 4970 // Fold remainder of division by a constant. 
4971 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4972 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4973 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4974 4975 // When division is cheap or optimizing for minimum size, 4976 // fall through to DIVREM creation by skipping this fold. 4977 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) { 4978 if (N0.getOpcode() == ISD::UREM) { 4979 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4980 return Folded; 4981 } else if (N0.getOpcode() == ISD::SREM) { 4982 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4983 return Folded; 4984 } 4985 } 4986 } 4987 4988 // Fold away ALL boolean setcc's. 4989 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4990 SDValue Temp; 4991 switch (Cond) { 4992 default: llvm_unreachable("Unknown integer setcc!"); 4993 case ISD::SETEQ: // X == Y -> ~(X^Y) 4994 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4995 N0 = DAG.getNOT(dl, Temp, OpVT); 4996 if (!DCI.isCalledByLegalizer()) 4997 DCI.AddToWorklist(Temp.getNode()); 4998 break; 4999 case ISD::SETNE: // X != Y --> (X^Y) 5000 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 5001 break; 5002 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 5003 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 5004 Temp = DAG.getNOT(dl, N0, OpVT); 5005 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 5006 if (!DCI.isCalledByLegalizer()) 5007 DCI.AddToWorklist(Temp.getNode()); 5008 break; 5009 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 5010 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 5011 Temp = DAG.getNOT(dl, N1, OpVT); 5012 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 5013 if (!DCI.isCalledByLegalizer()) 5014 DCI.AddToWorklist(Temp.getNode()); 5015 break; 5016 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 5017 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 5018 Temp = DAG.getNOT(dl, N0, OpVT); 5019 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 5020 if (!DCI.isCalledByLegalizer()) 5021 DCI.AddToWorklist(Temp.getNode()); 5022 break; 5023 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 5024 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 5025 Temp = DAG.getNOT(dl, N1, OpVT); 5026 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 5027 break; 5028 } 5029 if (VT.getScalarType() != MVT::i1) { 5030 if (!DCI.isCalledByLegalizer()) 5031 DCI.AddToWorklist(N0.getNode()); 5032 // FIXME: If running after legalize, we probably can't do this. 5033 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 5034 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 5035 } 5036 return N0; 5037 } 5038 5039 // Could not fold it. 5040 return SDValue(); 5041 } 5042 5043 /// Returns true (and the GlobalValue and the offset) if the node is a 5044 /// GlobalAddress + offset. 
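/// For illustration (hypothetical nodes): given (add (GlobalAddress @g, 8), Constant:16),
/// this reports GA = @g and accumulates Offset += 24, since both the address node's own
/// offset and the constant addend are folded into Offset.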
5045 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 5046 int64_t &Offset) const { 5047 5048 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 5049 5050 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 5051 GA = GASD->getGlobal(); 5052 Offset += GASD->getOffset(); 5053 return true; 5054 } 5055 5056 if (N->getOpcode() == ISD::ADD) { 5057 SDValue N1 = N->getOperand(0); 5058 SDValue N2 = N->getOperand(1); 5059 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 5060 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 5061 Offset += V->getSExtValue(); 5062 return true; 5063 } 5064 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 5065 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 5066 Offset += V->getSExtValue(); 5067 return true; 5068 } 5069 } 5070 } 5071 5072 return false; 5073 } 5074 5075 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 5076 DAGCombinerInfo &DCI) const { 5077 // Default implementation: no optimization. 5078 return SDValue(); 5079 } 5080 5081 //===----------------------------------------------------------------------===// 5082 // Inline Assembler Implementation Methods 5083 //===----------------------------------------------------------------------===// 5084 5085 TargetLowering::ConstraintType 5086 TargetLowering::getConstraintType(StringRef Constraint) const { 5087 unsigned S = Constraint.size(); 5088 5089 if (S == 1) { 5090 switch (Constraint[0]) { 5091 default: break; 5092 case 'r': 5093 return C_RegisterClass; 5094 case 'm': // memory 5095 case 'o': // offsetable 5096 case 'V': // not offsetable 5097 return C_Memory; 5098 case 'p': // Address. 5099 return C_Address; 5100 case 'n': // Simple Integer 5101 case 'E': // Floating Point Constant 5102 case 'F': // Floating Point Constant 5103 return C_Immediate; 5104 case 'i': // Simple Integer or Relocatable Constant 5105 case 's': // Relocatable Constant 5106 case 'X': // Allow ANY value. 5107 case 'I': // Target registers. 5108 case 'J': 5109 case 'K': 5110 case 'L': 5111 case 'M': 5112 case 'N': 5113 case 'O': 5114 case 'P': 5115 case '<': 5116 case '>': 5117 return C_Other; 5118 } 5119 } 5120 5121 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 5122 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 5123 return C_Memory; 5124 return C_Register; 5125 } 5126 return C_Unknown; 5127 } 5128 5129 /// Try to replace an X constraint, which matches anything, with another that 5130 /// has more specific requirements based on the type of the corresponding 5131 /// operand. 5132 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 5133 if (ConstraintVT.isInteger()) 5134 return "r"; 5135 if (ConstraintVT.isFloatingPoint()) 5136 return "f"; // works for many targets 5137 return nullptr; 5138 } 5139 5140 SDValue TargetLowering::LowerAsmOutputForConstraint( 5141 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 5142 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 5143 return SDValue(); 5144 } 5145 5146 /// Lower the specified operand into the Ops vector. 5147 /// If it is invalid, don't add anything to Ops. 
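/// For example (hypothetical operand), under the 'i' constraint a chain such as
/// (add (add (GlobalAddress @f, 4), Constant:8), Constant:4) is walked by the loop
/// below and collapses to a single TargetGlobalAddress of @f with offset 16.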
5148 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 5149 std::string &Constraint, 5150 std::vector<SDValue> &Ops, 5151 SelectionDAG &DAG) const { 5152 5153 if (Constraint.length() > 1) return; 5154 5155 char ConstraintLetter = Constraint[0]; 5156 switch (ConstraintLetter) { 5157 default: break; 5158 case 'X': // Allows any operand 5159 case 'i': // Simple Integer or Relocatable Constant 5160 case 'n': // Simple Integer 5161 case 's': { // Relocatable Constant 5162 5163 ConstantSDNode *C; 5164 uint64_t Offset = 0; 5165 5166 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 5167 // etc., since getelementpointer is variadic. We can't use 5168 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 5169 // while in this case the GA may be furthest from the root node which is 5170 // likely an ISD::ADD. 5171 while (true) { 5172 if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') { 5173 // gcc prints these as sign extended. Sign extend value to 64 bits 5174 // now; without this it would get ZExt'd later in 5175 // ScheduleDAGSDNodes::EmitNode, which is very generic. 5176 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1; 5177 BooleanContent BCont = getBooleanContents(MVT::i64); 5178 ISD::NodeType ExtOpc = 5179 IsBool ? getExtendForContent(BCont) : ISD::SIGN_EXTEND; 5180 int64_t ExtVal = 5181 ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() : C->getSExtValue(); 5182 Ops.push_back( 5183 DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64)); 5184 return; 5185 } 5186 if (ConstraintLetter != 'n') { 5187 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5188 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 5189 GA->getValueType(0), 5190 Offset + GA->getOffset())); 5191 return; 5192 } 5193 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { 5194 Ops.push_back(DAG.getTargetBlockAddress( 5195 BA->getBlockAddress(), BA->getValueType(0), 5196 Offset + BA->getOffset(), BA->getTargetFlags())); 5197 return; 5198 } 5199 if (isa<BasicBlockSDNode>(Op)) { 5200 Ops.push_back(Op); 5201 return; 5202 } 5203 } 5204 const unsigned OpCode = Op.getOpcode(); 5205 if (OpCode == ISD::ADD || OpCode == ISD::SUB) { 5206 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0)))) 5207 Op = Op.getOperand(1); 5208 // Subtraction is not commutative. 5209 else if (OpCode == ISD::ADD && 5210 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) 5211 Op = Op.getOperand(0); 5212 else 5213 return; 5214 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue(); 5215 continue; 5216 } 5217 return; 5218 } 5219 break; 5220 } 5221 } 5222 } 5223 5224 std::pair<unsigned, const TargetRegisterClass *> 5225 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI, 5226 StringRef Constraint, 5227 MVT VT) const { 5228 if (Constraint.empty() || Constraint[0] != '{') 5229 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr)); 5230 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?"); 5231 5232 // Remove the braces from around the name. 5233 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2); 5234 5235 std::pair<unsigned, const TargetRegisterClass *> R = 5236 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr)); 5237 5238 // Figure out which register class contains this reg. 5239 for (const TargetRegisterClass *RC : RI->regclasses()) { 5240 // If none of the value types for this register class are valid, we 5241 // can't use it. 
For example, 64-bit reg classes on 32-bit targets. 5242 if (!isLegalRC(*RI, *RC)) 5243 continue; 5244 5245 for (const MCPhysReg &PR : *RC) { 5246 if (RegName.equals_insensitive(RI->getRegAsmName(PR))) { 5247 std::pair<unsigned, const TargetRegisterClass *> S = 5248 std::make_pair(PR, RC); 5249 5250 // If this register class has the requested value type, return it, 5251 // otherwise keep searching and return the first class found 5252 // if no other is found which explicitly has the requested type. 5253 if (RI->isTypeLegalForClass(*RC, VT)) 5254 return S; 5255 if (!R.second) 5256 R = S; 5257 } 5258 } 5259 } 5260 5261 return R; 5262 } 5263 5264 //===----------------------------------------------------------------------===// 5265 // Constraint Selection. 5266 5267 /// Return true if this is an input operand that is a matching constraint like 5268 /// "4". 5269 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 5270 assert(!ConstraintCode.empty() && "No known constraint!"); 5271 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 5272 } 5273 5274 /// If this is an input matching constraint, this method returns the output 5275 /// operand it matches. 5276 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 5277 assert(!ConstraintCode.empty() && "No known constraint!"); 5278 return atoi(ConstraintCode.c_str()); 5279 } 5280 5281 /// Split up the constraint string from the inline assembly value into the 5282 /// specific constraints and their prefixes, and also tie in the associated 5283 /// operand values. 5284 /// If this returns an empty vector, and if the constraint string itself 5285 /// isn't empty, there was an error parsing. 5286 TargetLowering::AsmOperandInfoVector 5287 TargetLowering::ParseConstraints(const DataLayout &DL, 5288 const TargetRegisterInfo *TRI, 5289 const CallBase &Call) const { 5290 /// Information about all of the constraints. 5291 AsmOperandInfoVector ConstraintOperands; 5292 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 5293 unsigned maCount = 0; // Largest number of multiple alternative constraints. 5294 5295 // Do a prepass over the constraints, canonicalizing them, and building up the 5296 // ConstraintOperands list. 5297 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 5298 unsigned ResNo = 0; // ResNo - The result number of the next output. 5299 unsigned LabelNo = 0; // LabelNo - CallBr indirect dest number. 5300 5301 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 5302 ConstraintOperands.emplace_back(std::move(CI)); 5303 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 5304 5305 // Update multiple alternative constraint count. 5306 if (OpInfo.multipleAlternatives.size() > maCount) 5307 maCount = OpInfo.multipleAlternatives.size(); 5308 5309 OpInfo.ConstraintVT = MVT::Other; 5310 5311 // Compute the value type for each operand. 5312 switch (OpInfo.Type) { 5313 case InlineAsm::isOutput: 5314 // Indirect outputs just consume an argument. 5315 if (OpInfo.isIndirect) { 5316 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 5317 break; 5318 } 5319 5320 // The return value of the call is this value. As such, there is no 5321 // corresponding argument.
5322 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 5323 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 5324 OpInfo.ConstraintVT = 5325 getSimpleValueType(DL, STy->getElementType(ResNo)); 5326 } else { 5327 assert(ResNo == 0 && "Asm only has one result!"); 5328 OpInfo.ConstraintVT = 5329 getAsmOperandValueType(DL, Call.getType()).getSimpleVT(); 5330 } 5331 ++ResNo; 5332 break; 5333 case InlineAsm::isInput: 5334 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo); 5335 break; 5336 case InlineAsm::isLabel: 5337 OpInfo.CallOperandVal = 5338 cast<CallBrInst>(&Call)->getBlockAddressForIndirectDest(LabelNo); 5339 OpInfo.ConstraintVT = 5340 getAsmOperandValueType(DL, OpInfo.CallOperandVal->getType()) 5341 .getSimpleVT(); 5342 ++LabelNo; 5343 continue; 5344 case InlineAsm::isClobber: 5345 // Nothing to do. 5346 break; 5347 } 5348 5349 if (OpInfo.CallOperandVal) { 5350 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 5351 if (OpInfo.isIndirect) { 5352 OpTy = Call.getParamElementType(ArgNo); 5353 assert(OpTy && "Indirect operand must have elementtype attribute"); 5354 } 5355 5356 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 5357 if (StructType *STy = dyn_cast<StructType>(OpTy)) 5358 if (STy->getNumElements() == 1) 5359 OpTy = STy->getElementType(0); 5360 5361 // If OpTy is not a single value, it may be a struct/union that we 5362 // can tile with integers. 5363 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 5364 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 5365 switch (BitSize) { 5366 default: break; 5367 case 1: 5368 case 8: 5369 case 16: 5370 case 32: 5371 case 64: 5372 case 128: 5373 OpTy = IntegerType::get(OpTy->getContext(), BitSize); 5374 break; 5375 } 5376 } 5377 5378 EVT VT = getAsmOperandValueType(DL, OpTy, true); 5379 OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other; 5380 ArgNo++; 5381 } 5382 } 5383 5384 // If we have multiple alternative constraints, select the best alternative. 5385 if (!ConstraintOperands.empty()) { 5386 if (maCount) { 5387 unsigned bestMAIndex = 0; 5388 int bestWeight = -1; 5389 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 5390 int weight = -1; 5391 unsigned maIndex; 5392 // Compute the sums of the weights for each alternative, keeping track 5393 // of the best (highest weight) one so far. 5394 for (maIndex = 0; maIndex < maCount; ++maIndex) { 5395 int weightSum = 0; 5396 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5397 cIndex != eIndex; ++cIndex) { 5398 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5399 if (OpInfo.Type == InlineAsm::isClobber) 5400 continue; 5401 5402 // If this is an output operand with a matching input operand, 5403 // look up the matching input. If their types mismatch, e.g. one 5404 // is an integer, the other is floating point, or their sizes are 5405 // different, flag it as an maCantMatch. 5406 if (OpInfo.hasMatchingInput()) { 5407 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5408 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5409 if ((OpInfo.ConstraintVT.isInteger() != 5410 Input.ConstraintVT.isInteger()) || 5411 (OpInfo.ConstraintVT.getSizeInBits() != 5412 Input.ConstraintVT.getSizeInBits())) { 5413 weightSum = -1; // Can't match. 5414 break; 5415 } 5416 } 5417 } 5418 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 5419 if (weight == -1) { 5420 weightSum = -1; 5421 break; 5422 } 5423 weightSum += weight; 5424 } 5425 // Update best. 
5426 if (weightSum > bestWeight) { 5427 bestWeight = weightSum; 5428 bestMAIndex = maIndex; 5429 } 5430 } 5431 5432 // Now select chosen alternative in each constraint. 5433 for (AsmOperandInfo &cInfo : ConstraintOperands) 5434 if (cInfo.Type != InlineAsm::isClobber) 5435 cInfo.selectAlternative(bestMAIndex); 5436 } 5437 } 5438 5439 // Check and hook up tied operands, choose constraint code to use. 5440 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5441 cIndex != eIndex; ++cIndex) { 5442 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5443 5444 // If this is an output operand with a matching input operand, look up the 5445 // matching input. If their types mismatch, e.g. one is an integer, the 5446 // other is floating point, or their sizes are different, flag it as an 5447 // error. 5448 if (OpInfo.hasMatchingInput()) { 5449 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5450 5451 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5452 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 5453 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 5454 OpInfo.ConstraintVT); 5455 std::pair<unsigned, const TargetRegisterClass *> InputRC = 5456 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 5457 Input.ConstraintVT); 5458 if ((OpInfo.ConstraintVT.isInteger() != 5459 Input.ConstraintVT.isInteger()) || 5460 (MatchRC.second != InputRC.second)) { 5461 report_fatal_error("Unsupported asm: input constraint" 5462 " with a matching output constraint of" 5463 " incompatible type!"); 5464 } 5465 } 5466 } 5467 } 5468 5469 return ConstraintOperands; 5470 } 5471 5472 /// Return an integer indicating how general CT is. 5473 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 5474 switch (CT) { 5475 case TargetLowering::C_Immediate: 5476 case TargetLowering::C_Other: 5477 case TargetLowering::C_Unknown: 5478 return 0; 5479 case TargetLowering::C_Register: 5480 return 1; 5481 case TargetLowering::C_RegisterClass: 5482 return 2; 5483 case TargetLowering::C_Memory: 5484 case TargetLowering::C_Address: 5485 return 3; 5486 } 5487 llvm_unreachable("Invalid constraint type"); 5488 } 5489 5490 /// Examine constraint type and operand type and determine a weight value. 5491 /// This object must already have been set up with the operand type 5492 /// and the current alternative constraint selected. 5493 TargetLowering::ConstraintWeight 5494 TargetLowering::getMultipleConstraintMatchWeight( 5495 AsmOperandInfo &info, int maIndex) const { 5496 InlineAsm::ConstraintCodeVector *rCodes; 5497 if (maIndex >= (int)info.multipleAlternatives.size()) 5498 rCodes = &info.Codes; 5499 else 5500 rCodes = &info.multipleAlternatives[maIndex].Codes; 5501 ConstraintWeight BestWeight = CW_Invalid; 5502 5503 // Loop over the options, keeping track of the most general one. 5504 for (const std::string &rCode : *rCodes) { 5505 ConstraintWeight weight = 5506 getSingleConstraintMatchWeight(info, rCode.c_str()); 5507 if (weight > BestWeight) 5508 BestWeight = weight; 5509 } 5510 5511 return BestWeight; 5512 } 5513 5514 /// Examine constraint type and operand type and determine a weight value. 5515 /// This object must already have been set up with the operand type 5516 /// and the current alternative constraint selected. 
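/// For example (illustrative): a ConstantInt operand matched against the codes
/// in "imr" scores CW_Constant for 'i', CW_Memory for 'm' and CW_Register for
/// 'r' (its type is an integer), and the loop in getMultipleConstraintMatchWeight
/// above keeps the best of those weights.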
5517 TargetLowering::ConstraintWeight 5518 TargetLowering::getSingleConstraintMatchWeight( 5519 AsmOperandInfo &info, const char *constraint) const { 5520 ConstraintWeight weight = CW_Invalid; 5521 Value *CallOperandVal = info.CallOperandVal; 5522 // If we don't have a value, we can't do a match, 5523 // but allow it at the lowest weight. 5524 if (!CallOperandVal) 5525 return CW_Default; 5526 // Look at the constraint type. 5527 switch (*constraint) { 5528 case 'i': // immediate integer. 5529 case 'n': // immediate integer with a known value. 5530 if (isa<ConstantInt>(CallOperandVal)) 5531 weight = CW_Constant; 5532 break; 5533 case 's': // non-explicit integral immediate. 5534 if (isa<GlobalValue>(CallOperandVal)) 5535 weight = CW_Constant; 5536 break; 5537 case 'E': // immediate float if host format. 5538 case 'F': // immediate float. 5539 if (isa<ConstantFP>(CallOperandVal)) 5540 weight = CW_Constant; 5541 break; 5542 case '<': // memory operand with autodecrement. 5543 case '>': // memory operand with autoincrement. 5544 case 'm': // memory operand. 5545 case 'o': // offsettable memory operand 5546 case 'V': // non-offsettable memory operand 5547 weight = CW_Memory; 5548 break; 5549 case 'r': // general register. 5550 case 'g': // general register, memory operand or immediate integer. 5551 // note: Clang converts "g" to "imr". 5552 if (CallOperandVal->getType()->isIntegerTy()) 5553 weight = CW_Register; 5554 break; 5555 case 'X': // any operand. 5556 default: 5557 weight = CW_Default; 5558 break; 5559 } 5560 return weight; 5561 } 5562 5563 /// If there are multiple different constraints that we could pick for this 5564 /// operand (e.g. "imr") try to pick the 'best' one. 5565 /// This is somewhat tricky: constraints fall into four classes: 5566 /// Other -> immediates and magic values 5567 /// Register -> one specific register 5568 /// RegisterClass -> a group of regs 5569 /// Memory -> memory 5570 /// Ideally, we would pick the most specific constraint possible: if we have 5571 /// something that fits into a register, we would pick it. The problem here 5572 /// is that if we have something that could either be in a register or in 5573 /// memory, then using the register could cause selection of *other* 5574 /// operands to fail: they might only succeed if we pick memory. Because of 5575 /// this, the heuristic we use is: 5576 /// 5577 /// 1) If there is an 'other' constraint, and if the operand is valid for 5578 /// that constraint, use it. This makes us take advantage of 'i' 5579 /// constraints when available. 5580 /// 2) Otherwise, pick the most general constraint present. This prefers 5581 /// 'm' over 'r', for example. 5582 /// 5583 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 5584 const TargetLowering &TLI, 5585 SDValue Op, SelectionDAG *DAG) { 5586 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 5587 unsigned BestIdx = 0; 5588 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 5589 int BestGenerality = -1; 5590 5591 // Loop over the options, keeping track of the most general one. 5592 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 5593 TargetLowering::ConstraintType CType = 5594 TLI.getConstraintType(OpInfo.Codes[i]); 5595 5596 // Indirect 'other' or 'immediate' constraints are not allowed.
5597 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 5598 CType == TargetLowering::C_Register || 5599 CType == TargetLowering::C_RegisterClass)) 5600 continue; 5601 5602 // If this is an 'other' or 'immediate' constraint, see if the operand is 5603 // valid for it. For example, on X86 we might have an 'rI' constraint. If 5604 // the operand is an integer in the range [0..31] we want to use I (saving a 5605 // load of a register), otherwise we must use 'r'. 5606 if ((CType == TargetLowering::C_Other || 5607 CType == TargetLowering::C_Immediate) && Op.getNode()) { 5608 assert(OpInfo.Codes[i].size() == 1 && 5609 "Unhandled multi-letter 'other' constraint"); 5610 std::vector<SDValue> ResultOps; 5611 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 5612 ResultOps, *DAG); 5613 if (!ResultOps.empty()) { 5614 BestType = CType; 5615 BestIdx = i; 5616 break; 5617 } 5618 } 5619 5620 // Things with matching constraints can only be registers, per gcc 5621 // documentation. This mainly affects "g" constraints. 5622 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 5623 continue; 5624 5625 // This constraint letter is more general than the previous one, use it. 5626 int Generality = getConstraintGenerality(CType); 5627 if (Generality > BestGenerality) { 5628 BestType = CType; 5629 BestIdx = i; 5630 BestGenerality = Generality; 5631 } 5632 } 5633 5634 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 5635 OpInfo.ConstraintType = BestType; 5636 } 5637 5638 /// Determines the constraint code and constraint type to use for the specific 5639 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 5640 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 5641 SDValue Op, 5642 SelectionDAG *DAG) const { 5643 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 5644 5645 // Single-letter constraints ('r') are very common. 5646 if (OpInfo.Codes.size() == 1) { 5647 OpInfo.ConstraintCode = OpInfo.Codes[0]; 5648 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5649 } else { 5650 ChooseConstraint(OpInfo, *this, Op, DAG); 5651 } 5652 5653 // 'X' matches anything. 5654 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 5655 // Constants are handled elsewhere. For Functions, the type here is the 5656 // type of the result, which is not what we want to look at; leave them 5657 // alone. 5658 Value *v = OpInfo.CallOperandVal; 5659 if (isa<ConstantInt>(v) || isa<Function>(v)) { 5660 return; 5661 } 5662 5663 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) { 5664 OpInfo.ConstraintCode = "i"; 5665 return; 5666 } 5667 5668 // Otherwise, try to resolve it to something we know about by looking at 5669 // the actual operand type. 5670 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5671 OpInfo.ConstraintCode = Repl; 5672 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5673 } 5674 } 5675 } 5676 5677 /// Given an exact SDIV by a constant, create a multiplication 5678 /// with the multiplicative inverse of the constant. 
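/// As a worked example of the Newton-Raphson loop below (illustrative, 8-bit):
/// for Divisor = 3 the iteration Factor *= 2 - Divisor * Factor converges
/// quadratically, 3 -> 235 -> 171, and 3 * 171 = 513 == 1 (mod 256), so an
/// exact sdiv by 3 becomes a multiply by 171 (with no pre-shift, since 3 is odd).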
5679 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5680 const SDLoc &dl, SelectionDAG &DAG, 5681 SmallVectorImpl<SDNode *> &Created) { 5682 SDValue Op0 = N->getOperand(0); 5683 SDValue Op1 = N->getOperand(1); 5684 EVT VT = N->getValueType(0); 5685 EVT SVT = VT.getScalarType(); 5686 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5687 EVT ShSVT = ShVT.getScalarType(); 5688 5689 bool UseSRA = false; 5690 SmallVector<SDValue, 16> Shifts, Factors; 5691 5692 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5693 if (C->isZero()) 5694 return false; 5695 APInt Divisor = C->getAPIntValue(); 5696 unsigned Shift = Divisor.countTrailingZeros(); 5697 if (Shift) { 5698 Divisor.ashrInPlace(Shift); 5699 UseSRA = true; 5700 } 5701 // Calculate the multiplicative inverse, using Newton's method. 5702 APInt t; 5703 APInt Factor = Divisor; 5704 while ((t = Divisor * Factor) != 1) 5705 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5706 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5707 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5708 return true; 5709 }; 5710 5711 // Collect all magic values from the build vector. 5712 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5713 return SDValue(); 5714 5715 SDValue Shift, Factor; 5716 if (Op1.getOpcode() == ISD::BUILD_VECTOR) { 5717 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5718 Factor = DAG.getBuildVector(VT, dl, Factors); 5719 } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) { 5720 assert(Shifts.size() == 1 && Factors.size() == 1 && 5721 "Expected matchUnaryPredicate to return one element for scalable " 5722 "vectors"); 5723 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5724 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5725 } else { 5726 assert(isa<ConstantSDNode>(Op1) && "Expected a constant"); 5727 Shift = Shifts[0]; 5728 Factor = Factors[0]; 5729 } 5730 5731 SDValue Res = Op0; 5732 5733 // Shift the value upfront if it is even, so the LSB is one. 5734 if (UseSRA) { 5735 // TODO: For UDIV use SRL instead of SRA. 5736 SDNodeFlags Flags; 5737 Flags.setExact(true); 5738 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5739 Created.push_back(Res.getNode()); 5740 } 5741 5742 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5743 } 5744 5745 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5746 SelectionDAG &DAG, 5747 SmallVectorImpl<SDNode *> &Created) const { 5748 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5749 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5750 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5751 return SDValue(N, 0); // Lower SDIV as SDIV 5752 return SDValue(); 5753 } 5754 5755 SDValue 5756 TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor, 5757 SelectionDAG &DAG, 5758 SmallVectorImpl<SDNode *> &Created) const { 5759 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5760 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5761 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5762 return SDValue(N, 0); // Lower SREM as SREM 5763 return SDValue(); 5764 } 5765 5766 /// Given an ISD::SDIV node expressing a divide by constant, 5767 /// return a DAG expression to select that will generate the same value by 5768 /// multiplying by a magic number. 5769 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 
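/// For example (Hacker's Delight, 32-bit sdiv by 7): the magic constant is
/// M = 0x92492493 with a shift of 2. Since d > 0 and M < 0, the numerator is
/// added back after the high multiply, the result is shifted right
/// arithmetically by 2, and the sign bit is added to round towards zero.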
5770 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5771 bool IsAfterLegalization, 5772 SmallVectorImpl<SDNode *> &Created) const { 5773 SDLoc dl(N); 5774 EVT VT = N->getValueType(0); 5775 EVT SVT = VT.getScalarType(); 5776 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5777 EVT ShSVT = ShVT.getScalarType(); 5778 unsigned EltBits = VT.getScalarSizeInBits(); 5779 EVT MulVT; 5780 5781 // Check to see if we can do this. 5782 // FIXME: We should be more aggressive here. 5783 if (!isTypeLegal(VT)) { 5784 // Limit this to simple scalars for now. 5785 if (VT.isVector() || !VT.isSimple()) 5786 return SDValue(); 5787 5788 // If this type will be promoted to a large enough type with a legal 5789 // multiply operation, we can go ahead and do this transform. 5790 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5791 return SDValue(); 5792 5793 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5794 if (MulVT.getSizeInBits() < (2 * EltBits) || 5795 !isOperationLegal(ISD::MUL, MulVT)) 5796 return SDValue(); 5797 } 5798 5799 // If the sdiv has an 'exact' bit we can use a simpler lowering. 5800 if (N->getFlags().hasExact()) 5801 return BuildExactSDIV(*this, N, dl, DAG, Created); 5802 5803 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 5804 5805 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5806 if (C->isZero()) 5807 return false; 5808 5809 const APInt &Divisor = C->getAPIntValue(); 5810 SignedDivisionByConstantInfo magics = SignedDivisionByConstantInfo::get(Divisor); 5811 int NumeratorFactor = 0; 5812 int ShiftMask = -1; 5813 5814 if (Divisor.isOne() || Divisor.isAllOnes()) { 5815 // If d is +1/-1, we just multiply the numerator by +1/-1. 5816 NumeratorFactor = Divisor.getSExtValue(); 5817 magics.Magic = 0; 5818 magics.ShiftAmount = 0; 5819 ShiftMask = 0; 5820 } else if (Divisor.isStrictlyPositive() && magics.Magic.isNegative()) { 5821 // If d > 0 and m < 0, add the numerator. 5822 NumeratorFactor = 1; 5823 } else if (Divisor.isNegative() && magics.Magic.isStrictlyPositive()) { 5824 // If d < 0 and m > 0, subtract the numerator. 5825 NumeratorFactor = -1; 5826 } 5827 5828 MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT)); 5829 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 5830 Shifts.push_back(DAG.getConstant(magics.ShiftAmount, dl, ShSVT)); 5831 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 5832 return true; 5833 }; 5834 5835 SDValue N0 = N->getOperand(0); 5836 SDValue N1 = N->getOperand(1); 5837 5838 // Collect the shifts / magic values from each element. 
5839 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 5840 return SDValue(); 5841 5842 SDValue MagicFactor, Factor, Shift, ShiftMask; 5843 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 5844 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5845 Factor = DAG.getBuildVector(VT, dl, Factors); 5846 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5847 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5848 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 5849 assert(MagicFactors.size() == 1 && Factors.size() == 1 && 5850 Shifts.size() == 1 && ShiftMasks.size() == 1 && 5851 "Expected matchUnaryPredicate to return one element for scalable " 5852 "vectors"); 5853 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 5854 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5855 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5856 ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]); 5857 } else { 5858 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 5859 MagicFactor = MagicFactors[0]; 5860 Factor = Factors[0]; 5861 Shift = Shifts[0]; 5862 ShiftMask = ShiftMasks[0]; 5863 } 5864 5865 // Multiply the numerator (operand 0) by the magic value. 5866 // FIXME: We should support doing a MUL in a wider type. 5867 auto GetMULHS = [&](SDValue X, SDValue Y) { 5868 // If the type isn't legal, use a wider mul of the type calculated 5869 // earlier. 5870 if (!isTypeLegal(VT)) { 5871 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X); 5872 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, Y); 5873 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 5874 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 5875 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 5876 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 5877 } 5878 5879 if (isOperationLegalOrCustom(ISD::MULHS, VT, IsAfterLegalization)) 5880 return DAG.getNode(ISD::MULHS, dl, VT, X, Y); 5881 if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT, IsAfterLegalization)) { 5882 SDValue LoHi = 5883 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5884 return SDValue(LoHi.getNode(), 1); 5885 } 5886 return SDValue(); 5887 }; 5888 5889 SDValue Q = GetMULHS(N0, MagicFactor); 5890 if (!Q) 5891 return SDValue(); 5892 5893 Created.push_back(Q.getNode()); 5894 5895 // (Optionally) Add/subtract the numerator using Factor. 5896 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5897 Created.push_back(Factor.getNode()); 5898 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5899 Created.push_back(Q.getNode()); 5900 5901 // Shift right algebraic by shift value. 5902 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5903 Created.push_back(Q.getNode()); 5904 5905 // Extract the sign bit, mask it and add it to the quotient. 5906 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5907 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5908 Created.push_back(T.getNode()); 5909 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5910 Created.push_back(T.getNode()); 5911 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5912 } 5913 5914 /// Given an ISD::UDIV node expressing a divide by constant, 5915 /// return a DAG expression to select that will generate the same value by 5916 /// multiplying by a magic number. 5917 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
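/// For example (Hacker's Delight, 32-bit udiv by 7): M = 0x24924925 with the
/// "add" fixup required, so PostShift below becomes ShiftAmount - 1 = 2 and
/// the NPQ path computes q = MULHU(n, M); t = (n - q) >> 1; q = (t + q) >> 2,
/// keeping every intermediate value within 32 bits.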
5918 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5919 bool IsAfterLegalization, 5920 SmallVectorImpl<SDNode *> &Created) const { 5921 SDLoc dl(N); 5922 EVT VT = N->getValueType(0); 5923 EVT SVT = VT.getScalarType(); 5924 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5925 EVT ShSVT = ShVT.getScalarType(); 5926 unsigned EltBits = VT.getScalarSizeInBits(); 5927 EVT MulVT; 5928 5929 // Check to see if we can do this. 5930 // FIXME: We should be more aggressive here. 5931 if (!isTypeLegal(VT)) { 5932 // Limit this to simple scalars for now. 5933 if (VT.isVector() || !VT.isSimple()) 5934 return SDValue(); 5935 5936 // If this type will be promoted to a large enough type with a legal 5937 // multiply operation, we can go ahead and do this transform. 5938 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger) 5939 return SDValue(); 5940 5941 MulVT = getTypeToTransformTo(*DAG.getContext(), VT); 5942 if (MulVT.getSizeInBits() < (2 * EltBits) || 5943 !isOperationLegal(ISD::MUL, MulVT)) 5944 return SDValue(); 5945 } 5946 5947 bool UseNPQ = false; 5948 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5949 5950 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5951 if (C->isZero()) 5952 return false; 5953 // FIXME: We should use a narrower constant when the upper 5954 // bits are known to be zero. 5955 const APInt &Divisor = C->getAPIntValue(); 5956 UnsignedDivisionByConstantInfo magics = 5957 UnsignedDivisionByConstantInfo::get(Divisor); 5958 unsigned PreShift = 0, PostShift = 0; 5959 5960 // If the divisor is even, we can avoid using the expensive fixup by 5961 // shifting the divided value upfront. 5962 if (magics.IsAdd && !Divisor[0]) { 5963 PreShift = Divisor.countTrailingZeros(); 5964 // Get magic number for the shifted divisor. 5965 magics = 5966 UnsignedDivisionByConstantInfo::get(Divisor.lshr(PreShift), PreShift); 5967 assert(!magics.IsAdd && "Should use cheap fixup now"); 5968 } 5969 5970 bool SelNPQ; 5971 if (!magics.IsAdd || Divisor.isOne()) { 5972 assert(magics.ShiftAmount < Divisor.getBitWidth() && 5973 "We shouldn't generate an undefined shift!"); 5974 PostShift = magics.ShiftAmount; 5975 SelNPQ = false; 5976 } else { 5977 PostShift = magics.ShiftAmount - 1; 5978 SelNPQ = true; 5979 } 5980 5981 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5982 MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT)); 5983 NPQFactors.push_back( 5984 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5985 : APInt::getZero(EltBits), 5986 dl, SVT)); 5987 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5988 UseNPQ |= SelNPQ; 5989 return true; 5990 }; 5991 5992 SDValue N0 = N->getOperand(0); 5993 SDValue N1 = N->getOperand(1); 5994 5995 // Collect the shifts/magic values from each element.
5996 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5997 return SDValue(); 5998 5999 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 6000 if (N1.getOpcode() == ISD::BUILD_VECTOR) { 6001 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 6002 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 6003 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 6004 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 6005 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { 6006 assert(PreShifts.size() == 1 && MagicFactors.size() == 1 && 6007 NPQFactors.size() == 1 && PostShifts.size() == 1 && 6008 "Expected matchUnaryPredicate to return one element for scalable vectors"); 6009 PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]); 6010 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]); 6011 NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]); 6012 PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]); 6013 } else { 6014 assert(isa<ConstantSDNode>(N1) && "Expected a constant"); 6015 PreShift = PreShifts[0]; 6016 MagicFactor = MagicFactors[0]; 6017 PostShift = PostShifts[0]; 6018 } 6019 6020 SDValue Q = N0; 6021 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 6022 Created.push_back(Q.getNode()); 6023 6024 // FIXME: We should support doing a MUL in a wider type. 6025 auto GetMULHU = [&](SDValue X, SDValue Y) { 6026 // If the type isn't legal, use a wider mul of the type calculated 6027 // earlier. 6028 if (!isTypeLegal(VT)) { 6029 X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X); 6030 Y = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, Y); 6031 Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y); 6032 Y = DAG.getNode(ISD::SRL, dl, MulVT, Y, 6033 DAG.getShiftAmountConstant(EltBits, MulVT, dl)); 6034 return DAG.getNode(ISD::TRUNCATE, dl, VT, Y); 6035 } 6036 6037 if (isOperationLegalOrCustom(ISD::MULHU, VT, IsAfterLegalization)) 6038 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 6039 if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT, IsAfterLegalization)) { 6040 SDValue LoHi = 6041 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 6042 return SDValue(LoHi.getNode(), 1); 6043 } 6044 return SDValue(); // No mulhu or equivalent 6045 }; 6046 6047 // Multiply the numerator (operand 0) by the magic value. 6048 Q = GetMULHU(Q, MagicFactor); 6049 if (!Q) 6050 return SDValue(); 6051 6052 Created.push_back(Q.getNode()); 6053 6054 if (UseNPQ) { 6055 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 6056 Created.push_back(NPQ.getNode()); 6057 6058 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 6059 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 6060 if (VT.isVector()) 6061 NPQ = GetMULHU(NPQ, NPQFactor); 6062 else 6063 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 6064 6065 Created.push_back(NPQ.getNode()); 6066 6067 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 6068 Created.push_back(Q.getNode()); 6069 } 6070 6071 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 6072 Created.push_back(Q.getNode()); 6073 6074 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6075 6076 SDValue One = DAG.getConstant(1, dl, VT); 6077 SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ); 6078 return DAG.getSelect(dl, VT, IsOne, N0, Q); 6079 } 6080 6081 /// If all values in Values that *don't* match the predicate are the same 6082 /// 'splat' value, then replace all values with that splat value.
6083 /// Else, if AlternativeReplacement was provided, then replace all values that
6084 /// do match the predicate with the AlternativeReplacement value.
6085 static void
6086 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
6087 std::function<bool(SDValue)> Predicate,
6088 SDValue AlternativeReplacement = SDValue()) {
6089 SDValue Replacement;
6090 // Is there a value for which the Predicate does *NOT* match? What is it?
6091 auto SplatValue = llvm::find_if_not(Values, Predicate);
6092 if (SplatValue != Values.end()) {
6093 // Does Values consist only of SplatValue and values matching the Predicate?
6094 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
6095 return Value == *SplatValue || Predicate(Value);
6096 })) // Then we shall replace values matching the predicate with SplatValue.
6097 Replacement = *SplatValue;
6098 }
6099 if (!Replacement) {
6100 // Oops, we did not find the "baseline" splat value.
6101 if (!AlternativeReplacement)
6102 return; // Nothing to do.
6103 // Let's replace with the provided value then.
6104 Replacement = AlternativeReplacement;
6105 }
6106 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
6107 }
6108
6109 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
6110 /// where the divisor is constant and the comparison target is zero,
6111 /// return a DAG expression that will generate the same comparison result
6112 /// using only multiplications, additions and shifts/rotations.
6113 /// Ref: "Hacker's Delight" 10-17.
6114 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
6115 SDValue CompTargetNode,
6116 ISD::CondCode Cond,
6117 DAGCombinerInfo &DCI,
6118 const SDLoc &DL) const {
6119 SmallVector<SDNode *, 5> Built;
6120 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
6121 DCI, DL, Built)) {
6122 for (SDNode *N : Built)
6123 DCI.AddToWorklist(N);
6124 return Folded;
6125 }
6126
6127 return SDValue();
6128 }
6129
6130 SDValue
6131 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
6132 SDValue CompTargetNode, ISD::CondCode Cond,
6133 DAGCombinerInfo &DCI, const SDLoc &DL,
6134 SmallVectorImpl<SDNode *> &Created) const {
6135 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
6136 // - D must be constant, with D = D0 * 2^K where D0 is odd
6137 // - P is the multiplicative inverse of D0 modulo 2^W
6138 // - Q = floor(((2^W) - 1) / D)
6139 // where W is the width of the common type of N and D.
6140 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
6141 "Only applicable for (in)equality comparisons.");
6142
6143 SelectionDAG &DAG = DCI.DAG;
6144
6145 EVT VT = REMNode.getValueType();
6146 EVT SVT = VT.getScalarType();
6147 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
6148 EVT ShSVT = ShVT.getScalarType();
6149
6150 // If MUL is unavailable, we cannot proceed in any case.
6151 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
6152 return SDValue();
6153
6154 bool ComparingWithAllZeros = true;
6155 bool AllComparisonsWithNonZerosAreTautological = true;
6156 bool HadTautologicalLanes = false;
6157 bool AllLanesAreTautological = true;
6158 bool HadEvenDivisor = false;
6159 bool AllDivisorsArePowerOfTwo = true;
6160 bool HadTautologicalInvertedLanes = false;
6161 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;
6162
6163 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
6164 // Division by 0 is UB. Leave it to be constant-folded elsewhere.
6165 if (CDiv->isZero())
6166 return false;
6167
6168 const APInt &D = CDiv->getAPIntValue();
6169 const APInt &Cmp = CCmp->getAPIntValue();
6170
6171 ComparingWithAllZeros &= Cmp.isZero();
6172
6173 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
6174 // if C2 is not less than C1, the comparison is always false.
6175 // But we will only be able to produce the comparison that will give the
6176 // opposite tautological answer. So this lane would need to be fixed up.
6177 bool TautologicalInvertedLane = D.ule(Cmp);
6178 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
6179
6180 // If all lanes are tautological (either all divisors are ones, or divisor
6181 // is not greater than the constant we are comparing with),
6182 // we will prefer to avoid the fold.
6183 bool TautologicalLane = D.isOne() || TautologicalInvertedLane;
6184 HadTautologicalLanes |= TautologicalLane;
6185 AllLanesAreTautological &= TautologicalLane;
6186
6187 // If we are comparing with non-zero, we'll need to subtract said
6188 // comparison value from the LHS. But there is no point in doing that if
6189 // every lane where we are comparing with non-zero is tautological.
6190 if (!Cmp.isZero())
6191 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
6192
6193 // Decompose D into D0 * 2^K
6194 unsigned K = D.countTrailingZeros();
6195 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
6196 APInt D0 = D.lshr(K);
6197
6198 // D is even if it has trailing zeros.
6199 HadEvenDivisor |= (K != 0);
6200 // D is a power-of-two if D0 is one.
6201 // If all divisors are power-of-two, we will prefer to avoid the fold.
6202 AllDivisorsArePowerOfTwo &= D0.isOne();
6203
6204 // P = inv(D0, 2^W)
6205 // 2^W requires W + 1 bits, so we have to extend and then truncate.
6206 unsigned W = D.getBitWidth();
6207 APInt P = D0.zext(W + 1)
6208 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
6209 .trunc(W);
6210 assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
6211 assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");
6212
6213 // Q = floor((2^W - 1) u/ D)
6214 // R = ((2^W - 1) u% D)
6215 APInt Q, R;
6216 APInt::udivrem(APInt::getAllOnes(W), D, Q, R);
6217
6218 // If we are comparing with zero, then that comparison constant is okay,
6219 // else it may need to be one less than that.
6220 if (Cmp.ugt(R))
6221 Q -= 1;
6222
6223 assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
6224 "We are expecting that K is always less than all-ones for ShSVT");
6225
6226 // If the lane is tautological the result can be constant-folded.
6227 if (TautologicalLane) {
6228 // Set P and K to bogus values so we can try to splat them.
6229 P = 0;
6230 K = -1;
6231 // And ensure that the comparison constant is tautological,
6232 // i.e. it will always compare true/false.
6233 Q = -1;
6234 }
6235
6236 PAmts.push_back(DAG.getConstant(P, DL, SVT));
6237 KAmts.push_back(
6238 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
6239 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
6240 return true;
6241 };
6242
6243 SDValue N = REMNode.getOperand(0);
6244 SDValue D = REMNode.getOperand(1);
6245
6246 // Collect the values from each element.
6247 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
6248 return SDValue();
6249
6250 // If all lanes are tautological, the result can be constant-folded.
6251 if (AllLanesAreTautological)
6252 return SDValue();
6253
6254 // If this is a urem by a power-of-two, avoid the fold since it can be
6255 // best implemented as a bit test.
6256 if (AllDivisorsArePowerOfTwo)
6257 return SDValue();
6258
6259 SDValue PVal, KVal, QVal;
6260 if (D.getOpcode() == ISD::BUILD_VECTOR) {
6261 if (HadTautologicalLanes) {
6262 // Try to turn PAmts into a splat, since we don't care about the values
6263 // that are currently '0'. If we can't, just keep the '0's.
6264 turnVectorIntoSplatVector(PAmts, isNullConstant);
6265 // Try to turn KAmts into a splat, since we don't care about the values
6266 // that are currently '-1'. If we can't, change them to '0's.
6267 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
6268 DAG.getConstant(0, DL, ShSVT));
6269 }
6270
6271 PVal = DAG.getBuildVector(VT, DL, PAmts);
6272 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
6273 QVal = DAG.getBuildVector(VT, DL, QAmts);
6274 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
6275 assert(PAmts.size() == 1 && KAmts.size() == 1 && QAmts.size() == 1 &&
6276 "Expected matchBinaryPredicate to return one element for "
6277 "SPLAT_VECTORs");
6278 PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
6279 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
6280 QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
6281 } else {
6282 PVal = PAmts[0];
6283 KVal = KAmts[0];
6284 QVal = QAmts[0];
6285 }
6286
6287 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
6288 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::SUB, VT))
6289 return SDValue(); // FIXME: Could/should use `ISD::ADD`?
6290 assert(CompTargetNode.getValueType() == N.getValueType() &&
6291 "Expecting that the types on LHS and RHS of comparisons match.");
6292 N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
6293 }
6294
6295 // (mul N, P)
6296 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
6297 Created.push_back(Op0.getNode());
6298
6299 // Rotate right only if any divisor was even. We avoid rotates for all-odd
6300 // divisors as a performance improvement, since rotating by 0 is a no-op.
6301 if (HadEvenDivisor) {
6302 // We need ROTR to do this.
6303 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
6304 return SDValue();
6305 // UREM: (rotr (mul N, P), K)
6306 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
6307 Created.push_back(Op0.getNode());
6308 }
6309
6310 // UREM: (setule/setugt (rotr (mul N, P), K), Q)
6311 SDValue NewCC =
6312 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
6313 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
6314 if (!HadTautologicalInvertedLanes)
6315 return NewCC;
6316
6317 // If any lanes previously compared always-false, the NewCC will give
6318 // always-true result for them, so we need to fix up those lanes.
6319 // Or the other way around for inequality predicate.
6320 assert(VT.isVector() && "Can/should only get here for vectors.");
6321 Created.push_back(NewCC.getNode());
6322
6323 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
6324 // if C2 is not less than C1, the comparison is always false.
6325 // But we have produced the comparison that will give the
6326 // opposite tautological answer. So these lanes would need to be fixed up.
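// For example, for i32 `N u% 6 == 0` the code above produced
//   (rotr (mul N, 0xAAAAAAAB), 1) u<= 0x2AAAAAAA
// with P = inv(3, 2^32), K = 1 and Q = floor((2^32 - 1) / 6). Any lane whose
// divisor was u<= the compared-with constant instead answered the inverted
// question, and is corrected below.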
6327 SDValue TautologicalInvertedChannels =
6328 DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
6329 Created.push_back(TautologicalInvertedChannels.getNode());
6330
6331 // NOTE: we avoid letting illegal types through even if we're before legalize
6332 // ops; legalization has a hard time producing good code for this.
6333 if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
6334 // If we have a vector select, let's replace the comparison results in the
6335 // affected lanes with the correct tautological result.
6336 SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
6337 DL, SETCCVT, SETCCVT);
6338 return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
6339 Replacement, NewCC);
6340 }
6341
6342 // Else, we can just invert the comparison result in the appropriate lanes.
6343 //
6344 // NOTE: see the note on VSELECT above.
6345 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
6346 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
6347 TautologicalInvertedChannels);
6348
6349 return SDValue(); // Don't know how to lower.
6350 }
6351
6352 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
6353 /// where the divisor is constant and the comparison target is zero,
6354 /// return a DAG expression that will generate the same comparison result
6355 /// using only multiplications, additions and shifts/rotations.
6356 /// Ref: "Hacker's Delight" 10-17.
6357 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
6358 SDValue CompTargetNode,
6359 ISD::CondCode Cond,
6360 DAGCombinerInfo &DCI,
6361 const SDLoc &DL) const {
6362 SmallVector<SDNode *, 7> Built;
6363 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
6364 DCI, DL, Built)) {
6365 assert(Built.size() <= 7 && "Max size prediction failed.");
6366 for (SDNode *N : Built)
6367 DCI.AddToWorklist(N);
6368 return Folded;
6369 }
6370
6371 return SDValue();
6372 }
6373
6374 SDValue
6375 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
6376 SDValue CompTargetNode, ISD::CondCode Cond,
6377 DAGCombinerInfo &DCI, const SDLoc &DL,
6378 SmallVectorImpl<SDNode *> &Created) const {
6379 // Fold:
6380 // (seteq/ne (srem N, D), 0)
6381 // To:
6382 // (setule/ugt (rotr (add (mul N, P), A), K), Q)
6383 //
6384 // - D must be constant, with D = D0 * 2^K where D0 is odd
6385 // - P is the multiplicative inverse of D0 modulo 2^W
6386 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^K)))
6387 // - Q = floor((2 * A) / (2^K))
6388 // where W is the width of the common type of N and D.
6389 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
6390 "Only applicable for (in)equality comparisons.");
6391
6392 SelectionDAG &DAG = DCI.DAG;
6393
6394 EVT VT = REMNode.getValueType();
6395 EVT SVT = VT.getScalarType();
6396 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
6397 EVT ShSVT = ShVT.getScalarType();
6398
6399 // If we are after ops legalization, and MUL is unavailable, we cannot
6400 // proceed.
6401 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
6402 return SDValue();
6403
6404 // TODO: Could support comparing with non-zero too.
6405 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
6406 if (!CompTarget || !CompTarget->isZero())
6407 return SDValue();
6408
6409 bool HadIntMinDivisor = false;
6410 bool HadOneDivisor = false;
6411 bool AllDivisorsAreOnes = true;
6412 bool HadEvenDivisor = false;
6413 bool NeedToApplyOffset = false;
6414 bool AllDivisorsArePowerOfTwo = true;
6415 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;
6416
6417 auto BuildSREMPattern = [&](ConstantSDNode *C) {
6418 // Division by 0 is UB. Leave it to be constant-folded elsewhere.
6419 if (C->isZero())
6420 return false;
6421
6422 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.
6423
6424 // WARNING: this fold is only valid for positive divisors!
6425 APInt D = C->getAPIntValue();
6426 if (D.isNegative())
6427 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C`
6428
6429 HadIntMinDivisor |= D.isMinSignedValue();
6430
6431 // If all divisors are ones, we will prefer to avoid the fold.
6432 HadOneDivisor |= D.isOne();
6433 AllDivisorsAreOnes &= D.isOne();
6434
6435 // Decompose D into D0 * 2^K
6436 unsigned K = D.countTrailingZeros();
6437 assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
6438 APInt D0 = D.lshr(K);
6439
6440 if (!D.isMinSignedValue()) {
6441 // D is even if it has trailing zeros; unless it's INT_MIN, in which case
6442 // we don't care about this lane in this fold, we'll special-handle it.
6443 HadEvenDivisor |= (K != 0);
6444 }
6445
6446 // D is a power-of-two if D0 is one. This includes INT_MIN.
6447 // If all divisors are power-of-two, we will prefer to avoid the fold.
6448 AllDivisorsArePowerOfTwo &= D0.isOne();
6449
6450 // P = inv(D0, 2^W)
6451 // 2^W requires W + 1 bits, so we have to extend and then truncate.
6452 unsigned W = D.getBitWidth();
6453 APInt P = D0.zext(W + 1)
6454 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
6455 .trunc(W);
6456 assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
6457 assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");
6458
6459 // A = floor((2^(W - 1) - 1) / D0) & -2^K
6460 APInt A = APInt::getSignedMaxValue(W).udiv(D0);
6461 A.clearLowBits(K);
6462
6463 if (!D.isMinSignedValue()) {
6464 // If the divisor is INT_MIN, we don't care about this lane in this fold;
6465 // we'll special-handle it.
6466 NeedToApplyOffset |= A != 0;
6467 }
6468
6469 // Q = floor((2 * A) / (2^K))
6470 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));
6471
6472 assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
6473 "We are expecting that A is always less than all-ones for SVT");
6474 assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
6475 "We are expecting that K is always less than all-ones for ShSVT");
6476
6477 // If the divisor is 1 the result can be constant-folded. Likewise, we
6478 // don't care about INT_MIN lanes, those can be set to undef if appropriate.
6479 if (D.isOne()) {
6480 // Set P, A and K to bogus values so we can try to splat them.
6481 P = 0;
6482 A = -1;
6483 K = -1;
6484
6485 // x ?% 1 == 0 <--> true <--> x u<= -1
6486 Q = -1;
6487 }
6488
6489 PAmts.push_back(DAG.getConstant(P, DL, SVT));
6490 AAmts.push_back(DAG.getConstant(A, DL, SVT));
6491 KAmts.push_back(
6492 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
6493 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
6494 return true;
6495 };
6496
6497 SDValue N = REMNode.getOperand(0);
6498 SDValue D = REMNode.getOperand(1);
6499
6500 // Collect the values from each element.
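// For instance, for an i32 `N s% 3 == 0` the lambda above yields
//   P = 0xAAAAAAAB  (inverse of 3 mod 2^32)
//   A = 0x2AAAAAAA  (floor((2^31 - 1) / 3); K = 0, so no bits are cleared)
//   Q = 0x55555554  (2 * A)
// so the fold built below is (N * 0xAAAAAAAB + 0x2AAAAAAA) u<= 0x55555554.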
6501 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
6502 return SDValue();
6503
6504 // If this is an srem by one, avoid the fold since it can be constant-folded.
6505 if (AllDivisorsAreOnes)
6506 return SDValue();
6507
6508 // If this is an srem by a power-of-two (including INT_MIN), avoid the fold
6509 // since it can be best implemented as a bit test.
6510 if (AllDivisorsArePowerOfTwo)
6511 return SDValue();
6512
6513 SDValue PVal, AVal, KVal, QVal;
6514 if (D.getOpcode() == ISD::BUILD_VECTOR) {
6515 if (HadOneDivisor) {
6516 // Try to turn PAmts into a splat, since we don't care about the values
6517 // that are currently '0'. If we can't, just keep the '0's.
6518 turnVectorIntoSplatVector(PAmts, isNullConstant);
6519 // Try to turn AAmts into a splat, since we don't care about the
6520 // values that are currently '-1'. If we can't, change them to '0's.
6521 turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
6522 DAG.getConstant(0, DL, SVT));
6523 // Try to turn KAmts into a splat, since we don't care about the values
6524 // that are currently '-1'. If we can't, change them to '0's.
6525 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
6526 DAG.getConstant(0, DL, ShSVT));
6527 }
6528
6529 PVal = DAG.getBuildVector(VT, DL, PAmts);
6530 AVal = DAG.getBuildVector(VT, DL, AAmts);
6531 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
6532 QVal = DAG.getBuildVector(VT, DL, QAmts);
6533 } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
6534 assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 &&
6535 QAmts.size() == 1 &&
6536 "Expected matchUnaryPredicate to return one element for scalable "
6537 "vectors");
6538 PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
6539 AVal = DAG.getSplatVector(VT, DL, AAmts[0]);
6540 KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
6541 QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
6542 } else {
6543 assert(isa<ConstantSDNode>(D) && "Expected a constant");
6544 PVal = PAmts[0];
6545 AVal = AAmts[0];
6546 KVal = KAmts[0];
6547 QVal = QAmts[0];
6548 }
6549
6550 // (mul N, P)
6551 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
6552 Created.push_back(Op0.getNode());
6553
6554 if (NeedToApplyOffset) {
6555 // We need ADD to do this.
6556 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ADD, VT))
6557 return SDValue();
6558
6559 // (add (mul N, P), A)
6560 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
6561 Created.push_back(Op0.getNode());
6562 }
6563
6564 // Rotate right only if any divisor was even. We avoid rotates for all-odd
6565 // divisors as a performance improvement, since rotating by 0 is a no-op.
6566 if (HadEvenDivisor) {
6567 // We need ROTR to do this.
6568 if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
6569 return SDValue();
6570 // SREM: (rotr (add (mul N, P), A), K)
6571 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
6572 Created.push_back(Op0.getNode());
6573 }
6574
6575 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
6576 SDValue Fold =
6577 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
6578 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
6579
6580 // If we didn't have lanes with INT_MIN divisor, then we're done.
6581 if (!HadIntMinDivisor)
6582 return Fold;
6583
6584 // That fold is only valid for positive divisors, which effectively means it
6585 // is invalid for INT_MIN divisors. So if we have such a lane, we must fix
6586 // up the results for those lanes.
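// (For i32, such a lane computes N s% (-2^31): that remainder is zero iff N
// is 0 or -2^31, i.e. iff the low 31 bits of N are zero; this is the cheap
// mask-and-compare test blended in below.)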
6587 assert(VT.isVector() && "Can/should only get here for vectors.");
6588
6589 // NOTE: we avoid letting illegal types through even if we're before legalize
6590 // ops; legalization has a hard time producing good code for the code that
6591 // follows.
6592 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) ||
6593 !isOperationLegalOrCustom(ISD::AND, VT) ||
6594 !isOperationLegalOrCustom(Cond, VT) ||
6595 !isOperationLegalOrCustom(ISD::VSELECT, SETCCVT))
6596 return SDValue();
6597
6598 Created.push_back(Fold.getNode());
6599
6600 SDValue IntMin = DAG.getConstant(
6601 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
6602 SDValue IntMax = DAG.getConstant(
6603 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
6604 SDValue Zero =
6605 DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT);
6606
6607 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded.
6608 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
6609 Created.push_back(DivisorIsIntMin.getNode());
6610
6611 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0
6612 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
6613 Created.push_back(Masked.getNode());
6614 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
6615 Created.push_back(MaskedIsZero.getNode());
6616
6617 // To produce the final result we blend two vectors: 'Fold' and
6618 // 'MaskedIsZero'. If the divisor for a channel was *NOT* INT_MIN, we pick
6619 // from 'Fold', else we pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
6620 // constant-folded, the select can get lowered to a shuffle with a constant mask.
6621 SDValue Blended = DAG.getNode(ISD::VSELECT, DL, SETCCVT, DivisorIsIntMin,
6622 MaskedIsZero, Fold);
6623
6624 return Blended;
6625 }
6626
6627 bool TargetLowering::
6628 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
6629 if (!isa<ConstantSDNode>(Op.getOperand(0))) {
6630 DAG.getContext()->emitError("argument to '__builtin_return_address' must "
6631 "be a constant integer");
6632 return true;
6633 }
6634
6635 return false;
6636 }
6637
6638 SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
6639 const DenormalMode &Mode) const {
6640 SDLoc DL(Op);
6641 EVT VT = Op.getValueType();
6642 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
6643 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
6644 // Test for a denormal input, to avoid producing a wrong estimate.
6645 if (Mode.Input == DenormalMode::IEEE) {
6646 // This is specifically a check for the handling of denormal inputs,
6647 // not the result.
6648
6649 // Test = fabs(X) < SmallestNormal
6650 const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
6651 APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
6652 SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
6653 SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
6654 return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
6655 }
6656 // Test = X == 0.0
6657 return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
6658 }
6659
6660 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
6661 bool LegalOps, bool OptForSize,
6662 NegatibleCost &Cost,
6663 unsigned Depth) const {
6664 // fneg is removable even if it has multiple uses.
6665 if (Op.getOpcode() == ISD::FNEG) {
6666 Cost = NegatibleCost::Cheaper;
6667 return Op.getOperand(0);
6668 }
6669
6670 // Don't recurse exponentially.
6671 if (Depth > SelectionDAG::MaxRecursionDepth)
6672 return SDValue();
6673
6674 // Pre-increment recursion depth for use in recursive calls.
6675 ++Depth;
6676 const SDNodeFlags Flags = Op->getFlags();
6677 const TargetOptions &Options = DAG.getTarget().Options;
6678 EVT VT = Op.getValueType();
6679 unsigned Opcode = Op.getOpcode();
6680
6681 // Don't allow anything with multiple uses unless we know it is free.
6682 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) {
6683 bool IsFreeExtend = Opcode == ISD::FP_EXTEND &&
6684 isFPExtFree(VT, Op.getOperand(0).getValueType());
6685 if (!IsFreeExtend)
6686 return SDValue();
6687 }
6688
6689 auto RemoveDeadNode = [&](SDValue N) {
6690 if (N && N.getNode()->use_empty())
6691 DAG.RemoveDeadNode(N.getNode());
6692 };
6693
6694 SDLoc DL(Op);
6695
6696 // Because getNegatedExpression can delete nodes, we need a handle to keep
6697 // temporary nodes alive in case the recursion manages to create an identical
6698 // node.
6699 std::list<HandleSDNode> Handles;
6700
6701 switch (Opcode) {
6702 case ISD::ConstantFP: {
6703 // Don't invert constant FP values after legalization unless the target says
6704 // the negated constant is legal.
6705 bool IsOpLegal =
6706 isOperationLegal(ISD::ConstantFP, VT) ||
6707 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
6708 OptForSize);
6709
6710 if (LegalOps && !IsOpLegal)
6711 break;
6712
6713 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
6714 V.changeSign();
6715 SDValue CFP = DAG.getConstantFP(V, DL, VT);
6716
6717 // If we already have the use of the negated floating constant, it is free
6718 // to negate it even if it has multiple uses.
6719 if (!Op.hasOneUse() && CFP.use_empty())
6720 break;
6721 Cost = NegatibleCost::Neutral;
6722 return CFP;
6723 }
6724 case ISD::BUILD_VECTOR: {
6725 // Only permit BUILD_VECTOR of constants.
6726 if (llvm::any_of(Op->op_values(), [&](SDValue N) {
6727 return !N.isUndef() && !isa<ConstantFPSDNode>(N);
6728 }))
6729 break;
6730
6731 bool IsOpLegal =
6732 (isOperationLegal(ISD::ConstantFP, VT) &&
6733 isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
6734 llvm::all_of(Op->op_values(), [&](SDValue N) {
6735 return N.isUndef() ||
6736 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
6737 OptForSize);
6738 });
6739
6740 if (LegalOps && !IsOpLegal)
6741 break;
6742
6743 SmallVector<SDValue, 4> Ops;
6744 for (SDValue C : Op->op_values()) {
6745 if (C.isUndef()) {
6746 Ops.push_back(C);
6747 continue;
6748 }
6749 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
6750 V.changeSign();
6751 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType()));
6752 }
6753 Cost = NegatibleCost::Neutral;
6754 return DAG.getBuildVector(VT, DL, Ops);
6755 }
6756 case ISD::FADD: {
6757 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
6758 break;
6759
6760 // After operation legalization, it might not be legal to create new FSUBs.
6761 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT))
6762 break;
6763 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
6764
6765 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y)
6766 NegatibleCost CostX = NegatibleCost::Expensive;
6767 SDValue NegX =
6768 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
6769 // Prevent this node from being deleted by the next call.
6770 if (NegX)
6771 Handles.emplace_back(NegX);
6772
6773 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X)
6774 NegatibleCost CostY = NegatibleCost::Expensive;
6775 SDValue NegY =
6776 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
6777
6778 // We're done with the handles.
6779 Handles.clear();
6780
6781 // Negate X if its cost is less than or equal to the cost of Y.
6782 if (NegX && (CostX <= CostY)) {
6783 Cost = CostX;
6784 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags);
6785 if (NegY != N)
6786 RemoveDeadNode(NegY);
6787 return N;
6788 }
6789
6790 // Negate Y if it is not expensive.
6791 if (NegY) {
6792 Cost = CostY;
6793 SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags);
6794 if (NegX != N)
6795 RemoveDeadNode(NegX);
6796 return N;
6797 }
6798 break;
6799 }
6800 case ISD::FSUB: {
6801 // We can't turn -(A-B) into B-A when we honor signed zeros.
6802 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
6803 break;
6804
6805 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
6806 // fold (fneg (fsub 0, Y)) -> Y
6807 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true))
6808 if (C->isZero()) {
6809 Cost = NegatibleCost::Cheaper;
6810 return Y;
6811 }
6812
6813 // fold (fneg (fsub X, Y)) -> (fsub Y, X)
6814 Cost = NegatibleCost::Neutral;
6815 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags);
6816 }
6817 case ISD::FMUL:
6818 case ISD::FDIV: {
6819 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
6820
6821 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
6822 NegatibleCost CostX = NegatibleCost::Expensive;
6823 SDValue NegX =
6824 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
6825 // Prevent this node from being deleted by the next call.
6826 if (NegX)
6827 Handles.emplace_back(NegX);
6828
6829 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
6830 NegatibleCost CostY = NegatibleCost::Expensive;
6831 SDValue NegY =
6832 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
6833
6834 // We're done with the handles.
6835 Handles.clear();
6836
6837 // Negate X if its cost is less than or equal to the cost of Y.
6838 if (NegX && (CostX <= CostY)) {
6839 Cost = CostX;
6840 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags);
6841 if (NegY != N)
6842 RemoveDeadNode(NegY);
6843 return N;
6844 }
6845
6846 // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
6847 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
6848 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
6849 break;
6850
6851 // Negate Y if it is not expensive.
6852 if (NegY) {
6853 Cost = CostY;
6854 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags);
6855 if (NegX != N)
6856 RemoveDeadNode(NegX);
6857 return N;
6858 }
6859 break;
6860 }
6861 case ISD::FMA:
6862 case ISD::FMAD: {
6863 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
6864 break;
6865
6866 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2);
6867 NegatibleCost CostZ = NegatibleCost::Expensive;
6868 SDValue NegZ =
6869 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth);
6870 // Give up if we fail to negate Z.
6871 if (!NegZ)
6872 break;
6873
6874 // Prevent this node from being deleted by the next two calls.
6875 Handles.emplace_back(NegZ);
6876
6877 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
6878 NegatibleCost CostX = NegatibleCost::Expensive;
6879 SDValue NegX =
6880 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
6881 // Prevent this node from being deleted by the next call.
6882 if (NegX)
6883 Handles.emplace_back(NegX);
6884
6885 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
6886 NegatibleCost CostY = NegatibleCost::Expensive;
6887 SDValue NegY =
6888 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);
6889
6890 // We're done with the handles.
6891 Handles.clear();
6892
6893 // Negate X if its cost is less than or equal to the cost of Y.
6894 if (NegX && (CostX <= CostY)) {
6895 Cost = std::min(CostX, CostZ);
6896 SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags);
6897 if (NegY != N)
6898 RemoveDeadNode(NegY);
6899 return N;
6900 }
6901
6902 // Negate Y if it is not expensive.
6903 if (NegY) {
6904 Cost = std::min(CostY, CostZ);
6905 SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags);
6906 if (NegX != N)
6907 RemoveDeadNode(NegX);
6908 return N;
6909 }
6910 break;
6911 }
6912
6913 case ISD::FP_EXTEND:
6914 case ISD::FSIN:
6915 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
6916 OptForSize, Cost, Depth))
6917 return DAG.getNode(Opcode, DL, VT, NegV);
6918 break;
6919 case ISD::FP_ROUND:
6920 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
6921 OptForSize, Cost, Depth))
6922 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1));
6923 break;
6924 }
6925
6926 return SDValue();
6927 }
6928
6929 //===----------------------------------------------------------------------===//
6930 // Legalization Utilities
6931 //===----------------------------------------------------------------------===//
6932
6933 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
6934 SDValue LHS, SDValue RHS,
6935 SmallVectorImpl<SDValue> &Result,
6936 EVT HiLoVT, SelectionDAG &DAG,
6937 MulExpansionKind Kind, SDValue LL,
6938 SDValue LH, SDValue RL, SDValue RH) const {
6939 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
6940 Opcode == ISD::SMUL_LOHI);
6941
6942 bool HasMULHS = (Kind == MulExpansionKind::Always) ||
6943 isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
6944 bool HasMULHU = (Kind == MulExpansionKind::Always) ||
6945 isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
6946 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
6947 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
6948 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
6949 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);
6950
6951 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
6952 return false;
6953
6954 unsigned OuterBitSize = VT.getScalarSizeInBits();
6955 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();
6956
6957 // LL, LH, RL, and RH must be either all NULL or all set to a value.
6958 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
6959 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));
6960
6961 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
6962 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
6963 bool Signed) -> bool {
6964 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
6965 Lo = DAG.getNode(Signed ?
ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 6966 Hi = SDValue(Lo.getNode(), 1); 6967 return true; 6968 } 6969 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 6970 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 6971 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 6972 return true; 6973 } 6974 return false; 6975 }; 6976 6977 SDValue Lo, Hi; 6978 6979 if (!LL.getNode() && !RL.getNode() && 6980 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6981 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 6982 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 6983 } 6984 6985 if (!LL.getNode()) 6986 return false; 6987 6988 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 6989 if (DAG.MaskedValueIsZero(LHS, HighMask) && 6990 DAG.MaskedValueIsZero(RHS, HighMask)) { 6991 // The inputs are both zero-extended. 6992 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 6993 Result.push_back(Lo); 6994 Result.push_back(Hi); 6995 if (Opcode != ISD::MUL) { 6996 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6997 Result.push_back(Zero); 6998 Result.push_back(Zero); 6999 } 7000 return true; 7001 } 7002 } 7003 7004 if (!VT.isVector() && Opcode == ISD::MUL && 7005 DAG.ComputeNumSignBits(LHS) > InnerBitSize && 7006 DAG.ComputeNumSignBits(RHS) > InnerBitSize) { 7007 // The input values are both sign-extended. 7008 // TODO non-MUL case? 7009 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 7010 Result.push_back(Lo); 7011 Result.push_back(Hi); 7012 return true; 7013 } 7014 } 7015 7016 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 7017 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7018 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 7019 7020 if (!LH.getNode() && !RH.getNode() && 7021 isOperationLegalOrCustom(ISD::SRL, VT) && 7022 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 7023 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 7024 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 7025 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 7026 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 7027 } 7028 7029 if (!LH.getNode()) 7030 return false; 7031 7032 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 7033 return false; 7034 7035 Result.push_back(Lo); 7036 7037 if (Opcode == ISD::MUL) { 7038 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 7039 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 7040 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 7041 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 7042 Result.push_back(Hi); 7043 return true; 7044 } 7045 7046 // Compute the full width result. 7047 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 7048 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 7049 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 7050 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 7051 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 7052 }; 7053 7054 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 7055 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 7056 return false; 7057 7058 // This is effectively the add part of a multiply-add of half-sized operands, 7059 // so it cannot overflow. 
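  // (Schoolbook view, with I = InnerBitSize: the full product is
  //  LL*RL + (LL*RH + LH*RL) * 2^I + LH*RH * 2^(2I). `Next` currently holds
  //  the high half of LL*RL; here we add in the LL*RH cross term.)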
7060 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 7061 7062 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 7063 return false; 7064 7065 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 7066 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7067 7068 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 7069 isOperationLegalOrCustom(ISD::ADDE, VT)); 7070 if (UseGlue) 7071 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 7072 Merge(Lo, Hi)); 7073 else 7074 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 7075 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 7076 7077 SDValue Carry = Next.getValue(1); 7078 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 7079 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 7080 7081 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 7082 return false; 7083 7084 if (UseGlue) 7085 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 7086 Carry); 7087 else 7088 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 7089 Zero, Carry); 7090 7091 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 7092 7093 if (Opcode == ISD::SMUL_LOHI) { 7094 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 7095 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 7096 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 7097 7098 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 7099 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 7100 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 7101 } 7102 7103 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 7104 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 7105 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 7106 return true; 7107 } 7108 7109 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 7110 SelectionDAG &DAG, MulExpansionKind Kind, 7111 SDValue LL, SDValue LH, SDValue RL, 7112 SDValue RH) const { 7113 SmallVector<SDValue, 2> Result; 7114 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 7115 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 7116 DAG, Kind, LL, LH, RL, RH); 7117 if (Ok) { 7118 assert(Result.size() == 2); 7119 Lo = Result[0]; 7120 Hi = Result[1]; 7121 } 7122 return Ok; 7123 } 7124 7125 // Check that (every element of) Z is undef or not an exact multiple of BW. 7126 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 7127 return ISD::matchUnaryPredicate( 7128 Z, 7129 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 7130 true); 7131 } 7132 7133 SDValue TargetLowering::expandFunnelShift(SDNode *Node, 7134 SelectionDAG &DAG) const { 7135 EVT VT = Node->getValueType(0); 7136 7137 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 7138 !isOperationLegalOrCustom(ISD::SRL, VT) || 7139 !isOperationLegalOrCustom(ISD::SUB, VT) || 7140 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7141 return SDValue(); 7142 7143 SDValue X = Node->getOperand(0); 7144 SDValue Y = Node->getOperand(1); 7145 SDValue Z = Node->getOperand(2); 7146 7147 unsigned BW = VT.getScalarSizeInBits(); 7148 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 7149 SDLoc DL(SDValue(Node, 0)); 7150 7151 EVT ShVT = Z.getValueType(); 7152 7153 // If a funnel shift in the other direction is more supported, use it. 7154 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL;
7155 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
7156 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) {
7157 if (isNonZeroModBitWidthOrUndef(Z, BW)) {
7158 // fshl X, Y, Z -> fshr X, Y, -Z
7159 // fshr X, Y, Z -> fshl X, Y, -Z
7160 SDValue Zero = DAG.getConstant(0, DL, ShVT);
7161 Z = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Z);
7162 } else {
7163 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z
7164 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z
7165 SDValue One = DAG.getConstant(1, DL, ShVT);
7166 if (IsFSHL) {
7167 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
7168 X = DAG.getNode(ISD::SRL, DL, VT, X, One);
7169 } else {
7170 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One);
7171 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One);
7172 }
7173 Z = DAG.getNOT(DL, Z, ShVT);
7174 }
7175 return DAG.getNode(RevOpcode, DL, VT, X, Y, Z);
7176 }
7177
7178 SDValue ShX, ShY;
7179 SDValue ShAmt, InvShAmt;
7180 if (isNonZeroModBitWidthOrUndef(Z, BW)) {
7181 // fshl: X << C | Y >> (BW - C)
7182 // fshr: X << (BW - C) | Y >> C
7183 // where C = Z % BW is not zero
7184 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
7185 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
7186 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt);
7187 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt);
7188 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt);
7189 } else {
7190 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW))
7191 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW)
7192 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT);
7193 if (isPowerOf2_32(BW)) {
7194 // Z % BW -> Z & (BW - 1)
7195 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask);
7196 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1)
7197 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask);
7198 } else {
7199 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT);
7200 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC);
7201 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt);
7202 }
7203
7204 SDValue One = DAG.getConstant(1, DL, ShVT);
7205 if (IsFSHL) {
7206 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt);
7207 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One);
7208 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt);
7209 } else {
7210 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One);
7211 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt);
7212 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt);
7213 }
7214 }
7215 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
7216 }
7217
7218 // TODO: Merge with expandFunnelShift.
7219 SDValue TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps,
7220 SelectionDAG &DAG) const {
7221 EVT VT = Node->getValueType(0);
7222 unsigned EltSizeInBits = VT.getScalarSizeInBits();
7223 bool IsLeft = Node->getOpcode() == ISD::ROTL;
7224 SDValue Op0 = Node->getOperand(0);
7225 SDValue Op1 = Node->getOperand(1);
7226 SDLoc DL(SDValue(Node, 0));
7227
7228 EVT ShVT = Op1.getValueType();
7229 SDValue Zero = DAG.getConstant(0, DL, ShVT);
7230
7231 // If a rotate in the other direction is more supported, use it.
7232 unsigned RevRot = IsLeft ?
ISD::ROTR : ISD::ROTL;
7233 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
7234 isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) {
7235 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
7236 return DAG.getNode(RevRot, DL, VT, Op0, Sub);
7237 }
7238
7239 if (!AllowVectorOps && VT.isVector() &&
7240 (!isOperationLegalOrCustom(ISD::SHL, VT) ||
7241 !isOperationLegalOrCustom(ISD::SRL, VT) ||
7242 !isOperationLegalOrCustom(ISD::SUB, VT) ||
7243 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
7244 !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
7245 return SDValue();
7246
7247 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
7248 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
7249 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
7250 SDValue ShVal;
7251 SDValue HsVal;
7252 if (isPowerOf2_32(EltSizeInBits)) {
7253 // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
7254 // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
7255 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
7256 SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
7257 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
7258 SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
7259 HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt);
7260 } else {
7261 // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
7262 // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
7263 SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
7264 SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC);
7265 ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
7266 SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt);
7267 SDValue One = DAG.getConstant(1, DL, ShVT);
7268 HsVal =
7269 DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt);
7270 }
7271 return DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal);
7272 }
7273
7274 void TargetLowering::expandShiftParts(SDNode *Node, SDValue &Lo, SDValue &Hi,
7275 SelectionDAG &DAG) const {
7276 assert(Node->getNumOperands() == 3 && "Not a double-shift!");
7277 EVT VT = Node->getValueType(0);
7278 unsigned VTBits = VT.getScalarSizeInBits();
7279 assert(isPowerOf2_32(VTBits) && "Power-of-two integer type expected");
7280
7281 bool IsSHL = Node->getOpcode() == ISD::SHL_PARTS;
7282 bool IsSRA = Node->getOpcode() == ISD::SRA_PARTS;
7283 SDValue ShOpLo = Node->getOperand(0);
7284 SDValue ShOpHi = Node->getOperand(1);
7285 SDValue ShAmt = Node->getOperand(2);
7286 EVT ShAmtVT = ShAmt.getValueType();
7287 EVT ShAmtCCVT =
7288 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShAmtVT);
7289 SDLoc dl(Node);
7290
7291 // ISD::FSHL and ISD::FSHR have defined behavior for out-of-range shift
7292 // amounts, but ISD::SHL and ISD::SRA/SRL do not. Insert an AND to be safe;
7293 // it's usually optimized away during isel.
7294 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
7295 DAG.getConstant(VTBits - 1, dl, ShAmtVT));
7296 SDValue Tmp1 = IsSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
7297 DAG.getConstant(VTBits - 1, dl, ShAmtVT))
7298 : DAG.getConstant(0, dl, VT);
7299
7300 SDValue Tmp2, Tmp3;
7301 if (IsSHL) {
7302 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
7303 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
7304 } else {
7305 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
7306 Tmp3 = DAG.getNode(IsSRA ?
ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
7307 }
7308
7309 // If the shift amount is larger than or equal to the width of a part, we
7310 // don't use the result from the FSHL/FSHR. Insert a test and select the
7311 // appropriate values for large shift amounts.
7312 SDValue AndNode = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
7313 DAG.getConstant(VTBits, dl, ShAmtVT));
7314 SDValue Cond = DAG.getSetCC(dl, ShAmtCCVT, AndNode,
7315 DAG.getConstant(0, dl, ShAmtVT), ISD::SETNE);
7316
7317 if (IsSHL) {
7318 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
7319 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
7320 } else {
7321 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
7322 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
7323 }
7324 }
7325
7326 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
7327 SelectionDAG &DAG) const {
7328 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
7329 SDValue Src = Node->getOperand(OpNo);
7330 EVT SrcVT = Src.getValueType();
7331 EVT DstVT = Node->getValueType(0);
7332 SDLoc dl(SDValue(Node, 0));
7333
7334 // FIXME: Only f32 to i64 conversions are supported.
7335 if (SrcVT != MVT::f32 || DstVT != MVT::i64)
7336 return false;
7337
7338 if (Node->isStrictFPOpcode())
7339 // When a NaN is converted to an integer a trap is allowed. We can't
7340 // use this expansion here because it would eliminate that trap. Other
7341 // traps are also allowed and cannot be eliminated. See
7342 // IEEE 754-2008 sec 5.8.
7343 return false;
7344
7345 // Expand f32 -> i64 conversion
7346 // This algorithm comes from compiler-rt's implementation of fixsfdi:
7347 // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c
7348 unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
7349 EVT IntVT = SrcVT.changeTypeToInteger();
7350 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());
7351
7352 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
7353 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
7354 SDValue Bias = DAG.getConstant(127, dl, IntVT);
7355 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
7356 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
7357 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
7358
7359 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);
7360
7361 SDValue ExponentBits = DAG.getNode(
7362 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
7363 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
7364 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
7365
7366 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
7367 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
7368 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
7369 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);
7370
7371 SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
7372 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
7373 DAG.getConstant(0x00800000, dl, IntVT));
7374
7375 R = DAG.getZExtOrTrunc(R, dl, DstVT);
7376
7377 R = DAG.getSelectCC(
7378 dl, Exponent, ExponentLoBit,
7379 DAG.getNode(ISD::SHL, dl, DstVT, R,
7380 DAG.getZExtOrTrunc(
7381 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
7382 dl, IntShVT)),
7383 DAG.getNode(ISD::SRL, dl, DstVT, R,
7384 DAG.getZExtOrTrunc(
7385 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
7386 dl, IntShVT)),
7387 ISD::SETGT);
7388
7389 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
7390 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign),
Sign);
7391
7392 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
7393 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
7394 return true;
7395 }
7396
7397 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
7398 SDValue &Chain,
7399 SelectionDAG &DAG) const {
7400 SDLoc dl(SDValue(Node, 0));
7401 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
7402 SDValue Src = Node->getOperand(OpNo);
7403
7404 EVT SrcVT = Src.getValueType();
7405 EVT DstVT = Node->getValueType(0);
7406 EVT SetCCVT =
7407 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7408 EVT DstSetCCVT =
7409 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7410
7411 // Only expand vector types if we have the appropriate vector bit operations.
7412 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
7413 ISD::FP_TO_SINT;
7414 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
7415 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
7416 return false;
7417
7418 // If the maximum float value is smaller than the signed integer range,
7419 // the destination signmask can't be represented by the float, so we can
7420 // just use FP_TO_SINT directly.
7421 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
7422 APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits()));
7423 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
7424 if (APFloat::opOverflow &
7425 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
7426 if (Node->isStrictFPOpcode()) {
7427 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
7428 { Node->getOperand(0), Src });
7429 Chain = Result.getValue(1);
7430 } else
7431 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
7432 return true;
7433 }
7434
7435 // Don't expand it if there isn't a cheap FSUB instruction.
7436 if (!isOperationLegalOrCustom(
7437 Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
7438 return false;
7439
7440 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7441 SDValue Sel;
7442
7443 if (Node->isStrictFPOpcode()) {
7444 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7445 Node->getOperand(0), /*IsSignaling*/ true);
7446 Chain = Sel.getValue(1);
7447 } else {
7448 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
7449 }
7450
7451 bool Strict = Node->isStrictFPOpcode() ||
7452 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);
7453
7454 if (Strict) {
7455 // Expand based on the maximum range of FP_TO_SINT; if the value exceeds
7456 // the signmask, subtract the offset first (the result is then fully
7457 // representable).
7458 // Sel = Src < 0x8000000000000000
7459 // FltOfs = select Sel, 0, 0x8000000000000000
7460 // IntOfs = select Sel, 0, 0x8000000000000000
7461 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7462 // TODO: Should any fast-math-flags be set for the FSUB?
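    // Worked f64 -> u64 example for the selects built below: for
    // Src = 2^63 + 2048 we take the Src >= 2^63 branch, so
    // FltOfs = 2^63, IntOfs = 0x8000000000000000, and
    // fp_to_sint(Src - 2^63) ^ IntOfs = 2048 ^ (1 << 63) = 2^63 + 2048.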
7463 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel,
7464 DAG.getConstantFP(0.0, dl, SrcVT), Cst);
7465 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
7466 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel,
7467 DAG.getConstant(0, dl, DstVT),
7468 DAG.getConstant(SignMask, dl, DstVT));
7469 SDValue SInt;
7470 if (Node->isStrictFPOpcode()) {
7471 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other },
7472 { Chain, Src, FltOfs });
7473 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
7474 { Val.getValue(1), Val });
7475 Chain = SInt.getValue(1);
7476 } else {
7477 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs);
7478 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val);
7479 }
7480 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7481 } else {
7482 // Expand based on maximum range of FP_TO_SINT:
7483 // True = fp_to_sint(Src)
7484 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000)
7485 // Result = select (Src < 0x8000000000000000), True, False
7486
7487 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
7488 // TODO: Should any fast-math-flags be set for the FSUB?
7489 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT,
7490 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst));
7491 False = DAG.getNode(ISD::XOR, dl, DstVT, False,
7492 DAG.getConstant(SignMask, dl, DstVT));
7493 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
7494 Result = DAG.getSelect(dl, DstVT, Sel, True, False);
7495 }
7496 return true;
7497 }
7498
7499 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result,
7500 SDValue &Chain,
7501 SelectionDAG &DAG) const {
7502 // This transform is not correct for converting 0 when the rounding mode is
7503 // set to round toward negative infinity, which would produce -0.0. So
7504 // disable it under strictfp.
7505 if (Node->isStrictFPOpcode())
7506 return false;
7507
7508 SDValue Src = Node->getOperand(0);
7509 EVT SrcVT = Src.getValueType();
7510 EVT DstVT = Node->getValueType(0);
7511
7512 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64)
7513 return false;
7514
7515 // Only expand vector types if we have the appropriate vector bit operations.
7516 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) ||
7517 !isOperationLegalOrCustom(ISD::FADD, DstVT) ||
7518 !isOperationLegalOrCustom(ISD::FSUB, DstVT) ||
7519 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) ||
7520 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT)))
7521 return false;
7522
7523 SDLoc dl(SDValue(Node, 0));
7524 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout());
7525
7526 // Implementation of unsigned i64 to f64 following the algorithm in
7527 // __floatundidf in compiler-rt. This implementation performs rounding
7528 // correctly in all rounding modes with the exception of converting 0
7529 // when rounding toward negative infinity. In that case the fsub will produce
7530 // -0.0. This will be added to +0.0 and produce -0.0, which is incorrect.
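// The constants below make the two halves read back, as doubles, as
//   LoFlt = 2^52 + Lo   (0x433 is the biased exponent of 2^52)
//   HiFlt = 2^84 + Hi * 2^32
// so HiSub = HiFlt - (2^84 + 2^52) = Hi * 2^32 - 2^52 exactly, and the
// final FADD computes Hi * 2^32 + Lo with a single rounding.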
7531 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 7532 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 7533 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 7534 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 7535 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 7536 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 7537 7538 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 7539 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 7540 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 7541 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 7542 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 7543 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 7544 SDValue HiSub = 7545 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 7546 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 7547 return true; 7548 } 7549 7550 SDValue 7551 TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node, 7552 SelectionDAG &DAG) const { 7553 unsigned Opcode = Node->getOpcode(); 7554 assert((Opcode == ISD::FMINNUM || Opcode == ISD::FMAXNUM || 7555 Opcode == ISD::STRICT_FMINNUM || Opcode == ISD::STRICT_FMAXNUM) && 7556 "Wrong opcode"); 7557 7558 if (Node->getFlags().hasNoNaNs()) { 7559 ISD::CondCode Pred = Opcode == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 7560 SDValue Op1 = Node->getOperand(0); 7561 SDValue Op2 = Node->getOperand(1); 7562 SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred); 7563 // Copy FMF flags, but always set the no-signed-zeros flag 7564 // as this is implied by the FMINNUM/FMAXNUM semantics. 7565 SDNodeFlags Flags = Node->getFlags(); 7566 Flags.setNoSignedZeros(true); 7567 SelCC->setFlags(Flags); 7568 return SelCC; 7569 } 7570 7571 return SDValue(); 7572 } 7573 7574 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 7575 SelectionDAG &DAG) const { 7576 SDLoc dl(Node); 7577 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 7578 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 7579 EVT VT = Node->getValueType(0); 7580 7581 if (VT.isScalableVector()) 7582 report_fatal_error( 7583 "Expanding fminnum/fmaxnum for scalable vectors is undefined."); 7584 7585 if (isOperationLegalOrCustom(NewOp, VT)) { 7586 SDValue Quiet0 = Node->getOperand(0); 7587 SDValue Quiet1 = Node->getOperand(1); 7588 7589 if (!Node->getFlags().hasNoNaNs()) { 7590 // Insert canonicalizes if it's possible we need to quiet to get correct 7591 // sNaN behavior. 7592 if (!DAG.isKnownNeverSNaN(Quiet0)) { 7593 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 7594 Node->getFlags()); 7595 } 7596 if (!DAG.isKnownNeverSNaN(Quiet1)) { 7597 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 7598 Node->getFlags()); 7599 } 7600 } 7601 7602 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 7603 } 7604 7605 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 7606 // instead if there are no NaNs. 7607 if (Node->getFlags().hasNoNaNs()) { 7608 unsigned IEEE2018Op = 7609 Node->getOpcode() == ISD::FMINNUM ? 
ISD::FMINIMUM : ISD::FMAXIMUM;
7610     if (isOperationLegalOrCustom(IEEE2018Op, VT)) {
7611       return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
7612                          Node->getOperand(1), Node->getFlags());
7613     }
7614   }
7615
7616   if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
7617     return SelCC;
7618
7619   return SDValue();
7620 }
7621
7622 SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
7623                                          unsigned Test, SDNodeFlags Flags,
7624                                          const SDLoc &DL,
7625                                          SelectionDAG &DAG) const {
7626   EVT OperandVT = Op.getValueType();
7627   assert(OperandVT.isFloatingPoint());
7628
7629   // Degenerate cases.
7630   if (Test == 0)
7631     return DAG.getBoolConstant(false, DL, ResultVT, OperandVT);
7632   if ((Test & fcAllFlags) == fcAllFlags)
7633     return DAG.getBoolConstant(true, DL, ResultVT, OperandVT);
7634
7635   // PPC double-double is a pair of doubles, of which the higher part
7636   // determines the value class.
7637   if (OperandVT == MVT::ppcf128) {
7638     Op = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::f64, Op,
7639                      DAG.getConstant(1, DL, MVT::i32));
7640     OperandVT = MVT::f64;
7641   }
7642
7643   // Some checks may be represented as the inversion of a simpler check, for
7644   // example "inf|normal|subnormal|zero" => !"nan".
7645   bool IsInverted = false;
7646   if (unsigned InvertedCheck = getInvertedFPClassTest(Test)) {
7647     IsInverted = true;
7648     Test = InvertedCheck;
7649   }
7650
7651   // Floating-point type properties.
7652   EVT ScalarFloatVT = OperandVT.getScalarType();
7653   const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext());
7654   const llvm::fltSemantics &Semantics = FloatTy->getFltSemantics();
7655   bool IsF80 = (ScalarFloatVT == MVT::f80);
7656
7657   // Some checks can be implemented using float comparisons, if floating point
7658   // exceptions are ignored.
7659   if (Flags.hasNoFPExcept() &&
7660       isOperationLegalOrCustom(ISD::SETCC, OperandVT.getScalarType())) {
7661     if (Test == fcZero)
7662       return DAG.getSetCC(DL, ResultVT, Op,
7663                           DAG.getConstantFP(0.0, DL, OperandVT),
7664                           IsInverted ? ISD::SETUNE : ISD::SETOEQ);
7665     if (Test == fcNan)
7666       return DAG.getSetCC(DL, ResultVT, Op, Op,
7667                           IsInverted ? ISD::SETO : ISD::SETUO);
7668   }
7669
7670   // In the general case use integer operations.
7671   unsigned BitSize = OperandVT.getScalarSizeInBits();
7672   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), BitSize);
7673   if (OperandVT.isVector())
7674     IntVT = EVT::getVectorVT(*DAG.getContext(), IntVT,
7675                              OperandVT.getVectorElementCount());
7676   SDValue OpAsInt = DAG.getBitcast(IntVT, Op);
7677
7678   // Various masks.
7679   APInt SignBit = APInt::getSignMask(BitSize);
7680   APInt ValueMask = APInt::getSignedMaxValue(BitSize);     // All bits but sign.
7681   APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit.
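  // For IEEE f32, for example, the masks computed here and just below are:
  // SignBit == 0x80000000, ValueMask == 0x7fffffff, Inf == 0x7f800000,
  // AllOneMantissa == 0x007fffff and QNaNBitMask == 0x00400000.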
7682   const unsigned ExplicitIntBitInF80 = 63;
7683   APInt ExpMask = Inf;
7684   if (IsF80)
7685     ExpMask.clearBit(ExplicitIntBitInF80);
7686   APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
7687   APInt QNaNBitMask =
7688       APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
7689   APInt InvertionMask = APInt::getAllOnesValue(ResultVT.getScalarSizeInBits());
7690
7691   SDValue ValueMaskV = DAG.getConstant(ValueMask, DL, IntVT);
7692   SDValue SignBitV = DAG.getConstant(SignBit, DL, IntVT);
7693   SDValue ExpMaskV = DAG.getConstant(ExpMask, DL, IntVT);
7694   SDValue ZeroV = DAG.getConstant(0, DL, IntVT);
7695   SDValue InfV = DAG.getConstant(Inf, DL, IntVT);
7696   SDValue ResultInvertionMask = DAG.getConstant(InvertionMask, DL, ResultVT);
7697
7698   SDValue Res;
7699   const auto appendResult = [&](SDValue PartialRes) {
7700     if (PartialRes) {
7701       if (Res)
7702         Res = DAG.getNode(ISD::OR, DL, ResultVT, Res, PartialRes);
7703       else
7704         Res = PartialRes;
7705     }
7706   };
7707
7708   SDValue IntBitIsSetV; // Explicit integer bit in f80 mantissa is set.
7709   const auto getIntBitIsSet = [&]() -> SDValue {
7710     if (!IntBitIsSetV) {
7711       APInt IntBitMask(BitSize, 0);
7712       IntBitMask.setBit(ExplicitIntBitInF80);
7713       SDValue IntBitMaskV = DAG.getConstant(IntBitMask, DL, IntVT);
7714       SDValue IntBitV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, IntBitMaskV);
7715       IntBitIsSetV = DAG.getSetCC(DL, ResultVT, IntBitV, ZeroV, ISD::SETNE);
7716     }
7717     return IntBitIsSetV;
7718   };
7719
7720   // Split the value into sign bit and absolute value.
7721   SDValue AbsV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, ValueMaskV);
7722   SDValue SignV =
7723       DAG.getSetCC(DL, ResultVT, OpAsInt, ZeroV, ISD::SETLT);
7724
7725   // Tests that involve more than one class should be processed first.
7726   SDValue PartialRes;
7727
7728   if (IsF80)
7729     ; // Detect finite numbers of f80 by checking individual classes because
7730       // they have different settings of the explicit integer bit.
7731   else if ((Test & fcFinite) == fcFinite) {
7732     // finite(V) ==> abs(V) < exp_mask
7733     PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT);
7734     Test &= ~fcFinite;
7735   } else if ((Test & fcFinite) == fcPosFinite) {
7736     // finite(V) && V > 0 ==> V < exp_mask
7737     PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ExpMaskV, ISD::SETULT);
7738     Test &= ~fcPosFinite;
7739   } else if ((Test & fcFinite) == fcNegFinite) {
7740     // finite(V) && V < 0 ==> abs(V) < exp_mask && signbit == 1
7741     PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT);
7742     PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV);
7743     Test &= ~fcNegFinite;
7744   }
7745   appendResult(PartialRes);
7746
7747   // Check for individual classes.
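  // For f32, e.g., the fcZero test below reduces to
  // (bits(V) & 0x7fffffff) == 0 and the fcNan test to
  // (bits(V) & 0x7fffffff) > 0x7f800000.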
7748
7749   if (unsigned PartialCheck = Test & fcZero) {
7750     if (PartialCheck == fcPosZero)
7751       PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ZeroV, ISD::SETEQ);
7752     else if (PartialCheck == fcZero)
7753       PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ZeroV, ISD::SETEQ);
7754     else // fcNegZero
7755       PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, SignBitV, ISD::SETEQ);
7756     appendResult(PartialRes);
7757   }
7758
7759   if (unsigned PartialCheck = Test & fcInf) {
7760     if (PartialCheck == fcPosInf)
7761       PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, InfV, ISD::SETEQ);
7762     else if (PartialCheck == fcInf)
7763       PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETEQ);
7764     else { // fcNegInf
7765       APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
7766       SDValue NegInfV = DAG.getConstant(NegInf, DL, IntVT);
7767       PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, NegInfV, ISD::SETEQ);
7768     }
7769     appendResult(PartialRes);
7770   }
7771
7772   if (unsigned PartialCheck = Test & fcNan) {
7773     APInt InfWithQnanBit = Inf | QNaNBitMask;
7774     SDValue InfWithQnanBitV = DAG.getConstant(InfWithQnanBit, DL, IntVT);
7775     if (PartialCheck == fcNan) {
7776       // isnan(V) ==> abs(V) > int(inf)
7777       PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT);
7778       if (IsF80) {
7779         // Recognize unsupported values as NaNs for compatibility with glibc.
7780         // Such values have (exp(V)==0) == int_bit, i.e. the explicit integer bit disagrees with the exponent.
7781         SDValue ExpBits = DAG.getNode(ISD::AND, DL, IntVT, AbsV, ExpMaskV);
7782         SDValue ExpIsZero =
7783             DAG.getSetCC(DL, ResultVT, ExpBits, ZeroV, ISD::SETEQ);
7784         SDValue IsPseudo =
7785             DAG.getSetCC(DL, ResultVT, getIntBitIsSet(), ExpIsZero, ISD::SETEQ);
7786         PartialRes = DAG.getNode(ISD::OR, DL, ResultVT, PartialRes, IsPseudo);
7787       }
7788     } else if (PartialCheck == fcQNan) {
7789       // isquiet(V) ==> abs(V) >= (unsigned(Inf) | quiet_bit)
7790       PartialRes =
7791           DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETGE);
7792     } else { // fcSNan
7793       // issignaling(V) ==> abs(V) > unsigned(Inf) &&
7794       //                    abs(V) < (unsigned(Inf) | quiet_bit)
7795       SDValue IsNan = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT);
7796       SDValue IsNotQnan =
7797           DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETLT);
7798       PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, IsNan, IsNotQnan);
7799     }
7800     appendResult(PartialRes);
7801   }
7802
7803   if (unsigned PartialCheck = Test & fcSubnormal) {
7804     // issubnormal(V) ==> unsigned(abs(V) - 1) < (all mantissa bits set)
7805     // issubnormal(V) && V > 0 ==> unsigned(V - 1) < (all mantissa bits set)
7806     SDValue V = (PartialCheck == fcPosSubnormal) ?
OpAsInt : AbsV; 7807 SDValue MantissaV = DAG.getConstant(AllOneMantissa, DL, IntVT); 7808 SDValue VMinusOneV = 7809 DAG.getNode(ISD::SUB, DL, IntVT, V, DAG.getConstant(1, DL, IntVT)); 7810 PartialRes = DAG.getSetCC(DL, ResultVT, VMinusOneV, MantissaV, ISD::SETULT); 7811 if (PartialCheck == fcNegSubnormal) 7812 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 7813 appendResult(PartialRes); 7814 } 7815 7816 if (unsigned PartialCheck = Test & fcNormal) { 7817 // isnormal(V) ==> (0 < exp < max_exp) ==> (unsigned(exp-1) < (max_exp-1)) 7818 APInt ExpLSB = ExpMask & ~(ExpMask.shl(1)); 7819 SDValue ExpLSBV = DAG.getConstant(ExpLSB, DL, IntVT); 7820 SDValue ExpMinus1 = DAG.getNode(ISD::SUB, DL, IntVT, AbsV, ExpLSBV); 7821 APInt ExpLimit = ExpMask - ExpLSB; 7822 SDValue ExpLimitV = DAG.getConstant(ExpLimit, DL, IntVT); 7823 PartialRes = DAG.getSetCC(DL, ResultVT, ExpMinus1, ExpLimitV, ISD::SETULT); 7824 if (PartialCheck == fcNegNormal) 7825 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 7826 else if (PartialCheck == fcPosNormal) { 7827 SDValue PosSignV = 7828 DAG.getNode(ISD::XOR, DL, ResultVT, SignV, ResultInvertionMask); 7829 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, PosSignV); 7830 } 7831 if (IsF80) 7832 PartialRes = 7833 DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, getIntBitIsSet()); 7834 appendResult(PartialRes); 7835 } 7836 7837 if (!Res) 7838 return DAG.getConstant(IsInverted, DL, ResultVT); 7839 if (IsInverted) 7840 Res = DAG.getNode(ISD::XOR, DL, ResultVT, Res, ResultInvertionMask); 7841 return Res; 7842 } 7843 7844 // Only expand vector types if we have the appropriate vector bit operations. 7845 static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT) { 7846 assert(VT.isVector() && "Expected vector type"); 7847 unsigned Len = VT.getScalarSizeInBits(); 7848 return TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 7849 TLI.isOperationLegalOrCustom(ISD::SUB, VT) && 7850 TLI.isOperationLegalOrCustom(ISD::SRL, VT) && 7851 (Len == 8 || TLI.isOperationLegalOrCustom(ISD::MUL, VT)) && 7852 TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT); 7853 } 7854 7855 SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const { 7856 SDLoc dl(Node); 7857 EVT VT = Node->getValueType(0); 7858 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7859 SDValue Op = Node->getOperand(0); 7860 unsigned Len = VT.getScalarSizeInBits(); 7861 assert(VT.isInteger() && "CTPOP not implemented for this type."); 7862 7863 // TODO: Add support for irregular type lengths. 7864 if (!(Len <= 128 && Len % 8 == 0)) 7865 return SDValue(); 7866 7867 // Only expand vector types if we have the appropriate vector bit operations. 7868 if (VT.isVector() && !canExpandVectorCTPOP(*this, VT)) 7869 return SDValue(); 7870 7871 // This is the "best" algorithm from 7872 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 7873 SDValue Mask55 = 7874 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 7875 SDValue Mask33 = 7876 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 7877 SDValue Mask0F = 7878 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 7879 7880 // v = v - ((v >> 1) & 0x55555555...) 7881 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 7882 DAG.getNode(ISD::AND, dl, VT, 7883 DAG.getNode(ISD::SRL, dl, VT, Op, 7884 DAG.getConstant(1, dl, ShVT)), 7885 Mask55)); 7886 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 
7887 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 7888 DAG.getNode(ISD::AND, dl, VT, 7889 DAG.getNode(ISD::SRL, dl, VT, Op, 7890 DAG.getConstant(2, dl, ShVT)), 7891 Mask33)); 7892 // v = (v + (v >> 4)) & 0x0F0F0F0F... 7893 Op = DAG.getNode(ISD::AND, dl, VT, 7894 DAG.getNode(ISD::ADD, dl, VT, Op, 7895 DAG.getNode(ISD::SRL, dl, VT, Op, 7896 DAG.getConstant(4, dl, ShVT))), 7897 Mask0F); 7898 7899 if (Len <= 8) 7900 return Op; 7901 7902 // Avoid the multiply if we only have 2 bytes to add. 7903 // TODO: Only doing this for scalars because vectors weren't as obviously 7904 // improved. 7905 if (Len == 16 && !VT.isVector()) { 7906 // v = (v + (v >> 8)) & 0x00FF; 7907 return DAG.getNode(ISD::AND, dl, VT, 7908 DAG.getNode(ISD::ADD, dl, VT, Op, 7909 DAG.getNode(ISD::SRL, dl, VT, Op, 7910 DAG.getConstant(8, dl, ShVT))), 7911 DAG.getConstant(0xFF, dl, VT)); 7912 } 7913 7914 // v = (v * 0x01010101...) >> (Len - 8) 7915 SDValue Mask01 = 7916 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 7917 return DAG.getNode(ISD::SRL, dl, VT, 7918 DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 7919 DAG.getConstant(Len - 8, dl, ShVT)); 7920 } 7921 7922 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { 7923 SDLoc dl(Node); 7924 EVT VT = Node->getValueType(0); 7925 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 7926 SDValue Op = Node->getOperand(0); 7927 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7928 7929 // If the non-ZERO_UNDEF version is supported we can use that instead. 7930 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 7931 isOperationLegalOrCustom(ISD::CTLZ, VT)) 7932 return DAG.getNode(ISD::CTLZ, dl, VT, Op); 7933 7934 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7935 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 7936 EVT SetCCVT = 7937 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7938 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 7939 SDValue Zero = DAG.getConstant(0, dl, VT); 7940 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7941 return DAG.getSelect(dl, VT, SrcIsZero, 7942 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 7943 } 7944 7945 // Only expand vector types if we have the appropriate vector bit operations. 7946 // This includes the operations needed to expand CTPOP if it isn't supported. 7947 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7948 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7949 !canExpandVectorCTPOP(*this, VT)) || 7950 !isOperationLegalOrCustom(ISD::SRL, VT) || 7951 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7952 return SDValue(); 7953 7954 // for now, we do this: 7955 // x = x | (x >> 1); 7956 // x = x | (x >> 2); 7957 // ... 
7958 // x = x | (x >>16); 7959 // x = x | (x >>32); // for 64-bit input 7960 // return popcount(~x); 7961 // 7962 // Ref: "Hacker's Delight" by Henry Warren 7963 for (unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) { 7964 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 7965 Op = DAG.getNode(ISD::OR, dl, VT, Op, 7966 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 7967 } 7968 Op = DAG.getNOT(dl, Op, VT); 7969 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 7970 } 7971 7972 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { 7973 SDLoc dl(Node); 7974 EVT VT = Node->getValueType(0); 7975 SDValue Op = Node->getOperand(0); 7976 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 7977 7978 // If the non-ZERO_UNDEF version is supported we can use that instead. 7979 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 7980 isOperationLegalOrCustom(ISD::CTTZ, VT)) 7981 return DAG.getNode(ISD::CTTZ, dl, VT, Op); 7982 7983 // If the ZERO_UNDEF version is supported use that and handle the zero case. 7984 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 7985 EVT SetCCVT = 7986 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7987 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 7988 SDValue Zero = DAG.getConstant(0, dl, VT); 7989 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 7990 return DAG.getSelect(dl, VT, SrcIsZero, 7991 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 7992 } 7993 7994 // Only expand vector types if we have the appropriate vector bit operations. 7995 // This includes the operations needed to expand CTPOP if it isn't supported. 7996 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 7997 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 7998 !isOperationLegalOrCustom(ISD::CTLZ, VT) && 7999 !canExpandVectorCTPOP(*this, VT)) || 8000 !isOperationLegalOrCustom(ISD::SUB, VT) || 8001 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 8002 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 8003 return SDValue(); 8004 8005 // for now, we use: { return popcount(~x & (x - 1)); } 8006 // unless the target has ctlz but not ctpop, in which case we use: 8007 // { return 32 - nlz(~x & (x-1)); } 8008 // Ref: "Hacker's Delight" by Henry Warren 8009 SDValue Tmp = DAG.getNode( 8010 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 8011 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 8012 8013 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 
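  // Why this works: for x != 0, (~x & (x - 1)) has exactly the cttz(x)
  // trailing zero bits of x set and nothing else, so its population count is
  // cttz(x); for x == 0 it is all ones, giving BitWidth. Equivalently, its
  // count of leading zeros is BitWidth - cttz(x), which the CTLZ form below
  // subtracts back out.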
8014 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 8015 return DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 8016 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 8017 } 8018 8019 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 8020 } 8021 8022 SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, 8023 bool IsNegative) const { 8024 SDLoc dl(N); 8025 EVT VT = N->getValueType(0); 8026 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8027 SDValue Op = N->getOperand(0); 8028 8029 // abs(x) -> smax(x,sub(0,x)) 8030 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 8031 isOperationLegal(ISD::SMAX, VT)) { 8032 SDValue Zero = DAG.getConstant(0, dl, VT); 8033 return DAG.getNode(ISD::SMAX, dl, VT, Op, 8034 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8035 } 8036 8037 // abs(x) -> umin(x,sub(0,x)) 8038 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 8039 isOperationLegal(ISD::UMIN, VT)) { 8040 SDValue Zero = DAG.getConstant(0, dl, VT); 8041 Op = DAG.getFreeze(Op); 8042 return DAG.getNode(ISD::UMIN, dl, VT, Op, 8043 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8044 } 8045 8046 // 0 - abs(x) -> smin(x, sub(0,x)) 8047 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 8048 isOperationLegal(ISD::SMIN, VT)) { 8049 Op = DAG.getFreeze(Op); 8050 SDValue Zero = DAG.getConstant(0, dl, VT); 8051 return DAG.getNode(ISD::SMIN, dl, VT, Op, 8052 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8053 } 8054 8055 // Only expand vector types if we have the appropriate vector operations. 8056 if (VT.isVector() && 8057 (!isOperationLegalOrCustom(ISD::SRA, VT) || 8058 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 8059 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 8060 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 8061 return SDValue(); 8062 8063 Op = DAG.getFreeze(Op); 8064 SDValue Shift = 8065 DAG.getNode(ISD::SRA, dl, VT, Op, 8066 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 8067 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 8068 8069 // abs(x) -> Y = sra (X, size(X)-1); sub (xor (X, Y), Y) 8070 if (!IsNegative) 8071 return DAG.getNode(ISD::SUB, dl, VT, Xor, Shift); 8072 8073 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 8074 return DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 8075 } 8076 8077 SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const { 8078 SDLoc dl(N); 8079 EVT VT = N->getValueType(0); 8080 SDValue Op = N->getOperand(0); 8081 8082 if (!VT.isSimple()) 8083 return SDValue(); 8084 8085 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8086 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 8087 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 8088 default: 8089 return SDValue(); 8090 case MVT::i16: 8091 // Use a rotate by 8. This can be further expanded if necessary. 
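    // E.g. bswap(i16 0xAABB) == rotl(0xAABB, 8) == 0xBBAA.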
8092 return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8093 case MVT::i32: 8094 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8095 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8096 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8097 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8098 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 8099 DAG.getConstant(0xFF0000, dl, VT)); 8100 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT)); 8101 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 8102 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 8103 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 8104 case MVT::i64: 8105 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 8106 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 8107 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8108 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8109 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8110 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8111 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 8112 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 8113 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, 8114 DAG.getConstant(255ULL<<48, dl, VT)); 8115 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, 8116 DAG.getConstant(255ULL<<40, dl, VT)); 8117 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, 8118 DAG.getConstant(255ULL<<32, dl, VT)); 8119 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, 8120 DAG.getConstant(255ULL<<24, dl, VT)); 8121 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 8122 DAG.getConstant(255ULL<<16, dl, VT)); 8123 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, 8124 DAG.getConstant(255ULL<<8 , dl, VT)); 8125 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 8126 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 8127 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 8128 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 8129 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 8130 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 8131 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 8132 } 8133 } 8134 8135 SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const { 8136 SDLoc dl(N); 8137 EVT VT = N->getValueType(0); 8138 SDValue Op = N->getOperand(0); 8139 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8140 unsigned Sz = VT.getScalarSizeInBits(); 8141 8142 SDValue Tmp, Tmp2, Tmp3; 8143 8144 // If we can, perform BSWAP first and then the mask+swap the i4, then i2 8145 // and finally the i1 pairs. 8146 // TODO: We can easily support i4/i2 legal types if any target ever does. 8147 if (Sz >= 8 && isPowerOf2_32(Sz)) { 8148 // Create the masks - repeating the pattern every byte. 8149 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F)); 8150 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33)); 8151 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55)); 8152 8153 // BSWAP if the type is wider than a single byte. 8154 Tmp = (Sz > 8 ? 
DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op); 8155 8156 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4) 8157 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT)); 8158 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask4, dl, VT)); 8159 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT)); 8160 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT)); 8161 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8162 8163 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2) 8164 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT)); 8165 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask2, dl, VT)); 8166 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT)); 8167 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT)); 8168 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8169 8170 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1) 8171 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT)); 8172 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask1, dl, VT)); 8173 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT)); 8174 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT)); 8175 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8176 return Tmp; 8177 } 8178 8179 Tmp = DAG.getConstant(0, dl, VT); 8180 for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) { 8181 if (I < J) 8182 Tmp2 = 8183 DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT)); 8184 else 8185 Tmp2 = 8186 DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT)); 8187 8188 APInt Shift(Sz, 1); 8189 Shift <<= J; 8190 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT)); 8191 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2); 8192 } 8193 8194 return Tmp; 8195 } 8196 8197 std::pair<SDValue, SDValue> 8198 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 8199 SelectionDAG &DAG) const { 8200 SDLoc SL(LD); 8201 SDValue Chain = LD->getChain(); 8202 SDValue BasePTR = LD->getBasePtr(); 8203 EVT SrcVT = LD->getMemoryVT(); 8204 EVT DstVT = LD->getValueType(0); 8205 ISD::LoadExtType ExtType = LD->getExtensionType(); 8206 8207 if (SrcVT.isScalableVector()) 8208 report_fatal_error("Cannot scalarize scalable vector loads"); 8209 8210 unsigned NumElem = SrcVT.getVectorNumElements(); 8211 8212 EVT SrcEltVT = SrcVT.getScalarType(); 8213 EVT DstEltVT = DstVT.getScalarType(); 8214 8215 // A vector must always be stored in memory as-is, i.e. without any padding 8216 // between the elements, since various code depend on it, e.g. in the 8217 // handling of a bitcast of a vector type to int, which may be done with a 8218 // vector store followed by an integer load. A vector that does not have 8219 // elements that are byte-sized must therefore be stored as an integer 8220 // built out of the extracted vector elements. 8221 if (!SrcEltVT.isByteSized()) { 8222 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 8223 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 8224 8225 unsigned NumSrcBits = SrcVT.getSizeInBits(); 8226 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 8227 8228 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 8229 SDValue SrcEltBitMask = DAG.getConstant( 8230 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 8231 8232 // Load the whole vector and avoid masking off the top bits as it makes 8233 // the codegen worse. 
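    // E.g. a v4i1 load is performed as one i8 EXTLOAD of the i4 memory type;
    // each lane is then recovered below by shifting the loaded integer right
    // by the lane's bit offset and masking with SrcEltBitMask.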
8234 SDValue Load = 8235 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 8236 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(), 8237 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 8238 8239 SmallVector<SDValue, 8> Vals; 8240 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8241 unsigned ShiftIntoIdx = 8242 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 8243 SDValue ShiftAmount = 8244 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 8245 LoadVT, SL, /*LegalTypes=*/false); 8246 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 8247 SDValue Elt = 8248 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 8249 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 8250 8251 if (ExtType != ISD::NON_EXTLOAD) { 8252 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 8253 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 8254 } 8255 8256 Vals.push_back(Scalar); 8257 } 8258 8259 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 8260 return std::make_pair(Value, Load.getValue(1)); 8261 } 8262 8263 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 8264 assert(SrcEltVT.isByteSized()); 8265 8266 SmallVector<SDValue, 8> Vals; 8267 SmallVector<SDValue, 8> LoadChains; 8268 8269 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8270 SDValue ScalarLoad = 8271 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 8272 LD->getPointerInfo().getWithOffset(Idx * Stride), 8273 SrcEltVT, LD->getOriginalAlign(), 8274 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 8275 8276 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride)); 8277 8278 Vals.push_back(ScalarLoad.getValue(0)); 8279 LoadChains.push_back(ScalarLoad.getValue(1)); 8280 } 8281 8282 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 8283 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 8284 8285 return std::make_pair(Value, NewChain); 8286 } 8287 8288 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 8289 SelectionDAG &DAG) const { 8290 SDLoc SL(ST); 8291 8292 SDValue Chain = ST->getChain(); 8293 SDValue BasePtr = ST->getBasePtr(); 8294 SDValue Value = ST->getValue(); 8295 EVT StVT = ST->getMemoryVT(); 8296 8297 if (StVT.isScalableVector()) 8298 report_fatal_error("Cannot scalarize scalable vector stores"); 8299 8300 // The type of the data we want to save 8301 EVT RegVT = Value.getValueType(); 8302 EVT RegSclVT = RegVT.getScalarType(); 8303 8304 // The type of data as saved in memory. 8305 EVT MemSclVT = StVT.getScalarType(); 8306 8307 unsigned NumElem = StVT.getVectorNumElements(); 8308 8309 // A vector must always be stored in memory as-is, i.e. without any padding 8310 // between the elements, since various code depend on it, e.g. in the 8311 // handling of a bitcast of a vector type to int, which may be done with a 8312 // vector store followed by an integer load. A vector that does not have 8313 // elements that are byte-sized must therefore be stored as an integer 8314 // built out of the extracted vector elements. 
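  // E.g. a v4i1 store is emitted below as a single i4 store of an integer
  // assembled by shifting each extracted lane into its bit position.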
8315 if (!MemSclVT.isByteSized()) { 8316 unsigned NumBits = StVT.getSizeInBits(); 8317 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 8318 8319 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 8320 8321 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8322 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 8323 DAG.getVectorIdxConstant(Idx, SL)); 8324 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 8325 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 8326 unsigned ShiftIntoIdx = 8327 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 8328 SDValue ShiftAmount = 8329 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 8330 SDValue ShiftedElt = 8331 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 8332 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 8333 } 8334 8335 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 8336 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 8337 ST->getAAInfo()); 8338 } 8339 8340 // Store Stride in bytes 8341 unsigned Stride = MemSclVT.getSizeInBits() / 8; 8342 assert(Stride && "Zero stride!"); 8343 // Extract each of the elements from the original vector and save them into 8344 // memory individually. 8345 SmallVector<SDValue, 8> Stores; 8346 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 8347 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 8348 DAG.getVectorIdxConstant(Idx, SL)); 8349 8350 SDValue Ptr = 8351 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride)); 8352 8353 // This scalar TruncStore may be illegal, but we legalize it later. 8354 SDValue Store = DAG.getTruncStore( 8355 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 8356 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 8357 ST->getAAInfo()); 8358 8359 Stores.push_back(Store); 8360 } 8361 8362 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 8363 } 8364 8365 std::pair<SDValue, SDValue> 8366 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 8367 assert(LD->getAddressingMode() == ISD::UNINDEXED && 8368 "unaligned indexed loads not implemented!"); 8369 SDValue Chain = LD->getChain(); 8370 SDValue Ptr = LD->getBasePtr(); 8371 EVT VT = LD->getValueType(0); 8372 EVT LoadedVT = LD->getMemoryVT(); 8373 SDLoc dl(LD); 8374 auto &MF = DAG.getMachineFunction(); 8375 8376 if (VT.isFloatingPoint() || VT.isVector()) { 8377 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 8378 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 8379 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 8380 LoadedVT.isVector()) { 8381 // Scalarize the load and let the individual components be handled. 8382 return scalarizeVectorLoad(LD, DAG); 8383 } 8384 8385 // Expand to a (misaligned) integer load of the same size, 8386 // then bitconvert to floating point or vector. 8387 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 8388 LD->getMemOperand()); 8389 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 8390 if (LoadedVT != VT) 8391 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 8392 ISD::ANY_EXTEND, dl, VT, Result); 8393 8394 return std::make_pair(Result, newLoad.getValue(1)); 8395 } 8396 8397 // Copy the value to a (aligned) stack slot using (unaligned) integer 8398 // loads and stores, then do a (aligned) load from the stack slot. 
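    // E.g. for a misaligned f80 load on a target with 4-byte registers:
    // LoadedBytes == 10 and RegBytes == 4, so NumRegs == 3 -- two full i32
    // copies in the loop below, then one 2-byte extending load for the tail.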
8399 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 8400 unsigned LoadedBytes = LoadedVT.getStoreSize(); 8401 unsigned RegBytes = RegVT.getSizeInBits() / 8; 8402 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 8403 8404 // Make sure the stack slot is also aligned for the register type. 8405 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 8406 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 8407 SmallVector<SDValue, 8> Stores; 8408 SDValue StackPtr = StackBase; 8409 unsigned Offset = 0; 8410 8411 EVT PtrVT = Ptr.getValueType(); 8412 EVT StackPtrVT = StackPtr.getValueType(); 8413 8414 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 8415 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 8416 8417 // Do all but one copies using the full register width. 8418 for (unsigned i = 1; i < NumRegs; i++) { 8419 // Load one integer register's worth from the original location. 8420 SDValue Load = DAG.getLoad( 8421 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 8422 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 8423 LD->getAAInfo()); 8424 // Follow the load with a store to the stack slot. Remember the store. 8425 Stores.push_back(DAG.getStore( 8426 Load.getValue(1), dl, Load, StackPtr, 8427 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 8428 // Increment the pointers. 8429 Offset += RegBytes; 8430 8431 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 8432 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 8433 } 8434 8435 // The last copy may be partial. Do an extending load. 8436 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 8437 8 * (LoadedBytes - Offset)); 8438 SDValue Load = 8439 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 8440 LD->getPointerInfo().getWithOffset(Offset), MemVT, 8441 LD->getOriginalAlign(), LD->getMemOperand()->getFlags(), 8442 LD->getAAInfo()); 8443 // Follow the load with a store to the stack slot. Remember the store. 8444 // On big-endian machines this requires a truncating store to ensure 8445 // that the bits end up in the right place. 8446 Stores.push_back(DAG.getTruncStore( 8447 Load.getValue(1), dl, Load, StackPtr, 8448 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 8449 8450 // The order of the stores doesn't matter - say it with a TokenFactor. 8451 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8452 8453 // Finally, perform the original load only redirected to the stack slot. 8454 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 8455 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 8456 LoadedVT); 8457 8458 // Callers expect a MERGE_VALUES node. 8459 return std::make_pair(Load, TF); 8460 } 8461 8462 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 8463 "Unaligned load of unsupported type."); 8464 8465 // Compute the new VT that is half the size of the old one. This is an 8466 // integer MVT. 8467 unsigned NumBits = LoadedVT.getSizeInBits(); 8468 EVT NewLoadedVT; 8469 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 8470 NumBits >>= 1; 8471 8472 Align Alignment = LD->getOriginalAlign(); 8473 unsigned IncrementSize = NumBits / 8; 8474 ISD::LoadExtType HiExtType = LD->getExtensionType(); 8475 8476 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
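  // (A non-extending load of the narrower half-width type cannot produce the
  // full-width VT, so the high part must use some extending load; ZEXTLOAD is
  // the natural choice.)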
8477 if (HiExtType == ISD::NON_EXTLOAD) 8478 HiExtType = ISD::ZEXTLOAD; 8479 8480 // Load the value in two parts 8481 SDValue Lo, Hi; 8482 if (DAG.getDataLayout().isLittleEndian()) { 8483 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 8484 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8485 LD->getAAInfo()); 8486 8487 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8488 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 8489 LD->getPointerInfo().getWithOffset(IncrementSize), 8490 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8491 LD->getAAInfo()); 8492 } else { 8493 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 8494 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8495 LD->getAAInfo()); 8496 8497 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8498 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 8499 LD->getPointerInfo().getWithOffset(IncrementSize), 8500 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 8501 LD->getAAInfo()); 8502 } 8503 8504 // aggregate the two parts 8505 SDValue ShiftAmount = 8506 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 8507 DAG.getDataLayout())); 8508 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 8509 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 8510 8511 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 8512 Hi.getValue(1)); 8513 8514 return std::make_pair(Result, TF); 8515 } 8516 8517 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 8518 SelectionDAG &DAG) const { 8519 assert(ST->getAddressingMode() == ISD::UNINDEXED && 8520 "unaligned indexed stores not implemented!"); 8521 SDValue Chain = ST->getChain(); 8522 SDValue Ptr = ST->getBasePtr(); 8523 SDValue Val = ST->getValue(); 8524 EVT VT = Val.getValueType(); 8525 Align Alignment = ST->getOriginalAlign(); 8526 auto &MF = DAG.getMachineFunction(); 8527 EVT StoreMemVT = ST->getMemoryVT(); 8528 8529 SDLoc dl(ST); 8530 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 8531 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 8532 if (isTypeLegal(intVT)) { 8533 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 8534 StoreMemVT.isVector()) { 8535 // Scalarize the store and let the individual components be handled. 8536 SDValue Result = scalarizeVectorStore(ST, DAG); 8537 return Result; 8538 } 8539 // Expand to a bitconvert of the value to the integer type of the 8540 // same size, then a (misaligned) int store. 8541 // FIXME: Does not handle truncating floating point stores! 8542 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 8543 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 8544 Alignment, ST->getMemOperand()->getFlags()); 8545 return Result; 8546 } 8547 // Do a (aligned) store to a stack slot, then copy from the stack slot 8548 // to the final destination using (unaligned) integer loads and stores. 8549 MVT RegVT = getRegisterType( 8550 *DAG.getContext(), 8551 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 8552 EVT PtrVT = Ptr.getValueType(); 8553 unsigned StoredBytes = StoreMemVT.getStoreSize(); 8554 unsigned RegBytes = RegVT.getSizeInBits() / 8; 8555 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 8556 8557 // Make sure the stack slot is also aligned for the register type. 
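    // From here on this mirrors the unaligned-load path above: perform the
    // store into the aligned slot once, then copy register-sized pieces out
    // to the real destination with ordinary integer stores.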
8558 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 8559 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 8560 8561 // Perform the original store, only redirected to the stack slot. 8562 SDValue Store = DAG.getTruncStore( 8563 Chain, dl, Val, StackPtr, 8564 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 8565 8566 EVT StackPtrVT = StackPtr.getValueType(); 8567 8568 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 8569 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 8570 SmallVector<SDValue, 8> Stores; 8571 unsigned Offset = 0; 8572 8573 // Do all but one copies using the full register width. 8574 for (unsigned i = 1; i < NumRegs; i++) { 8575 // Load one integer register's worth from the stack slot. 8576 SDValue Load = DAG.getLoad( 8577 RegVT, dl, Store, StackPtr, 8578 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 8579 // Store it to the final location. Remember the store. 8580 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 8581 ST->getPointerInfo().getWithOffset(Offset), 8582 ST->getOriginalAlign(), 8583 ST->getMemOperand()->getFlags())); 8584 // Increment the pointers. 8585 Offset += RegBytes; 8586 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 8587 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 8588 } 8589 8590 // The last store may be partial. Do a truncating store. On big-endian 8591 // machines this requires an extending load from the stack slot to ensure 8592 // that the bits are in the right place. 8593 EVT LoadMemVT = 8594 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 8595 8596 // Load from the stack slot. 8597 SDValue Load = DAG.getExtLoad( 8598 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 8599 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 8600 8601 Stores.push_back( 8602 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 8603 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 8604 ST->getOriginalAlign(), 8605 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 8606 // The order of the stores doesn't matter - say it with a TokenFactor. 8607 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8608 return Result; 8609 } 8610 8611 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 8612 "Unaligned store of unknown type."); 8613 // Get the half-size VT 8614 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 8615 unsigned NumBits = NewStoredVT.getFixedSizeInBits(); 8616 unsigned IncrementSize = NumBits / 8; 8617 8618 // Divide the stored value in two parts. 8619 SDValue ShiftAmount = DAG.getConstant( 8620 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 8621 SDValue Lo = Val; 8622 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 8623 8624 // Store the two parts 8625 SDValue Store1, Store2; 8626 Store1 = DAG.getTruncStore(Chain, dl, 8627 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 8628 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 8629 ST->getMemOperand()->getFlags()); 8630 8631 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 8632 Store2 = DAG.getTruncStore( 8633 Chain, dl, DAG.getDataLayout().isLittleEndian() ? 
Hi : Lo, Ptr, 8634 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 8635 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 8636 8637 SDValue Result = 8638 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 8639 return Result; 8640 } 8641 8642 SDValue 8643 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 8644 const SDLoc &DL, EVT DataVT, 8645 SelectionDAG &DAG, 8646 bool IsCompressedMemory) const { 8647 SDValue Increment; 8648 EVT AddrVT = Addr.getValueType(); 8649 EVT MaskVT = Mask.getValueType(); 8650 assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() && 8651 "Incompatible types of Data and Mask"); 8652 if (IsCompressedMemory) { 8653 if (DataVT.isScalableVector()) 8654 report_fatal_error( 8655 "Cannot currently handle compressed memory with scalable vectors"); 8656 // Incrementing the pointer according to number of '1's in the mask. 8657 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 8658 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 8659 if (MaskIntVT.getSizeInBits() < 32) { 8660 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 8661 MaskIntVT = MVT::i32; 8662 } 8663 8664 // Count '1's with POPCNT. 8665 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 8666 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 8667 // Scale is an element size in bytes. 8668 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 8669 AddrVT); 8670 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 8671 } else if (DataVT.isScalableVector()) { 8672 Increment = DAG.getVScale(DL, AddrVT, 8673 APInt(AddrVT.getFixedSizeInBits(), 8674 DataVT.getStoreSize().getKnownMinSize())); 8675 } else 8676 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 8677 8678 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 8679 } 8680 8681 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx, 8682 EVT VecVT, const SDLoc &dl, 8683 ElementCount SubEC) { 8684 assert(!(SubEC.isScalable() && VecVT.isFixedLengthVector()) && 8685 "Cannot index a scalable vector within a fixed-width vector"); 8686 8687 unsigned NElts = VecVT.getVectorMinNumElements(); 8688 unsigned NumSubElts = SubEC.getKnownMinValue(); 8689 EVT IdxVT = Idx.getValueType(); 8690 8691 if (VecVT.isScalableVector() && !SubEC.isScalable()) { 8692 // If this is a constant index and we know the value plus the number of the 8693 // elements in the subvector minus one is less than the minimum number of 8694 // elements then it's safe to return Idx. 8695 if (auto *IdxCst = dyn_cast<ConstantSDNode>(Idx)) 8696 if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts) 8697 return Idx; 8698 SDValue VS = 8699 DAG.getVScale(dl, IdxVT, APInt(IdxVT.getFixedSizeInBits(), NElts)); 8700 unsigned SubOpcode = NumSubElts <= NElts ? ISD::SUB : ISD::USUBSAT; 8701 SDValue Sub = DAG.getNode(SubOpcode, dl, IdxVT, VS, 8702 DAG.getConstant(NumSubElts, dl, IdxVT)); 8703 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub); 8704 } 8705 if (isPowerOf2_32(NElts) && NumSubElts == 1) { 8706 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), Log2_32(NElts)); 8707 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 8708 DAG.getConstant(Imm, dl, IdxVT)); 8709 } 8710 unsigned MaxIndex = NumSubElts < NElts ? 
NElts - NumSubElts : 0;
8711   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
8712                      DAG.getConstant(MaxIndex, dl, IdxVT));
8713 }
8714
8715 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
8716                                                 SDValue VecPtr, EVT VecVT,
8717                                                 SDValue Index) const {
8718   return getVectorSubVecPointer(
8719       DAG, VecPtr, VecVT,
8720       EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
8721       Index);
8722 }
8723
8724 SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
8725                                                SDValue VecPtr, EVT VecVT,
8726                                                EVT SubVecVT,
8727                                                SDValue Index) const {
8728   SDLoc dl(Index);
8729   // Make sure the index type is big enough to compute in.
8730   Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
8731
8732   EVT EltVT = VecVT.getVectorElementType();
8733
8734   // Calculate the element offset and add it to the pointer.
8735   unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
8736   assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
8737          "Converting bits to bytes lost precision");
8738   assert(SubVecVT.getVectorElementType() == EltVT &&
8739          "Sub-vector must be a vector with matching element type");
8740   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl,
8741                                   SubVecVT.getVectorElementCount());
8742
8743   EVT IdxVT = Index.getValueType();
8744   if (SubVecVT.isScalableVector())
8745     Index =
8746         DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8747                     DAG.getVScale(dl, IdxVT, APInt(IdxVT.getSizeInBits(), 1)));
8748
8749   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
8750                       DAG.getConstant(EltSize, dl, IdxVT));
8751   return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
8752 }
8753
8754 //===----------------------------------------------------------------------===//
8755 // Implementation of Emulated TLS Model
8756 //===----------------------------------------------------------------------===//
8757
8758 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
8759                                                 SelectionDAG &DAG) const {
8760   // Access to the address of TLS variable xyz is lowered to a function call:
8761   // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
8762   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8763   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
8764   SDLoc dl(GA);
8765
8766   ArgListTy Args;
8767   ArgListEntry Entry;
8768   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
8769   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
8770   StringRef EmuTlsVarName(NameString);
8771   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
8772   assert(EmuTlsVar && "Cannot find EmuTlsVar");
8773   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
8774   Entry.Ty = VoidPtrType;
8775   Args.push_back(Entry);
8776
8777   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
8778
8779   TargetLowering::CallLoweringInfo CLI(DAG);
8780   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
8781   CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
8782   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
8783
8784   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
8785   // calls. At least for X86 targets; maybe good for other targets too?
8786   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8787   MFI.setAdjustsStack(true); // Is this only for X86 target?
8788 MFI.setHasCalls(true); 8789 8790 assert((GA->getOffset() == 0) && 8791 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 8792 return CallResult.first; 8793 } 8794 8795 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 8796 SelectionDAG &DAG) const { 8797 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 8798 if (!isCtlzFast()) 8799 return SDValue(); 8800 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 8801 SDLoc dl(Op); 8802 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 8803 if (C->isZero() && CC == ISD::SETEQ) { 8804 EVT VT = Op.getOperand(0).getValueType(); 8805 SDValue Zext = Op.getOperand(0); 8806 if (VT.bitsLT(MVT::i32)) { 8807 VT = MVT::i32; 8808 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 8809 } 8810 unsigned Log2b = Log2_32(VT.getSizeInBits()); 8811 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 8812 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 8813 DAG.getConstant(Log2b, dl, MVT::i32)); 8814 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 8815 } 8816 } 8817 return SDValue(); 8818 } 8819 8820 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 8821 SDValue Op0 = Node->getOperand(0); 8822 SDValue Op1 = Node->getOperand(1); 8823 EVT VT = Op0.getValueType(); 8824 unsigned Opcode = Node->getOpcode(); 8825 SDLoc DL(Node); 8826 8827 // umin(x,y) -> sub(x,usubsat(x,y)) 8828 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 8829 isOperationLegal(ISD::USUBSAT, VT)) { 8830 return DAG.getNode(ISD::SUB, DL, VT, Op0, 8831 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 8832 } 8833 8834 // umax(x,y) -> add(x,usubsat(y,x)) 8835 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 8836 isOperationLegal(ISD::USUBSAT, VT)) { 8837 return DAG.getNode(ISD::ADD, DL, VT, Op0, 8838 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 8839 } 8840 8841 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 8842 ISD::CondCode CC; 8843 switch (Opcode) { 8844 default: llvm_unreachable("How did we get here?"); 8845 case ISD::SMAX: CC = ISD::SETGT; break; 8846 case ISD::SMIN: CC = ISD::SETLT; break; 8847 case ISD::UMAX: CC = ISD::SETUGT; break; 8848 case ISD::UMIN: CC = ISD::SETULT; break; 8849 } 8850 8851 // FIXME: Should really try to split the vector in case it's legal on a 8852 // subvector. 
8853 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8854 return DAG.UnrollVectorOp(Node); 8855 8856 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8857 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 8858 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 8859 } 8860 8861 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 8862 unsigned Opcode = Node->getOpcode(); 8863 SDValue LHS = Node->getOperand(0); 8864 SDValue RHS = Node->getOperand(1); 8865 EVT VT = LHS.getValueType(); 8866 SDLoc dl(Node); 8867 8868 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 8869 assert(VT.isInteger() && "Expected operands to be integers"); 8870 8871 // usub.sat(a, b) -> umax(a, b) - b 8872 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 8873 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 8874 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 8875 } 8876 8877 // uadd.sat(a, b) -> umin(a, ~b) + b 8878 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 8879 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 8880 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 8881 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 8882 } 8883 8884 unsigned OverflowOp; 8885 switch (Opcode) { 8886 case ISD::SADDSAT: 8887 OverflowOp = ISD::SADDO; 8888 break; 8889 case ISD::UADDSAT: 8890 OverflowOp = ISD::UADDO; 8891 break; 8892 case ISD::SSUBSAT: 8893 OverflowOp = ISD::SSUBO; 8894 break; 8895 case ISD::USUBSAT: 8896 OverflowOp = ISD::USUBO; 8897 break; 8898 default: 8899 llvm_unreachable("Expected method to receive signed or unsigned saturation " 8900 "addition or subtraction node."); 8901 } 8902 8903 // FIXME: Should really try to split the vector in case it's legal on a 8904 // subvector. 8905 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 8906 return DAG.UnrollVectorOp(Node); 8907 8908 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 8909 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8910 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 8911 SDValue SumDiff = Result.getValue(0); 8912 SDValue Overflow = Result.getValue(1); 8913 SDValue Zero = DAG.getConstant(0, dl, VT); 8914 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 8915 8916 if (Opcode == ISD::UADDSAT) { 8917 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8918 // (LHS + RHS) | OverflowMask 8919 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8920 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 8921 } 8922 // Overflow ? 0xffff.... : (LHS + RHS) 8923 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 8924 } 8925 8926 if (Opcode == ISD::USUBSAT) { 8927 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 8928 // (LHS - RHS) & ~OverflowMask 8929 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 8930 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 8931 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 8932 } 8933 // Overflow ? 0 : (LHS - RHS) 8934 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 8935 } 8936 8937 // Overflow ? 
(SumDiff >> (BW - 1)) ^ MinVal : SumDiff
8938   APInt MinVal = APInt::getSignedMinValue(BitWidth);
8939   SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
8940   SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, SumDiff,
8941                               DAG.getConstant(BitWidth - 1, dl, VT));
8942   Result = DAG.getNode(ISD::XOR, dl, VT, Shift, SatMin);
8943   return DAG.getSelect(dl, VT, Overflow, Result, SumDiff);
8944 }
8945
8946 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const {
8947   unsigned Opcode = Node->getOpcode();
8948   bool IsSigned = Opcode == ISD::SSHLSAT;
8949   SDValue LHS = Node->getOperand(0);
8950   SDValue RHS = Node->getOperand(1);
8951   EVT VT = LHS.getValueType();
8952   SDLoc dl(Node);
8953
8954   assert((Node->getOpcode() == ISD::SSHLSAT ||
8955           Node->getOpcode() == ISD::USHLSAT) &&
8956          "Expected a SHLSAT opcode");
8957   assert(VT == RHS.getValueType() && "Expected operands to be the same type");
8958   assert(VT.isInteger() && "Expected operands to be integers");
8959
8960   // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate.
8961
8962   unsigned BW = VT.getScalarSizeInBits();
8963   SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS);
8964   SDValue Orig =
8965       DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS);
8966
8967   SDValue SatVal;
8968   if (IsSigned) {
8969     SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT);
8970     SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT);
8971     SatVal = DAG.getSelectCC(dl, LHS, DAG.getConstant(0, dl, VT),
8972                              SatMin, SatMax, ISD::SETLT);
8973   } else {
8974     SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT);
8975   }
8976   Result = DAG.getSelectCC(dl, LHS, Orig, SatVal, Result, ISD::SETNE);
8977
8978   return Result;
8979 }
8980
8981 SDValue
8982 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const {
8983   assert((Node->getOpcode() == ISD::SMULFIX ||
8984           Node->getOpcode() == ISD::UMULFIX ||
8985           Node->getOpcode() == ISD::SMULFIXSAT ||
8986           Node->getOpcode() == ISD::UMULFIXSAT) &&
8987          "Expected a fixed point multiplication opcode");
8988
8989   SDLoc dl(Node);
8990   SDValue LHS = Node->getOperand(0);
8991   SDValue RHS = Node->getOperand(1);
8992   EVT VT = LHS.getValueType();
8993   unsigned Scale = Node->getConstantOperandVal(2);
8994   bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT ||
8995                      Node->getOpcode() == ISD::UMULFIXSAT);
8996   bool Signed = (Node->getOpcode() == ISD::SMULFIX ||
8997                  Node->getOpcode() == ISD::SMULFIXSAT);
8998   EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
8999   unsigned VTSize = VT.getScalarSizeInBits();
9000
9001   if (!Scale) {
9002     // [us]mul.fix(a, b, 0) -> mul(a, b)
9003     if (!Saturating) {
9004       if (isOperationLegalOrCustom(ISD::MUL, VT))
9005         return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
9006     } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) {
9007       SDValue Result =
9008           DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
9009       SDValue Product = Result.getValue(0);
9010       SDValue Overflow = Result.getValue(1);
9011       SDValue Zero = DAG.getConstant(0, dl, VT);
9012
9013       APInt MinVal = APInt::getSignedMinValue(VTSize);
9014       APInt MaxVal = APInt::getSignedMaxValue(VTSize);
9015       SDValue SatMin = DAG.getConstant(MinVal, dl, VT);
9016       SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
9017       // Xor the inputs; if the resulting sign bit is 0 the product will be
9018       // positive, else negative.
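      // (When SMULO overflows, neither operand can be zero, so the sign of
      // the infinitely precise product is exactly sign(LHS) ^ sign(RHS).)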
      // Xor the inputs: if the resulting sign bit is 0 the product will be
      // positive, else negative.
      SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS);
      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Xor, Zero, ISD::SETLT);
      Result = DAG.getSelect(dl, VT, ProdNeg, SatMin, SatMax);
      return DAG.getSelect(dl, VT, Overflow, Result, Product);
    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
      SDValue Result =
          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
      SDValue Product = Result.getValue(0);
      SDValue Overflow = Result.getValue(1);

      APInt MaxVal = APInt::getMaxValue(VTSize);
      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
    }
  }

  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
         "Expected scale to be less than the number of bits if signed or at "
         "most the number of bits if unsigned.");
  assert(LHS.getValueType() == RHS.getValueType() &&
         "Expected both operands to be the same type");

  // Get the upper and lower bits of the result.
  SDValue Lo, Hi;
  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
  if (isOperationLegalOrCustom(LoHiOp, VT)) {
    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
    Lo = Result.getValue(0);
    Hi = Result.getValue(1);
  } else if (isOperationLegalOrCustom(HiOp, VT)) {
    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
  } else if (VT.isVector()) {
    return SDValue();
  } else {
    report_fatal_error("Unable to expand fixed point multiplication.");
  }

  if (Scale == VTSize)
    // Result is just the top half since we'd be shifting by the width of the
    // operand. Overflow is impossible, so this works for both UMULFIX and
    // UMULFIXSAT.
    return Hi;

  // The result will need to be shifted right by the scale since both operands
  // are scaled. The result is given to us in 2 halves, so we only want part of
  // both in the result.
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
                               DAG.getConstant(Scale, dl, ShiftTy));
  if (!Saturating)
    return Result;

  if (!Signed) {
    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
    // widened multiplication) aren't all zeroes.

    // Saturate to max if ((Hi >> Scale) != 0),
    // which is the same as if (Hi > ((1 << Scale) - 1)).
    APInt MaxVal = APInt::getMaxValue(VTSize);
    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
                                      dl, VT);
    Result = DAG.getSelectCC(dl, Hi, LowMask,
                             DAG.getConstant(MaxVal, dl, VT), Result,
                             ISD::SETUGT);

    return Result;
  }

  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
  // widened multiplication) aren't all ones or all zeroes.

  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);

  if (Scale == 0) {
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
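    // Worked example (illustrative only): for i8 with Scale == 0,
    // (-128) * (-1) has the wide product 0x0080, so Lo = 0x80 and Hi = 0x00,
    // while the sign-extension of Lo is 0xff. Hi != Sign flags overflow, and
    // since Hi is non-negative the select below saturates to SatMax (127).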
    // Saturate to SatMin if the wide product is negative, and to SatMax if
    // the wide product is positive ...
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
                                               ISD::SETLT);
    // ... but only if we overflowed.
    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
  }

  // We handled Scale == 0 above, so all the bits to examine are in Hi.

  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
  // which is the same as if (Hi > ((1 << (Scale - 1)) - 1)).
  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
                                    dl, VT);
  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
  // Saturate to min if ((Hi >> (Scale - 1)) < -1),
  // which is the same as if (Hi < (-1 << (Scale - 1))).
  SDValue HighMask =
      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
                      dl, VT);
  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
  return Result;
}

SDValue
TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                                    SDValue LHS, SDValue RHS,
                                    unsigned Scale, SelectionDAG &DAG) const {
  assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
          Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
         "Expected a fixed point division opcode");

  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // If there is enough room in the type to upscale the LHS or downscale the
  // RHS before the division, we can perform it in this type without having to
  // resize. For signed operations, the LHS headroom is the number of
  // redundant sign bits, and for unsigned ones it is the number of zeroes.
  // The headroom for the RHS is the number of trailing zeroes.
  unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
                            : DAG.computeKnownBits(LHS).countMinLeadingZeros();
  unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();

  // For signed saturating operations, we need to be able to detect true
  // integer division overflow; that is, when you have MIN / -EPS. However,
  // this is undefined behavior and if we emit divisions that could take such
  // values it may cause undesired behavior (arithmetic exceptions on x86, for
  // example).
  // Avoid this by requiring an extra bit so that we never get this case.
  // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
  // signed saturating division, we need to emit a whopping 32-bit division.
  if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
    return SDValue();

  unsigned LHSShift = std::min(LHSLead, Scale);
  unsigned RHSShift = Scale - LHSShift;

  // At this point, we know that if we shift the LHS up by LHSShift and the
  // RHS down by RHSShift, we can emit a regular division with a final scaling
  // factor of Scale.
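  // For illustration (assuming operands whose known bits provide the
  // headroom): an i8 udiv.fix with Scale 4 dividing 1.0 by 2.0 in Q4.4
  // (LHS = 0x10, RHS = 0x20) has LHSLead = 3 and RHSTrail = 5, giving
  // LHSShift = 3 and RHSShift = 1, so we compute
  // (0x10 << 3) / (0x20 >> 1) = 0x80 / 0x10 = 0x08, which is 0.5 in Q4.4.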
  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
  if (LHSShift)
    LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
                      DAG.getConstant(LHSShift, dl, ShiftTy));
  if (RHSShift)
    RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
                      DAG.getConstant(RHSShift, dl, ShiftTy));

  SDValue Quot;
  if (Signed) {
    // For signed operations, if the resulting quotient is negative and the
    // remainder is nonzero, subtract 1 from the quotient to round towards
    // negative infinity.
    SDValue Rem;
    // FIXME: Ideally we would always produce an SDIVREM here, but if the
    // type isn't legal, SDIVREM cannot be expanded. There is no reason why
    // we couldn't just form a libcall, but the type legalizer doesn't do it.
    if (isTypeLegal(VT) && isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
      Quot = DAG.getNode(ISD::SDIVREM, dl, DAG.getVTList(VT, VT), LHS, RHS);
      Rem = Quot.getValue(1);
      Quot = Quot.getValue(0);
    } else {
      Quot = DAG.getNode(ISD::SDIV, dl, VT, LHS, RHS);
      Rem = DAG.getNode(ISD::SREM, dl, VT, LHS, RHS);
    }
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
    SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
    SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
    SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
    SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
                               DAG.getConstant(1, dl, VT));
    Quot = DAG.getSelect(dl, VT,
                         DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
                         Sub1, Quot);
  } else
    Quot = DAG.getNode(ISD::UDIV, dl, VT, LHS, RHS);

  return Quot;
}

void TargetLowering::expandUADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::UADDO;

  // If ADDCARRY/SUBCARRY is legal, use that instead.
  unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
  if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
    SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
    SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
                                    { LHS, RHS, CarryIn });
    Result = SDValue(NodeCarry.getNode(), 0);
    Overflow = SDValue(NodeCarry.getNode(), 1);
    return;
  }

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT SetCCType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
  SDValue SetCC;
  if (IsAdd && isOneConstant(RHS)) {
    // Special case: uaddo X, 1 overflowed if X+1 is 0. This potentially
    // reduces the live range of X. We assume comparing with 0 is cheap.
    // The general case (X + C) < C is not necessarily beneficial. Although we
    // reduce the live range of X, we may introduce the materialization of
    // constant C.
    SetCC =
        DAG.getSetCC(dl, SetCCType, Result,
                     DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ);
  } else {
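    // General case: for an unsigned add, overflow occurred iff the wrapped
    // result compares less than LHS (e.g. i8: 200 + 100 wraps to 44, and
    // 44 ult 200); for an unsigned sub, iff it compares greater than LHS
    // (e.g. 5 - 10 wraps to 251, and 251 ugt 5).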
    ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
    SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
  }
  Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
}

void TargetLowering::expandSADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::SADDO;

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT OType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));

  // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
  unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT;
  if (isOperationLegal(OpcSat, LHS.getValueType())) {
    SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
    SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
    Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
    return;
  }

  SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());

  // For an addition, the result should be less than one of the operands (LHS)
  // if and only if the other operand (RHS) is negative, otherwise there will
  // be overflow.
  // For a subtraction, the result should be less than one of the operands
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
  SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
  SDValue ConditionRHS =
      DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);

  Overflow = DAG.getBoolExtOrTrunc(
      DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
      ResultType, ResultType);
}

bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
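      // Illustrative check of the shift expansion: for i8 smulo(x, 4) with
      // x = 50, Result = 50 << 2 wraps to -56, and -56 >> 2 (arithmetic)
      // is -14 != 50, so Overflow is set, matching the fact that 200 does
      // not fit in i8.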
      Overflow = DAG.getSetCC(dl, SetCCVT,
                              DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL,
                                          dl, VT, Result, ShiftAmt),
                              LHS, ISD::SETNE);
      return true;
    }
  }

  EVT WideVT =
      EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT =
        EVT::getVectorVT(*DAG.getContext(), WideVT, VT.getVectorElementCount());

  SDValue BottomHalf;
  SDValue TopHalf;
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt =
        DAG.getConstant(VT.getScalarSizeInBits(), dl,
                        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of the
      // low part.
      unsigned LoSize = VT.getFixedSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
      HiLHS = DAG.getConstant(0, dl, VT);
      HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus has to be split to
    // two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
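      // For example, expanding an i64 [SU]MULO when i128 is illegal emits a
      // call to the MUL_I128 libcall (__multi3 in compiler-rt), with each
      // i128 operand split into two explicit i64 halves, low half first on
      // little-endian targets.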
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.bitsLT(Overflow.getValueType()))
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  // Try to use a shuffle reduction for power of two vectors.
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());
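
  // For example, vecreduce_add of <4 x i32> scalarizes here to
  // ((e0 + e1) + e2) + e3; if ADD were legal on <2 x i32>, the power-of-two
  // path above would instead fold the two halves with a single vector add
  // and only scalarize the remaining <2 x i32>.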
  // Result type may be wider than element type.
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}

SDValue TargetLowering::expandVecReduceSeq(SDNode *Node,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue AccOp = Node->getOperand(0);
  SDValue VecOp = Node->getOperand(1);
  SDNodeFlags Flags = Node->getFlags();

  EVT VT = VecOp.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);

  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());

  SDValue Res = AccOp;
  for (unsigned i = 0; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);

  return Res;
}

bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  }
  if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - X / Y * Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}

SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
                                            SelectionDAG &DAG) const {
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // DstVT is the result type, while SatVT is the size to which we saturate.
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
  unsigned SatWidth = SatVT.getScalarSizeInBits();
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth &&
         "Expected saturation width to be at most the result width");

  // Determine minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
  }
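
  // For example, fptosi.sat.i8.f32 uses MinInt = -128 and MaxInt = 127, both
  // of which convert to f32 exactly, so the FMINNUM/FMAXNUM clamp below is
  // usable. For fptosi.sat.i32.f32, MaxInt = 2147483647 rounds down to
  // 2147483520.0, the bounds are inexact, and the compare-and-select path is
  // taken instead.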
  // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
  // libcall emission cannot handle this. Large result types will fail.
  if (SrcVT == MVT::f16) {
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
    SrcVT = Src.getValueType();
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);

  SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
  SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);

  // If the integer bounds are exactly representable as floats and min/max are
  // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
  // of comparisons and selects.
  bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
                     isOperationLegal(ISD::FMAXNUM, SrcVT);
  if (AreExactFloatBounds && MinMaxLegal) {
    SDValue Clamped = Src;

    // Clamp Src by MinFloat from below. If Src is NaN the result is MinFloat.
    Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
    // Clamp by MaxFloat from above. NaN cannot occur.
    Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
    // Convert clamped value to integer.
    SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
                                  dl, DstVT, Clamped);

    // In the unsigned case we're done, because we mapped NaN to MinFloat,
    // which will cast to zero.
    if (!IsSigned)
      return FpToInt;

    // Otherwise, select 0 if Src is NaN.
    SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
    return DAG.getSelectCC(dl, Src, Src, ZeroInt, FpToInt,
                           ISD::CondCode::SETUO);
  }

  SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
  SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);

  // Result of direct conversion. The assumption here is that the operation is
  // non-trapping and it's fine to apply it to an out-of-range value if we
  // select it away later.
  SDValue FpToInt =
      DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);

  SDValue Select = FpToInt;

  // If Src ULT MinFloat, select MinInt. In particular, this also selects
  // MinInt if Src is NaN.
  Select = DAG.getSelectCC(dl, Src, MinFloatNode, MinIntNode, Select,
                           ISD::CondCode::SETULT);
  // If Src OGT MaxFloat, select MaxInt.
  Select = DAG.getSelectCC(dl, Src, MaxFloatNode, MaxIntNode, Select,
                           ISD::CondCode::SETOGT);

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero.
  if (!IsSigned)
    return Select;
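
  // Note the inexact bounds still saturate correctly: for fptosi.sat.i32.f32
  // the chain above uses MaxFloat = 2147483520.0, and any Src greater than
  // that is at least 2147483648.0 (the next representable f32), so it must
  // saturate to MaxInt anyway.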
  // Otherwise, select 0 if Src is NaN.
  SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
  return DAG.getSelectCC(dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
}

SDValue TargetLowering::expandVectorSplice(SDNode *Node,
                                           SelectionDAG &DAG) const {
  assert(Node->getOpcode() == ISD::VECTOR_SPLICE && "Unexpected opcode!");
  assert(Node->getValueType(0).isScalableVector() &&
         "Fixed length vector types expected to use SHUFFLE_VECTOR!");

  EVT VT = Node->getValueType(0);
  SDValue V1 = Node->getOperand(0);
  SDValue V2 = Node->getOperand(1);
  int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
  SDLoc DL(Node);

  // Expand through memory as follows:
  //  Alloca CONCAT_VECTORS_TYPES(V1, V2) Ptr
  //  Store V1, Ptr
  //  Store V2, Ptr + sizeof(V1)
  //  If (Imm < 0)
  //    TrailingElts = -Imm
  //    Ptr = Ptr + sizeof(V1) - (TrailingElts * sizeof(VT.Elt))
  //  else
  //    Ptr = Ptr + (Imm * sizeof(VT.Elt))
  //  Res = Load Ptr

  Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false);

  EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
                               VT.getVectorElementCount() * 2);
  SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);
  EVT PtrVT = StackPtr.getValueType();
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  // Store the lo part of CONCAT_VECTORS(V1, V2).
  SDValue StoreV1 = DAG.getStore(DAG.getEntryNode(), DL, V1, StackPtr, PtrInfo);
  // Store the hi part of CONCAT_VECTORS(V1, V2).
  SDValue OffsetToV2 = DAG.getVScale(
      DL, PtrVT,
      APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2);
  SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo);

  if (Imm >= 0) {
    // Load back the required element. getVectorElementPointer takes care of
    // clamping the index if it's out-of-bounds.
    StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
    // Load the spliced result.
    return DAG.getLoad(VT, DL, StoreV2, StackPtr,
                       MachinePointerInfo::getUnknownStack(MF));
  }

  uint64_t TrailingElts = -Imm;

  // NOTE: TrailingElts must be clamped so as not to read outside of V1:V2.
  TypeSize EltByteSize = VT.getVectorElementType().getStoreSize();
  SDValue TrailingBytes =
      DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT);

  if (TrailingElts > VT.getVectorMinNumElements()) {
    SDValue VLBytes = DAG.getVScale(
        DL, PtrVT,
        APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
    TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes);
  }
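
  // For example (illustrative): splice(V1, V2, -2) on <vscale x 4 x i32>
  // loads VT starting two elements before the end of V1's slot, yielding the
  // last two elements of V1 followed by the leading elements of V2.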
  // Calculate the start address of the spliced result.
  StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes);

  // Load the spliced result.
  return DAG.getLoad(VT, DL, StoreV2, StackPtr2,
                     MachinePointerInfo::getUnknownStack(MF));
}

bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
                                           SDValue &LHS, SDValue &RHS,
                                           SDValue &CC, SDValue Mask,
                                           SDValue EVL, bool &NeedInvert,
                                           const SDLoc &dl, SDValue &Chain,
                                           bool IsSignaling) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT OpVT = LHS.getSimpleValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  NeedInvert = false;
  assert(!EVL == !Mask && "VP Mask and EVL must either both be set or unset");
  bool IsNonVP = !EVL;
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default:
    llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(InvCC);
      return true;
    }
    // Swapping operands didn't work. Try inverting the condition.
    bool NeedSwap = false;
    InvCC = getSetCCInverse(CCCode, OpVT);
    if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      // If inverting the condition is not enough, try swapping operands
      // on top of it.
      InvCC = ISD::getSetCCSwappedOperands(InvCC);
      NeedSwap = true;
    }
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      CC = DAG.getCondCode(InvCC);
      NeedInvert = true;
      if (NeedSwap)
        std::swap(LHS, RHS);
      return true;
    }

    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default:
      llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETUO:
      if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
        CC1 = ISD::SETUNE;
        CC2 = ISD::SETUNE;
        Opc = ISD::OR;
        break;
      }
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
      NeedInvert = true;
      LLVM_FALLTHROUGH;
    case ISD::SETO:
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETO is expanded, SETOEQ must be legal!");
      CC1 = ISD::SETOEQ;
      CC2 = ISD::SETOEQ;
      Opc = ISD::AND;
      break;
    case ISD::SETONE:
    case ISD::SETUEQ:
      // If the SETUO or SETO CC isn't legal, we might be able to use
      // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one
      // of SETOGT/SETOLT to be legal, the other can be emulated by swapping
      // the operands.
      CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
      if (!TLI.isCondCodeLegal(CC2, OpVT) &&
          (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) ||
           TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) {
        CC1 = ISD::SETOGT;
        CC2 = ISD::SETOLT;
        Opc = ISD::OR;
        NeedInvert = ((unsigned)CCCode & 0x8U);
        break;
      }
      LLVM_FALLTHROUGH;
    case ISD::SETOEQ:
    case ISD::SETOGT:
    case ISD::SETOGE:
    case ISD::SETOLT:
    case ISD::SETOLE:
    case ISD::SETUNE:
    case ISD::SETUGT:
    case ISD::SETUGE:
    case ISD::SETULT:
    case ISD::SETULE:
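      // For example, setcc(LHS, RHS, setult) is decomposed below as
      // setcc(LHS, RHS, setlt) OR setcc(LHS, RHS, setuo), while
      // setcc(LHS, RHS, setole) becomes
      // setcc(LHS, RHS, setle) AND setcc(LHS, RHS, seto); the fourth
      // condition-code bit distinguishes the unordered flavors.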
      // If we are floating point, assign and break, otherwise fall through.
      if (!OpVT.isInteger()) {
        // We can use the 4th bit to tell if we are the unordered
        // or ordered version of the opcode.
        CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
        Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
        CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
        break;
      }
      // Fall through if we are an unsigned integer.
      LLVM_FALLTHROUGH;
    case ISD::SETLE:
    case ISD::SETGT:
    case ISD::SETGE:
    case ISD::SETLT:
    case ISD::SETNE:
    case ISD::SETEQ:
      // If all combinations of inverting the condition and swapping operands
      // didn't work then we have no means to expand the condition.
      llvm_unreachable("Don't know how to expand this condition!");
    }

    SDValue SetCC1, SetCC2;
    if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unordered operation,
      // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
      }
    } else {
      // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
      }
    }
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
                          SetCC2.getValue(1));
    if (IsNonVP)
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    else {
      // Transform the binary opcode to the VP equivalent.
      assert((Opc == ISD::OR || Opc == ISD::AND) && "Unexpected opcode");
      Opc = Opc == ISD::OR ? ISD::VP_OR : ISD::VP_AND;
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
    }
    RHS = SDValue();
    CC = SDValue();
    return true;
  }
  }
  return false;
}