//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore the following attributes because they don't affect the
  // call sequence.
  AttrBuilder CallerAttrs(F.getContext(), F.getAttributes().getRetAttrs());
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef})
    CallerAttrs.removeAttribute(Attr);

  if (CallerAttrs.hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.contains(Attribute::ZExt) ||
      CallerAttrs.contains(Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
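  // For illustration (hypothetical IR, not from the original source): in
  //
  //   define i32 @caller() {
  //     %r = tail call i32 @callee()
  //     ret i32 %r
  //   }
  //
  // the call's only use is the return and the return attributes match, so
  // this returns true; a mismatched zeroext/signext attribute on the
  // caller's return value would have bailed out above instead.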
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() == ISD::AssertZext)
      Value = Value.getOperand(0);
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    Register ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftAsync = Call->paramHasAttr(ArgIdx, Attribute::SwiftAsync);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamStackAlign(ArgIdx);
  IndirectType = nullptr;
  assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 &&
         "multiple ABI attributes?");
  if (IsByVal) {
    IndirectType = Call->getParamByValType(ArgIdx);
    if (!Alignment)
      Alignment = Call->getParamAlign(ArgIdx);
  }
  if (IsPreallocated)
    IndirectType = Call->getParamPreallocatedType(ArgIdx);
  if (IsInAlloca)
    IndirectType = Call->getParamInAllocaType(ArgIdx);
  if (IsSRet)
    IndirectType = Call->getParamStructRetType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
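///
/// For illustration, a hedged sketch of how a target might use this (the
/// operand values are hypothetical, not from the original source):
///
///   MakeLibCallOptions CallOptions;
///   SDValue Ops[2] = {LHS, RHS};
///   std::pair<SDValue, SDValue> Res =
///       makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl);
///
/// Res.first is the call's result value and Res.second is the output chain.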
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Limit != ~unsigned(0) && Op.isMemcpyWithFixedDstAlign() &&
      Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
             !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      unsigned Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to
  // libgcc not supporting it. We can update this code when libgcc provides
  // such functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 ||
          VT == MVT::ppcf128) &&
         "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ? RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    [[fallthrough]];
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    [[fallthrough]];
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons.
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target-specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-PIC modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
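  // For illustration (an assumed MIPS-style target, not from the original
  // source): with a GPRel32 directive each jump table entry is emitted as a
  // 32-bit offset from the global pointer, e.g.
  //   .gprel32 $BB0_3
  // rather than as a full absolute block address.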
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI,
                                             MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Early-out if we've ended up calling an undemanded node; leave this to
  // constant folding.
  if (DemandedBits.isZero() || DemandedElts.isZero())
    return false;

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C || Op1C->isOpaque())
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
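    // For illustration (hypothetical values, not from the original source):
    // with DemandedBits = 0x00FF, (and X, 0x13FF) is rewritten below to
    // (and X, 0x00FF), while (xor X, 0xFFFF) is left alone because every
    // demanded bit is set in the constant, i.e. it acts as a 'not'.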
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &DemandedBits,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  EVT VT = Op.getValueType();
  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (VT.isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = DemandedBits.getActiveBits();
  for (unsigned SmallVTBits = llvm::bit_ceil(DemandedSize);
       SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(VT, SmallVT) && TLI.isZExtFree(SmallVT, VT)) {
      // We found a type with free casts.
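      // For illustration (hypothetical types, not from the original source):
      // if Op is an i64 add of which only the low 16 bits are demanded, the
      // code below builds
      //   (any_extend (add (trunc X), (trunc Y)) to i64)
      // with i16 truncates, provided the i64 <-> i16 casts are free.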
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, VT, X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified =
      SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  APInt DemandedElts = VT.isFixedLengthVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Under what circumstances can we create nodes? Constant folding?
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(VT);

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    if (VT.isScalableVector())
      return SDValue();

    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned EltOffset = IsLE ? i : (Scale - 1 - i);
        unsigned BitOffset = EltOffset * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset);
        if (!Sub.isZero()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if (IsLE && (NumSrcEltBits % NumDstEltBits) == 0) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getZero(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
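    // For illustration (hypothetical values, not from the original source):
    // for (or X, (shl Y, 8)) with DemandedBits = 0x00FF, the shifted operand
    // is known zero in every demanded bit, so X is returned directly.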
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0
    // or -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits &&
        shouldRemoveRedundantExtend(Op))
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    if (VT.isScalableVector())
      return SDValue();

    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
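    // For illustration (hypothetical types, not from the original source):
    // (zero_extend_vector_inreg X) from v4i32 X to v2i64, with only element
    // 0 and at most its low 32 bits demanded, can be replaced by
    // (bitcast X to v2i64) on a little-endian target.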
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (IsLE && DemandedElts == 1 &&
        DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    if (VT.isScalableVector())
      return SDValue();

    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    if (VT.isScalableVector())
      return SDValue();

    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    // If we don't demand the inserted subvector, return the base vector.
    if (DemandedSubElts == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    assert(!VT.isScalableVector());
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    // TODO: Probably okay to remove after audit; here to reduce change size
    // in initial enablement patch for scalable vectors.
    if (VT.isScalableVector())
      return SDValue();

    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes. This means
  // that all lanes in a scalable vector are considered demanded.
  APInt DemandedElts = VT.isFixedLengthVector()
                           ? APInt::getAllOnes(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnes(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

// Attempt to form ext(avgfloor(A, B)) from shr(add(ext(A), ext(B)), 1),
// or to form ext(avgceil(A, B)) from shr(add(ext(A), ext(B), 1), 1).
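// For illustration (hypothetical types, not from the original source): with
// i8 values A and B zero-extended to i16,
//   (srl (add (zext A), (zext B)), 1)          -> (zext (avgflooru A, B))
//   (srl (add (add (zext A), (zext B)), 1), 1) -> (zext (avgceilu A, B))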
static SDValue combineShiftToAVG(SDValue Op, SelectionDAG &DAG,
                                 const TargetLowering &TLI,
                                 const APInt &DemandedBits,
                                 const APInt &DemandedElts,
                                 unsigned Depth) {
  assert((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) &&
         "SRL or SRA node is required here!");
  // Is the right shift using an immediate value of 1?
  ConstantSDNode *N1C = isConstOrConstSplat(Op.getOperand(1), DemandedElts);
  if (!N1C || !N1C->isOne())
    return SDValue();

  // We are looking for an avgfloor
  // add(ext, ext)
  // or one of these as an avgceil
  // add(add(ext, ext), 1)
  // add(add(ext, 1), ext)
  // add(ext, add(ext, 1))
  SDValue Add = Op.getOperand(0);
  if (Add.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ExtOpA = Add.getOperand(0);
  SDValue ExtOpB = Add.getOperand(1);
  SDValue Add2;
  auto MatchOperands = [&](SDValue Op1, SDValue Op2, SDValue Op3, SDValue A) {
    ConstantSDNode *ConstOp;
    if ((ConstOp = isConstOrConstSplat(Op2, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op3;
      Add2 = A;
      return true;
    }
    if ((ConstOp = isConstOrConstSplat(Op3, DemandedElts)) &&
        ConstOp->isOne()) {
      ExtOpA = Op1;
      ExtOpB = Op2;
      Add2 = A;
      return true;
    }
    return false;
  };
  bool IsCeil =
      (ExtOpA.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpA.getOperand(0), ExtOpA.getOperand(1), ExtOpB,
                     ExtOpA)) ||
      (ExtOpB.getOpcode() == ISD::ADD &&
       MatchOperands(ExtOpB.getOperand(0), ExtOpB.getOperand(1), ExtOpA,
                     ExtOpB));

  // If the shift is signed (sra):
  //  - Needs >= 2 sign bits for both operands.
  //  - Needs >= 2 zero bits.
  // If the shift is unsigned (srl):
  //  - Needs >= 1 zero bit for both operands.
  //  - Needs 1 demanded bit zero and >= 2 sign bits.
  unsigned ShiftOpc = Op.getOpcode();
  bool IsSigned = false;
  unsigned KnownBits;
  unsigned NumSignedA = DAG.ComputeNumSignBits(ExtOpA, DemandedElts, Depth);
  unsigned NumSignedB = DAG.ComputeNumSignBits(ExtOpB, DemandedElts, Depth);
  unsigned NumSigned = std::min(NumSignedA, NumSignedB) - 1;
  unsigned NumZeroA =
      DAG.computeKnownBits(ExtOpA, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZeroB =
      DAG.computeKnownBits(ExtOpB, DemandedElts, Depth).countMinLeadingZeros();
  unsigned NumZero = std::min(NumZeroA, NumZeroB);

  switch (ShiftOpc) {
  default:
    llvm_unreachable("Unexpected ShiftOpc in combineShiftToAVG");
  case ISD::SRA: {
    if (NumZero >= 2 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  case ISD::SRL: {
    if (NumZero >= 1 && NumSigned < NumZero) {
      IsSigned = false;
      KnownBits = NumZero;
      break;
    }
    if (NumSigned >= 1 && DemandedBits.isSignBitClear()) {
      IsSigned = true;
      KnownBits = NumSigned;
      break;
    }
    return SDValue();
  }
  }

  unsigned AVGOpc = IsCeil ? (IsSigned ? ISD::AVGCEILS : ISD::AVGCEILU)
                           : (IsSigned ? ISD::AVGFLOORS : ISD::AVGFLOORU);

  // Find the smallest power-2 type that is legal for this vector size and
  // operation, given the original type size and the number of known sign/zero
  // bits.
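  // For illustration (hypothetical types, not from the original source): a
  // v8i16 average whose operands are zero-extended from i8 has KnownBits = 8
  // leading zeros, so MinWidth is max(16 - 8, 8) = 8 and the node can be
  // rebuilt as a v8i8 avgflooru that is zero-extended back to v8i16.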
  EVT VT = Op.getValueType();
  unsigned MinWidth =
      std::max<unsigned>(VT.getScalarSizeInBits() - KnownBits, 8);
  EVT NVT = EVT::getIntegerVT(*DAG.getContext(), llvm::bit_ceil(MinWidth));
  if (VT.isVector())
    NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
  if (!TLI.isOperationLegalOrCustom(AVGOpc, NVT)) {
    // If we could not transform, and (both) adds are nuw/nsw, we can use the
    // larger type size to do the transform.
    if (!TLI.isOperationLegalOrCustom(AVGOpc, VT))
      return SDValue();

    if (DAG.computeOverflowForAdd(IsSigned, Add.getOperand(0),
                                  Add.getOperand(1)) ==
            SelectionDAG::OFK_Never &&
        (!Add2 || DAG.computeOverflowForAdd(IsSigned, Add2.getOperand(0),
                                            Add2.getOperand(1)) ==
                      SelectionDAG::OFK_Never))
      NVT = VT;
    else
      return SDValue();
  }

  SDLoc DL(Op);
  SDValue ResultAVG =
      DAG.getNode(AVGOpc, DL, NVT, DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpA),
                  DAG.getNode(ISD::TRUNCATE, DL, NVT, ExtOpB));
  return DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, VT,
                     ResultAVG);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of
/// the result of Op are ever used downstream. If we can use this information
/// to simplify Op, create a new simplified DAG node and return true,
/// returning the original and new nodes in Old and New. Otherwise, analyze
/// the expression and return a mask of Known bits for the expression (used to
/// simplify the caller). The Known bits may only be accurate for those bits
/// in the OriginalDemandedBits and OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  EVT VT = Op.getValueType();
  bool IsLE = TLO.DAG.getDataLayout().isLittleEndian();
  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!VT.isFixedLengthVector() || NumElts == VT.getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  // We can't simplify target constants.
  if (Op.getOpcode() == ISD::TargetConstant)
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known = KnownBits::makeConstant(cast<ConstantSDNode>(Op)->getAPIntValue());
    return false;
  }

  if (Op.getOpcode() == ISD::ConstantFP) {
    // We know all of the bits for a floating point constant!
    Known = KnownBits::makeConstant(
        cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt());
    return false;
  }

  // Other users may use these bits.
  bool HasMultiUse = false;
  if (!AssumeSingleUse && !Op.getNode()->hasOneUse()) {
    if (Depth >= SelectionDAG::MaxRecursionDepth) {
      // Limit search depth.
      return false;
    }
    // Allow multiple uses, just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnes(BitWidth);
    DemandedElts = APInt::getAllOnes(NumElts);
    HasMultiUse = true;
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::SCALAR_TO_VECTOR: {
    if (VT.isScalableVector())
      return false;
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zext(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    auto *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    }
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    if (VT.isScalableVector())
      return false;
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts)
      Known = Known.intersectWith(KnownVec);

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    if (VT.isScalableVector())
      return false;
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
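    // For illustration (hypothetical types, not from the original source):
    // for (insert_subvector v8i32:Src, v4i32:Sub, 4) with DemandedElts =
    // 0b11110000, DemandedSubElts is 0b1111 and DemandedSrcElts becomes
    // zero, since every demanded lane comes from the inserted subvector.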
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts)
      Known = Known.intersectWith(KnownSub);
    if (!!DemandedSrcElts)
      Known = Known.intersectWith(KnownSrc);

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSubElts.isAllOnes() ||
        !DemandedSrcElts.isAllOnes()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ? NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    if (VT.isScalableVector())
      return false;
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    if (VT.isScalableVector())
      return false;
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts)
        Known = Known.intersectWith(Known2);
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    assert(!VT.isScalableVector());
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
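    // For illustration (hypothetical values, not from the original source):
    // for a v4i32 shuffle with mask <0, 5, 2, 7> and DemandedElts = 0b0101,
    // the demanded lanes 0 and 2 both read from the first operand, so
    // DemandedLHS is 0b0101 and DemandedRHS is zero.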
    APInt DemandedLHS, DemandedRHS;
    if (!getShuffleDemandedElts(NumElts, ShuffleMask, DemandedElts, DemandedLHS,
                                DemandedRHS))
      break;

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = Known.intersectWith(Known2);
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known = Known.intersectWith(Known2);
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    // AND(INSERT_SUBVECTOR(C,X,I),M) -> INSERT_SUBVECTOR(AND(C,M),X,I)
    // iff 'C' is Undef/Constant and AND(X,M) == X (for DemandedBits).
    if (Op0.getOpcode() == ISD::INSERT_SUBVECTOR && !VT.isScalableVector() &&
        (Op0.getOperand(0).isUndef() ||
         ISD::isBuildVectorOfConstantSDNodes(Op0.getOperand(0).getNode())) &&
        Op0->hasOneUse()) {
      unsigned NumSubElts =
          Op0.getOperand(1).getValueType().getVectorNumElements();
      unsigned SubIdx = Op0.getConstantOperandVal(2);
      APInt DemandedSub =
          APInt::getBitsSet(NumElts, SubIdx, SubIdx + NumSubElts);
      KnownBits KnownSubMask =
          TLO.DAG.computeKnownBits(Op1, DemandedSub & DemandedElts, Depth + 1);
      if (DemandedBits.isSubsetOf(KnownSubMask.One)) {
        SDValue NewAnd =
            TLO.DAG.getNode(ISD::AND, dl, VT, Op0.getOperand(0), Op1);
        SDValue NewInsert =
            TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, NewAnd,
                            Op0.getOperand(1), Op0.getOperand(2));
        return TLO.CombineTo(Op, NewInsert);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // (or (and X, C1), (and (or X, Y), C2)) -> (or (and X, C1|C2), (and Y, C2))
    // TODO: Use SimplifyMultipleUseDemandedBits to peek through masks.
    if (Op0.getOpcode() == ISD::AND && Op1.getOpcode() == ISD::AND &&
        Op0->hasOneUse() && Op1->hasOneUse()) {
      // Attempt to match all commutations - m_c_Or would've been useful!
      for (int I = 0; I != 2; ++I) {
        SDValue X = Op.getOperand(I).getOperand(0);
        SDValue C1 = Op.getOperand(I).getOperand(1);
        SDValue Alt = Op.getOperand(1 - I).getOperand(0);
        SDValue C2 = Op.getOperand(1 - I).getOperand(1);
        if (Alt.getOpcode() == ISD::OR) {
          for (int J = 0; J != 2; ++J) {
            if (X == Alt.getOperand(J)) {
              SDValue Y = Alt.getOperand(1 - J);
              if (SDValue C12 = TLO.DAG.FoldConstantArithmetic(ISD::OR, dl, VT,
                                                               {C1, C2})) {
                SDValue MaskX = TLO.DAG.getNode(ISD::AND, dl, VT, X, C12);
                SDValue MaskY = TLO.DAG.getNode(ISD::AND, dl, VT, Y, C2);
                return TLO.CombineTo(
                    Op, TLO.DAG.getNode(ISD::OR, dl, VT, MaskX, MaskY));
              }
            }
          }
        }
      }
    }

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
// (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1542 if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
1543 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));
1544
1545 ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
1546 if (C) {
1547 // If one side is a constant, and all of the set bits in the constant are
1548 // also known set on the other side, turn this into an AND, as we know
1549 // the bits will be cleared.
1550 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1551 // NB: it is okay if more bits are known than are requested
1552 if (C->getAPIntValue() == Known2.One) {
1553 SDValue ANDC =
1554 TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
1555 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
1556 }
1557
1558 // If the RHS is a constant, see if we can change it. Don't alter a -1
1559 // constant because that's a 'not' op, and that is better for combining
1560 // and codegen.
1561 if (!C->isAllOnes() && DemandedBits.isSubsetOf(C->getAPIntValue())) {
1562 // We're flipping all demanded bits. Flip the undemanded bits too.
1563 SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
1564 return TLO.CombineTo(Op, New);
1565 }
1566
1567 unsigned Op0Opcode = Op0.getOpcode();
1568 if ((Op0Opcode == ISD::SRL || Op0Opcode == ISD::SHL) && Op0.hasOneUse()) {
1569 if (ConstantSDNode *ShiftC =
1570 isConstOrConstSplat(Op0.getOperand(1), DemandedElts)) {
1571 // Don't crash on an oversized shift. We cannot guarantee that a
1572 // bogus shift has been simplified to undef.
1573 if (ShiftC->getAPIntValue().ult(BitWidth)) {
1574 uint64_t ShiftAmt = ShiftC->getZExtValue();
1575 APInt Ones = APInt::getAllOnes(BitWidth);
1576 Ones = Op0Opcode == ISD::SHL ? Ones.shl(ShiftAmt)
1577 : Ones.lshr(ShiftAmt);
1578 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo();
1579 if ((DemandedBits & C->getAPIntValue()) == (DemandedBits & Ones) &&
1580 TLI.isDesirableToCommuteXorWithShift(Op.getNode())) {
1581 // If the xor constant is a demanded mask, do a 'not' before the
1582 // shift:
1583 // xor (X << ShiftC), XorC --> (not X) << ShiftC
1584 // xor (X >> ShiftC), XorC --> (not X) >> ShiftC
1585 SDValue Not = TLO.DAG.getNOT(dl, Op0.getOperand(0), VT);
1586 return TLO.CombineTo(Op, TLO.DAG.getNode(Op0Opcode, dl, VT, Not,
1587 Op0.getOperand(1)));
1588 }
1589 }
1590 }
1591 }
1592 }
1593
1594 // If we can't turn this into a 'not', try to shrink the constant.
1595 if (!C || !C->isAllOnes())
1596 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
1597 return true;
1598
1599 // Attempt to avoid multi-use ops if we don't need anything from them.
1600 if (!DemandedBits.isAllOnes() || !DemandedElts.isAllOnes()) {
1601 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1602 Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1603 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
1604 Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
1605 if (DemandedOp0 || DemandedOp1) {
1606 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
1607 Op1 = DemandedOp1 ?
DemandedOp1 : Op1; 1608 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1); 1609 return TLO.CombineTo(Op, NewOp); 1610 } 1611 } 1612 1613 Known ^= Known2; 1614 break; 1615 } 1616 case ISD::SELECT: 1617 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO, 1618 Depth + 1)) 1619 return true; 1620 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO, 1621 Depth + 1)) 1622 return true; 1623 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1624 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1625 1626 // If the operands are constants, see if we can simplify them. 1627 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1628 return true; 1629 1630 // Only known if known in both the LHS and RHS. 1631 Known = Known.intersectWith(Known2); 1632 break; 1633 case ISD::VSELECT: 1634 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, DemandedElts, 1635 Known, TLO, Depth + 1)) 1636 return true; 1637 if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, DemandedElts, 1638 Known2, TLO, Depth + 1)) 1639 return true; 1640 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1641 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1642 1643 // Only known if known in both the LHS and RHS. 1644 Known = Known.intersectWith(Known2); 1645 break; 1646 case ISD::SELECT_CC: 1647 if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO, 1648 Depth + 1)) 1649 return true; 1650 if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO, 1651 Depth + 1)) 1652 return true; 1653 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1654 assert(!Known2.hasConflict() && "Bits known to be one AND zero?"); 1655 1656 // If the operands are constants, see if we can simplify them. 1657 if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO)) 1658 return true; 1659 1660 // Only known if known in both the LHS and RHS. 1661 Known = Known.intersectWith(Known2); 1662 break; 1663 case ISD::SETCC: { 1664 SDValue Op0 = Op.getOperand(0); 1665 SDValue Op1 = Op.getOperand(1); 1666 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 1667 // If (1) we only need the sign-bit, (2) the setcc operands are the same 1668 // width as the setcc result, and (3) the result of a setcc conforms to 0 or 1669 // -1, we may be able to bypass the setcc. 1670 if (DemandedBits.isSignMask() && 1671 Op0.getScalarValueSizeInBits() == BitWidth && 1672 getBooleanContents(Op0.getValueType()) == 1673 BooleanContent::ZeroOrNegativeOneBooleanContent) { 1674 // If we're testing X < 0, then this compare isn't needed - just use X! 1675 // FIXME: We're limiting to integer types here, but this should also work 1676 // if we don't care about FP signed-zero. The use of SETLT with FP means 1677 // that we don't care about NaNs. 1678 if (CC == ISD::SETLT && Op1.getValueType().isInteger() && 1679 (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode()))) 1680 return TLO.CombineTo(Op, Op0); 1681 1682 // TODO: Should we check for other forms of sign-bit comparisons? 
1683 // Examples: X <= -1, X >= 0 1684 } 1685 if (getBooleanContents(Op0.getValueType()) == 1686 TargetLowering::ZeroOrOneBooleanContent && 1687 BitWidth > 1) 1688 Known.Zero.setBitsFrom(1); 1689 break; 1690 } 1691 case ISD::SHL: { 1692 SDValue Op0 = Op.getOperand(0); 1693 SDValue Op1 = Op.getOperand(1); 1694 EVT ShiftVT = Op1.getValueType(); 1695 1696 if (const APInt *SA = 1697 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1698 unsigned ShAmt = SA->getZExtValue(); 1699 if (ShAmt == 0) 1700 return TLO.CombineTo(Op, Op0); 1701 1702 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1703 // single shift. We can do this if the bottom bits (which are shifted 1704 // out) are never demanded. 1705 // TODO - support non-uniform vector amounts. 1706 if (Op0.getOpcode() == ISD::SRL) { 1707 if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) { 1708 if (const APInt *SA2 = 1709 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1710 unsigned C1 = SA2->getZExtValue(); 1711 unsigned Opc = ISD::SHL; 1712 int Diff = ShAmt - C1; 1713 if (Diff < 0) { 1714 Diff = -Diff; 1715 Opc = ISD::SRL; 1716 } 1717 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1718 return TLO.CombineTo( 1719 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1720 } 1721 } 1722 } 1723 1724 // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits 1725 // are not demanded. This will likely allow the anyext to be folded away. 1726 // TODO - support non-uniform vector amounts. 1727 if (Op0.getOpcode() == ISD::ANY_EXTEND) { 1728 SDValue InnerOp = Op0.getOperand(0); 1729 EVT InnerVT = InnerOp.getValueType(); 1730 unsigned InnerBits = InnerVT.getScalarSizeInBits(); 1731 if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits && 1732 isTypeDesirableForOp(ISD::SHL, InnerVT)) { 1733 SDValue NarrowShl = TLO.DAG.getNode( 1734 ISD::SHL, dl, InnerVT, InnerOp, 1735 TLO.DAG.getShiftAmountConstant(ShAmt, InnerVT, dl)); 1736 return TLO.CombineTo( 1737 Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl)); 1738 } 1739 1740 // Repeat the SHL optimization above in cases where an extension 1741 // intervenes: (shl (anyext (shr x, c1)), c2) to 1742 // (shl (anyext x), c2-c1). This requires that the bottom c1 bits 1743 // aren't demanded (as above) and that the shifted upper c1 bits of 1744 // x aren't demanded. 1745 // TODO - support non-uniform vector amounts. 1746 if (InnerOp.getOpcode() == ISD::SRL && Op0.hasOneUse() && 1747 InnerOp.hasOneUse()) { 1748 if (const APInt *SA2 = 1749 TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) { 1750 unsigned InnerShAmt = SA2->getZExtValue(); 1751 if (InnerShAmt < ShAmt && InnerShAmt < InnerBits && 1752 DemandedBits.getActiveBits() <= 1753 (InnerBits - InnerShAmt + ShAmt) && 1754 DemandedBits.countr_zero() >= ShAmt) { 1755 SDValue NewSA = 1756 TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT); 1757 SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, 1758 InnerOp.getOperand(0)); 1759 return TLO.CombineTo( 1760 Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA)); 1761 } 1762 } 1763 } 1764 } 1765 1766 APInt InDemandedMask = DemandedBits.lshr(ShAmt); 1767 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1768 Depth + 1)) 1769 return true; 1770 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1771 Known.Zero <<= ShAmt; 1772 Known.One <<= ShAmt; 1773 // low bits known zero. 
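// (e.g. for (shl X, 3) the low 3 result bits are zero no matter what X
// is, so they can be asserted here:)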
1774 Known.Zero.setLowBits(ShAmt); 1775 1776 // Attempt to avoid multi-use ops if we don't need anything from them. 1777 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1778 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1779 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1780 if (DemandedOp0) { 1781 SDValue NewOp = TLO.DAG.getNode(ISD::SHL, dl, VT, DemandedOp0, Op1); 1782 return TLO.CombineTo(Op, NewOp); 1783 } 1784 } 1785 1786 // Try shrinking the operation as long as the shift amount will still be 1787 // in range. 1788 if ((ShAmt < DemandedBits.getActiveBits()) && 1789 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) 1790 return true; 1791 } else { 1792 // This is a variable shift, so we can't shift the demand mask by a known 1793 // amount. But if we are not demanding high bits, then we are not 1794 // demanding those bits from the pre-shifted operand either. 1795 if (unsigned CTLZ = DemandedBits.countl_zero()) { 1796 APInt DemandedFromOp(APInt::getLowBitsSet(BitWidth, BitWidth - CTLZ)); 1797 if (SimplifyDemandedBits(Op0, DemandedFromOp, DemandedElts, Known, TLO, 1798 Depth + 1)) { 1799 SDNodeFlags Flags = Op.getNode()->getFlags(); 1800 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 1801 // Disable the nsw and nuw flags. We can no longer guarantee that we 1802 // won't wrap after simplification. 1803 Flags.setNoSignedWrap(false); 1804 Flags.setNoUnsignedWrap(false); 1805 Op->setFlags(Flags); 1806 } 1807 return true; 1808 } 1809 Known.resetAll(); 1810 } 1811 } 1812 1813 // If we are only demanding sign bits then we can use the shift source 1814 // directly. 1815 if (const APInt *MaxSA = 1816 TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) { 1817 unsigned ShAmt = MaxSA->getZExtValue(); 1818 unsigned NumSignBits = 1819 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1820 unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero(); 1821 if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits)) 1822 return TLO.CombineTo(Op, Op0); 1823 } 1824 break; 1825 } 1826 case ISD::SRL: { 1827 SDValue Op0 = Op.getOperand(0); 1828 SDValue Op1 = Op.getOperand(1); 1829 EVT ShiftVT = Op1.getValueType(); 1830 1831 // Try to match AVG patterns. 1832 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1833 DemandedElts, Depth + 1)) 1834 return TLO.CombineTo(Op, AVG); 1835 1836 if (const APInt *SA = 1837 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1838 unsigned ShAmt = SA->getZExtValue(); 1839 if (ShAmt == 0) 1840 return TLO.CombineTo(Op, Op0); 1841 1842 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1843 // single shift. We can do this if the top bits (which are shifted out) 1844 // are never demanded. 1845 // TODO - support non-uniform vector amounts. 1846 if (Op0.getOpcode() == ISD::SHL) { 1847 if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) { 1848 if (const APInt *SA2 = 1849 TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) { 1850 unsigned C1 = SA2->getZExtValue(); 1851 unsigned Opc = ISD::SRL; 1852 int Diff = ShAmt - C1; 1853 if (Diff < 0) { 1854 Diff = -Diff; 1855 Opc = ISD::SHL; 1856 } 1857 SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT); 1858 return TLO.CombineTo( 1859 Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA)); 1860 } 1861 } 1862 } 1863 1864 APInt InDemandedMask = (DemandedBits << ShAmt); 1865 1866 // If the shift is exact, then it does demand the low bits (and knows that 1867 // they are zero). 
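// For example, (srl exact X, 2) asserts that the low 2 bits of X are
// already zero; keeping them demanded below stops a recursive simplify
// from replacing X with a value whose low bits are nonzero, which would
// invalidate the exact flag.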
1868 if (Op->getFlags().hasExact()) 1869 InDemandedMask.setLowBits(ShAmt); 1870 1871 // Narrow shift to lower half - similar to ShrinkDemandedOp. 1872 // (srl i64:x, K) -> (i64 zero_extend (srl (i32 (trunc i64:x)), K)) 1873 if ((BitWidth % 2) == 0 && !VT.isVector() && 1874 ((InDemandedMask.countLeadingZeros() >= (BitWidth / 2)) || 1875 TLO.DAG.MaskedValueIsZero( 1876 Op0, APInt::getHighBitsSet(BitWidth, BitWidth / 2)))) { 1877 EVT HalfVT = EVT::getIntegerVT(*TLO.DAG.getContext(), BitWidth / 2); 1878 if (isNarrowingProfitable(VT, HalfVT) && 1879 isTypeDesirableForOp(ISD::SRL, HalfVT) && 1880 isTruncateFree(VT, HalfVT) && isZExtFree(HalfVT, VT) && 1881 (!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT))) { 1882 SDValue NewOp = TLO.DAG.getNode(ISD::TRUNCATE, dl, HalfVT, Op0); 1883 SDValue NewShiftAmt = TLO.DAG.getShiftAmountConstant( 1884 ShAmt, HalfVT, dl, TLO.LegalTypes()); 1885 SDValue NewShift = 1886 TLO.DAG.getNode(ISD::SRL, dl, HalfVT, NewOp, NewShiftAmt); 1887 return TLO.CombineTo( 1888 Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, NewShift)); 1889 } 1890 } 1891 1892 // Compute the new bits that are at the top now. 1893 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1894 Depth + 1)) 1895 return true; 1896 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1897 Known.Zero.lshrInPlace(ShAmt); 1898 Known.One.lshrInPlace(ShAmt); 1899 // High bits known zero. 1900 Known.Zero.setHighBits(ShAmt); 1901 1902 // Attempt to avoid multi-use ops if we don't need anything from them. 1903 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1904 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1905 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1906 if (DemandedOp0) { 1907 SDValue NewOp = TLO.DAG.getNode(ISD::SRL, dl, VT, DemandedOp0, Op1); 1908 return TLO.CombineTo(Op, NewOp); 1909 } 1910 } 1911 } else { 1912 // Use generic knownbits computation as it has support for non-uniform 1913 // shift amounts. 1914 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 1915 } 1916 break; 1917 } 1918 case ISD::SRA: { 1919 SDValue Op0 = Op.getOperand(0); 1920 SDValue Op1 = Op.getOperand(1); 1921 EVT ShiftVT = Op1.getValueType(); 1922 1923 // If we only want bits that already match the signbit then we don't need 1924 // to shift. 1925 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countr_zero(); 1926 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1927 NumHiDemandedBits) 1928 return TLO.CombineTo(Op, Op0); 1929 1930 // If this is an arithmetic shift right and only the low-bit is set, we can 1931 // always convert this into a logical shr, even if the shift amount is 1932 // variable. The low bit of the shift cannot be an input sign bit unless 1933 // the shift amount is >= the size of the datatype, which is undefined. 1934 if (DemandedBits.isOne()) 1935 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1936 1937 // Try to match AVG patterns. 1938 if (SDValue AVG = combineShiftToAVG(Op, TLO.DAG, *this, DemandedBits, 1939 DemandedElts, Depth + 1)) 1940 return TLO.CombineTo(Op, AVG); 1941 1942 if (const APInt *SA = 1943 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1944 unsigned ShAmt = SA->getZExtValue(); 1945 if (ShAmt == 0) 1946 return TLO.CombineTo(Op, Op0); 1947 1948 APInt InDemandedMask = (DemandedBits << ShAmt); 1949 1950 // If the shift is exact, then it does demand the low bits (and knows that 1951 // they are zero). 
1952 if (Op->getFlags().hasExact()) 1953 InDemandedMask.setLowBits(ShAmt); 1954 1955 // If any of the demanded bits are produced by the sign extension, we also 1956 // demand the input sign bit. 1957 if (DemandedBits.countl_zero() < ShAmt) 1958 InDemandedMask.setSignBit(); 1959 1960 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1961 Depth + 1)) 1962 return true; 1963 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1964 Known.Zero.lshrInPlace(ShAmt); 1965 Known.One.lshrInPlace(ShAmt); 1966 1967 // If the input sign bit is known to be zero, or if none of the top bits 1968 // are demanded, turn this into an unsigned shift right. 1969 if (Known.Zero[BitWidth - ShAmt - 1] || 1970 DemandedBits.countl_zero() >= ShAmt) { 1971 SDNodeFlags Flags; 1972 Flags.setExact(Op->getFlags().hasExact()); 1973 return TLO.CombineTo( 1974 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1975 } 1976 1977 int Log2 = DemandedBits.exactLogBase2(); 1978 if (Log2 >= 0) { 1979 // The bit must come from the sign. 1980 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1981 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1982 } 1983 1984 if (Known.One[BitWidth - ShAmt - 1]) 1985 // New bits are known one. 1986 Known.One.setHighBits(ShAmt); 1987 1988 // Attempt to avoid multi-use ops if we don't need anything from them. 1989 if (!InDemandedMask.isAllOnes() || !DemandedElts.isAllOnes()) { 1990 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 1991 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1); 1992 if (DemandedOp0) { 1993 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1); 1994 return TLO.CombineTo(Op, NewOp); 1995 } 1996 } 1997 } 1998 break; 1999 } 2000 case ISD::FSHL: 2001 case ISD::FSHR: { 2002 SDValue Op0 = Op.getOperand(0); 2003 SDValue Op1 = Op.getOperand(1); 2004 SDValue Op2 = Op.getOperand(2); 2005 bool IsFSHL = (Op.getOpcode() == ISD::FSHL); 2006 2007 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) { 2008 unsigned Amt = SA->getAPIntValue().urem(BitWidth); 2009 2010 // For fshl, 0-shift returns the 1st arg. 2011 // For fshr, 0-shift returns the 2nd arg. 2012 if (Amt == 0) { 2013 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts, 2014 Known, TLO, Depth + 1)) 2015 return true; 2016 break; 2017 } 2018 2019 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt)) 2020 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt) 2021 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt)); 2022 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt); 2023 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO, 2024 Depth + 1)) 2025 return true; 2026 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO, 2027 Depth + 1)) 2028 return true; 2029 2030 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt)); 2031 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt)); 2032 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 2033 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt); 2034 Known = Known.unionWith(Known2); 2035 2036 // Attempt to avoid multi-use ops if we don't need anything from them. 
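// (SimplifyMultipleUseDemandedBits does not rewrite the multi-use
// operands themselves; it only looks for an existing, simpler value -
// e.g. the input of a mask that touches no demanded bit - that can feed
// a fresh funnel-shift node.)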
2037 if (!Demanded0.isAllOnes() || !Demanded1.isAllOnes() ||
2038 !DemandedElts.isAllOnes()) {
2039 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
2040 Op0, Demanded0, DemandedElts, TLO.DAG, Depth + 1);
2041 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
2042 Op1, Demanded1, DemandedElts, TLO.DAG, Depth + 1);
2043 if (DemandedOp0 || DemandedOp1) {
2044 DemandedOp0 = DemandedOp0 ? DemandedOp0 : Op0;
2045 DemandedOp1 = DemandedOp1 ? DemandedOp1 : Op1;
2046 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedOp0,
2047 DemandedOp1, Op2);
2048 return TLO.CombineTo(Op, NewOp);
2049 }
2050 }
2051 }
2052
2053 // For pow-2 bitwidths we only demand the bottom modulo amt bits.
2054 if (isPowerOf2_32(BitWidth)) {
2055 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
2056 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
2057 Known2, TLO, Depth + 1))
2058 return true;
2059 }
2060 break;
2061 }
2062 case ISD::ROTL:
2063 case ISD::ROTR: {
2064 SDValue Op0 = Op.getOperand(0);
2065 SDValue Op1 = Op.getOperand(1);
2066 bool IsROTL = (Op.getOpcode() == ISD::ROTL);
2067
2068 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
2069 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
2070 return TLO.CombineTo(Op, Op0);
2071
2072 if (ConstantSDNode *SA = isConstOrConstSplat(Op1, DemandedElts)) {
2073 unsigned Amt = SA->getAPIntValue().urem(BitWidth);
2074 unsigned RevAmt = BitWidth - Amt;
2075
2076 // rotl: (Op0 << Amt) | (Op0 >> (BW - Amt))
2077 // rotr: (Op0 << (BW - Amt)) | (Op0 >> Amt)
2078 APInt Demanded0 = DemandedBits.rotr(IsROTL ? Amt : RevAmt);
2079 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
2080 Depth + 1))
2081 return true;
2082
2083 // rot*(x, 0) --> x
2084 if (Amt == 0)
2085 return TLO.CombineTo(Op, Op0);
2086
2087 // See if we don't demand either half of the rotated bits.
2088 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SHL, VT)) &&
2089 DemandedBits.countr_zero() >= (IsROTL ? Amt : RevAmt)) {
2090 Op1 = TLO.DAG.getConstant(IsROTL ? Amt : RevAmt, dl, Op1.getValueType());
2091 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, Op1));
2092 }
2093 if ((!TLO.LegalOperations() || isOperationLegal(ISD::SRL, VT)) &&
2094 DemandedBits.countl_zero() >= (IsROTL ? RevAmt : Amt)) {
2095 Op1 = TLO.DAG.getConstant(IsROTL ? RevAmt : Amt, dl, Op1.getValueType());
2096 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1));
2097 }
2098 }
2099
2100 // For pow-2 bitwidths we only demand the bottom modulo amt bits.
2101 if (isPowerOf2_32(BitWidth)) {
2102 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1);
2103 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO,
2104 Depth + 1))
2105 return true;
2106 }
2107 break;
2108 }
2109 case ISD::UMIN: {
2110 // Check if one arg is always less than (or equal) to the other arg.
2111 SDValue Op0 = Op.getOperand(0);
2112 SDValue Op1 = Op.getOperand(1);
2113 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1);
2114 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1);
2115 Known = KnownBits::umin(Known0, Known1);
2116 if (std::optional<bool> IsULE = KnownBits::ule(Known0, Known1))
2117 return TLO.CombineTo(Op, *IsULE ? Op0 : Op1);
2118 if (std::optional<bool> IsULT = KnownBits::ult(Known0, Known1))
2119 return TLO.CombineTo(Op, *IsULT ?
Op0 : Op1); 2120 break; 2121 } 2122 case ISD::UMAX: { 2123 // Check if one arg is always greater than (or equal) to the other arg. 2124 SDValue Op0 = Op.getOperand(0); 2125 SDValue Op1 = Op.getOperand(1); 2126 KnownBits Known0 = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth + 1); 2127 KnownBits Known1 = TLO.DAG.computeKnownBits(Op1, DemandedElts, Depth + 1); 2128 Known = KnownBits::umax(Known0, Known1); 2129 if (std::optional<bool> IsUGE = KnownBits::uge(Known0, Known1)) 2130 return TLO.CombineTo(Op, *IsUGE ? Op0 : Op1); 2131 if (std::optional<bool> IsUGT = KnownBits::ugt(Known0, Known1)) 2132 return TLO.CombineTo(Op, *IsUGT ? Op0 : Op1); 2133 break; 2134 } 2135 case ISD::BITREVERSE: { 2136 SDValue Src = Op.getOperand(0); 2137 APInt DemandedSrcBits = DemandedBits.reverseBits(); 2138 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2139 Depth + 1)) 2140 return true; 2141 Known.One = Known2.One.reverseBits(); 2142 Known.Zero = Known2.Zero.reverseBits(); 2143 break; 2144 } 2145 case ISD::BSWAP: { 2146 SDValue Src = Op.getOperand(0); 2147 2148 // If the only bits demanded come from one byte of the bswap result, 2149 // just shift the input byte into position to eliminate the bswap. 2150 unsigned NLZ = DemandedBits.countl_zero(); 2151 unsigned NTZ = DemandedBits.countr_zero(); 2152 2153 // Round NTZ down to the next byte. If we have 11 trailing zeros, then 2154 // we need all the bits down to bit 8. Likewise, round NLZ. If we 2155 // have 14 leading zeros, round to 8. 2156 NLZ = alignDown(NLZ, 8); 2157 NTZ = alignDown(NTZ, 8); 2158 // If we need exactly one byte, we can do this transformation. 2159 if (BitWidth - NLZ - NTZ == 8) { 2160 // Replace this with either a left or right shift to get the byte into 2161 // the right place. 2162 unsigned ShiftOpcode = NLZ > NTZ ? ISD::SRL : ISD::SHL; 2163 if (!TLO.LegalOperations() || isOperationLegal(ShiftOpcode, VT)) { 2164 EVT ShiftAmtTy = getShiftAmountTy(VT, DL); 2165 unsigned ShiftAmount = NLZ > NTZ ? NLZ - NTZ : NTZ - NLZ; 2166 SDValue ShAmt = TLO.DAG.getConstant(ShiftAmount, dl, ShiftAmtTy); 2167 SDValue NewOp = TLO.DAG.getNode(ShiftOpcode, dl, VT, Src, ShAmt); 2168 return TLO.CombineTo(Op, NewOp); 2169 } 2170 } 2171 2172 APInt DemandedSrcBits = DemandedBits.byteSwap(); 2173 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 2174 Depth + 1)) 2175 return true; 2176 Known.One = Known2.One.byteSwap(); 2177 Known.Zero = Known2.Zero.byteSwap(); 2178 break; 2179 } 2180 case ISD::CTPOP: { 2181 // If only 1 bit is demanded, replace with PARITY as long as we're before 2182 // op legalization. 2183 // FIXME: Limit to scalars for now. 2184 if (DemandedBits.isOne() && !TLO.LegalOps && !VT.isVector()) 2185 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::PARITY, dl, VT, 2186 Op.getOperand(0))); 2187 2188 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2189 break; 2190 } 2191 case ISD::SIGN_EXTEND_INREG: { 2192 SDValue Op0 = Op.getOperand(0); 2193 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2194 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 2195 2196 // If we only care about the highest bit, don't bother shifting right. 2197 if (DemandedBits.isSignMask()) { 2198 unsigned MinSignedBits = 2199 TLO.DAG.ComputeMaxSignificantBits(Op0, DemandedElts, Depth + 1); 2200 bool AlreadySignExtended = ExVTBits >= MinSignedBits; 2201 // However if the input is already sign extended we expect the sign 2202 // extension to be dropped altogether later and do not simplify. 
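// For example, i32 sign_extend_inreg from i8 with only bit 31 demanded:
// the result's sign bit is just bit 7 of Op0, so (shl Op0, 24) below
// reproduces it without performing the extension.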
2203 if (!AlreadySignExtended) { 2204 // Compute the correct shift amount type, which must be getShiftAmountTy 2205 // for scalar types after legalization. 2206 SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ExVTBits, dl, 2207 getShiftAmountTy(VT, DL)); 2208 return TLO.CombineTo(Op, 2209 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 2210 } 2211 } 2212 2213 // If none of the extended bits are demanded, eliminate the sextinreg. 2214 if (DemandedBits.getActiveBits() <= ExVTBits) 2215 return TLO.CombineTo(Op, Op0); 2216 2217 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 2218 2219 // Since the sign extended bits are demanded, we know that the sign 2220 // bit is demanded. 2221 InputDemandedBits.setBit(ExVTBits - 1); 2222 2223 if (SimplifyDemandedBits(Op0, InputDemandedBits, DemandedElts, Known, TLO, 2224 Depth + 1)) 2225 return true; 2226 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2227 2228 // If the sign bit of the input is known set or clear, then we know the 2229 // top bits of the result. 2230 2231 // If the input sign bit is known zero, convert this into a zero extension. 2232 if (Known.Zero[ExVTBits - 1]) 2233 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 2234 2235 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 2236 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 2237 Known.One.setBitsFrom(ExVTBits); 2238 Known.Zero &= Mask; 2239 } else { // Input sign bit unknown 2240 Known.Zero &= Mask; 2241 Known.One &= Mask; 2242 } 2243 break; 2244 } 2245 case ISD::BUILD_PAIR: { 2246 EVT HalfVT = Op.getOperand(0).getValueType(); 2247 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 2248 2249 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 2250 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 2251 2252 KnownBits KnownLo, KnownHi; 2253 2254 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 2255 return true; 2256 2257 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 2258 return true; 2259 2260 Known = KnownHi.concat(KnownLo); 2261 break; 2262 } 2263 case ISD::ZERO_EXTEND_VECTOR_INREG: 2264 if (VT.isScalableVector()) 2265 return false; 2266 [[fallthrough]]; 2267 case ISD::ZERO_EXTEND: { 2268 SDValue Src = Op.getOperand(0); 2269 EVT SrcVT = Src.getValueType(); 2270 unsigned InBits = SrcVT.getScalarSizeInBits(); 2271 unsigned InElts = SrcVT.isFixedLengthVector() ? SrcVT.getVectorNumElements() : 1; 2272 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 2273 2274 // If none of the top bits are demanded, convert this into an any_extend. 2275 if (DemandedBits.getActiveBits() <= InBits) { 2276 // If we only need the non-extended bits of the bottom element 2277 // then we can just bitcast to the result. 2278 if (IsLE && IsVecInReg && DemandedElts == 1 && 2279 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2280 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2281 2282 unsigned Opc = 2283 IsVecInReg ? 
ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2284 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2285 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2286 } 2287 2288 APInt InDemandedBits = DemandedBits.trunc(InBits); 2289 APInt InDemandedElts = DemandedElts.zext(InElts); 2290 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2291 Depth + 1)) 2292 return true; 2293 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2294 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2295 Known = Known.zext(BitWidth); 2296 2297 // Attempt to avoid multi-use ops if we don't need anything from them. 2298 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2299 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2300 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2301 break; 2302 } 2303 case ISD::SIGN_EXTEND_VECTOR_INREG: 2304 if (VT.isScalableVector()) 2305 return false; 2306 [[fallthrough]]; 2307 case ISD::SIGN_EXTEND: { 2308 SDValue Src = Op.getOperand(0); 2309 EVT SrcVT = Src.getValueType(); 2310 unsigned InBits = SrcVT.getScalarSizeInBits(); 2311 unsigned InElts = SrcVT.isFixedLengthVector() ? SrcVT.getVectorNumElements() : 1; 2312 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 2313 2314 // If none of the top bits are demanded, convert this into an any_extend. 2315 if (DemandedBits.getActiveBits() <= InBits) { 2316 // If we only need the non-extended bits of the bottom element 2317 // then we can just bitcast to the result. 2318 if (IsLE && IsVecInReg && DemandedElts == 1 && 2319 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2320 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2321 2322 unsigned Opc = 2323 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 2324 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2325 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2326 } 2327 2328 APInt InDemandedBits = DemandedBits.trunc(InBits); 2329 APInt InDemandedElts = DemandedElts.zext(InElts); 2330 2331 // Since some of the sign extended bits are demanded, we know that the sign 2332 // bit is demanded. 2333 InDemandedBits.setBit(InBits - 1); 2334 2335 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2336 Depth + 1)) 2337 return true; 2338 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2339 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2340 2341 // If the sign bit is known one, the top bits match. 2342 Known = Known.sext(BitWidth); 2343 2344 // If the sign bit is known zero, convert this to a zero extend. 2345 if (Known.isNonNegative()) { 2346 unsigned Opc = 2347 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 2348 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 2349 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 2350 } 2351 2352 // Attempt to avoid multi-use ops if we don't need anything from them. 2353 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2354 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2355 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2356 break; 2357 } 2358 case ISD::ANY_EXTEND_VECTOR_INREG: 2359 if (VT.isScalableVector()) 2360 return false; 2361 [[fallthrough]]; 2362 case ISD::ANY_EXTEND: { 2363 SDValue Src = Op.getOperand(0); 2364 EVT SrcVT = Src.getValueType(); 2365 unsigned InBits = SrcVT.getScalarSizeInBits(); 2366 unsigned InElts = SrcVT.isFixedLengthVector() ? 
SrcVT.getVectorNumElements() : 1; 2367 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 2368 2369 // If we only need the bottom element then we can just bitcast. 2370 // TODO: Handle ANY_EXTEND? 2371 if (IsLE && IsVecInReg && DemandedElts == 1 && 2372 VT.getSizeInBits() == SrcVT.getSizeInBits()) 2373 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2374 2375 APInt InDemandedBits = DemandedBits.trunc(InBits); 2376 APInt InDemandedElts = DemandedElts.zext(InElts); 2377 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 2378 Depth + 1)) 2379 return true; 2380 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2381 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 2382 Known = Known.anyext(BitWidth); 2383 2384 // Attempt to avoid multi-use ops if we don't need anything from them. 2385 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2386 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 2387 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 2388 break; 2389 } 2390 case ISD::TRUNCATE: { 2391 SDValue Src = Op.getOperand(0); 2392 2393 // Simplify the input, using demanded bit information, and compute the known 2394 // zero/one bits live out. 2395 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 2396 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 2397 if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, Known, TLO, 2398 Depth + 1)) 2399 return true; 2400 Known = Known.trunc(BitWidth); 2401 2402 // Attempt to avoid multi-use ops if we don't need anything from them. 2403 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 2404 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 2405 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 2406 2407 // If the input is only used by this truncate, see if we can shrink it based 2408 // on the known demanded bits. 2409 switch (Src.getOpcode()) { 2410 default: 2411 break; 2412 case ISD::SRL: 2413 // Shrink SRL by a constant if none of the high bits shifted in are 2414 // demanded. 2415 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 2416 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 2417 // undesirable. 2418 break; 2419 2420 if (Src.getNode()->hasOneUse()) { 2421 const APInt *ShAmtC = 2422 TLO.DAG.getValidShiftAmountConstant(Src, DemandedElts); 2423 if (!ShAmtC || ShAmtC->uge(BitWidth)) 2424 break; 2425 uint64_t ShVal = ShAmtC->getZExtValue(); 2426 2427 APInt HighBits = 2428 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 2429 HighBits.lshrInPlace(ShVal); 2430 HighBits = HighBits.trunc(BitWidth); 2431 2432 if (!(HighBits & DemandedBits)) { 2433 // None of the shifted in bits are needed. Add a truncate of the 2434 // shift input, then shift it. 2435 SDValue NewShAmt = TLO.DAG.getConstant( 2436 ShVal, dl, getShiftAmountTy(VT, DL, TLO.LegalTypes())); 2437 SDValue NewTrunc = 2438 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 2439 return TLO.CombineTo( 2440 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, NewShAmt)); 2441 } 2442 } 2443 break; 2444 } 2445 2446 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2447 break; 2448 } 2449 case ISD::AssertZext: { 2450 // AssertZext demands all of the high bits, plus any of the low bits 2451 // demanded by its users. 
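// For example, (AssertZext i32 Op0, i8) with DemandedBits = 0x5 queries
// the operand with ~0xFF | 0x5 = 0xFFFFFF05: the upper 24 bits must stay
// zero for the assertion to hold, plus the two demanded low bits.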
2452 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2453 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2454 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2455 TLO, Depth + 1)) 2456 return true; 2457 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2458 2459 Known.Zero |= ~InMask; 2460 Known.One &= (~Known.Zero); 2461 break; 2462 } 2463 case ISD::EXTRACT_VECTOR_ELT: { 2464 SDValue Src = Op.getOperand(0); 2465 SDValue Idx = Op.getOperand(1); 2466 ElementCount SrcEltCnt = Src.getValueType().getVectorElementCount(); 2467 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2468 2469 if (SrcEltCnt.isScalable()) 2470 return false; 2471 2472 // Demand the bits from every vector element without a constant index. 2473 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2474 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); 2475 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2476 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2477 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2478 2479 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2480 // anything about the extended bits. 2481 APInt DemandedSrcBits = DemandedBits; 2482 if (BitWidth > EltBitWidth) 2483 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2484 2485 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2486 Depth + 1)) 2487 return true; 2488 2489 // Attempt to avoid multi-use ops if we don't need anything from them. 2490 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2491 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2492 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2493 SDValue NewOp = 2494 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2495 return TLO.CombineTo(Op, NewOp); 2496 } 2497 } 2498 2499 Known = Known2; 2500 if (BitWidth > EltBitWidth) 2501 Known = Known.anyext(BitWidth); 2502 break; 2503 } 2504 case ISD::BITCAST: { 2505 if (VT.isScalableVector()) 2506 return false; 2507 SDValue Src = Op.getOperand(0); 2508 EVT SrcVT = Src.getValueType(); 2509 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2510 2511 // If this is an FP->Int bitcast and if the sign bit is the only 2512 // thing demanded, turn this into a FGETSIGN. 2513 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2514 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2515 SrcVT.isFloatingPoint()) { 2516 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2517 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2518 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2519 SrcVT != MVT::f128) { 2520 // Cannot eliminate/lower SHL for f128 yet. 2521 EVT Ty = OpVTLegal ? VT : MVT::i32; 2522 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2523 // place. We expect the SHL to be eliminated by other optimizations. 2524 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2525 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2526 if (!OpVTLegal && OpVTSizeInBits > 32) 2527 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2528 unsigned ShVal = Op.getValueSizeInBits() - 1; 2529 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2530 return TLO.CombineTo(Op, 2531 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2532 } 2533 } 2534 2535 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 
2536 // Demand the elt/bit if any of the original elts/bits are demanded. 2537 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0) { 2538 unsigned Scale = BitWidth / NumSrcEltBits; 2539 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2540 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2541 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2542 for (unsigned i = 0; i != Scale; ++i) { 2543 unsigned EltOffset = IsLE ? i : (Scale - 1 - i); 2544 unsigned BitOffset = EltOffset * NumSrcEltBits; 2545 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, BitOffset); 2546 if (!Sub.isZero()) { 2547 DemandedSrcBits |= Sub; 2548 for (unsigned j = 0; j != NumElts; ++j) 2549 if (DemandedElts[j]) 2550 DemandedSrcElts.setBit((j * Scale) + i); 2551 } 2552 } 2553 2554 APInt KnownSrcUndef, KnownSrcZero; 2555 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2556 KnownSrcZero, TLO, Depth + 1)) 2557 return true; 2558 2559 KnownBits KnownSrcBits; 2560 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2561 KnownSrcBits, TLO, Depth + 1)) 2562 return true; 2563 } else if (IsLE && (NumSrcEltBits % BitWidth) == 0) { 2564 // TODO - bigendian once we have test coverage. 2565 unsigned Scale = NumSrcEltBits / BitWidth; 2566 unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 2567 APInt DemandedSrcBits = APInt::getZero(NumSrcEltBits); 2568 APInt DemandedSrcElts = APInt::getZero(NumSrcElts); 2569 for (unsigned i = 0; i != NumElts; ++i) 2570 if (DemandedElts[i]) { 2571 unsigned Offset = (i % Scale) * BitWidth; 2572 DemandedSrcBits.insertBits(DemandedBits, Offset); 2573 DemandedSrcElts.setBit(i / Scale); 2574 } 2575 2576 if (SrcVT.isVector()) { 2577 APInt KnownSrcUndef, KnownSrcZero; 2578 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2579 KnownSrcZero, TLO, Depth + 1)) 2580 return true; 2581 } 2582 2583 KnownBits KnownSrcBits; 2584 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2585 KnownSrcBits, TLO, Depth + 1)) 2586 return true; 2587 2588 // Attempt to avoid multi-use ops if we don't need anything from them. 2589 if (!DemandedSrcBits.isAllOnes() || !DemandedSrcElts.isAllOnes()) { 2590 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2591 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2592 SDValue NewOp = TLO.DAG.getBitcast(VT, DemandedSrc); 2593 return TLO.CombineTo(Op, NewOp); 2594 } 2595 } 2596 } 2597 2598 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2599 // recursive call where Known may be useful to the caller. 2600 if (Depth > 0) { 2601 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2602 return false; 2603 } 2604 break; 2605 } 2606 case ISD::MUL: 2607 if (DemandedBits.isPowerOf2()) { 2608 // The LSB of X*Y is set only if (X & 1) == 1 and (Y & 1) == 1. 2609 // If we demand exactly one bit N and we have "X * (C' << N)" where C' is 2610 // odd (has LSB set), then the left-shifted low bit of X is the answer. 
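// For example, demanding only bit 3 of (X * 24): 24 == 3 << 3 with 3 odd,
// so bit 3 of the product is bit 0 of X*3, i.e. bit 0 of X, and the fold
// below produces (shl X, 3), whose bit 3 is exactly X[0].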
2611 unsigned CTZ = DemandedBits.countr_zero(); 2612 ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1), DemandedElts); 2613 if (C && C->getAPIntValue().countr_zero() == CTZ) { 2614 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout()); 2615 SDValue AmtC = TLO.DAG.getConstant(CTZ, dl, ShiftAmtTy); 2616 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, Op.getOperand(0), AmtC); 2617 return TLO.CombineTo(Op, Shl); 2618 } 2619 } 2620 // For a squared value "X * X", the bottom 2 bits are 0 and X[0] because: 2621 // X * X is odd iff X is odd. 2622 // 'Quadratic Reciprocity': X * X -> 0 for bit[1] 2623 if (Op.getOperand(0) == Op.getOperand(1) && DemandedBits.ult(4)) { 2624 SDValue One = TLO.DAG.getConstant(1, dl, VT); 2625 SDValue And1 = TLO.DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), One); 2626 return TLO.CombineTo(Op, And1); 2627 } 2628 [[fallthrough]]; 2629 case ISD::ADD: 2630 case ISD::SUB: { 2631 // Add, Sub, and Mul don't demand any bits in positions beyond that 2632 // of the highest bit demanded of them. 2633 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2634 SDNodeFlags Flags = Op.getNode()->getFlags(); 2635 unsigned DemandedBitsLZ = DemandedBits.countl_zero(); 2636 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2637 KnownBits KnownOp0, KnownOp1; 2638 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, KnownOp0, TLO, 2639 Depth + 1) || 2640 SimplifyDemandedBits(Op1, LoMask, DemandedElts, KnownOp1, TLO, 2641 Depth + 1) || 2642 // See if the operation should be performed at a smaller bit width. 2643 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2644 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2645 // Disable the nsw and nuw flags. We can no longer guarantee that we 2646 // won't wrap after simplification. 2647 Flags.setNoSignedWrap(false); 2648 Flags.setNoUnsignedWrap(false); 2649 Op->setFlags(Flags); 2650 } 2651 return true; 2652 } 2653 2654 // neg x with only low bit demanded is simply x. 2655 if (Op.getOpcode() == ISD::SUB && DemandedBits.isOne() && 2656 isa<ConstantSDNode>(Op0) && cast<ConstantSDNode>(Op0)->isZero()) 2657 return TLO.CombineTo(Op, Op1); 2658 2659 // Attempt to avoid multi-use ops if we don't need anything from them. 2660 if (!LoMask.isAllOnes() || !DemandedElts.isAllOnes()) { 2661 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2662 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2663 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2664 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2665 if (DemandedOp0 || DemandedOp1) { 2666 Flags.setNoSignedWrap(false); 2667 Flags.setNoUnsignedWrap(false); 2668 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2669 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2670 SDValue NewOp = 2671 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2672 return TLO.CombineTo(Op, NewOp); 2673 } 2674 } 2675 2676 // If we have a constant operand, we may be able to turn it into -1 if we 2677 // do not demand the high bits. This can make the constant smaller to 2678 // encode, allow more general folding, or match specialized instruction 2679 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2680 // is probably not useful (and could be detrimental). 
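// For example, i32 (add X, 255) with only the low 8 bits demanded:
// 255 | HighMask is all-ones, so the constant may become -1 and the node
// folds to (add X, -1), which computes the same low byte with a cheaper
// immediate.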
2681 ConstantSDNode *C = isConstOrConstSplat(Op1);
2682 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ);
2683 if (C && !C->isAllOnes() && !C->isOne() &&
2684 (C->getAPIntValue() | HighMask).isAllOnes()) {
2685 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT);
2686 // Disable the nsw and nuw flags. We can no longer guarantee that we
2687 // won't wrap after simplification.
2688 Flags.setNoSignedWrap(false);
2689 Flags.setNoUnsignedWrap(false);
2690 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags);
2691 return TLO.CombineTo(Op, NewOp);
2692 }
2693
2694 // Match a multiply with a disguised negated-power-of-2 and convert to
2695 // an equivalent shift-left amount.
2696 // Example: (X * MulC) + Op1 --> Op1 - (X << log2(-MulC))
2697 auto getShiftLeftAmt = [&HighMask](SDValue Mul) -> unsigned {
2698 if (Mul.getOpcode() != ISD::MUL || !Mul.hasOneUse())
2699 return 0;
2700
2701 // Don't touch opaque constants. Also, ignore zero and power-of-2
2702 // multiplies. Those will get folded later.
2703 ConstantSDNode *MulC = isConstOrConstSplat(Mul.getOperand(1));
2704 if (MulC && !MulC->isOpaque() && !MulC->isZero() &&
2705 !MulC->getAPIntValue().isPowerOf2()) {
2706 APInt UnmaskedC = MulC->getAPIntValue() | HighMask;
2707 if (UnmaskedC.isNegatedPowerOf2())
2708 return (-UnmaskedC).logBase2();
2709 }
2710 return 0;
2711 };
2712
2713 auto foldMul = [&](ISD::NodeType NT, SDValue X, SDValue Y, unsigned ShlAmt) {
2714 EVT ShiftAmtTy = getShiftAmountTy(VT, TLO.DAG.getDataLayout());
2715 SDValue ShlAmtC = TLO.DAG.getConstant(ShlAmt, dl, ShiftAmtTy);
2716 SDValue Shl = TLO.DAG.getNode(ISD::SHL, dl, VT, X, ShlAmtC);
2717 SDValue Res = TLO.DAG.getNode(NT, dl, VT, Y, Shl);
2718 return TLO.CombineTo(Op, Res);
2719 };
2720
2721 if (isOperationLegalOrCustom(ISD::SHL, VT)) {
2722 if (Op.getOpcode() == ISD::ADD) {
2723 // (X * MulC) + Op1 --> Op1 - (X << log2(-MulC))
2724 if (unsigned ShAmt = getShiftLeftAmt(Op0))
2725 return foldMul(ISD::SUB, Op0.getOperand(0), Op1, ShAmt);
2726 // Op0 + (X * MulC) --> Op0 - (X << log2(-MulC))
2727 if (unsigned ShAmt = getShiftLeftAmt(Op1))
2728 return foldMul(ISD::SUB, Op1.getOperand(0), Op0, ShAmt);
2729 }
2730 if (Op.getOpcode() == ISD::SUB) {
2731 // Op0 - (X * MulC) --> Op0 + (X << log2(-MulC))
2732 if (unsigned ShAmt = getShiftLeftAmt(Op1))
2733 return foldMul(ISD::ADD, Op1.getOperand(0), Op0, ShAmt);
2734 }
2735 }
2736
2737 if (Op.getOpcode() == ISD::MUL) {
2738 Known = KnownBits::mul(KnownOp0, KnownOp1);
2739 } else { // Op.getOpcode() is either ISD::ADD or ISD::SUB.
2740 Known = KnownBits::computeForAddSub(Op.getOpcode() == ISD::ADD,
2741 Flags.hasNoSignedWrap(), KnownOp0,
2742 KnownOp1);
2743 }
2744 break;
2745 }
2746 default:
2747 // We also ask the target about intrinsics (which could be specific to it).
2748 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2749 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
2750 // TODO: Probably okay to remove after audit; here to reduce change size
2751 // in initial enablement patch for scalable vectors.
2752 if (Op.getValueType().isScalableVector())
2753 break;
2754 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts,
2755 Known, TLO, Depth))
2756 return true;
2757 break;
2758 }
2759
2760 // Just use computeKnownBits to compute output bits.
2761 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
2762 break;
2763 }
2764
2765 // If we know the value of all of the demanded bits, return this as a
2766 // constant.
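// For example, if DemandedBits = 0xFF and Known.Zero | Known.One covers
// 0xFF, every bit a user can observe is fully determined, so Known.One
// is a correct replacement constant (undemanded bits are free to differ).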
2767 if (!isTargetCanonicalConstantNode(Op) && 2768 DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2769 // Avoid folding to a constant if any OpaqueConstant is involved. 2770 const SDNode *N = Op.getNode(); 2771 for (SDNode *Op : 2772 llvm::make_range(SDNodeIterator::begin(N), SDNodeIterator::end(N))) { 2773 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2774 if (C->isOpaque()) 2775 return false; 2776 } 2777 if (VT.isInteger()) 2778 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2779 if (VT.isFloatingPoint()) 2780 return TLO.CombineTo( 2781 Op, 2782 TLO.DAG.getConstantFP( 2783 APFloat(TLO.DAG.EVTToAPFloatSemantics(VT), Known.One), dl, VT)); 2784 } 2785 2786 // A multi use 'all demanded elts' simplify failed to find any knownbits. 2787 // Try again just for the original demanded elts. 2788 // Ensure we do this AFTER constant folding above. 2789 if (HasMultiUse && Known.isUnknown() && !OriginalDemandedElts.isAllOnes()) 2790 Known = TLO.DAG.computeKnownBits(Op, OriginalDemandedElts, Depth); 2791 2792 return false; 2793 } 2794 2795 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2796 const APInt &DemandedElts, 2797 DAGCombinerInfo &DCI) const { 2798 SelectionDAG &DAG = DCI.DAG; 2799 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2800 !DCI.isBeforeLegalizeOps()); 2801 2802 APInt KnownUndef, KnownZero; 2803 bool Simplified = 2804 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2805 if (Simplified) { 2806 DCI.AddToWorklist(Op.getNode()); 2807 DCI.CommitTargetLoweringOpt(TLO); 2808 } 2809 2810 return Simplified; 2811 } 2812 2813 /// Given a vector binary operation and known undefined elements for each input 2814 /// operand, compute whether each element of the output is undefined. 2815 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2816 const APInt &UndefOp0, 2817 const APInt &UndefOp1) { 2818 EVT VT = BO.getValueType(); 2819 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2820 "Vector binop only"); 2821 2822 EVT EltVT = VT.getVectorElementType(); 2823 unsigned NumElts = VT.isFixedLengthVector() ? VT.getVectorNumElements() : 1; 2824 assert(UndefOp0.getBitWidth() == NumElts && 2825 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2826 2827 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2828 const APInt &UndefVals) { 2829 if (UndefVals[Index]) 2830 return DAG.getUNDEF(EltVT); 2831 2832 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2833 // Try hard to make sure that the getNode() call is not creating temporary 2834 // nodes. Ignore opaque integers because they do not constant fold. 2835 SDValue Elt = BV->getOperand(Index); 2836 auto *C = dyn_cast<ConstantSDNode>(Elt); 2837 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2838 return Elt; 2839 } 2840 2841 return SDValue(); 2842 }; 2843 2844 APInt KnownUndef = APInt::getZero(NumElts); 2845 for (unsigned i = 0; i != NumElts; ++i) { 2846 // If both inputs for this element are either constant or undef and match 2847 // the element type, compute the constant/undef result for this element of 2848 // the vector. 2849 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2850 // not handle FP constants. The code within getNode() should be refactored 2851 // to avoid the danger of creating a bogus temporary node here. 
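// (Each element pair below is folded through getNode(); if the folded
// scalar comes back undef - for most opcodes this happens when both
// inputs are undef - that lane of the result is recorded as undef.)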
2852 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2853 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2854 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2855 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2856 KnownUndef.setBit(i); 2857 } 2858 return KnownUndef; 2859 } 2860 2861 bool TargetLowering::SimplifyDemandedVectorElts( 2862 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2863 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2864 bool AssumeSingleUse) const { 2865 EVT VT = Op.getValueType(); 2866 unsigned Opcode = Op.getOpcode(); 2867 APInt DemandedElts = OriginalDemandedElts; 2868 unsigned NumElts = DemandedElts.getBitWidth(); 2869 assert(VT.isVector() && "Expected vector op"); 2870 2871 KnownUndef = KnownZero = APInt::getZero(NumElts); 2872 2873 const TargetLowering &TLI = TLO.DAG.getTargetLoweringInfo(); 2874 if (!TLI.shouldSimplifyDemandedVectorElts(Op, TLO)) 2875 return false; 2876 2877 // TODO: For now we assume we know nothing about scalable vectors. 2878 if (VT.isScalableVector()) 2879 return false; 2880 2881 assert(VT.getVectorNumElements() == NumElts && 2882 "Mask size mismatches value type element count!"); 2883 2884 // Undef operand. 2885 if (Op.isUndef()) { 2886 KnownUndef.setAllBits(); 2887 return false; 2888 } 2889 2890 // If Op has other users, assume that all elements are needed. 2891 if (!AssumeSingleUse && !Op.getNode()->hasOneUse()) 2892 DemandedElts.setAllBits(); 2893 2894 // Not demanding any elements from Op. 2895 if (DemandedElts == 0) { 2896 KnownUndef.setAllBits(); 2897 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2898 } 2899 2900 // Limit search depth. 2901 if (Depth >= SelectionDAG::MaxRecursionDepth) 2902 return false; 2903 2904 SDLoc DL(Op); 2905 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2906 bool IsLE = TLO.DAG.getDataLayout().isLittleEndian(); 2907 2908 // Helper for demanding the specified elements and all the bits of both binary 2909 // operands. 2910 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2911 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2912 TLO.DAG, Depth + 1); 2913 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2914 TLO.DAG, Depth + 1); 2915 if (NewOp0 || NewOp1) { 2916 SDValue NewOp = TLO.DAG.getNode( 2917 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? 
NewOp1 : Op1); 2918 return TLO.CombineTo(Op, NewOp); 2919 } 2920 return false; 2921 }; 2922 2923 switch (Opcode) { 2924 case ISD::SCALAR_TO_VECTOR: { 2925 if (!DemandedElts[0]) { 2926 KnownUndef.setAllBits(); 2927 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2928 } 2929 SDValue ScalarSrc = Op.getOperand(0); 2930 if (ScalarSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 2931 SDValue Src = ScalarSrc.getOperand(0); 2932 SDValue Idx = ScalarSrc.getOperand(1); 2933 EVT SrcVT = Src.getValueType(); 2934 2935 ElementCount SrcEltCnt = SrcVT.getVectorElementCount(); 2936 2937 if (SrcEltCnt.isScalable()) 2938 return false; 2939 2940 unsigned NumSrcElts = SrcEltCnt.getFixedValue(); 2941 if (isNullConstant(Idx)) { 2942 APInt SrcDemandedElts = APInt::getOneBitSet(NumSrcElts, 0); 2943 APInt SrcUndef = KnownUndef.zextOrTrunc(NumSrcElts); 2944 APInt SrcZero = KnownZero.zextOrTrunc(NumSrcElts); 2945 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2946 TLO, Depth + 1)) 2947 return true; 2948 } 2949 } 2950 KnownUndef.setHighBits(NumElts - 1); 2951 break; 2952 } 2953 case ISD::BITCAST: { 2954 SDValue Src = Op.getOperand(0); 2955 EVT SrcVT = Src.getValueType(); 2956 2957 // We only handle vectors here. 2958 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 2959 if (!SrcVT.isVector()) 2960 break; 2961 2962 // Fast handling of 'identity' bitcasts. 2963 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2964 if (NumSrcElts == NumElts) 2965 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2966 KnownZero, TLO, Depth + 1); 2967 2968 APInt SrcDemandedElts, SrcZero, SrcUndef; 2969 2970 // Bitcast from 'large element' src vector to 'small element' vector, we 2971 // must demand a source element if any DemandedElt maps to it. 2972 if ((NumElts % NumSrcElts) == 0) { 2973 unsigned Scale = NumElts / NumSrcElts; 2974 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 2975 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2976 TLO, Depth + 1)) 2977 return true; 2978 2979 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2980 // of the large element. 2981 // TODO - bigendian once we have test coverage. 2982 if (IsLE) { 2983 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2984 APInt SrcDemandedBits = APInt::getZero(SrcEltSizeInBits); 2985 for (unsigned i = 0; i != NumElts; ++i) 2986 if (DemandedElts[i]) { 2987 unsigned Ofs = (i % Scale) * EltSizeInBits; 2988 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2989 } 2990 2991 KnownBits Known; 2992 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2993 TLO, Depth + 1)) 2994 return true; 2995 2996 // The bitcast has split each wide element into a number of 2997 // narrow subelements. We have just computed the Known bits 2998 // for wide elements. See if element splitting results in 2999 // some subelements being zero. Only for demanded elements! 3000 for (unsigned SubElt = 0; SubElt != Scale; ++SubElt) { 3001 if (!Known.Zero.extractBits(EltSizeInBits, SubElt * EltSizeInBits) 3002 .isAllOnes()) 3003 continue; 3004 for (unsigned SrcElt = 0; SrcElt != NumSrcElts; ++SrcElt) { 3005 unsigned Elt = Scale * SrcElt + SubElt; 3006 if (DemandedElts[Elt]) 3007 KnownZero.setBit(Elt); 3008 } 3009 } 3010 } 3011 3012 // If the src element is zero/undef then all the output elements will be - 3013 // only demanded elements are guaranteed to be correct. 
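// For example, bitcasting v2i64 -> v4i32 (Scale = 2) with src element 1
// known zero marks output elements 2 and 3 as zero, provided they were
// demanded.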
3014 for (unsigned i = 0; i != NumSrcElts; ++i) { 3015 if (SrcDemandedElts[i]) { 3016 if (SrcZero[i]) 3017 KnownZero.setBits(i * Scale, (i + 1) * Scale); 3018 if (SrcUndef[i]) 3019 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 3020 } 3021 } 3022 } 3023 3024 // Bitcast from 'small element' src vector to 'large element' vector, we 3025 // demand all smaller source elements covered by the larger demanded element 3026 // of this vector. 3027 if ((NumSrcElts % NumElts) == 0) { 3028 unsigned Scale = NumSrcElts / NumElts; 3029 SrcDemandedElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); 3030 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 3031 TLO, Depth + 1)) 3032 return true; 3033 3034 // If all the src elements covering an output element are zero/undef, then 3035 // the output element will be as well, assuming it was demanded. 3036 for (unsigned i = 0; i != NumElts; ++i) { 3037 if (DemandedElts[i]) { 3038 if (SrcZero.extractBits(Scale, i * Scale).isAllOnes()) 3039 KnownZero.setBit(i); 3040 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnes()) 3041 KnownUndef.setBit(i); 3042 } 3043 } 3044 } 3045 break; 3046 } 3047 case ISD::BUILD_VECTOR: { 3048 // Check all elements and simplify any unused elements with UNDEF. 3049 if (!DemandedElts.isAllOnes()) { 3050 // Don't simplify BROADCASTS. 3051 if (llvm::any_of(Op->op_values(), 3052 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 3053 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 3054 bool Updated = false; 3055 for (unsigned i = 0; i != NumElts; ++i) { 3056 if (!DemandedElts[i] && !Ops[i].isUndef()) { 3057 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 3058 KnownUndef.setBit(i); 3059 Updated = true; 3060 } 3061 } 3062 if (Updated) 3063 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 3064 } 3065 } 3066 for (unsigned i = 0; i != NumElts; ++i) { 3067 SDValue SrcOp = Op.getOperand(i); 3068 if (SrcOp.isUndef()) { 3069 KnownUndef.setBit(i); 3070 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 3071 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 3072 KnownZero.setBit(i); 3073 } 3074 } 3075 break; 3076 } 3077 case ISD::CONCAT_VECTORS: { 3078 EVT SubVT = Op.getOperand(0).getValueType(); 3079 unsigned NumSubVecs = Op.getNumOperands(); 3080 unsigned NumSubElts = SubVT.getVectorNumElements(); 3081 for (unsigned i = 0; i != NumSubVecs; ++i) { 3082 SDValue SubOp = Op.getOperand(i); 3083 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 3084 APInt SubUndef, SubZero; 3085 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 3086 Depth + 1)) 3087 return true; 3088 KnownUndef.insertBits(SubUndef, i * NumSubElts); 3089 KnownZero.insertBits(SubZero, i * NumSubElts); 3090 } 3091 3092 // Attempt to avoid multi-use ops if we don't need anything from them. 3093 if (!DemandedElts.isAllOnes()) { 3094 bool FoundNewSub = false; 3095 SmallVector<SDValue, 2> DemandedSubOps; 3096 for (unsigned i = 0; i != NumSubVecs; ++i) { 3097 SDValue SubOp = Op.getOperand(i); 3098 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 3099 SDValue NewSubOp = SimplifyMultipleUseDemandedVectorElts( 3100 SubOp, SubElts, TLO.DAG, Depth + 1); 3101 DemandedSubOps.push_back(NewSubOp ? NewSubOp : SubOp); 3102 FoundNewSub = NewSubOp ? 
true : FoundNewSub;
3103       }
3104       if (FoundNewSub) {
3105         SDValue NewOp =
3106             TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, DemandedSubOps);
3107         return TLO.CombineTo(Op, NewOp);
3108       }
3109     }
3110     break;
3111   }
3112   case ISD::INSERT_SUBVECTOR: {
3113     // Demand any elements from the subvector and the remainder from the src it's
3114     // inserted into.
3115     SDValue Src = Op.getOperand(0);
3116     SDValue Sub = Op.getOperand(1);
3117     uint64_t Idx = Op.getConstantOperandVal(2);
3118     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3119     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3120     APInt DemandedSrcElts = DemandedElts;
3121     DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
3122 
3123     APInt SubUndef, SubZero;
3124     if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO,
3125                                    Depth + 1))
3126       return true;
3127 
3128     // If none of the src operand elements are demanded, replace it with undef.
3129     if (!DemandedSrcElts && !Src.isUndef())
3130       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
3131                                                TLO.DAG.getUNDEF(VT), Sub,
3132                                                Op.getOperand(2)));
3133 
3134     if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero,
3135                                    TLO, Depth + 1))
3136       return true;
3137     KnownUndef.insertBits(SubUndef, Idx);
3138     KnownZero.insertBits(SubZero, Idx);
3139 
3140     // Attempt to avoid multi-use ops if we don't need anything from them.
3141     if (!DemandedSrcElts.isAllOnes() || !DemandedSubElts.isAllOnes()) {
3142       SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3143           Src, DemandedSrcElts, TLO.DAG, Depth + 1);
3144       SDValue NewSub = SimplifyMultipleUseDemandedVectorElts(
3145           Sub, DemandedSubElts, TLO.DAG, Depth + 1);
3146       if (NewSrc || NewSub) {
3147         NewSrc = NewSrc ? NewSrc : Src;
3148         NewSub = NewSub ? NewSub : Sub;
3149         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
3150                                         NewSub, Op.getOperand(2));
3151         return TLO.CombineTo(Op, NewOp);
3152       }
3153     }
3154     break;
3155   }
3156   case ISD::EXTRACT_SUBVECTOR: {
3157     // Offset the demanded elts by the subvector index.
3158     SDValue Src = Op.getOperand(0);
3159     if (Src.getValueType().isScalableVector())
3160       break;
3161     uint64_t Idx = Op.getConstantOperandVal(1);
3162     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3163     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx);
3164 
3165     APInt SrcUndef, SrcZero;
3166     if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO,
3167                                    Depth + 1))
3168       return true;
3169     KnownUndef = SrcUndef.extractBits(NumElts, Idx);
3170     KnownZero = SrcZero.extractBits(NumElts, Idx);
3171 
3172     // Attempt to avoid multi-use ops if we don't need anything from them.
3173     if (!DemandedElts.isAllOnes()) {
3174       SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
3175           Src, DemandedSrcElts, TLO.DAG, Depth + 1);
3176       if (NewSrc) {
3177         SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc,
3178                                         Op.getOperand(1));
3179         return TLO.CombineTo(Op, NewOp);
3180       }
3181     }
3182     break;
3183   }
3184   case ISD::INSERT_VECTOR_ELT: {
3185     SDValue Vec = Op.getOperand(0);
3186     SDValue Scl = Op.getOperand(1);
3187     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3188 
3189     // For a legal, constant insertion index, if we don't need this insertion
3190     // then strip it, else remove it from the demanded elts.
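    // Illustrative example: inserting into lane 2 of a v4i32 when only lanes
    // 0 and 1 are demanded (DemandedElts == 0b0011) means the insert writes an
    // undemanded lane, so the whole node can be replaced by the source vector.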
3191     if (CIdx && CIdx->getAPIntValue().ult(NumElts)) {
3192       unsigned Idx = CIdx->getZExtValue();
3193       if (!DemandedElts[Idx])
3194         return TLO.CombineTo(Op, Vec);
3195 
3196       APInt DemandedVecElts(DemandedElts);
3197       DemandedVecElts.clearBit(Idx);
3198       if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
3199                                      KnownZero, TLO, Depth + 1))
3200         return true;
3201 
3202       KnownUndef.setBitVal(Idx, Scl.isUndef());
3203 
3204       KnownZero.setBitVal(Idx, isNullConstant(Scl) || isNullFPConstant(Scl));
3205       break;
3206     }
3207 
3208     APInt VecUndef, VecZero;
3209     if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO,
3210                                    Depth + 1))
3211       return true;
3212     // Without knowing the insertion index we can't set KnownUndef/KnownZero.
3213     break;
3214   }
3215   case ISD::VSELECT: {
3216     SDValue Sel = Op.getOperand(0);
3217     SDValue LHS = Op.getOperand(1);
3218     SDValue RHS = Op.getOperand(2);
3219 
3220     // Try to transform the select condition based on the current demanded
3221     // elements.
3222     APInt UndefSel, UndefZero;
3223     if (SimplifyDemandedVectorElts(Sel, DemandedElts, UndefSel, UndefZero, TLO,
3224                                    Depth + 1))
3225       return true;
3226 
3227     // See if we can simplify either vselect operand.
3228     APInt DemandedLHS(DemandedElts);
3229     APInt DemandedRHS(DemandedElts);
3230     APInt UndefLHS, ZeroLHS;
3231     APInt UndefRHS, ZeroRHS;
3232     if (SimplifyDemandedVectorElts(LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3233                                    Depth + 1))
3234       return true;
3235     if (SimplifyDemandedVectorElts(RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3236                                    Depth + 1))
3237       return true;
3238 
3239     KnownUndef = UndefLHS & UndefRHS;
3240     KnownZero = ZeroLHS & ZeroRHS;
3241 
3242     // If we know that the selected element is always zero, we don't need the
3243     // select value element.
3244     APInt DemandedSel = DemandedElts & ~KnownZero;
3245     if (DemandedSel != DemandedElts)
3246       if (SimplifyDemandedVectorElts(Sel, DemandedSel, UndefSel, UndefZero, TLO,
3247                                      Depth + 1))
3248         return true;
3249 
3250     break;
3251   }
3252   case ISD::VECTOR_SHUFFLE: {
3253     SDValue LHS = Op.getOperand(0);
3254     SDValue RHS = Op.getOperand(1);
3255     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();
3256 
3257     // Collect demanded elements from shuffle operands.
3258     APInt DemandedLHS(NumElts, 0);
3259     APInt DemandedRHS(NumElts, 0);
3260     for (unsigned i = 0; i != NumElts; ++i) {
3261       int M = ShuffleMask[i];
3262       if (M < 0 || !DemandedElts[i])
3263         continue;
3264       assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
3265       if (M < (int)NumElts)
3266         DemandedLHS.setBit(M);
3267       else
3268         DemandedRHS.setBit(M - NumElts);
3269     }
3270 
3271     // See if we can simplify either shuffle operand.
3272     APInt UndefLHS, ZeroLHS;
3273     APInt UndefRHS, ZeroRHS;
3274     if (SimplifyDemandedVectorElts(LHS, DemandedLHS, UndefLHS, ZeroLHS, TLO,
3275                                    Depth + 1))
3276       return true;
3277     if (SimplifyDemandedVectorElts(RHS, DemandedRHS, UndefRHS, ZeroRHS, TLO,
3278                                    Depth + 1))
3279       return true;
3280 
3281     // Simplify mask using undef elements from LHS/RHS.
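    // Illustrative example: for a v4i32 mask <0, 5, 1, 4>, lanes 1 and 3 read
    // RHS elements 1 and 0; if those elements are known undef, the mask
    // entries become -1 below, which may enable a cheaper legal shuffle.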
3282 bool Updated = false; 3283 bool IdentityLHS = true, IdentityRHS = true; 3284 SmallVector<int, 32> NewMask(ShuffleMask); 3285 for (unsigned i = 0; i != NumElts; ++i) { 3286 int &M = NewMask[i]; 3287 if (M < 0) 3288 continue; 3289 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 3290 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 3291 Updated = true; 3292 M = -1; 3293 } 3294 IdentityLHS &= (M < 0) || (M == (int)i); 3295 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 3296 } 3297 3298 // Update legal shuffle masks based on demanded elements if it won't reduce 3299 // to Identity which can cause premature removal of the shuffle mask. 3300 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 3301 SDValue LegalShuffle = 3302 buildLegalVectorShuffle(VT, DL, LHS, RHS, NewMask, TLO.DAG); 3303 if (LegalShuffle) 3304 return TLO.CombineTo(Op, LegalShuffle); 3305 } 3306 3307 // Propagate undef/zero elements from LHS/RHS. 3308 for (unsigned i = 0; i != NumElts; ++i) { 3309 int M = ShuffleMask[i]; 3310 if (M < 0) { 3311 KnownUndef.setBit(i); 3312 } else if (M < (int)NumElts) { 3313 if (UndefLHS[M]) 3314 KnownUndef.setBit(i); 3315 if (ZeroLHS[M]) 3316 KnownZero.setBit(i); 3317 } else { 3318 if (UndefRHS[M - NumElts]) 3319 KnownUndef.setBit(i); 3320 if (ZeroRHS[M - NumElts]) 3321 KnownZero.setBit(i); 3322 } 3323 } 3324 break; 3325 } 3326 case ISD::ANY_EXTEND_VECTOR_INREG: 3327 case ISD::SIGN_EXTEND_VECTOR_INREG: 3328 case ISD::ZERO_EXTEND_VECTOR_INREG: { 3329 APInt SrcUndef, SrcZero; 3330 SDValue Src = Op.getOperand(0); 3331 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 3332 APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts); 3333 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 3334 Depth + 1)) 3335 return true; 3336 KnownZero = SrcZero.zextOrTrunc(NumElts); 3337 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 3338 3339 if (IsLE && Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 3340 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 3341 DemandedSrcElts == 1) { 3342 // aext - if we just need the bottom element then we can bitcast. 3343 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 3344 } 3345 3346 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 3347 // zext(undef) upper bits are guaranteed to be zero. 3348 if (DemandedElts.isSubsetOf(KnownUndef)) 3349 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3350 KnownUndef.clearAllBits(); 3351 3352 // zext - if we just need the bottom element then we can mask: 3353 // zext(and(x,c)) -> and(x,c') iff the zext is the only user of the and. 3354 if (IsLE && DemandedSrcElts == 1 && Src.getOpcode() == ISD::AND && 3355 Op->isOnlyUserOf(Src.getNode()) && 3356 Op.getValueSizeInBits() == Src.getValueSizeInBits()) { 3357 SDLoc DL(Op); 3358 EVT SrcVT = Src.getValueType(); 3359 EVT SrcSVT = SrcVT.getScalarType(); 3360 SmallVector<SDValue> MaskElts; 3361 MaskElts.push_back(TLO.DAG.getAllOnesConstant(DL, SrcSVT)); 3362 MaskElts.append(NumSrcElts - 1, TLO.DAG.getConstant(0, DL, SrcSVT)); 3363 SDValue Mask = TLO.DAG.getBuildVector(SrcVT, DL, MaskElts); 3364 if (SDValue Fold = TLO.DAG.FoldConstantArithmetic( 3365 ISD::AND, DL, SrcVT, {Src.getOperand(1), Mask})) { 3366 Fold = TLO.DAG.getNode(ISD::AND, DL, SrcVT, Src.getOperand(0), Fold); 3367 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Fold)); 3368 } 3369 } 3370 } 3371 break; 3372 } 3373 3374 // TODO: There are more binop opcodes that could be handled here - MIN, 3375 // MAX, saturated math, etc. 
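  // Illustrative note for the ADD case below: for add(x, x) where this node
  // is x's only user, both uses belong to the add, so the operand can be
  // simplified as if it had a single use.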
3376   case ISD::ADD: {
3377     SDValue Op0 = Op.getOperand(0);
3378     SDValue Op1 = Op.getOperand(1);
3379     if (Op0 == Op1 && Op->isOnlyUserOf(Op0.getNode())) {
3380       APInt UndefLHS, ZeroLHS;
3381       if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3382                                      Depth + 1, /*AssumeSingleUse*/ true))
3383         return true;
3384     }
3385     [[fallthrough]];
3386   }
3387   case ISD::OR:
3388   case ISD::XOR:
3389   case ISD::SUB:
3390   case ISD::FADD:
3391   case ISD::FSUB:
3392   case ISD::FMUL:
3393   case ISD::FDIV:
3394   case ISD::FREM: {
3395     SDValue Op0 = Op.getOperand(0);
3396     SDValue Op1 = Op.getOperand(1);
3397 
3398     APInt UndefRHS, ZeroRHS;
3399     if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3400                                    Depth + 1))
3401       return true;
3402     APInt UndefLHS, ZeroLHS;
3403     if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3404                                    Depth + 1))
3405       return true;
3406 
3407     KnownZero = ZeroLHS & ZeroRHS;
3408     KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
3409 
3410     // Attempt to avoid multi-use ops if we don't need anything from them.
3411     // TODO - use KnownUndef to relax the demandedelts?
3412     if (!DemandedElts.isAllOnes())
3413       if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3414         return true;
3415     break;
3416   }
3417   case ISD::SHL:
3418   case ISD::SRL:
3419   case ISD::SRA:
3420   case ISD::ROTL:
3421   case ISD::ROTR: {
3422     SDValue Op0 = Op.getOperand(0);
3423     SDValue Op1 = Op.getOperand(1);
3424 
3425     APInt UndefRHS, ZeroRHS;
3426     if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO,
3427                                    Depth + 1))
3428       return true;
3429     APInt UndefLHS, ZeroLHS;
3430     if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO,
3431                                    Depth + 1))
3432       return true;
3433 
3434     KnownZero = ZeroLHS;
3435     KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
3436 
3437     // Attempt to avoid multi-use ops if we don't need anything from them.
3438     // TODO - use KnownUndef to relax the demandedelts?
3439     if (!DemandedElts.isAllOnes())
3440       if (SimplifyDemandedVectorEltsBinOp(Op0, Op1))
3441         return true;
3442     break;
3443   }
3444   case ISD::MUL:
3445   case ISD::MULHU:
3446   case ISD::MULHS:
3447   case ISD::AND: {
3448     SDValue Op0 = Op.getOperand(0);
3449     SDValue Op1 = Op.getOperand(1);
3450 
3451     APInt SrcUndef, SrcZero;
3452     if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO,
3453                                    Depth + 1))
3454       return true;
3455     // If we know that a demanded element was zero in Op1 we don't need to
3456     // demand it in Op0 - it's guaranteed to be zero.
3457     APInt DemandedElts0 = DemandedElts & ~SrcZero;
3458     if (SimplifyDemandedVectorElts(Op0, DemandedElts0, KnownUndef, KnownZero,
3459                                    TLO, Depth + 1))
3460       return true;
3461 
3462     KnownUndef &= DemandedElts0;
3463     KnownZero &= DemandedElts0;
3464 
3465     // If every element pair has a zero/undef then just fold to zero.
3466     // fold (and x, undef) -> 0 / (and x, 0) -> 0
3467     // fold (mul x, undef) -> 0 / (mul x, 0) -> 0
3468     if (DemandedElts.isSubsetOf(SrcZero | KnownZero | SrcUndef | KnownUndef))
3469       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
3470 
3471     // If either side has a zero element, then the result element is zero, even
3472     // if the other is an UNDEF.
3473     // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros
3474     // and then handle 'and' nodes with the rest of the binop opcodes.
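    // Illustrative example: and(<C, undef>, <0, 0>) is <0, 0> per element, so
    // a known-zero lane in either operand forces KnownZero even when the
    // other operand's lane is undef.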
3475 KnownZero |= SrcZero; 3476 KnownUndef &= SrcUndef; 3477 KnownUndef &= ~KnownZero; 3478 3479 // Attempt to avoid multi-use ops if we don't need anything from them. 3480 if (!DemandedElts.isAllOnes()) 3481 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 3482 return true; 3483 break; 3484 } 3485 case ISD::TRUNCATE: 3486 case ISD::SIGN_EXTEND: 3487 case ISD::ZERO_EXTEND: 3488 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 3489 KnownZero, TLO, Depth + 1)) 3490 return true; 3491 3492 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 3493 // zext(undef) upper bits are guaranteed to be zero. 3494 if (DemandedElts.isSubsetOf(KnownUndef)) 3495 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 3496 KnownUndef.clearAllBits(); 3497 } 3498 break; 3499 default: { 3500 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 3501 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 3502 KnownZero, TLO, Depth)) 3503 return true; 3504 } else { 3505 KnownBits Known; 3506 APInt DemandedBits = APInt::getAllOnes(EltSizeInBits); 3507 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 3508 TLO, Depth, AssumeSingleUse)) 3509 return true; 3510 } 3511 break; 3512 } 3513 } 3514 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 3515 3516 // Constant fold all undef cases. 3517 // TODO: Handle zero cases as well. 3518 if (DemandedElts.isSubsetOf(KnownUndef)) 3519 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 3520 3521 return false; 3522 } 3523 3524 /// Determine which of the bits specified in Mask are known to be either zero or 3525 /// one and return them in the Known. 3526 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3527 KnownBits &Known, 3528 const APInt &DemandedElts, 3529 const SelectionDAG &DAG, 3530 unsigned Depth) const { 3531 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3532 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3533 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3534 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3535 "Should use MaskedValueIsZero if you don't know whether Op" 3536 " is a target node!"); 3537 Known.resetAll(); 3538 } 3539 3540 void TargetLowering::computeKnownBitsForTargetInstr( 3541 GISelKnownBits &Analysis, Register R, KnownBits &Known, 3542 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 3543 unsigned Depth) const { 3544 Known.resetAll(); 3545 } 3546 3547 void TargetLowering::computeKnownBitsForFrameIndex( 3548 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 3549 // The low bits are known zero if the pointer is aligned. 3550 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 3551 } 3552 3553 Align TargetLowering::computeKnownAlignForTargetInstr( 3554 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 3555 unsigned Depth) const { 3556 return Align(1); 3557 } 3558 3559 /// This method can be implemented by targets that want to expose additional 3560 /// information about sign bits to the DAG Combiner. 
3561 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 3562 const APInt &, 3563 const SelectionDAG &, 3564 unsigned Depth) const { 3565 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3566 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3567 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3568 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3569 "Should use ComputeNumSignBits if you don't know whether Op" 3570 " is a target node!"); 3571 return 1; 3572 } 3573 3574 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 3575 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 3576 const MachineRegisterInfo &MRI, unsigned Depth) const { 3577 return 1; 3578 } 3579 3580 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 3581 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 3582 TargetLoweringOpt &TLO, unsigned Depth) const { 3583 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3584 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3585 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3586 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3587 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 3588 " is a target node!"); 3589 return false; 3590 } 3591 3592 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 3593 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3594 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 3595 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3596 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3597 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3598 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3599 "Should use SimplifyDemandedBits if you don't know whether Op" 3600 " is a target node!"); 3601 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 3602 return false; 3603 } 3604 3605 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 3606 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3607 SelectionDAG &DAG, unsigned Depth) const { 3608 assert( 3609 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3610 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3611 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3612 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3613 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 3614 " is a target node!"); 3615 return SDValue(); 3616 } 3617 3618 SDValue 3619 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 3620 SDValue N1, MutableArrayRef<int> Mask, 3621 SelectionDAG &DAG) const { 3622 bool LegalMask = isShuffleMaskLegal(Mask, VT); 3623 if (!LegalMask) { 3624 std::swap(N0, N1); 3625 ShuffleVectorSDNode::commuteMask(Mask); 3626 LegalMask = isShuffleMaskLegal(Mask, VT); 3627 } 3628 3629 if (!LegalMask) 3630 return SDValue(); 3631 3632 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 3633 } 3634 3635 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 3636 return nullptr; 3637 } 3638 3639 bool TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3640 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3641 bool PoisonOnly, unsigned Depth) const { 3642 assert( 3643 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 3644 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3645 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3646 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3647 "Should use isGuaranteedNotToBeUndefOrPoison if you don't know whether Op" 3648 " is a target node!"); 3649 return false; 3650 } 3651 3652 bool 
TargetLowering::canCreateUndefOrPoisonForTargetNode( 3653 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3654 bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const { 3655 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3656 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3657 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3658 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3659 "Should use canCreateUndefOrPoison if you don't know whether Op" 3660 " is a target node!"); 3661 // Be conservative and return true. 3662 return true; 3663 } 3664 3665 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 3666 const SelectionDAG &DAG, 3667 bool SNaN, 3668 unsigned Depth) const { 3669 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3670 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3671 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3672 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3673 "Should use isKnownNeverNaN if you don't know whether Op" 3674 " is a target node!"); 3675 return false; 3676 } 3677 3678 bool TargetLowering::isSplatValueForTargetNode(SDValue Op, 3679 const APInt &DemandedElts, 3680 APInt &UndefElts, 3681 const SelectionDAG &DAG, 3682 unsigned Depth) const { 3683 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3684 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3685 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3686 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3687 "Should use isSplatValue if you don't know whether Op" 3688 " is a target node!"); 3689 return false; 3690 } 3691 3692 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 3693 // work with truncating build vectors and vectors with elements of less than 3694 // 8 bits. 3695 bool TargetLowering::isConstTrueVal(SDValue N) const { 3696 if (!N) 3697 return false; 3698 3699 unsigned EltWidth; 3700 APInt CVal; 3701 if (ConstantSDNode *CN = isConstOrConstSplat(N, /*AllowUndefs=*/false, 3702 /*AllowTruncation=*/true)) { 3703 CVal = CN->getAPIntValue(); 3704 EltWidth = N.getValueType().getScalarSizeInBits(); 3705 } else 3706 return false; 3707 3708 // If this is a truncating splat, truncate the splat value. 3709 // Otherwise, we may fail to match the expected values below. 
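  // Illustrative example: a v4i16 build_vector splat of truncated i32
  // constants 0xFFFF yields a 32-bit CVal; truncating to the 16-bit element
  // width gives all-ones, which then matches ZeroOrNegativeOneBooleanContent.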
3710   if (EltWidth < CVal.getBitWidth())
3711     CVal = CVal.trunc(EltWidth);
3712 
3713   switch (getBooleanContents(N.getValueType())) {
3714   case UndefinedBooleanContent:
3715     return CVal[0];
3716   case ZeroOrOneBooleanContent:
3717     return CVal.isOne();
3718   case ZeroOrNegativeOneBooleanContent:
3719     return CVal.isAllOnes();
3720   }
3721 
3722   llvm_unreachable("Invalid boolean contents");
3723 }
3724 
3725 bool TargetLowering::isConstFalseVal(SDValue N) const {
3726   if (!N)
3727     return false;
3728 
3729   const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
3730   if (!CN) {
3731     const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
3732     if (!BV)
3733       return false;
3734 
3735     // Only interested in constant splats. We don't care about undef
3736     // elements in identifying boolean constants, and getConstantSplatNode
3737     // returns null if all ops are undef.
3738     CN = BV->getConstantSplatNode();
3739     if (!CN)
3740       return false;
3741   }
3742 
3743   if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent)
3744     return !CN->getAPIntValue()[0];
3745 
3746   return CN->isZero();
3747 }
3748 
3749 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT,
3750                                        bool SExt) const {
3751   if (VT == MVT::i1)
3752     return N->isOne();
3753 
3754   TargetLowering::BooleanContent Cnt = getBooleanContents(VT);
3755   switch (Cnt) {
3756   case TargetLowering::ZeroOrOneBooleanContent:
3757     // An extended value of 1 is always true, unless its original type is i1,
3758     // in which case it will be sign extended to -1.
3759     return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1));
3760   case TargetLowering::UndefinedBooleanContent:
3761   case TargetLowering::ZeroOrNegativeOneBooleanContent:
3762     return N->isAllOnes() && SExt;
3763   }
3764   llvm_unreachable("Unexpected enumeration.");
3765 }
3766 
3767 /// This helper function of SimplifySetCC tries to optimize the comparison when
3768 /// either operand of the SetCC node is a bitwise-and instruction.
3769 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3770                                          ISD::CondCode Cond, const SDLoc &DL,
3771                                          DAGCombinerInfo &DCI) const {
3772   if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND)
3773     std::swap(N0, N1);
3774 
3775   SelectionDAG &DAG = DCI.DAG;
3776   EVT OpVT = N0.getValueType();
3777   if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() ||
3778       (Cond != ISD::SETEQ && Cond != ISD::SETNE))
3779     return SDValue();
3780 
3781   // (X & Y) != 0 --> zextOrTrunc(X & Y)
3782   // iff everything but LSB is known zero:
3783   if (Cond == ISD::SETNE && isNullConstant(N1) &&
3784       (getBooleanContents(OpVT) == TargetLowering::UndefinedBooleanContent ||
3785        getBooleanContents(OpVT) == TargetLowering::ZeroOrOneBooleanContent)) {
3786     unsigned NumEltBits = OpVT.getScalarSizeInBits();
3787     APInt UpperBits = APInt::getHighBitsSet(NumEltBits, NumEltBits - 1);
3788     if (DAG.MaskedValueIsZero(N0, UpperBits))
3789       return DAG.getBoolExtOrTrunc(N0, DL, VT, OpVT);
3790   }
3791 
3792   // Try to eliminate a power-of-2 mask constant by converting to a signbit
3793   // test in a narrow type that we can truncate to with no cost. Examples:
3794   // (i32 X & 32768) == 0 --> (trunc X to i16) >= 0
3795   // (i32 X & 32768) != 0 --> (trunc X to i16) < 0
3796   // TODO: This conservatively checks for type legality on the source and
3797   // destination types. That may inhibit optimizations, but it also
3798   // allows setcc->shift transforms that may be more beneficial.
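  // Illustrative walk-through of the fold below: for (i32 X & 32768) == 0,
  // the mask is 1 << 15 with 16 active bits, so NarrowVT is i16 and the test
  // becomes a sign-bit check (trunc X to i16) >= 0.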
3799   auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3800   if (AndC && isNullConstant(N1) && AndC->getAPIntValue().isPowerOf2() &&
3801       isTypeLegal(OpVT) && N0.hasOneUse()) {
3802     EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(),
3803                                      AndC->getAPIntValue().getActiveBits());
3804     if (isTruncateFree(OpVT, NarrowVT) && isTypeLegal(NarrowVT)) {
3805       SDValue Trunc = DAG.getZExtOrTrunc(N0.getOperand(0), DL, NarrowVT);
3806       SDValue Zero = DAG.getConstant(0, DL, NarrowVT);
3807       return DAG.getSetCC(DL, VT, Trunc, Zero,
3808                           Cond == ISD::SETEQ ? ISD::SETGE : ISD::SETLT);
3809     }
3810   }
3811 
3812   // Match these patterns in any of their permutations:
3813   // (X & Y) == Y
3814   // (X & Y) != Y
3815   SDValue X, Y;
3816   if (N0.getOperand(0) == N1) {
3817     X = N0.getOperand(1);
3818     Y = N0.getOperand(0);
3819   } else if (N0.getOperand(1) == N1) {
3820     X = N0.getOperand(0);
3821     Y = N0.getOperand(1);
3822   } else {
3823     return SDValue();
3824   }
3825 
3826   SDValue Zero = DAG.getConstant(0, DL, OpVT);
3827   if (DAG.isKnownToBeAPowerOfTwo(Y)) {
3828     // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set.
3829     // Note that where Y is variable and is known to have at most one bit set
3830     // (for example, if it is Z & 1) we cannot do this; the expressions are not
3831     // equivalent when Y == 0.
3832     assert(OpVT.isInteger());
3833     Cond = ISD::getSetCCInverse(Cond, OpVT);
3834     if (DCI.isBeforeLegalizeOps() ||
3835         isCondCodeLegal(Cond, N0.getSimpleValueType()))
3836       return DAG.getSetCC(DL, VT, N0, Zero, Cond);
3837   } else if (N0.hasOneUse() && hasAndNotCompare(Y)) {
3838     // If the target supports an 'and-not' or 'and-complement' logic operation,
3839     // try to use that to make a comparison operation more efficient.
3840     // But don't do this transform if the mask is a single bit because there are
3841     // more efficient ways to deal with that case (for example, 'bt' on x86 or
3842     // 'rlwinm' on PPC).
3843 
3844     // Bail out if the compare operand that we want to turn into a zero is
3845     // already a zero (otherwise, infinite loop).
3846     auto *YConst = dyn_cast<ConstantSDNode>(Y);
3847     if (YConst && YConst->isZero())
3848       return SDValue();
3849 
3850     // Transform this into: ~X & Y == 0.
3851     SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT);
3852     SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y);
3853     return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond);
3854   }
3855 
3856   return SDValue();
3857 }
3858 
3859 /// There are multiple IR patterns that could be checking whether a certain
3860 /// truncation of a signed number would be lossy or not. The pattern that is
3861 /// best at the IR level may not lower optimally, so we want to unfold it.
3862 /// We are looking for the following pattern: (KeptBits is a constant)
3863 ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
3864 /// KeptBits won't be bitwidth(x); that would have been constant-folded to
3865 /// true/false. KeptBits also can't be 1; that would have been folded to
3866 ///   %x dstcond 0
3867 /// We will unfold it into the natural trunc+sext pattern:
3868 ///   ((%x << C) a>> C) dstcond %x
3869 /// where C = bitwidth(x) - KeptBits and C u< bitwidth(x).
3870 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck(
3871     EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI,
3872     const SDLoc &DL) const {
3873   // We must be comparing with a constant.
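  // Illustrative example: icmp ult i16 (add i16 %x, 128), 256 has
  // KeptBits == 8, so it unfolds to ((%x << 8) a>> 8) seteq %x, i.e. "does
  // %x survive a round trip through i8".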
3873 ConstantSDNode *C1; 3874 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3875 return SDValue(); 3876 3877 // N0 should be: add %x, (1 << (KeptBits-1)) 3878 if (N0->getOpcode() != ISD::ADD) 3879 return SDValue(); 3880 3881 // And we must be 'add'ing a constant. 3882 ConstantSDNode *C01; 3883 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3884 return SDValue(); 3885 3886 SDValue X = N0->getOperand(0); 3887 EVT XVT = X.getValueType(); 3888 3889 // Validate constants ... 3890 3891 APInt I1 = C1->getAPIntValue(); 3892 3893 ISD::CondCode NewCond; 3894 if (Cond == ISD::CondCode::SETULT) { 3895 NewCond = ISD::CondCode::SETEQ; 3896 } else if (Cond == ISD::CondCode::SETULE) { 3897 NewCond = ISD::CondCode::SETEQ; 3898 // But need to 'canonicalize' the constant. 3899 I1 += 1; 3900 } else if (Cond == ISD::CondCode::SETUGT) { 3901 NewCond = ISD::CondCode::SETNE; 3902 // But need to 'canonicalize' the constant. 3903 I1 += 1; 3904 } else if (Cond == ISD::CondCode::SETUGE) { 3905 NewCond = ISD::CondCode::SETNE; 3906 } else 3907 return SDValue(); 3908 3909 APInt I01 = C01->getAPIntValue(); 3910 3911 auto checkConstants = [&I1, &I01]() -> bool { 3912 // Both of them must be power-of-two, and the constant from setcc is bigger. 3913 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3914 }; 3915 3916 if (checkConstants()) { 3917 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3918 } else { 3919 // What if we invert constants? (and the target predicate) 3920 I1.negate(); 3921 I01.negate(); 3922 assert(XVT.isInteger()); 3923 NewCond = getSetCCInverse(NewCond, XVT); 3924 if (!checkConstants()) 3925 return SDValue(); 3926 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3927 } 3928 3929 // They are power-of-two, so which bit is set? 3930 const unsigned KeptBits = I1.logBase2(); 3931 const unsigned KeptBitsMinusOne = I01.logBase2(); 3932 3933 // Magic! 3934 if (KeptBits != (KeptBitsMinusOne + 1)) 3935 return SDValue(); 3936 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3937 3938 // We don't want to do this in every single case. 3939 SelectionDAG &DAG = DCI.DAG; 3940 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3941 XVT, KeptBits)) 3942 return SDValue(); 3943 3944 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3945 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3946 3947 // Unfold into: ((%x << C) a>> C) cond %x 3948 // Where 'cond' will be either 'eq' or 'ne'. 3949 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3950 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3951 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3952 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3953 3954 return T2; 3955 } 3956 3957 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3958 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3959 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3960 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3961 assert(isConstOrConstSplat(N1C) && isConstOrConstSplat(N1C)->isZero() && 3962 "Should be a comparison with 0."); 3963 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3964 "Valid only for [in]equality comparisons."); 3965 3966 unsigned NewShiftOpcode; 3967 SDValue X, C, Y; 3968 3969 SelectionDAG &DAG = DCI.DAG; 3970 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3971 3972 // Look for '(C l>>/<< Y)'. 
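  // Illustrative example: (X & (32768 l>> Y)) != 0 becomes
  // ((X << Y) & 32768) != 0, so the variable shift is applied to X and the
  // 'and' mask becomes a plain constant.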
3973   auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) {
3974     // The shift should be one-use.
3975     if (!V.hasOneUse())
3976       return false;
3977     unsigned OldShiftOpcode = V.getOpcode();
3978     switch (OldShiftOpcode) {
3979     case ISD::SHL:
3980       NewShiftOpcode = ISD::SRL;
3981       break;
3982     case ISD::SRL:
3983       NewShiftOpcode = ISD::SHL;
3984       break;
3985     default:
3986       return false; // must be a logical shift.
3987     }
3988     // We should be shifting a constant.
3989     // FIXME: best to use isConstantOrConstantVector().
3990     C = V.getOperand(0);
3991     ConstantSDNode *CC =
3992         isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3993     if (!CC)
3994       return false;
3995     Y = V.getOperand(1);
3996 
3997     ConstantSDNode *XC =
3998         isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true);
3999     return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
4000         X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG);
4001   };
4002 
4003   // LHS of comparison should be a one-use 'and'.
4004   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
4005     return SDValue();
4006 
4007   X = N0.getOperand(0);
4008   SDValue Mask = N0.getOperand(1);
4009 
4010   // 'and' is commutative!
4011   if (!Match(Mask)) {
4012     std::swap(X, Mask);
4013     if (!Match(Mask))
4014       return SDValue();
4015   }
4016 
4017   EVT VT = X.getValueType();
4018 
4019   // Produce:
4020   // ((X 'OppositeShiftOpcode' Y) & C) Cond 0
4021   SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y);
4022   SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C);
4023   SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond);
4024   return T2;
4025 }
4026 
4027 /// Try to fold an equality comparison with an {add/sub/xor} binary operation
4028 /// as the 1st operand (N0). Callers are expected to swap the N0/N1 parameters
4029 /// to handle the commuted versions of these patterns.
4030 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
4031                                            ISD::CondCode Cond, const SDLoc &DL,
4032                                            DAGCombinerInfo &DCI) const {
4033   unsigned BOpcode = N0.getOpcode();
4034   assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) &&
4035          "Unexpected binop");
4036   assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode");
4037 
4038   // (X + Y) == X --> Y == 0
4039   // (X - Y) == X --> Y == 0
4040   // (X ^ Y) == X --> Y == 0
4041   SelectionDAG &DAG = DCI.DAG;
4042   EVT OpVT = N0.getValueType();
4043   SDValue X = N0.getOperand(0);
4044   SDValue Y = N0.getOperand(1);
4045   if (X == N1)
4046     return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond);
4047 
4048   if (Y != N1)
4049     return SDValue();
4050 
4051   // (X + Y) == Y --> X == 0
4052   // (X ^ Y) == Y --> X == 0
4053   if (BOpcode == ISD::ADD || BOpcode == ISD::XOR)
4054     return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond);
4055 
4056   // The shift would not be valid if the operands are boolean (i1).
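  // Illustrative example: i32 (X - Y) == Y becomes X == (Y << 1); for i1
  // operands the shift amount 1 would be out of range, hence the scalar-size
  // guard below.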
4057   if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1)
4058     return SDValue();
4059 
4060   // (X - Y) == Y --> X == Y << 1
4061   EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(),
4062                                  !DCI.isBeforeLegalize());
4063   SDValue One = DAG.getConstant(1, DL, ShiftVT);
4064   SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One);
4065   if (!DCI.isCalledByLegalizer())
4066     DCI.AddToWorklist(YShl1.getNode());
4067   return DAG.getSetCC(DL, VT, X, YShl1, Cond);
4068 }
4069 
4070 static SDValue simplifySetCCWithCTPOP(const TargetLowering &TLI, EVT VT,
4071                                       SDValue N0, const APInt &C1,
4072                                       ISD::CondCode Cond, const SDLoc &dl,
4073                                       SelectionDAG &DAG) {
4074   // Look through truncs that don't change the value of a ctpop.
4075   // FIXME: Add vector support? Need to be careful with setcc result type below.
4076   SDValue CTPOP = N0;
4077   if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && !VT.isVector() &&
4078       N0.getScalarValueSizeInBits() > Log2_32(N0.getOperand(0).getScalarValueSizeInBits()))
4079     CTPOP = N0.getOperand(0);
4080 
4081   if (CTPOP.getOpcode() != ISD::CTPOP || !CTPOP.hasOneUse())
4082     return SDValue();
4083 
4084   EVT CTVT = CTPOP.getValueType();
4085   SDValue CTOp = CTPOP.getOperand(0);
4086 
4087   // Expand a power-of-2-or-zero comparison based on ctpop:
4088   // (ctpop x) u< 2 -> (x & x-1) == 0
4089   // (ctpop x) u> 1 -> (x & x-1) != 0
4090   if (Cond == ISD::SETULT || Cond == ISD::SETUGT) {
4091     // Keep the CTPOP if it is a legal vector op.
4092     if (CTVT.isVector() && TLI.isOperationLegal(ISD::CTPOP, CTVT))
4093       return SDValue();
4094 
4095     unsigned CostLimit = TLI.getCustomCtpopCost(CTVT, Cond);
4096     if (C1.ugt(CostLimit + (Cond == ISD::SETULT)))
4097       return SDValue();
4098     if (C1 == 0 && (Cond == ISD::SETULT))
4099       return SDValue(); // This is handled elsewhere.
4100 
4101     unsigned Passes = C1.getLimitedValue() - (Cond == ISD::SETULT);
4102 
4103     SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
4104     SDValue Result = CTOp;
4105     for (unsigned i = 0; i < Passes; i++) {
4106       SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, Result, NegOne);
4107       Result = DAG.getNode(ISD::AND, dl, CTVT, Result, Add);
4108     }
4109     ISD::CondCode CC = Cond == ISD::SETULT ? ISD::SETEQ : ISD::SETNE;
4110     return DAG.getSetCC(dl, VT, Result, DAG.getConstant(0, dl, CTVT), CC);
4111   }
4112 
4113   // Expand a power-of-2 comparison based on ctpop:
4114   // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0)
4115   // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0)
4116   if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && C1 == 1) {
4117     // Keep the CTPOP if it is legal.
4118     if (TLI.isOperationLegal(ISD::CTPOP, CTVT))
4119       return SDValue();
4120 
4121     SDValue Zero = DAG.getConstant(0, dl, CTVT);
4122     SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT);
4123     assert(CTVT.isInteger());
4124     ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT);
4125     SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne);
4126     SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add);
4127     SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond);
4128     // It's not uncommon for known-never-zero X to exist in (ctpop X) eq/ne 1,
4129     // so check for it before emitting a potentially unnecessary op.
4130     if (DAG.isKnownNeverZero(CTOp))
4131       return RHS;
4132     SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond);
4133     unsigned LogicOpcode = Cond == ISD::SETEQ ?
ISD::AND : ISD::OR; 4134 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 4135 } 4136 4137 return SDValue(); 4138 } 4139 4140 static SDValue foldSetCCWithRotate(EVT VT, SDValue N0, SDValue N1, 4141 ISD::CondCode Cond, const SDLoc &dl, 4142 SelectionDAG &DAG) { 4143 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 4144 return SDValue(); 4145 4146 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 4147 if (!C1 || !(C1->isZero() || C1->isAllOnes())) 4148 return SDValue(); 4149 4150 auto getRotateSource = [](SDValue X) { 4151 if (X.getOpcode() == ISD::ROTL || X.getOpcode() == ISD::ROTR) 4152 return X.getOperand(0); 4153 return SDValue(); 4154 }; 4155 4156 // Peek through a rotated value compared against 0 or -1: 4157 // (rot X, Y) == 0/-1 --> X == 0/-1 4158 // (rot X, Y) != 0/-1 --> X != 0/-1 4159 if (SDValue R = getRotateSource(N0)) 4160 return DAG.getSetCC(dl, VT, R, N1, Cond); 4161 4162 // Peek through an 'or' of a rotated value compared against 0: 4163 // or (rot X, Y), Z ==/!= 0 --> (or X, Z) ==/!= 0 4164 // or Z, (rot X, Y) ==/!= 0 --> (or X, Z) ==/!= 0 4165 // 4166 // TODO: Add the 'and' with -1 sibling. 4167 // TODO: Recurse through a series of 'or' ops to find the rotate. 4168 EVT OpVT = N0.getValueType(); 4169 if (N0.hasOneUse() && N0.getOpcode() == ISD::OR && C1->isZero()) { 4170 if (SDValue R = getRotateSource(N0.getOperand(0))) { 4171 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(1)); 4172 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4173 } 4174 if (SDValue R = getRotateSource(N0.getOperand(1))) { 4175 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, R, N0.getOperand(0)); 4176 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4177 } 4178 } 4179 4180 return SDValue(); 4181 } 4182 4183 static SDValue foldSetCCWithFunnelShift(EVT VT, SDValue N0, SDValue N1, 4184 ISD::CondCode Cond, const SDLoc &dl, 4185 SelectionDAG &DAG) { 4186 // If we are testing for all-bits-clear, we might be able to do that with 4187 // less shifting since bit-order does not matter. 4188 if (Cond != ISD::SETEQ && Cond != ISD::SETNE) 4189 return SDValue(); 4190 4191 auto *C1 = isConstOrConstSplat(N1, /* AllowUndefs */ true); 4192 if (!C1 || !C1->isZero()) 4193 return SDValue(); 4194 4195 if (!N0.hasOneUse() || 4196 (N0.getOpcode() != ISD::FSHL && N0.getOpcode() != ISD::FSHR)) 4197 return SDValue(); 4198 4199 unsigned BitWidth = N0.getScalarValueSizeInBits(); 4200 auto *ShAmtC = isConstOrConstSplat(N0.getOperand(2)); 4201 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 4202 return SDValue(); 4203 4204 // Canonicalize fshr as fshl to reduce pattern-matching. 4205 unsigned ShAmt = ShAmtC->getZExtValue(); 4206 if (N0.getOpcode() == ISD::FSHR) 4207 ShAmt = BitWidth - ShAmt; 4208 4209 // Match an 'or' with a specific operand 'Other' in either commuted variant. 
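  // The rewrites below are sound because X's bits fully survive the funnel
  // shift (split between the low and high parts of the result), so the value
  // is zero exactly when X and the shifted-in Y bits are all zero.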
4210 SDValue X, Y; 4211 auto matchOr = [&X, &Y](SDValue Or, SDValue Other) { 4212 if (Or.getOpcode() != ISD::OR || !Or.hasOneUse()) 4213 return false; 4214 if (Or.getOperand(0) == Other) { 4215 X = Or.getOperand(0); 4216 Y = Or.getOperand(1); 4217 return true; 4218 } 4219 if (Or.getOperand(1) == Other) { 4220 X = Or.getOperand(1); 4221 Y = Or.getOperand(0); 4222 return true; 4223 } 4224 return false; 4225 }; 4226 4227 EVT OpVT = N0.getValueType(); 4228 EVT ShAmtVT = N0.getOperand(2).getValueType(); 4229 SDValue F0 = N0.getOperand(0); 4230 SDValue F1 = N0.getOperand(1); 4231 if (matchOr(F0, F1)) { 4232 // fshl (or X, Y), X, C ==/!= 0 --> or (shl Y, C), X ==/!= 0 4233 SDValue NewShAmt = DAG.getConstant(ShAmt, dl, ShAmtVT); 4234 SDValue Shift = DAG.getNode(ISD::SHL, dl, OpVT, Y, NewShAmt); 4235 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4236 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4237 } 4238 if (matchOr(F1, F0)) { 4239 // fshl X, (or X, Y), C ==/!= 0 --> or (srl Y, BW-C), X ==/!= 0 4240 SDValue NewShAmt = DAG.getConstant(BitWidth - ShAmt, dl, ShAmtVT); 4241 SDValue Shift = DAG.getNode(ISD::SRL, dl, OpVT, Y, NewShAmt); 4242 SDValue NewOr = DAG.getNode(ISD::OR, dl, OpVT, Shift, X); 4243 return DAG.getSetCC(dl, VT, NewOr, N1, Cond); 4244 } 4245 4246 return SDValue(); 4247 } 4248 4249 /// Try to simplify a setcc built with the specified operands and cc. If it is 4250 /// unable to simplify it, return a null SDValue. 4251 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 4252 ISD::CondCode Cond, bool foldBooleans, 4253 DAGCombinerInfo &DCI, 4254 const SDLoc &dl) const { 4255 SelectionDAG &DAG = DCI.DAG; 4256 const DataLayout &Layout = DAG.getDataLayout(); 4257 EVT OpVT = N0.getValueType(); 4258 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4259 4260 // Constant fold or commute setcc. 4261 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 4262 return Fold; 4263 4264 bool N0ConstOrSplat = 4265 isConstOrConstSplat(N0, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4266 bool N1ConstOrSplat = 4267 isConstOrConstSplat(N1, /*AllowUndefs*/ false, /*AllowTruncate*/ true); 4268 4269 // Canonicalize toward having the constant on the RHS. 4270 // TODO: Handle non-splat vector constants. All undef causes trouble. 4271 // FIXME: We can't yet fold constant scalable vector splats, so avoid an 4272 // infinite loop here when we encounter one. 4273 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 4274 if (N0ConstOrSplat && !N1ConstOrSplat && 4275 (DCI.isBeforeLegalizeOps() || 4276 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 4277 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4278 4279 // If we have a subtract with the same 2 non-constant operands as this setcc 4280 // -- but in reverse order -- then try to commute the operands of this setcc 4281 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 4282 // instruction on some targets. 
4283 if (!N0ConstOrSplat && !N1ConstOrSplat && 4284 (DCI.isBeforeLegalizeOps() || 4285 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 4286 DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N1, N0}) && 4287 !DAG.doesNodeExist(ISD::SUB, DAG.getVTList(OpVT), {N0, N1})) 4288 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 4289 4290 if (SDValue V = foldSetCCWithRotate(VT, N0, N1, Cond, dl, DAG)) 4291 return V; 4292 4293 if (SDValue V = foldSetCCWithFunnelShift(VT, N0, N1, Cond, dl, DAG)) 4294 return V; 4295 4296 if (auto *N1C = isConstOrConstSplat(N1)) { 4297 const APInt &C1 = N1C->getAPIntValue(); 4298 4299 // Optimize some CTPOP cases. 4300 if (SDValue V = simplifySetCCWithCTPOP(*this, VT, N0, C1, Cond, dl, DAG)) 4301 return V; 4302 4303 // For equality to 0 of a no-wrap multiply, decompose and test each op: 4304 // X * Y == 0 --> (X == 0) || (Y == 0) 4305 // X * Y != 0 --> (X != 0) && (Y != 0) 4306 // TODO: This bails out if minsize is set, but if the target doesn't have a 4307 // single instruction multiply for this type, it would likely be 4308 // smaller to decompose. 4309 if (C1.isZero() && (Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4310 N0.getOpcode() == ISD::MUL && N0.hasOneUse() && 4311 (N0->getFlags().hasNoUnsignedWrap() || 4312 N0->getFlags().hasNoSignedWrap()) && 4313 !Attr.hasFnAttr(Attribute::MinSize)) { 4314 SDValue IsXZero = DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 4315 SDValue IsYZero = DAG.getSetCC(dl, VT, N0.getOperand(1), N1, Cond); 4316 unsigned LogicOp = Cond == ISD::SETEQ ? ISD::OR : ISD::AND; 4317 return DAG.getNode(LogicOp, dl, VT, IsXZero, IsYZero); 4318 } 4319 4320 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 4321 // equality comparison, then we're just comparing whether X itself is 4322 // zero. 4323 if (N0.getOpcode() == ISD::SRL && (C1.isZero() || C1.isOne()) && 4324 N0.getOperand(0).getOpcode() == ISD::CTLZ && 4325 llvm::has_single_bit<uint32_t>(N0.getScalarValueSizeInBits())) { 4326 if (ConstantSDNode *ShAmt = isConstOrConstSplat(N0.getOperand(1))) { 4327 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4328 ShAmt->getAPIntValue() == Log2_32(N0.getScalarValueSizeInBits())) { 4329 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 4330 // (srl (ctlz x), 5) == 0 -> X != 0 4331 // (srl (ctlz x), 5) != 1 -> X != 0 4332 Cond = ISD::SETNE; 4333 } else { 4334 // (srl (ctlz x), 5) != 0 -> X == 0 4335 // (srl (ctlz x), 5) == 1 -> X == 0 4336 Cond = ISD::SETEQ; 4337 } 4338 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 4339 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), Zero, 4340 Cond); 4341 } 4342 } 4343 } 4344 } 4345 4346 // FIXME: Support vectors. 
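  // Illustrative example: (zext i8 %x to i32) == 200 can be narrowed below to
  // an i8 compare %x == 200, since 200 has only 8 active bits and the setcc
  // folds through the extension.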
4347 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 4348 const APInt &C1 = N1C->getAPIntValue(); 4349 4350 // (zext x) == C --> x == (trunc C) 4351 // (sext x) == C --> x == (trunc C) 4352 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4353 DCI.isBeforeLegalize() && N0->hasOneUse()) { 4354 unsigned MinBits = N0.getValueSizeInBits(); 4355 SDValue PreExt; 4356 bool Signed = false; 4357 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 4358 // ZExt 4359 MinBits = N0->getOperand(0).getValueSizeInBits(); 4360 PreExt = N0->getOperand(0); 4361 } else if (N0->getOpcode() == ISD::AND) { 4362 // DAGCombine turns costly ZExts into ANDs 4363 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 4364 if ((C->getAPIntValue()+1).isPowerOf2()) { 4365 MinBits = C->getAPIntValue().countr_one(); 4366 PreExt = N0->getOperand(0); 4367 } 4368 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 4369 // SExt 4370 MinBits = N0->getOperand(0).getValueSizeInBits(); 4371 PreExt = N0->getOperand(0); 4372 Signed = true; 4373 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 4374 // ZEXTLOAD / SEXTLOAD 4375 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 4376 MinBits = LN0->getMemoryVT().getSizeInBits(); 4377 PreExt = N0; 4378 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 4379 Signed = true; 4380 MinBits = LN0->getMemoryVT().getSizeInBits(); 4381 PreExt = N0; 4382 } 4383 } 4384 4385 // Figure out how many bits we need to preserve this constant. 4386 unsigned ReqdBits = Signed ? C1.getSignificantBits() : C1.getActiveBits(); 4387 4388 // Make sure we're not losing bits from the constant. 4389 if (MinBits > 0 && 4390 MinBits < C1.getBitWidth() && 4391 MinBits >= ReqdBits) { 4392 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 4393 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 4394 // Will get folded away. 4395 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 4396 if (MinBits == 1 && C1 == 1) 4397 // Invert the condition. 4398 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 4399 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ);
4400             SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT);
4401             return DAG.getSetCC(dl, VT, Trunc, C, Cond);
4402           }
4403 
4404           // If truncating the setcc operands is not desirable, we can still
4405           // simplify the expression in some cases:
4406           // setcc ([sz]ext (setcc x, y, cc)), 0, setne -> setcc (x, y, cc)
4407           // setcc ([sz]ext (setcc x, y, cc)), 0, seteq -> setcc (x, y, inv(cc))
4408           // setcc (zext (setcc x, y, cc)), 1, setne -> setcc (x, y, inv(cc))
4409           // setcc (zext (setcc x, y, cc)), 1, seteq -> setcc (x, y, cc)
4410           // setcc (sext (setcc x, y, cc)), -1, setne -> setcc (x, y, inv(cc))
4411           // setcc (sext (setcc x, y, cc)), -1, seteq -> setcc (x, y, cc)
4412           SDValue TopSetCC = N0->getOperand(0);
4413           unsigned N0Opc = N0->getOpcode();
4414           bool SExt = (N0Opc == ISD::SIGN_EXTEND);
4415           if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 &&
4416               TopSetCC.getOpcode() == ISD::SETCC &&
4417               (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) &&
4418               (isConstFalseVal(N1) ||
4419                isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) {
4420 
4421             bool Inverse = (N1C->isZero() && Cond == ISD::SETEQ) ||
4422                            (!N1C->isZero() && Cond == ISD::SETNE);
4423 
4424             if (!Inverse)
4425               return TopSetCC;
4426 
4427             ISD::CondCode InvCond = ISD::getSetCCInverse(
4428                 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(),
4429                 TopSetCC.getOperand(0).getValueType());
4430             return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
4431                                 TopSetCC.getOperand(1),
4432                                 InvCond);
4433           }
4434         }
4435       }
4436 
4437       // If the LHS is '(and load, const)', the RHS is 0, the test is for
4438       // equality or unsigned, and all 1 bits of the const are in the same
4439       // partial word, see if we can shorten the load.
4440       if (DCI.isBeforeLegalize() &&
4441           !ISD::isSignedIntSetCC(Cond) &&
4442           N0.getOpcode() == ISD::AND && C1 == 0 &&
4443           N0.getNode()->hasOneUse() &&
4444           isa<LoadSDNode>(N0.getOperand(0)) &&
4445           N0.getOperand(0).getNode()->hasOneUse() &&
4446           isa<ConstantSDNode>(N0.getOperand(1))) {
4447         LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
4448         APInt bestMask;
4449         unsigned bestWidth = 0, bestOffset = 0;
4450         if (Lod->isSimple() && Lod->isUnindexed()) {
4451           unsigned origWidth = N0.getValueSizeInBits();
4452           unsigned maskWidth = origWidth;
4453           // We can narrow (e.g.) 16-bit extending loads on 32-bit target to
4454           // 8 bits, but have to be careful...
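          // Illustrative example: on a little-endian target,
          // (and (load i32 %p), 0xFF0000) == 0 can be narrowed to an i8 load
          // at byte offset 2 compared against 0, since all set mask bits sit
          // in that one byte.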
4455 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 4456 origWidth = Lod->getMemoryVT().getSizeInBits(); 4457 const APInt &Mask = N0.getConstantOperandAPInt(1); 4458 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 4459 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 4460 for (unsigned offset=0; offset<origWidth/width; offset++) { 4461 if (Mask.isSubsetOf(newMask)) { 4462 if (Layout.isLittleEndian()) 4463 bestOffset = (uint64_t)offset * (width/8); 4464 else 4465 bestOffset = (origWidth/width - offset - 1) * (width/8); 4466 bestMask = Mask.lshr(offset * (width/8) * 8); 4467 bestWidth = width; 4468 break; 4469 } 4470 newMask <<= width; 4471 } 4472 } 4473 } 4474 if (bestWidth) { 4475 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 4476 if (newVT.isRound() && 4477 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 4478 SDValue Ptr = Lod->getBasePtr(); 4479 if (bestOffset != 0) 4480 Ptr = 4481 DAG.getMemBasePlusOffset(Ptr, TypeSize::Fixed(bestOffset), dl); 4482 SDValue NewLoad = 4483 DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 4484 Lod->getPointerInfo().getWithOffset(bestOffset), 4485 Lod->getOriginalAlign()); 4486 return DAG.getSetCC(dl, VT, 4487 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 4488 DAG.getConstant(bestMask.trunc(bestWidth), 4489 dl, newVT)), 4490 DAG.getConstant(0LL, dl, newVT), Cond); 4491 } 4492 } 4493 } 4494 4495 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 4496 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 4497 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 4498 4499 // If the comparison constant has bits in the upper part, the 4500 // zero-extended value could never match. 4501 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 4502 C1.getBitWidth() - InSize))) { 4503 switch (Cond) { 4504 case ISD::SETUGT: 4505 case ISD::SETUGE: 4506 case ISD::SETEQ: 4507 return DAG.getConstant(0, dl, VT); 4508 case ISD::SETULT: 4509 case ISD::SETULE: 4510 case ISD::SETNE: 4511 return DAG.getConstant(1, dl, VT); 4512 case ISD::SETGT: 4513 case ISD::SETGE: 4514 // True if the sign bit of C1 is set. 4515 return DAG.getConstant(C1.isNegative(), dl, VT); 4516 case ISD::SETLT: 4517 case ISD::SETLE: 4518 // True if the sign bit of C1 isn't set. 4519 return DAG.getConstant(C1.isNonNegative(), dl, VT); 4520 default: 4521 break; 4522 } 4523 } 4524 4525 // Otherwise, we can perform the comparison with the low bits. 
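      // Illustrative example: (zext i8 %x to i32) ult 42 can become an i8
      // compare %x ult 42; truncating the constant is lossless here because
      // the high-bits check above already handled constants that don't fit.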
4526 switch (Cond) {
4527 case ISD::SETEQ:
4528 case ISD::SETNE:
4529 case ISD::SETUGT:
4530 case ISD::SETUGE:
4531 case ISD::SETULT:
4532 case ISD::SETULE: {
4533 EVT newVT = N0.getOperand(0).getValueType();
4534 if (DCI.isBeforeLegalizeOps() ||
4535 (isOperationLegal(ISD::SETCC, newVT) &&
4536 isCondCodeLegal(Cond, newVT.getSimpleVT()))) {
4537 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT);
4538 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT);
4539
4540 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0),
4541 NewConst, Cond);
4542 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType());
4543 }
4544 break;
4545 }
4546 default:
4547 break; // TODO: Be more careful with signed comparisons.
4548 }
4549 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4550 (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4551 !isSExtCheaperThanZExt(cast<VTSDNode>(N0.getOperand(1))->getVT(),
4552 OpVT)) {
4553 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT();
4554 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits();
4555 EVT ExtDstTy = N0.getValueType();
4556 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits();
4557
4558 // If the constant doesn't fit into the number of bits for the source of
4559 // the sign extension, it is impossible for both sides to be equal.
4560 if (C1.getSignificantBits() > ExtSrcTyBits)
4561 return DAG.getBoolConstant(Cond == ISD::SETNE, dl, VT, OpVT);
4562
4563 assert(ExtDstTy == N0.getOperand(0).getValueType() &&
4564 ExtDstTy != ExtSrcTy && "Unexpected types!");
4565 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits);
4566 SDValue ZextOp = DAG.getNode(ISD::AND, dl, ExtDstTy, N0.getOperand(0),
4567 DAG.getConstant(Imm, dl, ExtDstTy));
4568 if (!DCI.isCalledByLegalizer())
4569 DCI.AddToWorklist(ZextOp.getNode());
4570 // Otherwise, make this a use of a zext.
4571 return DAG.getSetCC(dl, VT, ZextOp,
4572 DAG.getConstant(C1 & Imm, dl, ExtDstTy), Cond);
4573 } else if ((N1C->isZero() || N1C->isOne()) &&
4574 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) {
4575 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC
4576 if (N0.getOpcode() == ISD::SETCC &&
4577 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) &&
4578 (N0.getValueType() == MVT::i1 ||
4579 getBooleanContents(N0.getOperand(0).getValueType()) ==
4580 ZeroOrOneBooleanContent)) {
4581 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne());
4582 if (TrueWhenTrue)
4583 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
4584 // Invert the condition.
4585 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
4586 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType());
4587 if (DCI.isBeforeLegalizeOps() ||
4588 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType()))
4589 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
4590 }
4591
4592 if ((N0.getOpcode() == ISD::XOR ||
4593 (N0.getOpcode() == ISD::AND &&
4594 N0.getOperand(0).getOpcode() == ISD::XOR &&
4595 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) &&
4596 isOneConstant(N0.getOperand(1))) {
4597 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
4598 // can only do this if the top bits are known zero.
4599 unsigned BitWidth = N0.getValueSizeInBits();
4600 if (DAG.MaskedValueIsZero(N0,
4601 APInt::getHighBitsSet(BitWidth,
4602 BitWidth - 1))) {
4603 // Okay, get the un-inverted input value.
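// E.g. with the top bits known zero, (seteq (xor X, 1), 0) becomes
// (setne X, 0), and (seteq (and (xor X, 1), 1), 0) becomes
// (setne (and X, 1), 0).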
4604 SDValue Val; 4605 if (N0.getOpcode() == ISD::XOR) { 4606 Val = N0.getOperand(0); 4607 } else { 4608 assert(N0.getOpcode() == ISD::AND && 4609 N0.getOperand(0).getOpcode() == ISD::XOR); 4610 // ((X^1)&1)^1 -> X & 1 4611 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 4612 N0.getOperand(0).getOperand(0), 4613 N0.getOperand(1)); 4614 } 4615 4616 return DAG.getSetCC(dl, VT, Val, N1, 4617 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4618 } 4619 } else if (N1C->isOne()) { 4620 SDValue Op0 = N0; 4621 if (Op0.getOpcode() == ISD::TRUNCATE) 4622 Op0 = Op0.getOperand(0); 4623 4624 if ((Op0.getOpcode() == ISD::XOR) && 4625 Op0.getOperand(0).getOpcode() == ISD::SETCC && 4626 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 4627 SDValue XorLHS = Op0.getOperand(0); 4628 SDValue XorRHS = Op0.getOperand(1); 4629 // Ensure that the input setccs return an i1 type or 0/1 value. 4630 if (Op0.getValueType() == MVT::i1 || 4631 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 4632 ZeroOrOneBooleanContent && 4633 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 4634 ZeroOrOneBooleanContent)) { 4635 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 4636 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 4637 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 4638 } 4639 } 4640 if (Op0.getOpcode() == ISD::AND && isOneConstant(Op0.getOperand(1))) { 4641 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 4642 if (Op0.getValueType().bitsGT(VT)) 4643 Op0 = DAG.getNode(ISD::AND, dl, VT, 4644 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 4645 DAG.getConstant(1, dl, VT)); 4646 else if (Op0.getValueType().bitsLT(VT)) 4647 Op0 = DAG.getNode(ISD::AND, dl, VT, 4648 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 4649 DAG.getConstant(1, dl, VT)); 4650 4651 return DAG.getSetCC(dl, VT, Op0, 4652 DAG.getConstant(0, dl, Op0.getValueType()), 4653 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4654 } 4655 if (Op0.getOpcode() == ISD::AssertZext && 4656 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 4657 return DAG.getSetCC(dl, VT, Op0, 4658 DAG.getConstant(0, dl, Op0.getValueType()), 4659 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 4660 } 4661 } 4662 4663 // Given: 4664 // icmp eq/ne (urem %x, %y), 0 4665 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 4666 // icmp eq/ne %x, 0 4667 if (N0.getOpcode() == ISD::UREM && N1C->isZero() && 4668 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4669 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 4670 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 4671 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 4672 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 4673 } 4674 4675 // Fold set_cc seteq (ashr X, BW-1), -1 -> set_cc setlt X, 0 4676 // and set_cc setne (ashr X, BW-1), -1 -> set_cc setge X, 0 4677 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4678 N0.getOpcode() == ISD::SRA && isa<ConstantSDNode>(N0.getOperand(1)) && 4679 N0.getConstantOperandAPInt(1) == OpVT.getScalarSizeInBits() - 1 && 4680 N1C && N1C->isAllOnes()) { 4681 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4682 DAG.getConstant(0, dl, OpVT), 4683 Cond == ISD::SETEQ ? ISD::SETLT : ISD::SETGE); 4684 } 4685 4686 if (SDValue V = 4687 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 4688 return V; 4689 } 4690 4691 // These simplifications apply to splat vectors as well. 4692 // TODO: Handle more splat vector cases. 
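// E.g. an unsigned vector compare (setule X, (splat_vector -1)) folds to
// all-true below, exactly like the scalar X <= UINT_MAX case.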
4693 if (auto *N1C = isConstOrConstSplat(N1)) { 4694 const APInt &C1 = N1C->getAPIntValue(); 4695 4696 APInt MinVal, MaxVal; 4697 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 4698 if (ISD::isSignedIntSetCC(Cond)) { 4699 MinVal = APInt::getSignedMinValue(OperandBitSize); 4700 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 4701 } else { 4702 MinVal = APInt::getMinValue(OperandBitSize); 4703 MaxVal = APInt::getMaxValue(OperandBitSize); 4704 } 4705 4706 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 4707 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 4708 // X >= MIN --> true 4709 if (C1 == MinVal) 4710 return DAG.getBoolConstant(true, dl, VT, OpVT); 4711 4712 if (!VT.isVector()) { // TODO: Support this for vectors. 4713 // X >= C0 --> X > (C0 - 1) 4714 APInt C = C1 - 1; 4715 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 4716 if ((DCI.isBeforeLegalizeOps() || 4717 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4718 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4719 isLegalICmpImmediate(C.getSExtValue())))) { 4720 return DAG.getSetCC(dl, VT, N0, 4721 DAG.getConstant(C, dl, N1.getValueType()), 4722 NewCC); 4723 } 4724 } 4725 } 4726 4727 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 4728 // X <= MAX --> true 4729 if (C1 == MaxVal) 4730 return DAG.getBoolConstant(true, dl, VT, OpVT); 4731 4732 // X <= C0 --> X < (C0 + 1) 4733 if (!VT.isVector()) { // TODO: Support this for vectors. 4734 APInt C = C1 + 1; 4735 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 4736 if ((DCI.isBeforeLegalizeOps() || 4737 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 4738 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 4739 isLegalICmpImmediate(C.getSExtValue())))) { 4740 return DAG.getSetCC(dl, VT, N0, 4741 DAG.getConstant(C, dl, N1.getValueType()), 4742 NewCC); 4743 } 4744 } 4745 } 4746 4747 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 4748 if (C1 == MinVal) 4749 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 4750 4751 // TODO: Support this for vectors after legalize ops. 4752 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4753 // Canonicalize setlt X, Max --> setne X, Max 4754 if (C1 == MaxVal) 4755 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4756 4757 // If we have setult X, 1, turn it into seteq X, 0 4758 if (C1 == MinVal+1) 4759 return DAG.getSetCC(dl, VT, N0, 4760 DAG.getConstant(MinVal, dl, N0.getValueType()), 4761 ISD::SETEQ); 4762 } 4763 } 4764 4765 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 4766 if (C1 == MaxVal) 4767 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 4768 4769 // TODO: Support this for vectors after legalize ops. 4770 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 4771 // Canonicalize setgt X, Min --> setne X, Min 4772 if (C1 == MinVal) 4773 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 4774 4775 // If we have setugt X, Max-1, turn it into seteq X, Max 4776 if (C1 == MaxVal-1) 4777 return DAG.getSetCC(dl, VT, N0, 4778 DAG.getConstant(MaxVal, dl, N0.getValueType()), 4779 ISD::SETEQ); 4780 } 4781 } 4782 4783 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 4784 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4785 if (C1.isZero()) 4786 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 4787 VT, N0, N1, Cond, DCI, dl)) 4788 return CC; 4789 4790 // For all/any comparisons, replace or(x,shl(y,bw/2)) with and/or(x,y). 
4791 // For example, when the high 32 bits of i64 X are known clear:
4792 // all bits clear: (X | (Y<<32)) == 0 --> (X | Y) == 0
4793 // all bits set: (X | (Y<<32)) == -1 --> (X & Y) == -1
4794 bool CmpZero = N1C->isZero();
4795 bool CmpNegOne = N1C->isAllOnes();
4796 if ((CmpZero || CmpNegOne) && N0.hasOneUse()) {
4797 // Match or(lo,shl(hi,bw/2)) pattern.
4798 auto IsConcat = [&](SDValue V, SDValue &Lo, SDValue &Hi) {
4799 unsigned EltBits = V.getScalarValueSizeInBits();
4800 if (V.getOpcode() != ISD::OR || (EltBits % 2) != 0)
4801 return false;
4802 SDValue LHS = V.getOperand(0);
4803 SDValue RHS = V.getOperand(1);
4804 APInt HiBits = APInt::getHighBitsSet(EltBits, EltBits / 2);
4805 // Unshifted element must have zero upper bits.
4806 if (RHS.getOpcode() == ISD::SHL &&
4807 isa<ConstantSDNode>(RHS.getOperand(1)) &&
4808 RHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4809 DAG.MaskedValueIsZero(LHS, HiBits)) {
4810 Lo = LHS;
4811 Hi = RHS.getOperand(0);
4812 return true;
4813 }
4814 if (LHS.getOpcode() == ISD::SHL &&
4815 isa<ConstantSDNode>(LHS.getOperand(1)) &&
4816 LHS.getConstantOperandAPInt(1) == (EltBits / 2) &&
4817 DAG.MaskedValueIsZero(RHS, HiBits)) {
4818 Lo = RHS;
4819 Hi = LHS.getOperand(0);
4820 return true;
4821 }
4822 return false;
4823 };
4824
4825 auto MergeConcat = [&](SDValue Lo, SDValue Hi) {
4826 unsigned EltBits = N0.getScalarValueSizeInBits();
4827 unsigned HalfBits = EltBits / 2;
4828 APInt HiBits = APInt::getHighBitsSet(EltBits, HalfBits);
4829 SDValue LoBits = DAG.getConstant(~HiBits, dl, OpVT);
4830 SDValue HiMask = DAG.getNode(ISD::AND, dl, OpVT, Hi, LoBits);
4831 SDValue NewN0 =
4832 DAG.getNode(CmpZero ? ISD::OR : ISD::AND, dl, OpVT, Lo, HiMask);
4833 SDValue NewN1 = CmpZero ? DAG.getConstant(0, dl, OpVT) : LoBits;
4834 return DAG.getSetCC(dl, VT, NewN0, NewN1, Cond);
4835 };
4836
4837 SDValue Lo, Hi;
4838 if (IsConcat(N0, Lo, Hi))
4839 return MergeConcat(Lo, Hi);
4840
4841 if (N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR) {
4842 SDValue Lo0, Lo1, Hi0, Hi1;
4843 if (IsConcat(N0.getOperand(0), Lo0, Hi0) &&
4844 IsConcat(N0.getOperand(1), Lo1, Hi1)) {
4845 return MergeConcat(DAG.getNode(N0.getOpcode(), dl, OpVT, Lo0, Lo1),
4846 DAG.getNode(N0.getOpcode(), dl, OpVT, Hi0, Hi1));
4847 }
4848 }
4849 }
4850 }
4851
4852 // If we have "setcc X, C0", check to see if we can shrink the immediate
4853 // by changing cc.
4854 // TODO: Support this for vectors after legalize ops.
4855 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) {
4856 // SETUGT X, SINTMAX -> SETLT X, 0
4857 // SETUGE X, SINTMIN -> SETLT X, 0
4858 if ((Cond == ISD::SETUGT && C1.isMaxSignedValue()) ||
4859 (Cond == ISD::SETUGE && C1.isMinSignedValue()))
4860 return DAG.getSetCC(dl, VT, N0,
4861 DAG.getConstant(0, dl, N1.getValueType()),
4862 ISD::SETLT);
4863
4864 // SETULT X, SINTMIN -> SETGT X, -1
4865 // SETULE X, SINTMAX -> SETGT X, -1
4866 if ((Cond == ISD::SETULT && C1.isMinSignedValue()) ||
4867 (Cond == ISD::SETULE && C1.isMaxSignedValue()))
4868 return DAG.getSetCC(dl, VT, N0,
4869 DAG.getAllOnesConstant(dl, N1.getValueType()),
4870 ISD::SETGT);
4871 }
4872 }
4873
4874 // Back to non-vector simplifications.
4875 // TODO: Can we do these for vector splats?
4876 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
4877 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4878 const APInt &C1 = N1C->getAPIntValue();
4879 EVT ShValTy = N0.getValueType();
4880
4881 // Fold bit comparisons when we can. This will result in an
4882 // incorrect value when boolean false is negative one, unless
4883 // the bitsize is 1 in which case the false value is the same
4884 // in practice regardless of the representation.
4885 if ((VT.getSizeInBits() == 1 ||
4886 getBooleanContents(N0.getValueType()) == ZeroOrOneBooleanContent) &&
4887 (Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4888 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) &&
4889 N0.getOpcode() == ISD::AND) {
4890 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4891 EVT ShiftTy =
4892 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
4893 if (Cond == ISD::SETNE && C1 == 0) { // (X & 8) != 0 --> (X & 8) >> 3
4894 // Perform the xform if the AND RHS is a single bit.
4895 unsigned ShCt = AndRHS->getAPIntValue().logBase2();
4896 if (AndRHS->getAPIntValue().isPowerOf2() &&
4897 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
4898 return DAG.getNode(ISD::TRUNCATE, dl, VT,
4899 DAG.getNode(ISD::SRL, dl, ShValTy, N0,
4900 DAG.getConstant(ShCt, dl, ShiftTy)));
4901 }
4902 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) {
4903 // (X & 8) == 8 --> (X & 8) >> 3
4904 // Perform the xform if C1 is a single bit.
4905 unsigned ShCt = C1.logBase2();
4906 if (C1.isPowerOf2() &&
4907 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) {
4908 return DAG.getNode(ISD::TRUNCATE, dl, VT,
4909 DAG.getNode(ISD::SRL, dl, ShValTy, N0,
4910 DAG.getConstant(ShCt, dl, ShiftTy)));
4911 }
4912 }
4913 }
4914 }
4915
4916 if (C1.getSignificantBits() <= 64 &&
4917 !isLegalICmpImmediate(C1.getSExtValue())) {
4918 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize());
4919 // (X & -256) == 256 -> (X >> 8) == 1
4920 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
4921 N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
4922 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
4923 const APInt &AndRHSC = AndRHS->getAPIntValue();
4924 if (AndRHSC.isNegatedPowerOf2() && (AndRHSC & C1) == C1) {
4925 unsigned ShiftBits = AndRHSC.countr_zero();
4926 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) {
4927 SDValue Shift =
4928 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0),
4929 DAG.getConstant(ShiftBits, dl, ShiftTy));
4930 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy);
4931 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond);
4932 }
4933 }
4934 }
4935 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE ||
4936 Cond == ISD::SETULE || Cond == ISD::SETUGT) {
4937 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT);
4938 // X < 0x100000000 -> (X >> 32) < 1
4939 // X >= 0x100000000 -> (X >> 32) >= 1
4940 // X <= 0x0ffffffff -> (X >> 32) < 1
4941 // X > 0x0ffffffff -> (X >> 32) >= 1
4942 unsigned ShiftBits;
4943 APInt NewC = C1;
4944 ISD::CondCode NewCond = Cond;
4945 if (AdjOne) {
4946 ShiftBits = C1.countr_one();
4947 NewC = NewC + 1;
4948 NewCond = (Cond == ISD::SETULE) ?
ISD::SETULT : ISD::SETUGE; 4949 } else { 4950 ShiftBits = C1.countr_zero(); 4951 } 4952 NewC.lshrInPlace(ShiftBits); 4953 if (ShiftBits && NewC.getSignificantBits() <= 64 && 4954 isLegalICmpImmediate(NewC.getSExtValue()) && 4955 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 4956 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 4957 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4958 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4959 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4960 } 4961 } 4962 } 4963 } 4964 4965 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4966 auto *CFP = cast<ConstantFPSDNode>(N1); 4967 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4968 4969 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4970 // constant if knowing that the operand is non-nan is enough. We prefer to 4971 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4972 // materialize 0.0. 4973 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4974 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4975 4976 // setcc (fneg x), C -> setcc swap(pred) x, -C 4977 if (N0.getOpcode() == ISD::FNEG) { 4978 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4979 if (DCI.isBeforeLegalizeOps() || 4980 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4981 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4982 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4983 } 4984 } 4985 4986 // If the condition is not legal, see if we can find an equivalent one 4987 // which is legal. 4988 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4989 // If the comparison was an awkward floating-point == or != and one of 4990 // the comparison operands is infinity or negative infinity, convert the 4991 // condition to a less-awkward <= or >=. 4992 if (CFP->getValueAPF().isInfinity()) { 4993 bool IsNegInf = CFP->getValueAPF().isNegative(); 4994 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4995 switch (Cond) { 4996 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4997 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4998 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4999 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 5000 default: break; 5001 } 5002 if (NewCond != ISD::SETCC_INVALID && 5003 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 5004 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 5005 } 5006 } 5007 } 5008 5009 if (N0 == N1) { 5010 // The sext(setcc()) => setcc() optimization relies on the appropriate 5011 // constant being emitted. 5012 assert(!N0.getValueType().isInteger() && 5013 "Integer types should be handled by FoldSetCC"); 5014 5015 bool EqTrue = ISD::isTrueWhenEqual(Cond); 5016 unsigned UOF = ISD::getUnorderedFlavor(Cond); 5017 if (UOF == 2) // FP operators that are undefined on NaNs. 5018 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 5019 if (UOF == unsigned(EqTrue)) 5020 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 5021 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 5022 // if it is not already. 5023 ISD::CondCode NewCond = UOF == 0 ? 
ISD::SETO : ISD::SETUO; 5024 if (NewCond != Cond && 5025 (DCI.isBeforeLegalizeOps() || 5026 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 5027 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 5028 } 5029 5030 // ~X > ~Y --> Y > X 5031 // ~X < ~Y --> Y < X 5032 // ~X < C --> X > ~C 5033 // ~X > C --> X < ~C 5034 if ((isSignedIntSetCC(Cond) || isUnsignedIntSetCC(Cond)) && 5035 N0.getValueType().isInteger()) { 5036 if (isBitwiseNot(N0)) { 5037 if (isBitwiseNot(N1)) 5038 return DAG.getSetCC(dl, VT, N1.getOperand(0), N0.getOperand(0), Cond); 5039 5040 if (DAG.isConstantIntBuildVectorOrConstantInt(N1)) { 5041 SDValue Not = DAG.getNOT(dl, N1, OpVT); 5042 return DAG.getSetCC(dl, VT, Not, N0.getOperand(0), Cond); 5043 } 5044 } 5045 } 5046 5047 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5048 N0.getValueType().isInteger()) { 5049 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 5050 N0.getOpcode() == ISD::XOR) { 5051 // Simplify (X+Y) == (X+Z) --> Y == Z 5052 if (N0.getOpcode() == N1.getOpcode()) { 5053 if (N0.getOperand(0) == N1.getOperand(0)) 5054 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 5055 if (N0.getOperand(1) == N1.getOperand(1)) 5056 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 5057 if (isCommutativeBinOp(N0.getOpcode())) { 5058 // If X op Y == Y op X, try other combinations. 5059 if (N0.getOperand(0) == N1.getOperand(1)) 5060 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 5061 Cond); 5062 if (N0.getOperand(1) == N1.getOperand(0)) 5063 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 5064 Cond); 5065 } 5066 } 5067 5068 // If RHS is a legal immediate value for a compare instruction, we need 5069 // to be careful about increasing register pressure needlessly. 5070 bool LegalRHSImm = false; 5071 5072 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 5073 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 5074 // Turn (X+C1) == C2 --> X == C2-C1 5075 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) 5076 return DAG.getSetCC( 5077 dl, VT, N0.getOperand(0), 5078 DAG.getConstant(RHSC->getAPIntValue() - LHSR->getAPIntValue(), 5079 dl, N0.getValueType()), 5080 Cond); 5081 5082 // Turn (X^C1) == C2 --> X == C1^C2 5083 if (N0.getOpcode() == ISD::XOR && N0.getNode()->hasOneUse()) 5084 return DAG.getSetCC( 5085 dl, VT, N0.getOperand(0), 5086 DAG.getConstant(LHSR->getAPIntValue() ^ RHSC->getAPIntValue(), 5087 dl, N0.getValueType()), 5088 Cond); 5089 } 5090 5091 // Turn (C1-X) == C2 --> X == C1-C2 5092 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) 5093 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) 5094 return DAG.getSetCC( 5095 dl, VT, N0.getOperand(1), 5096 DAG.getConstant(SUBC->getAPIntValue() - RHSC->getAPIntValue(), 5097 dl, N0.getValueType()), 5098 Cond); 5099 5100 // Could RHSC fold directly into a compare? 5101 if (RHSC->getValueType(0).getSizeInBits() <= 64) 5102 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 5103 } 5104 5105 // (X+Y) == X --> Y == 0 and similar folds. 5106 // Don't do this if X is an immediate that can fold into a cmp 5107 // instruction and X+Y has other uses. It could be an induction variable 5108 // chain, and the transform would increase register pressure. 
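// E.g. (seteq (add X, Y), X) becomes (seteq Y, 0) via foldSetCCWithBinOp
// below; the guard skips the fold when the RHS is already a legal
// compare immediate and the binop has other uses.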
5109 if (!LegalRHSImm || N0.hasOneUse()) 5110 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 5111 return V; 5112 } 5113 5114 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 5115 N1.getOpcode() == ISD::XOR) 5116 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 5117 return V; 5118 5119 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 5120 return V; 5121 } 5122 5123 // Fold remainder of division by a constant. 5124 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 5125 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 5126 // When division is cheap or optimizing for minimum size, 5127 // fall through to DIVREM creation by skipping this fold. 5128 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttr(Attribute::MinSize)) { 5129 if (N0.getOpcode() == ISD::UREM) { 5130 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 5131 return Folded; 5132 } else if (N0.getOpcode() == ISD::SREM) { 5133 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 5134 return Folded; 5135 } 5136 } 5137 } 5138 5139 // Fold away ALL boolean setcc's. 5140 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 5141 SDValue Temp; 5142 switch (Cond) { 5143 default: llvm_unreachable("Unknown integer setcc!"); 5144 case ISD::SETEQ: // X == Y -> ~(X^Y) 5145 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 5146 N0 = DAG.getNOT(dl, Temp, OpVT); 5147 if (!DCI.isCalledByLegalizer()) 5148 DCI.AddToWorklist(Temp.getNode()); 5149 break; 5150 case ISD::SETNE: // X != Y --> (X^Y) 5151 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 5152 break; 5153 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 5154 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 5155 Temp = DAG.getNOT(dl, N0, OpVT); 5156 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 5157 if (!DCI.isCalledByLegalizer()) 5158 DCI.AddToWorklist(Temp.getNode()); 5159 break; 5160 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 5161 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 5162 Temp = DAG.getNOT(dl, N1, OpVT); 5163 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 5164 if (!DCI.isCalledByLegalizer()) 5165 DCI.AddToWorklist(Temp.getNode()); 5166 break; 5167 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 5168 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 5169 Temp = DAG.getNOT(dl, N0, OpVT); 5170 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 5171 if (!DCI.isCalledByLegalizer()) 5172 DCI.AddToWorklist(Temp.getNode()); 5173 break; 5174 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 5175 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 5176 Temp = DAG.getNOT(dl, N1, OpVT); 5177 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 5178 break; 5179 } 5180 if (VT.getScalarType() != MVT::i1) { 5181 if (!DCI.isCalledByLegalizer()) 5182 DCI.AddToWorklist(N0.getNode()); 5183 // FIXME: If running after legalize, we probably can't do this. 5184 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 5185 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 5186 } 5187 return N0; 5188 } 5189 5190 // Could not fold it. 5191 return SDValue(); 5192 } 5193 5194 /// Returns true (and the GlobalValue and the offset) if the node is a 5195 /// GlobalAddress + offset. 
5196 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 5197 int64_t &Offset) const { 5198 5199 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 5200 5201 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 5202 GA = GASD->getGlobal(); 5203 Offset += GASD->getOffset(); 5204 return true; 5205 } 5206 5207 if (N->getOpcode() == ISD::ADD) { 5208 SDValue N1 = N->getOperand(0); 5209 SDValue N2 = N->getOperand(1); 5210 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 5211 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 5212 Offset += V->getSExtValue(); 5213 return true; 5214 } 5215 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 5216 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 5217 Offset += V->getSExtValue(); 5218 return true; 5219 } 5220 } 5221 } 5222 5223 return false; 5224 } 5225 5226 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 5227 DAGCombinerInfo &DCI) const { 5228 // Default implementation: no optimization. 5229 return SDValue(); 5230 } 5231 5232 //===----------------------------------------------------------------------===// 5233 // Inline Assembler Implementation Methods 5234 //===----------------------------------------------------------------------===// 5235 5236 TargetLowering::ConstraintType 5237 TargetLowering::getConstraintType(StringRef Constraint) const { 5238 unsigned S = Constraint.size(); 5239 5240 if (S == 1) { 5241 switch (Constraint[0]) { 5242 default: break; 5243 case 'r': 5244 return C_RegisterClass; 5245 case 'm': // memory 5246 case 'o': // offsetable 5247 case 'V': // not offsetable 5248 return C_Memory; 5249 case 'p': // Address. 5250 return C_Address; 5251 case 'n': // Simple Integer 5252 case 'E': // Floating Point Constant 5253 case 'F': // Floating Point Constant 5254 return C_Immediate; 5255 case 'i': // Simple Integer or Relocatable Constant 5256 case 's': // Relocatable Constant 5257 case 'X': // Allow ANY value. 5258 case 'I': // Target registers. 5259 case 'J': 5260 case 'K': 5261 case 'L': 5262 case 'M': 5263 case 'N': 5264 case 'O': 5265 case 'P': 5266 case '<': 5267 case '>': 5268 return C_Other; 5269 } 5270 } 5271 5272 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 5273 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 5274 return C_Memory; 5275 return C_Register; 5276 } 5277 return C_Unknown; 5278 } 5279 5280 /// Try to replace an X constraint, which matches anything, with another that 5281 /// has more specific requirements based on the type of the corresponding 5282 /// operand. 5283 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 5284 if (ConstraintVT.isInteger()) 5285 return "r"; 5286 if (ConstraintVT.isFloatingPoint()) 5287 return "f"; // works for many targets 5288 return nullptr; 5289 } 5290 5291 SDValue TargetLowering::LowerAsmOutputForConstraint( 5292 SDValue &Chain, SDValue &Glue, const SDLoc &DL, 5293 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 5294 return SDValue(); 5295 } 5296 5297 /// Lower the specified operand into the Ops vector. 5298 /// If it is invalid, don't add anything to Ops. 
5299 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
5300 std::string &Constraint,
5301 std::vector<SDValue> &Ops,
5302 SelectionDAG &DAG) const {
5303
5304 if (Constraint.length() > 1) return;
5305
5306 char ConstraintLetter = Constraint[0];
5307 switch (ConstraintLetter) {
5308 default: break;
5309 case 'X': // Allows any operand
5310 case 'i': // Simple Integer or Relocatable Constant
5311 case 'n': // Simple Integer
5312 case 's': { // Relocatable Constant
5313
5314 ConstantSDNode *C;
5315 uint64_t Offset = 0;
5316
5317 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
5318 // etc., since getelementptr is variadic. We can't use
5319 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible
5320 // while in this case the GA may be furthest from the root node which is
5321 // likely an ISD::ADD.
5322 while (true) {
5323 if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') {
5324 // gcc prints these as sign extended. Sign extend value to 64 bits
5325 // now; without this it would get ZExt'd later in
5326 // ScheduleDAGSDNodes::EmitNode, which is very generic.
5327 bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
5328 BooleanContent BCont = getBooleanContents(MVT::i64);
5329 ISD::NodeType ExtOpc =
5330 IsBool ? getExtendForContent(BCont) : ISD::SIGN_EXTEND;
5331 int64_t ExtVal =
5332 ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue() : C->getSExtValue();
5333 Ops.push_back(
5334 DAG.getTargetConstant(Offset + ExtVal, SDLoc(C), MVT::i64));
5335 return;
5336 }
5337 if (ConstraintLetter != 'n') {
5338 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
5339 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
5340 GA->getValueType(0),
5341 Offset + GA->getOffset()));
5342 return;
5343 }
5344 if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
5345 Ops.push_back(DAG.getTargetBlockAddress(
5346 BA->getBlockAddress(), BA->getValueType(0),
5347 Offset + BA->getOffset(), BA->getTargetFlags()));
5348 return;
5349 }
5350 if (isa<BasicBlockSDNode>(Op)) {
5351 Ops.push_back(Op);
5352 return;
5353 }
5354 }
5355 const unsigned OpCode = Op.getOpcode();
5356 if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
5357 if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
5358 Op = Op.getOperand(1);
5359 // Subtraction is not commutative.
5360 else if (OpCode == ISD::ADD &&
5361 (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
5362 Op = Op.getOperand(0);
5363 else
5364 return;
5365 Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
5366 continue;
5367 }
5368 return;
5369 }
5370 break;
5371 }
5372 }
5373 }
5374
5375 void TargetLowering::CollectTargetIntrinsicOperands(
5376 const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
5377 }
5378
5379 std::pair<unsigned, const TargetRegisterClass *>
5380 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
5381 StringRef Constraint,
5382 MVT VT) const {
5383 if (Constraint.empty() || Constraint[0] != '{')
5384 return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
5385 assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
5386
5387 // Remove the braces from around the name.
5388 StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
5389
5390 std::pair<unsigned, const TargetRegisterClass *> R =
5391 std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
5392
5393 // Figure out which register class contains this reg.
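// E.g. a "{eax}" constraint with VT == i32 resolves to EAX in a 32-bit
// class on x86 (a target-specific illustration; the name search itself
// is target-independent).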
5394 for (const TargetRegisterClass *RC : RI->regclasses()) {
5395 // If none of the value types for this register class are valid, we
5396 // can't use it. For example, 64-bit reg classes on 32-bit targets.
5397 if (!isLegalRC(*RI, *RC))
5398 continue;
5399
5400 for (const MCPhysReg &PR : *RC) {
5401 if (RegName.equals_insensitive(RI->getRegAsmName(PR))) {
5402 std::pair<unsigned, const TargetRegisterClass *> S =
5403 std::make_pair(PR, RC);
5404
5405 // If this register class has the requested value type, return it,
5406 // otherwise keep searching and return the first class found
5407 // if no other is found which explicitly has the requested type.
5408 if (RI->isTypeLegalForClass(*RC, VT))
5409 return S;
5410 if (!R.second)
5411 R = S;
5412 }
5413 }
5414 }
5415
5416 return R;
5417 }
5418
5419 //===----------------------------------------------------------------------===//
5420 // Constraint Selection.
5421
5422 /// Return true if this is an input operand that is a matching constraint like
5423 /// "4".
5424 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
5425 assert(!ConstraintCode.empty() && "No known constraint!");
5426 return isdigit(static_cast<unsigned char>(ConstraintCode[0]));
5427 }
5428
5429 /// If this is an input matching constraint, this method returns the output
5430 /// operand it matches.
5431 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
5432 assert(!ConstraintCode.empty() && "No known constraint!");
5433 return atoi(ConstraintCode.c_str());
5434 }
5435
5436 /// Split up the constraint string from the inline assembly value into the
5437 /// specific constraints and their prefixes, and also tie in the associated
5438 /// operand values.
5439 /// If this returns an empty vector, and if the constraint string itself
5440 /// isn't empty, there was an error parsing.
5441 TargetLowering::AsmOperandInfoVector
5442 TargetLowering::ParseConstraints(const DataLayout &DL,
5443 const TargetRegisterInfo *TRI,
5444 const CallBase &Call) const {
5445 /// Information about all of the constraints.
5446 AsmOperandInfoVector ConstraintOperands;
5447 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
5448 unsigned maCount = 0; // Largest number of multiple alternative constraints.
5449
5450 // Do a prepass over the constraints, canonicalizing them, and building up the
5451 // ConstraintOperands list.
5452 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5453 unsigned ResNo = 0; // ResNo - The result number of the next output.
5454 unsigned LabelNo = 0; // LabelNo - CallBr indirect dest number.
5455
5456 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
5457 ConstraintOperands.emplace_back(std::move(CI));
5458 AsmOperandInfo &OpInfo = ConstraintOperands.back();
5459
5460 // Update multiple alternative constraint count.
5461 if (OpInfo.multipleAlternatives.size() > maCount)
5462 maCount = OpInfo.multipleAlternatives.size();
5463
5464 OpInfo.ConstraintVT = MVT::Other;
5465
5466 // Compute the value type for each operand.
5467 switch (OpInfo.Type) {
5468 case InlineAsm::isOutput:
5469 // Indirect outputs just consume an argument.
5470 if (OpInfo.isIndirect) {
5471 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
5472 break;
5473 }
5474
5475 // The return value of the call is this value. As such, there is no
5476 // corresponding argument.
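// E.g. for "={eax},r" the lone output is the call's own result value,
// whereas an indirect output such as "=*m" was consumed as an argument
// in the branch above.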
5477 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
5478 if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
5479 OpInfo.ConstraintVT =
5480 getSimpleValueType(DL, STy->getElementType(ResNo));
5481 } else {
5482 assert(ResNo == 0 && "Asm only has one result!");
5483 OpInfo.ConstraintVT =
5484 getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
5485 }
5486 ++ResNo;
5487 break;
5488 case InlineAsm::isInput:
5489 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
5490 break;
5491 case InlineAsm::isLabel:
5492 OpInfo.CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
5493 ++LabelNo;
5494 continue;
5495 case InlineAsm::isClobber:
5496 // Nothing to do.
5497 break;
5498 }
5499
5500 if (OpInfo.CallOperandVal) {
5501 llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
5502 if (OpInfo.isIndirect) {
5503 OpTy = Call.getParamElementType(ArgNo);
5504 assert(OpTy && "Indirect operand must have elementtype attribute");
5505 }
5506
5507 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
5508 if (StructType *STy = dyn_cast<StructType>(OpTy))
5509 if (STy->getNumElements() == 1)
5510 OpTy = STy->getElementType(0);
5511
5512 // If OpTy is not a single value, it may be a struct/union that we
5513 // can tile with integers.
5514 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
5515 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
5516 switch (BitSize) {
5517 default: break;
5518 case 1:
5519 case 8:
5520 case 16:
5521 case 32:
5522 case 64:
5523 case 128:
5524 OpTy = IntegerType::get(OpTy->getContext(), BitSize);
5525 break;
5526 }
5527 }
5528
5529 EVT VT = getAsmOperandValueType(DL, OpTy, true);
5530 OpInfo.ConstraintVT = VT.isSimple() ? VT.getSimpleVT() : MVT::Other;
5531 ArgNo++;
5532 }
5533 }
5534
5535 // If we have multiple alternative constraints, select the best alternative.
5536 if (!ConstraintOperands.empty()) {
5537 if (maCount) {
5538 unsigned bestMAIndex = 0;
5539 int bestWeight = -1;
5540 // weight: -1 = invalid match; 0 = so-so match; 5 = good match.
5541 int weight = -1;
5542 unsigned maIndex;
5543 // Compute the sums of the weights for each alternative, keeping track
5544 // of the best (highest weight) one so far.
5545 for (maIndex = 0; maIndex < maCount; ++maIndex) {
5546 int weightSum = 0;
5547 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size();
5548 cIndex != eIndex; ++cIndex) {
5549 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex];
5550 if (OpInfo.Type == InlineAsm::isClobber)
5551 continue;
5552
5553 // If this is an output operand with a matching input operand,
5554 // look up the matching input. If their types mismatch, e.g. one
5555 // is an integer, the other is floating point, or their sizes are
5556 // different, flag the whole alternative as unusable.
5557 if (OpInfo.hasMatchingInput()) {
5558 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5559 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5560 if ((OpInfo.ConstraintVT.isInteger() !=
5561 Input.ConstraintVT.isInteger()) ||
5562 (OpInfo.ConstraintVT.getSizeInBits() !=
5563 Input.ConstraintVT.getSizeInBits())) {
5564 weightSum = -1; // Can't match.
5565 break;
5566 }
5567 }
5568 }
5569 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex);
5570 if (weight == -1) {
5571 weightSum = -1;
5572 break;
5573 }
5574 weightSum += weight;
5575 }
5576 // Update best.
5577 if (weightSum > bestWeight) {
5578 bestWeight = weightSum;
5579 bestMAIndex = maIndex;
5580 }
5581 }
5582
5583 // Now select the chosen alternative in each constraint.
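// E.g. if every operand offered the alternatives "r,m" and the register
// alternative accumulated the higher weight sum, each operand now
// commits to its 'r' variant.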
5584 for (AsmOperandInfo &cInfo : ConstraintOperands) 5585 if (cInfo.Type != InlineAsm::isClobber) 5586 cInfo.selectAlternative(bestMAIndex); 5587 } 5588 } 5589 5590 // Check and hook up tied operands, choose constraint code to use. 5591 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 5592 cIndex != eIndex; ++cIndex) { 5593 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 5594 5595 // If this is an output operand with a matching input operand, look up the 5596 // matching input. If their types mismatch, e.g. one is an integer, the 5597 // other is floating point, or their sizes are different, flag it as an 5598 // error. 5599 if (OpInfo.hasMatchingInput()) { 5600 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 5601 5602 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 5603 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 5604 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 5605 OpInfo.ConstraintVT); 5606 std::pair<unsigned, const TargetRegisterClass *> InputRC = 5607 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 5608 Input.ConstraintVT); 5609 if ((OpInfo.ConstraintVT.isInteger() != 5610 Input.ConstraintVT.isInteger()) || 5611 (MatchRC.second != InputRC.second)) { 5612 report_fatal_error("Unsupported asm: input constraint" 5613 " with a matching output constraint of" 5614 " incompatible type!"); 5615 } 5616 } 5617 } 5618 } 5619 5620 return ConstraintOperands; 5621 } 5622 5623 /// Return an integer indicating how general CT is. 5624 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 5625 switch (CT) { 5626 case TargetLowering::C_Immediate: 5627 case TargetLowering::C_Other: 5628 case TargetLowering::C_Unknown: 5629 return 0; 5630 case TargetLowering::C_Register: 5631 return 1; 5632 case TargetLowering::C_RegisterClass: 5633 return 2; 5634 case TargetLowering::C_Memory: 5635 case TargetLowering::C_Address: 5636 return 3; 5637 } 5638 llvm_unreachable("Invalid constraint type"); 5639 } 5640 5641 /// Examine constraint type and operand type and determine a weight value. 5642 /// This object must already have been set up with the operand type 5643 /// and the current alternative constraint selected. 5644 TargetLowering::ConstraintWeight 5645 TargetLowering::getMultipleConstraintMatchWeight( 5646 AsmOperandInfo &info, int maIndex) const { 5647 InlineAsm::ConstraintCodeVector *rCodes; 5648 if (maIndex >= (int)info.multipleAlternatives.size()) 5649 rCodes = &info.Codes; 5650 else 5651 rCodes = &info.multipleAlternatives[maIndex].Codes; 5652 ConstraintWeight BestWeight = CW_Invalid; 5653 5654 // Loop over the options, keeping track of the most general one. 5655 for (const std::string &rCode : *rCodes) { 5656 ConstraintWeight weight = 5657 getSingleConstraintMatchWeight(info, rCode.c_str()); 5658 if (weight > BestWeight) 5659 BestWeight = weight; 5660 } 5661 5662 return BestWeight; 5663 } 5664 5665 /// Examine constraint type and operand type and determine a weight value. 5666 /// This object must already have been set up with the operand type 5667 /// and the current alternative constraint selected. 5668 TargetLowering::ConstraintWeight 5669 TargetLowering::getSingleConstraintMatchWeight( 5670 AsmOperandInfo &info, const char *constraint) const { 5671 ConstraintWeight weight = CW_Invalid; 5672 Value *CallOperandVal = info.CallOperandVal; 5673 // If we don't have a value, we can't do a match, 5674 // but allow it at the lowest weight. 
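// E.g. a ConstantInt operand scores CW_Constant against 'i' but only
// CW_Default against 'X', so tighter constraints win when they apply.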
5675 if (!CallOperandVal)
5676 return CW_Default;
5677 // Look at the constraint type.
5678 switch (*constraint) {
5679 case 'i': // immediate integer.
5680 case 'n': // immediate integer with a known value.
5681 if (isa<ConstantInt>(CallOperandVal))
5682 weight = CW_Constant;
5683 break;
5684 case 's': // non-explicit integral immediate.
5685 if (isa<GlobalValue>(CallOperandVal))
5686 weight = CW_Constant;
5687 break;
5688 case 'E': // immediate float, if host format matches the target format.
5689 case 'F': // immediate float.
5690 if (isa<ConstantFP>(CallOperandVal))
5691 weight = CW_Constant;
5692 break;
5693 case '<': // memory operand with autodecrement.
5694 case '>': // memory operand with autoincrement.
5695 case 'm': // memory operand.
5696 case 'o': // offsettable memory operand
5697 case 'V': // non-offsettable memory operand
5698 weight = CW_Memory;
5699 break;
5700 case 'r': // general register.
5701 case 'g': // general register, memory operand or immediate integer.
5702 // note: Clang converts "g" to "imr".
5703 if (CallOperandVal->getType()->isIntegerTy())
5704 weight = CW_Register;
5705 break;
5706 case 'X': // any operand.
5707 default:
5708 weight = CW_Default;
5709 break;
5710 }
5711 return weight;
5712 }
5713
5714 /// If there are multiple different constraints that we could pick for this
5715 /// operand (e.g. "imr") try to pick the 'best' one.
5716 /// This is somewhat tricky: constraints fall into four classes:
5717 /// Other -> immediates and magic values
5718 /// Register -> one specific register
5719 /// RegisterClass -> a group of regs
5720 /// Memory -> memory
5721 /// Ideally, we would pick the most specific constraint possible: if we have
5722 /// something that fits into a register, we would pick it. The problem here
5723 /// is that if we have something that could either be in a register or in
5724 /// memory, then use of the register could cause selection of *other*
5725 /// operands to fail: they might only succeed if we pick memory. Because of
5726 /// this the heuristic we use is:
5727 ///
5728 /// 1) If there is an 'other' constraint, and if the operand is valid for
5729 /// that constraint, use it. This makes us take advantage of 'i'
5730 /// constraints when available.
5731 /// 2) Otherwise, pick the most general constraint present. This prefers
5732 /// 'm' over 'r', for example.
5733 ///
5734 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
5735 const TargetLowering &TLI,
5736 SDValue Op, SelectionDAG *DAG) {
5737 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
5738 unsigned BestIdx = 0;
5739 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
5740 int BestGenerality = -1;
5741
5742 // Loop over the options, keeping track of the most general one.
5743 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
5744 TargetLowering::ConstraintType CType =
5745 TLI.getConstraintType(OpInfo.Codes[i]);
5746
5747 // Indirect 'other' or 'immediate' constraints are not allowed.
5748 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
5749 CType == TargetLowering::C_Register ||
5750 CType == TargetLowering::C_RegisterClass))
5751 continue;
5752
5753 // If this is an 'other' or 'immediate' constraint, see if the operand is
5754 // valid for it. For example, on X86 we might have an 'rI' constraint. If
5755 // the operand is an integer in the range [0..31] we want to use I (saving a
5756 // load of a register), otherwise we must use 'r'.
5757 if ((CType == TargetLowering::C_Other || 5758 CType == TargetLowering::C_Immediate) && Op.getNode()) { 5759 assert(OpInfo.Codes[i].size() == 1 && 5760 "Unhandled multi-letter 'other' constraint"); 5761 std::vector<SDValue> ResultOps; 5762 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 5763 ResultOps, *DAG); 5764 if (!ResultOps.empty()) { 5765 BestType = CType; 5766 BestIdx = i; 5767 break; 5768 } 5769 } 5770 5771 // Things with matching constraints can only be registers, per gcc 5772 // documentation. This mainly affects "g" constraints. 5773 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 5774 continue; 5775 5776 // This constraint letter is more general than the previous one, use it. 5777 int Generality = getConstraintGenerality(CType); 5778 if (Generality > BestGenerality) { 5779 BestType = CType; 5780 BestIdx = i; 5781 BestGenerality = Generality; 5782 } 5783 } 5784 5785 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 5786 OpInfo.ConstraintType = BestType; 5787 } 5788 5789 /// Determines the constraint code and constraint type to use for the specific 5790 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 5791 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 5792 SDValue Op, 5793 SelectionDAG *DAG) const { 5794 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 5795 5796 // Single-letter constraints ('r') are very common. 5797 if (OpInfo.Codes.size() == 1) { 5798 OpInfo.ConstraintCode = OpInfo.Codes[0]; 5799 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5800 } else { 5801 ChooseConstraint(OpInfo, *this, Op, DAG); 5802 } 5803 5804 // 'X' matches anything. 5805 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 5806 // Constants are handled elsewhere. For Functions, the type here is the 5807 // type of the result, which is not what we want to look at; leave them 5808 // alone. 5809 Value *v = OpInfo.CallOperandVal; 5810 if (isa<ConstantInt>(v) || isa<Function>(v)) { 5811 return; 5812 } 5813 5814 if (isa<BasicBlock>(v) || isa<BlockAddress>(v)) { 5815 OpInfo.ConstraintCode = "i"; 5816 return; 5817 } 5818 5819 // Otherwise, try to resolve it to something we know about by looking at 5820 // the actual operand type. 5821 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 5822 OpInfo.ConstraintCode = Repl; 5823 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 5824 } 5825 } 5826 } 5827 5828 /// Given an exact SDIV by a constant, create a multiplication 5829 /// with the multiplicative inverse of the constant. 5830 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 5831 const SDLoc &dl, SelectionDAG &DAG, 5832 SmallVectorImpl<SDNode *> &Created) { 5833 SDValue Op0 = N->getOperand(0); 5834 SDValue Op1 = N->getOperand(1); 5835 EVT VT = N->getValueType(0); 5836 EVT SVT = VT.getScalarType(); 5837 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 5838 EVT ShSVT = ShVT.getScalarType(); 5839 5840 bool UseSRA = false; 5841 SmallVector<SDValue, 16> Shifts, Factors; 5842 5843 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 5844 if (C->isZero()) 5845 return false; 5846 APInt Divisor = C->getAPIntValue(); 5847 unsigned Shift = Divisor.countr_zero(); 5848 if (Shift) { 5849 Divisor.ashrInPlace(Shift); 5850 UseSRA = true; 5851 } 5852 // Calculate the multiplicative inverse, using Newton's method. 
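// Each iteration of x <- x * (2 - d * x) doubles the number of correct
// low-order bits. E.g. for Divisor = 3 at 32 bits this converges to
// Factor = 0xAAAAAAAB, since 3 * 0xAAAAAAAB == 1 (mod 2^32).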
5853 APInt t; 5854 APInt Factor = Divisor; 5855 while ((t = Divisor * Factor) != 1) 5856 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 5857 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 5858 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 5859 return true; 5860 }; 5861 5862 // Collect all magic values from the build vector. 5863 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 5864 return SDValue(); 5865 5866 SDValue Shift, Factor; 5867 if (Op1.getOpcode() == ISD::BUILD_VECTOR) { 5868 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 5869 Factor = DAG.getBuildVector(VT, dl, Factors); 5870 } else if (Op1.getOpcode() == ISD::SPLAT_VECTOR) { 5871 assert(Shifts.size() == 1 && Factors.size() == 1 && 5872 "Expected matchUnaryPredicate to return one element for scalable " 5873 "vectors"); 5874 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]); 5875 Factor = DAG.getSplatVector(VT, dl, Factors[0]); 5876 } else { 5877 assert(isa<ConstantSDNode>(Op1) && "Expected a constant"); 5878 Shift = Shifts[0]; 5879 Factor = Factors[0]; 5880 } 5881 5882 SDValue Res = Op0; 5883 5884 // Shift the value upfront if it is even, so the LSB is one. 5885 if (UseSRA) { 5886 // TODO: For UDIV use SRL instead of SRA. 5887 SDNodeFlags Flags; 5888 Flags.setExact(true); 5889 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 5890 Created.push_back(Res.getNode()); 5891 } 5892 5893 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 5894 } 5895 5896 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 5897 SelectionDAG &DAG, 5898 SmallVectorImpl<SDNode *> &Created) const { 5899 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5900 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5901 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5902 return SDValue(N, 0); // Lower SDIV as SDIV 5903 return SDValue(); 5904 } 5905 5906 SDValue 5907 TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor, 5908 SelectionDAG &DAG, 5909 SmallVectorImpl<SDNode *> &Created) const { 5910 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 5911 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5912 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 5913 return SDValue(N, 0); // Lower SREM as SREM 5914 return SDValue(); 5915 } 5916 5917 /// Given an ISD::SDIV node expressing a divide by constant, 5918 /// return a DAG expression to select that will generate the same value by 5919 /// multiplying by a magic number. 5920 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5921 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 5922 bool IsAfterLegalization, 5923 SmallVectorImpl<SDNode *> &Created) const { 5924 SDLoc dl(N); 5925 EVT VT = N->getValueType(0); 5926 EVT SVT = VT.getScalarType(); 5927 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5928 EVT ShSVT = ShVT.getScalarType(); 5929 unsigned EltBits = VT.getScalarSizeInBits(); 5930 EVT MulVT; 5931 5932 // Check to see if we can do this. 5933 // FIXME: We should be more aggressive here. 5934 if (!isTypeLegal(VT)) { 5935 // Limit this to simple scalars for now. 5936 if (VT.isVector() || !VT.isSimple()) 5937 return SDValue(); 5938 5939 // If this type will be promoted to a large enough type with a legal 5940 // multiply operation, we can go ahead and do this transform. 
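// E.g. an illegal i8 sdiv can be rewritten here if i8 promotes to a
// MulVT of at least i16 with a legal MUL; the multiply is then done in
// MulVT and the high EltBits of the product are extracted (see GetMULHS
// below).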
5941 if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger)
5942 return SDValue();
5943
5944 MulVT = getTypeToTransformTo(*DAG.getContext(), VT);
5945 if (MulVT.getSizeInBits() < (2 * EltBits) ||
5946 !isOperationLegal(ISD::MUL, MulVT))
5947 return SDValue();
5948 }
5949
5950 // If the sdiv has an 'exact' bit we can use a simpler lowering.
5951 if (N->getFlags().hasExact())
5952 return BuildExactSDIV(*this, N, dl, DAG, Created);
5953
5954 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks;
5955
5956 auto BuildSDIVPattern = [&](ConstantSDNode *C) {
5957 if (C->isZero())
5958 return false;
5959
5960 const APInt &Divisor = C->getAPIntValue();
5961 SignedDivisionByConstantInfo magics = SignedDivisionByConstantInfo::get(Divisor);
5962 int NumeratorFactor = 0;
5963 int ShiftMask = -1;
5964
5965 if (Divisor.isOne() || Divisor.isAllOnes()) {
5966 // If d is +1/-1, we just multiply the numerator by +1/-1.
5967 NumeratorFactor = Divisor.getSExtValue();
5968 magics.Magic = 0;
5969 magics.ShiftAmount = 0;
5970 ShiftMask = 0;
5971 } else if (Divisor.isStrictlyPositive() && magics.Magic.isNegative()) {
5972 // If d > 0 and m < 0, add the numerator.
5973 NumeratorFactor = 1;
5974 } else if (Divisor.isNegative() && magics.Magic.isStrictlyPositive()) {
5975 // If d < 0 and m > 0, subtract the numerator.
5976 NumeratorFactor = -1;
5977 }
5978
5979 MagicFactors.push_back(DAG.getConstant(magics.Magic, dl, SVT));
5980 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT));
5981 Shifts.push_back(DAG.getConstant(magics.ShiftAmount, dl, ShSVT));
5982 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT));
5983 return true;
5984 };
5985
5986 SDValue N0 = N->getOperand(0);
5987 SDValue N1 = N->getOperand(1);
5988
5989 // Collect the shifts / magic values from each element.
5990 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
5991 return SDValue();
5992
5993 SDValue MagicFactor, Factor, Shift, ShiftMask;
5994 if (N1.getOpcode() == ISD::BUILD_VECTOR) {
5995 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
5996 Factor = DAG.getBuildVector(VT, dl, Factors);
5997 Shift = DAG.getBuildVector(ShVT, dl, Shifts);
5998 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks);
5999 } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) {
6000 assert(MagicFactors.size() == 1 && Factors.size() == 1 &&
6001 Shifts.size() == 1 && ShiftMasks.size() == 1 &&
6002 "Expected matchUnaryPredicate to return one element for scalable "
6003 "vectors");
6004 MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
6005 Factor = DAG.getSplatVector(VT, dl, Factors[0]);
6006 Shift = DAG.getSplatVector(ShVT, dl, Shifts[0]);
6007 ShiftMask = DAG.getSplatVector(VT, dl, ShiftMasks[0]);
6008 } else {
6009 assert(isa<ConstantSDNode>(N1) && "Expected a constant");
6010 MagicFactor = MagicFactors[0];
6011 Factor = Factors[0];
6012 Shift = Shifts[0];
6013 ShiftMask = ShiftMasks[0];
6014 }
6015
6016 // Multiply the numerator (operand 0) by the magic value.
6017 // FIXME: We should support doing a MUL in a wider type.
6018 auto GetMULHS = [&](SDValue X, SDValue Y) {
6019 // If the type isn't legal, use a wider mul of the type calculated
6020 // earlier.
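// When the type is legal, the preference order below is: a native MULHS
// node, the high half of SMUL_LOHI, then a MUL in a type twice as wide
// (shifted down and truncated); if none is available, BuildSDIV gives up.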
    if (!isTypeLegal(VT)) {
      X = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, X);
      Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MulVT, Y);
      Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y);
      Y = DAG.getNode(ISD::SRL, dl, MulVT, Y,
                      DAG.getShiftAmountConstant(EltBits, MulVT, dl));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Y);
    }

    if (isOperationLegalOrCustom(ISD::MULHS, VT, IsAfterLegalization))
      return DAG.getNode(ISD::MULHS, dl, VT, X, Y);
    if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT, IsAfterLegalization)) {
      SDValue LoHi =
          DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
      return SDValue(LoHi.getNode(), 1);
    }
    // If a type twice as wide is legal, widen and use a mul plus a shift.
    unsigned Size = VT.getScalarSizeInBits();
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), Size * 2);
    if (VT.isVector())
      WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                                VT.getVectorElementCount());
    if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
      X = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, X);
      Y = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, Y);
      Y = DAG.getNode(ISD::MUL, dl, WideVT, X, Y);
      Y = DAG.getNode(ISD::SRL, dl, WideVT, Y,
                      DAG.getShiftAmountConstant(EltBits, WideVT, dl));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Y);
    }
    return SDValue();
  };

  SDValue Q = GetMULHS(N0, MagicFactor);
  if (!Q)
    return SDValue();

  Created.push_back(Q.getNode());

  // (Optionally) Add/subtract the numerator using Factor.
  Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor);
  Created.push_back(Factor.getNode());
  Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor);
  Created.push_back(Q.getNode());

  // Shift right algebraic by the shift value.
  Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
  Created.push_back(Q.getNode());

  // Extract the sign bit, mask it and add it to the quotient.
  SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT);
  SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift);
  Created.push_back(T.getNode());
  T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask);
  Created.push_back(T.getNode());
  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
}

/// Given an ISD::UDIV node expressing a divide by constant,
/// return a DAG expression to select that will generate the same value by
/// multiplying by a magic number.
/// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
                                  bool IsAfterLegalization,
                                  SmallVectorImpl<SDNode *> &Created) const {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
  EVT ShSVT = ShVT.getScalarType();
  unsigned EltBits = VT.getScalarSizeInBits();
  EVT MulVT;

  // Check to see if we can do this.
  // FIXME: We should be more aggressive here.
  if (!isTypeLegal(VT)) {
    // Limit this to simple scalars for now.
    if (VT.isVector() || !VT.isSimple())
      return SDValue();

    // If this type will be promoted to a large enough type with a legal
    // multiply operation, we can go ahead and do this transform.
    if (getTypeAction(VT.getSimpleVT()) != TypePromoteInteger)
      return SDValue();

    MulVT = getTypeToTransformTo(*DAG.getContext(), VT);
    if (MulVT.getSizeInBits() < (2 * EltBits) ||
        !isOperationLegal(ISD::MUL, MulVT))
      return SDValue();
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Try to use leading zeros of the dividend to reduce the multiplier and
  // avoid expensive fixups.
  // TODO: Support vectors.
  unsigned LeadingZeros = 0;
  if (!VT.isVector() && isa<ConstantSDNode>(N1)) {
    assert(!isOneConstant(N1) && "Unexpected divisor");
    LeadingZeros = DAG.computeKnownBits(N0).countMinLeadingZeros();
    // UnsignedDivisionByConstantInfo doesn't work correctly if the leading
    // zeros in the dividend exceed the leading zeros of the divisor.
    LeadingZeros = std::min(
        LeadingZeros, cast<ConstantSDNode>(N1)->getAPIntValue().countl_zero());
  }

  bool UseNPQ = false, UsePreShift = false, UsePostShift = false;
  SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors;

  auto BuildUDIVPattern = [&](ConstantSDNode *C) {
    if (C->isZero())
      return false;
    const APInt &Divisor = C->getAPIntValue();

    SDValue PreShift, MagicFactor, NPQFactor, PostShift;

    // The magic algorithm doesn't work for division by 1. We need to emit a
    // select at the end.
    if (Divisor.isOne()) {
      PreShift = PostShift = DAG.getUNDEF(ShSVT);
      MagicFactor = NPQFactor = DAG.getUNDEF(SVT);
    } else {
      UnsignedDivisionByConstantInfo magics =
          UnsignedDivisionByConstantInfo::get(Divisor, LeadingZeros);

      MagicFactor = DAG.getConstant(magics.Magic, dl, SVT);

      assert(magics.PreShift < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      assert(magics.PostShift < Divisor.getBitWidth() &&
             "We shouldn't generate an undefined shift!");
      assert((!magics.IsAdd || magics.PreShift == 0) &&
             "Unexpected pre-shift");
      PreShift = DAG.getConstant(magics.PreShift, dl, ShSVT);
      PostShift = DAG.getConstant(magics.PostShift, dl, ShSVT);
      NPQFactor = DAG.getConstant(
          magics.IsAdd ? APInt::getOneBitSet(EltBits, EltBits - 1)
                       : APInt::getZero(EltBits),
          dl, SVT);
      UseNPQ |= magics.IsAdd;
      UsePreShift |= magics.PreShift != 0;
      UsePostShift |= magics.PostShift != 0;
    }

    PreShifts.push_back(PreShift);
    MagicFactors.push_back(MagicFactor);
    NPQFactors.push_back(NPQFactor);
    PostShifts.push_back(PostShift);
    return true;
  };

  // Collect the shifts/magic values from each element.
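  // (A classic worked instance, per Hacker's Delight: for W = 32, D = 7 this
  // yields Magic = 0x24924925 with the NPQ fixup and a final shift of 2, i.e.
  //   t = MULHU(x, 0x24924925); q = ((x - t) >> 1) + t; q >>= 2.)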
  if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern))
    return SDValue();

  SDValue PreShift, PostShift, MagicFactor, NPQFactor;
  if (N1.getOpcode() == ISD::BUILD_VECTOR) {
    PreShift = DAG.getBuildVector(ShVT, dl, PreShifts);
    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
    NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors);
    PostShift = DAG.getBuildVector(ShVT, dl, PostShifts);
  } else if (N1.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(PreShifts.size() == 1 && MagicFactors.size() == 1 &&
           NPQFactors.size() == 1 && PostShifts.size() == 1 &&
           "Expected matchUnaryPredicate to return one element for scalable "
           "vectors");
    PreShift = DAG.getSplatVector(ShVT, dl, PreShifts[0]);
    MagicFactor = DAG.getSplatVector(VT, dl, MagicFactors[0]);
    NPQFactor = DAG.getSplatVector(VT, dl, NPQFactors[0]);
    PostShift = DAG.getSplatVector(ShVT, dl, PostShifts[0]);
  } else {
    assert(isa<ConstantSDNode>(N1) && "Expected a constant");
    PreShift = PreShifts[0];
    MagicFactor = MagicFactors[0];
    PostShift = PostShifts[0];
  }

  SDValue Q = N0;
  if (UsePreShift) {
    Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift);
    Created.push_back(Q.getNode());
  }

  // FIXME: We should support doing a MUL in a wider type.
  auto GetMULHU = [&](SDValue X, SDValue Y) {
    // If the type isn't legal, use a wider mul of the type calculated
    // earlier.
    if (!isTypeLegal(VT)) {
      X = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, X);
      Y = DAG.getNode(ISD::ZERO_EXTEND, dl, MulVT, Y);
      Y = DAG.getNode(ISD::MUL, dl, MulVT, X, Y);
      Y = DAG.getNode(ISD::SRL, dl, MulVT, Y,
                      DAG.getShiftAmountConstant(EltBits, MulVT, dl));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Y);
    }

    if (isOperationLegalOrCustom(ISD::MULHU, VT, IsAfterLegalization))
      return DAG.getNode(ISD::MULHU, dl, VT, X, Y);
    if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT, IsAfterLegalization)) {
      SDValue LoHi =
          DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y);
      return SDValue(LoHi.getNode(), 1);
    }
    // If a type twice as wide is legal, widen and use a mul plus a shift.
    unsigned Size = VT.getScalarSizeInBits();
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), Size * 2);
    if (VT.isVector())
      WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                                VT.getVectorElementCount());
    if (isOperationLegalOrCustom(ISD::MUL, WideVT)) {
      X = DAG.getNode(ISD::ZERO_EXTEND, dl, WideVT, X);
      Y = DAG.getNode(ISD::ZERO_EXTEND, dl, WideVT, Y);
      Y = DAG.getNode(ISD::MUL, dl, WideVT, X, Y);
      Y = DAG.getNode(ISD::SRL, dl, WideVT, Y,
                      DAG.getShiftAmountConstant(EltBits, WideVT, dl));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Y);
    }
    return SDValue(); // No mulhu or equivalent.
  };

  // Multiply the numerator (operand 0) by the magic value.
  Q = GetMULHU(Q, MagicFactor);
  if (!Q)
    return SDValue();

  Created.push_back(Q.getNode());

  if (UseNPQ) {
    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q);
    Created.push_back(NPQ.getNode());

    // For vectors we might have a mix of non-NPQ/NPQ paths, so use
    // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero.
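    // (For W = 32 the NPQ lanes get factor 2^31, and MULHU(v, 2^31) == v >> 1;
    // lanes that don't need the fixup get factor 0.)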
    if (VT.isVector())
      NPQ = GetMULHU(NPQ, NPQFactor);
    else
      NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT));

    Created.push_back(NPQ.getNode());

    Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
    Created.push_back(Q.getNode());
  }

  if (UsePostShift) {
    Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift);
    Created.push_back(Q.getNode());
  }

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  SDValue One = DAG.getConstant(1, dl, VT);
  SDValue IsOne = DAG.getSetCC(dl, SetCCVT, N1, One, ISD::SETEQ);
  return DAG.getSelect(dl, VT, IsOne, N0, Q);
}

/// If all values in Values that *don't* match the predicate are the same
/// 'splat' value, then replace all values with that splat value.
/// Else, if AlternativeReplacement was provided, then replace all values that
/// do match the predicate with the AlternativeReplacement value.
static void
turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values,
                          std::function<bool(SDValue)> Predicate,
                          SDValue AlternativeReplacement = SDValue()) {
  SDValue Replacement;
  // Is there a value for which the Predicate does *NOT* match? What is it?
  auto SplatValue = llvm::find_if_not(Values, Predicate);
  if (SplatValue != Values.end()) {
    // Does Values consist only of SplatValue's and values matching Predicate?
    if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) {
          return Value == *SplatValue || Predicate(Value);
        })) // Then we shall replace values matching predicate with SplatValue.
      Replacement = *SplatValue;
  }
  if (!Replacement) {
    // Oops, we did not find the "baseline" splat value.
    if (!AlternativeReplacement)
      return; // Nothing to do.
    // Let's replace with the provided value then.
    Replacement = AlternativeReplacement;
  }
  std::replace_if(Values.begin(), Values.end(), Predicate, Replacement);
}

/// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE
/// where the divisor is constant and the comparison target is zero,
/// return a DAG expression that will generate the same comparison result
/// using only multiplications, additions and shifts/rotations.
/// Ref: "Hacker's Delight" 10-17.
SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                        SDValue CompTargetNode,
                                        ISD::CondCode Cond,
                                        DAGCombinerInfo &DCI,
                                        const SDLoc &DL) const {
  SmallVector<SDNode *, 5> Built;
  if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
                                         DCI, DL, Built)) {
    for (SDNode *N : Built)
      DCI.AddToWorklist(N);
    return Folded;
  }

  return SDValue();
}

SDValue
TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q)
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - Q = floor(((2^W) - 1) / D)
  // where W is the width of the common type of N and D.
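  //
  // Illustrative instance (W = 32, D = 6): D0 = 3, K = 1,
  //   P = inv(3, 2^32) = 0xAAAAAAAB, Q = floor((2^32 - 1) / 6) = 0x2AAAAAAA,
  // so (seteq (urem N, 6), 0) becomes
  //   (setule (rotr (mul N, 0xAAAAAAAB), 1), 0x2AAAAAAA).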
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
  EVT ShSVT = ShVT.getScalarType();

  // If MUL is unavailable, we cannot proceed in any case.
  if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  bool ComparingWithAllZeros = true;
  bool AllComparisonsWithNonZerosAreTautological = true;
  bool HadTautologicalLanes = false;
  bool AllLanesAreTautological = true;
  bool HadEvenDivisor = false;
  bool AllDivisorsArePowerOfTwo = true;
  bool HadTautologicalInvertedLanes = false;
  SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts;

  auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (CDiv->isZero())
      return false;

    const APInt &D = CDiv->getAPIntValue();
    const APInt &Cmp = CCmp->getAPIntValue();

    ComparingWithAllZeros &= Cmp.isZero();

    // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
    // if C2 is not less than C1, the comparison is always false.
    // But we will only be able to produce the comparison that will give the
    // opposite tautological answer. So this lane would need to be fixed up.
    bool TautologicalInvertedLane = D.ule(Cmp);
    HadTautologicalInvertedLanes |= TautologicalInvertedLane;

    // If all lanes are tautological (either all divisors are ones, or the
    // divisor is not greater than the constant we are comparing with),
    // we will prefer to avoid the fold.
    bool TautologicalLane = D.isOne() || TautologicalInvertedLane;
    HadTautologicalLanes |= TautologicalLane;
    AllLanesAreTautological &= TautologicalLane;

    // If we are comparing with non-zero, we'll need to subtract said
    // comparison value from the LHS. But there is no point in doing that if
    // every lane where we are comparing with non-zero is tautological.
    if (!Cmp.isZero())
      AllComparisonsWithNonZerosAreTautological &= TautologicalLane;

    // Decompose D into D0 * 2^K.
    unsigned K = D.countr_zero();
    assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    // D is even if it has trailing zeros.
    HadEvenDivisor |= (K != 0);
    // D is a power-of-two if D0 is one.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOne();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");

    // Q = floor((2^W - 1) u/ D)
    // R = ((2^W - 1) u% D)
    APInt Q, R;
    APInt::udivrem(APInt::getAllOnes(W), D, Q, R);

    // If we are comparing with zero, then that comparison constant is okay,
    // else it may need to be one less than that.
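    // (E.g. for W = 32, D = 6: Q = 0x2AAAAAAA with R = 3, so comparing
    // against Cmp = 4 uses Q - 1.)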
    if (Cmp.ugt(R))
      Q -= 1;

    assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the lane is tautological the result can be constant-folded.
    if (TautologicalLane) {
      // Set P and K to bogus values so we can try to splat them.
      P = 0;
      K = -1;
      // And ensure that the comparison constant is tautological,
      // i.e. it will always compare true/false.
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
    return SDValue();

  // If all lanes are tautological, the result can be constant-folded.
  if (AllLanesAreTautological)
    return SDValue();

  // If this is a urem by a power-of-two, avoid the fold since it can be
  // best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, KVal, QVal;
  if (D.getOpcode() == ISD::BUILD_VECTOR) {
    if (HadTautologicalLanes) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(PAmts.size() == 1 && KAmts.size() == 1 && QAmts.size() == 1 &&
           "Expected matchBinaryPredicate to return one element for "
           "SPLAT_VECTORs");
    PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
    KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
    QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
  } else {
    PVal = PAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
    if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::SUB, VT))
      return SDValue(); // FIXME: Could/should use `ISD::ADD`?
    assert(CompTargetNode.getValueType() == N.getValueType() &&
           "Expecting that the types on LHS and RHS of comparisons match.");
    N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    // UREM: (rotr (mul N, P), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
    Created.push_back(Op0.getNode());
  }

  // UREM: (setule/setugt (rotr (mul N, P), K), Q)
  SDValue NewCC =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
  if (!HadTautologicalInvertedLanes)
    return NewCC;

  // If any lanes previously compared always-false, the NewCC will give an
  // always-true result for them, so we need to fix up those lanes.
  // Or the other way around for the inequality predicate.
  assert(VT.isVector() && "Can/should only get here for vectors.");
  Created.push_back(NewCC.getNode());

  // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
  // if C2 is not less than C1, the comparison is always false.
  // But we have produced the comparison that will give the
  // opposite tautological answer. So these lanes would need to be fixed up.
  SDValue TautologicalInvertedChannels =
      DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
  Created.push_back(TautologicalInvertedChannels.getNode());

  // NOTE: we avoid letting illegal types through even if we're before legalize
  // ops; legalization has a hard time producing good code for this.
  if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
    // If we have a vector select, let's replace the comparison results in the
    // affected lanes with the correct tautological result.
    SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
                                              DL, SETCCVT, SETCCVT);
    return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
                       Replacement, NewCC);
  }

  // Else, we can just invert the comparison result in the appropriate lanes.
  //
  // NOTE: see the note about VSELECT above.
  if (isOperationLegalOrCustom(ISD::XOR, SETCCVT))
    return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC,
                       TautologicalInvertedChannels);

  return SDValue(); // Don't know how to lower.
}

/// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE
/// where the divisor is constant and the comparison target is zero,
/// return a DAG expression that will generate the same comparison result
/// using only multiplications, additions and shifts/rotations.
/// Ref: "Hacker's Delight" 10-17.
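/// For instance (illustrative, W = 32), (seteq (srem N, 6), 0) becomes
///   (setule (rotr (add (mul N, 0xAAAAAAAB), 0x2AAAAAAA), 1), 0x2AAAAAAA).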
SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode,
                                        SDValue CompTargetNode,
                                        ISD::CondCode Cond,
                                        DAGCombinerInfo &DCI,
                                        const SDLoc &DL) const {
  SmallVector<SDNode *, 7> Built;
  if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond,
                                         DCI, DL, Built)) {
    assert(Built.size() <= 7 && "Max size prediction failed.");
    for (SDNode *N : Built)
      DCI.AddToWorklist(N);
    return Folded;
  }

  return SDValue();
}

SDValue
TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                                  SDValue CompTargetNode, ISD::CondCode Cond,
                                  DAGCombinerInfo &DCI, const SDLoc &DL,
                                  SmallVectorImpl<SDNode *> &Created) const {
  // Fold:
  //   (seteq/ne (srem N, D), 0)
  // To:
  //   (setule/ugt (rotr (add (mul N, P), A), K), Q)
  //
  // - D must be constant, with D = D0 * 2^K where D0 is odd
  // - P is the multiplicative inverse of D0 modulo 2^W
  // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^K)))
  // - Q = floor((2 * A) / (2^K))
  // where W is the width of the common type of N and D.
  assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) &&
         "Only applicable for (in)equality comparisons.");

  SelectionDAG &DAG = DCI.DAG;

  EVT VT = REMNode.getValueType();
  EVT SVT = VT.getScalarType();
  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout(), !DCI.isBeforeLegalize());
  EVT ShSVT = ShVT.getScalarType();

  // If we are after ops legalization, and MUL is unavailable, we cannot
  // proceed.
  if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::MUL, VT))
    return SDValue();

  // TODO: Could support comparing with non-zero too.
  ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode);
  if (!CompTarget || !CompTarget->isZero())
    return SDValue();

  bool HadIntMinDivisor = false;
  bool HadOneDivisor = false;
  bool AllDivisorsAreOnes = true;
  bool HadEvenDivisor = false;
  bool NeedToApplyOffset = false;
  bool AllDivisorsArePowerOfTwo = true;
  SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts;

  auto BuildSREMPattern = [&](ConstantSDNode *C) {
    // Division by 0 is UB. Leave it to be constant-folded elsewhere.
    if (C->isZero())
      return false;

    // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine.

    // WARNING: this fold is only valid for positive divisors!
    APInt D = C->getAPIntValue();
    if (D.isNegative())
      D.negate(); // `rem %X, -C` is equivalent to `rem %X, C`

    HadIntMinDivisor |= D.isMinSignedValue();

    // If all divisors are ones, we will prefer to avoid the fold.
    HadOneDivisor |= D.isOne();
    AllDivisorsAreOnes &= D.isOne();

    // Decompose D into D0 * 2^K.
    unsigned K = D.countr_zero();
    assert((!D.isOne() || (K == 0)) && "For divisor '1' we won't rotate.");
    APInt D0 = D.lshr(K);

    if (!D.isMinSignedValue()) {
      // D is even if it has trailing zeros; unless it's INT_MIN, in which case
      // we don't care about this lane in this fold, we'll special-handle it.
      HadEvenDivisor |= (K != 0);
    }

    // D is a power-of-two if D0 is one. This includes INT_MIN.
    // If all divisors are power-of-two, we will prefer to avoid the fold.
    AllDivisorsArePowerOfTwo &= D0.isOne();

    // P = inv(D0, 2^W)
    // 2^W requires W + 1 bits, so we have to extend and then truncate.
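    // (For instance, inv(3, 2^32) = 0xAAAAAAAB, since
    // 3 * 0xAAAAAAAB = 2^33 + 1 == 1 (mod 2^32).)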
    unsigned W = D.getBitWidth();
    APInt P = D0.zext(W + 1)
                  .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
                  .trunc(W);
    assert(!P.isZero() && "No multiplicative inverse!"); // unreachable
    assert((D0 * P).isOne() && "Multiplicative inverse basic check failed.");

    // A = floor((2^(W - 1) - 1) / D0) & -2^K
    APInt A = APInt::getSignedMaxValue(W).udiv(D0);
    A.clearLowBits(K);

    if (!D.isMinSignedValue()) {
      // If the divisor is INT_MIN, then we don't care about this lane in this
      // fold, we'll special-handle it.
      NeedToApplyOffset |= A != 0;
    }

    // Q = floor((2 * A) / (2^K))
    APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K));

    assert(APInt::getAllOnes(SVT.getSizeInBits()).ugt(A) &&
           "We are expecting that A is always less than all-ones for SVT");
    assert(APInt::getAllOnes(ShSVT.getSizeInBits()).ugt(K) &&
           "We are expecting that K is always less than all-ones for ShSVT");

    // If the divisor is 1 the result can be constant-folded. Likewise, we
    // don't care about INT_MIN lanes, those can be set to undef if appropriate.
    if (D.isOne()) {
      // Set P, A and K to bogus values so we can try to splat them.
      P = 0;
      A = -1;
      K = -1;

      // x ?% 1 == 0 <--> true <--> x u<= -1
      Q = -1;
    }

    PAmts.push_back(DAG.getConstant(P, DL, SVT));
    AAmts.push_back(DAG.getConstant(A, DL, SVT));
    KAmts.push_back(
        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
    QAmts.push_back(DAG.getConstant(Q, DL, SVT));
    return true;
  };

  SDValue N = REMNode.getOperand(0);
  SDValue D = REMNode.getOperand(1);

  // Collect the values from each element.
  if (!ISD::matchUnaryPredicate(D, BuildSREMPattern))
    return SDValue();

  // If this is a srem by one, avoid the fold since it can be constant-folded.
  if (AllDivisorsAreOnes)
    return SDValue();

  // If this is a srem by a power-of-two (including INT_MIN), avoid the fold
  // since it can be best implemented as a bit test.
  if (AllDivisorsArePowerOfTwo)
    return SDValue();

  SDValue PVal, AVal, KVal, QVal;
  if (D.getOpcode() == ISD::BUILD_VECTOR) {
    if (HadOneDivisor) {
      // Try to turn PAmts into a splat, since we don't care about the values
      // that are currently '0'. If we can't, just keep '0's.
      turnVectorIntoSplatVector(PAmts, isNullConstant);
      // Try to turn AAmts into a splat, since we don't care about the
      // values that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(AAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, SVT));
      // Try to turn KAmts into a splat, since we don't care about the values
      // that are currently '-1'. If we can't, change them to '0's.
      turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
                                DAG.getConstant(0, DL, ShSVT));
    }

    PVal = DAG.getBuildVector(VT, DL, PAmts);
    AVal = DAG.getBuildVector(VT, DL, AAmts);
    KVal = DAG.getBuildVector(ShVT, DL, KAmts);
    QVal = DAG.getBuildVector(VT, DL, QAmts);
  } else if (D.getOpcode() == ISD::SPLAT_VECTOR) {
    assert(PAmts.size() == 1 && AAmts.size() == 1 && KAmts.size() == 1 &&
           QAmts.size() == 1 &&
           "Expected matchUnaryPredicate to return one element for scalable "
           "vectors");
    PVal = DAG.getSplatVector(VT, DL, PAmts[0]);
    AVal = DAG.getSplatVector(VT, DL, AAmts[0]);
    KVal = DAG.getSplatVector(ShVT, DL, KAmts[0]);
    QVal = DAG.getSplatVector(VT, DL, QAmts[0]);
  } else {
    assert(isa<ConstantSDNode>(D) && "Expected a constant");
    PVal = PAmts[0];
    AVal = AAmts[0];
    KVal = KAmts[0];
    QVal = QAmts[0];
  }

  // (mul N, P)
  SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
  Created.push_back(Op0.getNode());

  if (NeedToApplyOffset) {
    // We need ADD to do this.
    if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ADD, VT))
      return SDValue();

    // (add (mul N, P), A)
    Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal);
    Created.push_back(Op0.getNode());
  }

  // Rotate right only if any divisor was even. We avoid rotates for all-odd
  // divisors as a performance improvement, since rotating by 0 is a no-op.
  if (HadEvenDivisor) {
    // We need ROTR to do this.
    if (!DCI.isBeforeLegalizeOps() && !isOperationLegalOrCustom(ISD::ROTR, VT))
      return SDValue();
    // SREM: (rotr (add (mul N, P), A), K)
    Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal);
    Created.push_back(Op0.getNode());
  }

  // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q)
  SDValue Fold =
      DAG.getSetCC(DL, SETCCVT, Op0, QVal,
                   ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));

  // If we didn't have lanes with an INT_MIN divisor, then we're done.
  if (!HadIntMinDivisor)
    return Fold;

  // That fold is only valid for positive divisors, which effectively means
  // it is invalid for INT_MIN divisors. So if we have such a lane,
  // we must fix up the results for said lanes.
  assert(VT.isVector() && "Can/should only get here for vectors.");

  // NOTE: we avoid letting illegal types through even if we're before legalize
  // ops; legalization has a hard time producing good code for the code that
  // follows.
  if (!isOperationLegalOrCustom(ISD::SETCC, SETCCVT) ||
      !isOperationLegalOrCustom(ISD::AND, VT) ||
      !isCondCodeLegalOrCustom(Cond, VT.getSimpleVT()) ||
      !isOperationLegalOrCustom(ISD::VSELECT, SETCCVT))
    return SDValue();

  Created.push_back(Fold.getNode());

  SDValue IntMin = DAG.getConstant(
      APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue IntMax = DAG.getConstant(
      APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT);
  SDValue Zero =
      DAG.getConstant(APInt::getZero(SVT.getScalarSizeInBits()), DL, VT);

  // Which lanes had INT_MIN divisors? The divisor is constant, so this gets
  // const-folded.
  SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ);
  Created.push_back(DivisorIsIntMin.getNode());

  // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0
  SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax);
  Created.push_back(Masked.getNode());
  SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond);
  Created.push_back(MaskedIsZero.getNode());

  // To produce the final result we need to blend two vectors: 'Fold' and
  // 'MaskedIsZero'. If the divisor for a channel was *NOT* INT_MIN, we pick
  // from 'Fold', else we pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is
  // constant-folded, the select can get lowered to a shuffle with a constant
  // mask.
  SDValue Blended = DAG.getNode(ISD::VSELECT, DL, SETCCVT, DivisorIsIntMin,
                                MaskedIsZero, Fold);

  return Blended;
}

bool TargetLowering::
verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const {
  if (!isa<ConstantSDNode>(Op.getOperand(0))) {
    DAG.getContext()->emitError("argument to '__builtin_return_address' must "
                                "be a constant integer");
    return true;
  }

  return false;
}

SDValue TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                         const DenormalMode &Mode) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);

  // This is specifically a check for the handling of denormal inputs, not the
  // result.
  if (Mode.Input == DenormalMode::PreserveSign ||
      Mode.Input == DenormalMode::PositiveZero) {
    // Test = X == 0.0
    return DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ);
  }

  // Test against denormal inputs to avoid a wrong estimate.
  //
  // Test = fabs(X) < SmallestNormal
  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
  APFloat SmallestNorm = APFloat::getSmallestNormalized(FltSem);
  SDValue NormC = DAG.getConstantFP(SmallestNorm, DL, VT);
  SDValue Fabs = DAG.getNode(ISD::FABS, DL, VT, Op);
  return DAG.getSetCC(DL, CCVT, Fabs, NormC, ISD::SETLT);
}

SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                             bool LegalOps, bool OptForSize,
                                             NegatibleCost &Cost,
                                             unsigned Depth) const {
  // fneg is removable even if it has multiple uses.
  if (Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::VP_FNEG) {
    Cost = NegatibleCost::Cheaper;
    return Op.getOperand(0);
  }

  // Don't recurse exponentially.
  if (Depth > SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Pre-increment recursion depth for use in recursive calls.
  ++Depth;
  const SDNodeFlags Flags = Op->getFlags();
  const TargetOptions &Options = DAG.getTarget().Options;
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();

  // Don't allow anything with multiple uses unless we know it is free.
  if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) {
    bool IsFreeExtend = Opcode == ISD::FP_EXTEND &&
                        isFPExtFree(VT, Op.getOperand(0).getValueType());
    if (!IsFreeExtend)
      return SDValue();
  }

  auto RemoveDeadNode = [&](SDValue N) {
    if (N && N.getNode()->use_empty())
      DAG.RemoveDeadNode(N.getNode());
  };

  SDLoc DL(Op);

  // Because getNegatedExpression can delete nodes we need a handle to keep
  // temporary nodes alive in case the recursion manages to create an identical
  // node.
  std::list<HandleSDNode> Handles;

  switch (Opcode) {
  case ISD::ConstantFP: {
    // Don't invert constant FP values after legalization unless the target
    // says the negated constant is legal.
    bool IsOpLegal =
        isOperationLegal(ISD::ConstantFP, VT) ||
        isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT,
                     OptForSize);

    if (LegalOps && !IsOpLegal)
      break;

    APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF();
    V.changeSign();
    SDValue CFP = DAG.getConstantFP(V, DL, VT);

    // If we already have the use of the negated floating constant, it is free
    // to negate it even if it has multiple uses.
    if (!Op.hasOneUse() && CFP.use_empty())
      break;
    Cost = NegatibleCost::Neutral;
    return CFP;
  }
  case ISD::BUILD_VECTOR: {
    // Only permit BUILD_VECTOR of constants.
    if (llvm::any_of(Op->op_values(), [&](SDValue N) {
          return !N.isUndef() && !isa<ConstantFPSDNode>(N);
        }))
      break;

    bool IsOpLegal =
        (isOperationLegal(ISD::ConstantFP, VT) &&
         isOperationLegal(ISD::BUILD_VECTOR, VT)) ||
        llvm::all_of(Op->op_values(), [&](SDValue N) {
          return N.isUndef() ||
                 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT,
                              OptForSize);
        });

    if (LegalOps && !IsOpLegal)
      break;

    SmallVector<SDValue, 4> Ops;
    for (SDValue C : Op->op_values()) {
      if (C.isUndef()) {
        Ops.push_back(C);
        continue;
      }
      APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF();
      V.changeSign();
      Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType()));
    }
    Cost = NegatibleCost::Neutral;
    return DAG.getBuildVector(VT, DL, Ops);
  }
  case ISD::FADD: {
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    // After operation legalization, it might not be legal to create new FSUBs.
    if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT))
      break;
    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);

    // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y)
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // Prevent this node from being deleted by the next call.
    if (NegX)
      Handles.emplace_back(NegX);

    // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X)
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // We're done with the handles.
    Handles.clear();

    // Negate X if its cost is less than or equal to the cost of Y.
    if (NegX && (CostX <= CostY)) {
      Cost = CostX;
      SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = CostY;
      SDValue N = DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }
  case ISD::FSUB: {
    // We can't turn -(A-B) into B-A when we honor signed zeros.
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
    // fold (fneg (fsub 0, Y)) -> Y
    if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true))
      if (C->isZero()) {
        Cost = NegatibleCost::Cheaper;
        return Y;
      }

    // fold (fneg (fsub X, Y)) -> (fsub Y, X)
    Cost = NegatibleCost::Neutral;
    return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags);
  }
  case ISD::FMUL:
  case ISD::FDIV: {
    SDValue X = Op.getOperand(0), Y = Op.getOperand(1);

    // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y)
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // Prevent this node from being deleted by the next call.
    if (NegX)
      Handles.emplace_back(NegX);

    // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y))
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // We're done with the handles.
    Handles.clear();

    // Negate X if its cost is less than or equal to the cost of Y.
    if (NegX && (CostX <= CostY)) {
      Cost = CostX;
      SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Ignore X * 2.0 because that is expected to be canonicalized to X + X.
    if (auto *C = isConstOrConstSplatFP(Op.getOperand(1)))
      if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL)
        break;

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = CostY;
      SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros())
      break;

    SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2);
    NegatibleCost CostZ = NegatibleCost::Expensive;
    SDValue NegZ =
        getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth);
    // Give up if we fail to negate Z.
    if (!NegZ)
      break;

    // Prevent this node from being deleted by the next two calls.
    Handles.emplace_back(NegZ);

    // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z))
    NegatibleCost CostX = NegatibleCost::Expensive;
    SDValue NegX =
        getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth);
    // Prevent this node from being deleted by the next call.
    if (NegX)
      Handles.emplace_back(NegX);

    // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z))
    NegatibleCost CostY = NegatibleCost::Expensive;
    SDValue NegY =
        getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth);

    // We're done with the handles.
    Handles.clear();

    // Negate X if its cost is less than or equal to the cost of Y.
    if (NegX && (CostX <= CostY)) {
      Cost = std::min(CostX, CostZ);
      SDValue N = DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags);
      if (NegY != N)
        RemoveDeadNode(NegY);
      return N;
    }

    // Negate Y if it is not expensive.
    if (NegY) {
      Cost = std::min(CostY, CostZ);
      SDValue N = DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags);
      if (NegX != N)
        RemoveDeadNode(NegX);
      return N;
    }
    break;
  }

  case ISD::FP_EXTEND:
  case ISD::FSIN:
    if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
                                            OptForSize, Cost, Depth))
      return DAG.getNode(Opcode, DL, VT, NegV);
    break;
  case ISD::FP_ROUND:
    if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps,
                                            OptForSize, Cost, Depth))
      return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1));
    break;
  case ISD::SELECT:
  case ISD::VSELECT: {
    // fold (fneg (select C, LHS, RHS)) -> (select C, (fneg LHS), (fneg RHS))
    // iff at least one cost is cheaper and the other is neutral/cheaper
    SDValue LHS = Op.getOperand(1);
    NegatibleCost CostLHS = NegatibleCost::Expensive;
    SDValue NegLHS =
        getNegatedExpression(LHS, DAG, LegalOps, OptForSize, CostLHS, Depth);
    if (!NegLHS || CostLHS > NegatibleCost::Neutral) {
      RemoveDeadNode(NegLHS);
      break;
    }

    // Prevent this node from being deleted by the next call.
    Handles.emplace_back(NegLHS);

    SDValue RHS = Op.getOperand(2);
    NegatibleCost CostRHS = NegatibleCost::Expensive;
    SDValue NegRHS =
        getNegatedExpression(RHS, DAG, LegalOps, OptForSize, CostRHS, Depth);

    // We're done with the handles.
    Handles.clear();

    if (!NegRHS || CostRHS > NegatibleCost::Neutral ||
        (CostLHS != NegatibleCost::Cheaper &&
         CostRHS != NegatibleCost::Cheaper)) {
      RemoveDeadNode(NegLHS);
      RemoveDeadNode(NegRHS);
      break;
    }

    Cost = std::min(CostLHS, CostRHS);
    return DAG.getSelect(DL, VT, Op.getOperand(0), NegLHS, NegRHS);
  }
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Legalization Utilities
//===----------------------------------------------------------------------===//

bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl,
                                    SDValue LHS, SDValue RHS,
                                    SmallVectorImpl<SDValue> &Result,
                                    EVT HiLoVT, SelectionDAG &DAG,
                                    MulExpansionKind Kind, SDValue LL,
                                    SDValue LH, SDValue RL, SDValue RH) const {
  assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI ||
         Opcode == ISD::SMUL_LOHI);

  bool HasMULHS = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHS, HiLoVT);
  bool HasMULHU = (Kind == MulExpansionKind::Always) ||
                  isOperationLegalOrCustom(ISD::MULHU, HiLoVT);
  bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT);
  bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) ||
                      isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT);

  if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI)
    return false;

  unsigned OuterBitSize = VT.getScalarSizeInBits();
  unsigned InnerBitSize = HiLoVT.getScalarSizeInBits();

  // LL, LH, RL, and RH must be either all NULL or all set to a value.
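  // With I = InnerBitSize, the expansion computes:
  //   LHS * RHS = (LH * 2^I + LL) * (RH * 2^I + RL)
  //             = LL*RL + (LL*RH + LH*RL) * 2^I + LH*RH * 2^(2*I)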
  assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) ||
         (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode()));

  SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT);
  auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi,
                          bool Signed) -> bool {
    if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) {
      Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R);
      Hi = SDValue(Lo.getNode(), 1);
      return true;
    }
    if ((Signed && HasMULHS) || (!Signed && HasMULHU)) {
      Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R);
      Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R);
      return true;
    }
    return false;
  };

  SDValue Lo, Hi;

  if (!LL.getNode() && !RL.getNode() &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS);
    RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS);
  }

  if (!LL.getNode())
    return false;

  APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize);
  if (DAG.MaskedValueIsZero(LHS, HighMask) &&
      DAG.MaskedValueIsZero(RHS, HighMask)) {
    // The inputs are both zero-extended.
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      if (Opcode != ISD::MUL) {
        SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
        Result.push_back(Zero);
        Result.push_back(Zero);
      }
      return true;
    }
  }

  if (!VT.isVector() && Opcode == ISD::MUL &&
      DAG.ComputeMaxSignificantBits(LHS) <= InnerBitSize &&
      DAG.ComputeMaxSignificantBits(RHS) <= InnerBitSize) {
    // The input values are both sign-extended.
    // TODO non-MUL case?
    if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) {
      Result.push_back(Lo);
      Result.push_back(Hi);
      return true;
    }
  }

  unsigned ShiftAmount = OuterBitSize - InnerBitSize;
  SDValue Shift = DAG.getShiftAmountConstant(ShiftAmount, VT, dl);

  if (!LH.getNode() && !RH.getNode() &&
      isOperationLegalOrCustom(ISD::SRL, VT) &&
      isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) {
    LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift);
    LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH);
    RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift);
    RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH);
  }

  if (!LH.getNode())
    return false;

  if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false))
    return false;

  Result.push_back(Lo);

  if (Opcode == ISD::MUL) {
    RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH);
    LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH);
    Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH);
    Result.push_back(Hi);
    return true;
  }

  // Compute the full width result.
  auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue {
    Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo);
    Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
    Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift);
    return DAG.getNode(ISD::OR, dl, VT, Lo, Hi);
  };

  SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi);
  if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false))
    return false;

  // This is effectively the add part of a multiply-add of half-sized operands,
  // so it cannot overflow.
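  // (Next < 2^InnerBitSize and Merge(Lo, Hi) <= (2^InnerBitSize - 1)^2, so
  // the sum is at most 2^(2*InnerBitSize) - 2^InnerBitSize and fits in VT.)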
  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false))
    return false;

  SDValue Zero = DAG.getConstant(0, dl, HiLoVT);
  EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) &&
                  isOperationLegalOrCustom(ISD::ADDE, VT));
  if (UseGlue)
    Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next,
                       Merge(Lo, Hi));
  else
    Next = DAG.getNode(ISD::UADDO_CARRY, dl, DAG.getVTList(VT, BoolType), Next,
                       Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType));

  SDValue Carry = Next.getValue(1);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);

  if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI))
    return false;

  if (UseGlue)
    Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero,
                     Carry);
  else
    Hi = DAG.getNode(ISD::UADDO_CARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi,
                     Zero, Carry);

  Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi));

  if (Opcode == ISD::SMUL_LOHI) {
    SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                                  DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL));
    Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT);

    NextSub = DAG.getNode(ISD::SUB, dl, VT, Next,
                          DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL));
    Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT);
  }

  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift);
  Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next));
  return true;
}

bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                               SelectionDAG &DAG, MulExpansionKind Kind,
                               SDValue LL, SDValue LH, SDValue RL,
                               SDValue RH) const {
  SmallVector<SDValue, 2> Result;
  bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N),
                           N->getOperand(0), N->getOperand(1), Result, HiLoVT,
                           DAG, Kind, LL, LH, RL, RH);
  if (Ok) {
    assert(Result.size() == 2);
    Lo = Result[0];
    Hi = Result[1];
  }
  return Ok;
}

// Optimize unsigned division or remainder by constants for types twice as
// large as a legal VT.
//
// If (1 << (BitWidth / 2)) % Constant == 1, then the remainder
// can be computed as:
//   Sum += __builtin_uadd_overflow(Lo, High, &Sum);
//   Remainder = Sum % Constant
// This is based on "Remainder by Summing Digits" from Hacker's Delight.
//
// For division, we can compute the remainder using the algorithm described
// above, subtract it from the dividend to get an exact multiple of Constant.
// Then multiply that exact multiple by the multiplicative inverse modulo
// (1 << BitWidth) to get the quotient.
//
// If Constant is even, we can shift right the dividend and the divisor by the
// number of trailing zeros in Constant before applying the remainder algorithm.
// If we're after the quotient, we can subtract this value from the shifted
// dividend and multiply by the multiplicative inverse of the shifted divisor.
// If we want the remainder, we shift the value left by the number of trailing
// zeros and add the bits that were shifted out of the dividend.
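//
// A worked instance (illustrative): for an i64 udiv by 3 split into i32
// halves, (1 << 32) % 3 == 1, so Remainder = (Lo + Hi + carry) % 3 and the
// quotient is (N - Remainder) * 0xAAAAAAAAAAAAAAAB, using the multiplicative
// inverse of 3 modulo 2^64.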
bool TargetLowering::expandDIVREMByConstant(SDNode *N,
                                            SmallVectorImpl<SDValue> &Result,
                                            EVT HiLoVT, SelectionDAG &DAG,
                                            SDValue LL, SDValue LH) const {
  unsigned Opcode = N->getOpcode();
  EVT VT = N->getValueType(0);

  // TODO: Support signed division/remainder.
  if (Opcode == ISD::SREM || Opcode == ISD::SDIV || Opcode == ISD::SDIVREM)
    return false;
  assert(
      (Opcode == ISD::UREM || Opcode == ISD::UDIV || Opcode == ISD::UDIVREM) &&
      "Unexpected opcode");

  auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!CN)
    return false;

  APInt Divisor = CN->getAPIntValue();
  unsigned BitWidth = Divisor.getBitWidth();
  unsigned HBitWidth = BitWidth / 2;
  assert(VT.getScalarSizeInBits() == BitWidth &&
         HiLoVT.getScalarSizeInBits() == HBitWidth && "Unexpected VTs");

  // The divisor needs to be less than (1 << HBitWidth).
  APInt HalfMaxPlus1 = APInt::getOneBitSet(BitWidth, HBitWidth);
  if (Divisor.uge(HalfMaxPlus1))
    return false;

  // We depend on the UREM by constant optimization in DAGCombiner that
  // requires a high multiply.
  if (!isOperationLegalOrCustom(ISD::MULHU, HiLoVT) &&
      !isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT))
    return false;

  // Don't expand if optimizing for size.
  if (DAG.shouldOptForSize())
    return false;

  // Early out for 0 or 1 divisors.
  if (Divisor.ule(1))
    return false;

  // If the divisor is even, shift it until it becomes odd.
  unsigned TrailingZeros = 0;
  if (!Divisor[0]) {
    TrailingZeros = Divisor.countr_zero();
    Divisor.lshrInPlace(TrailingZeros);
  }

  SDLoc dl(N);
  SDValue Sum;
  SDValue PartialRem;

  // If (1 << HBitWidth) % divisor == 1, we can add the two halves together and
  // then add in the carry.
  // TODO: If we can't split it in half, we might be able to split into 3 or
  // more pieces using a smaller bit width.
  if (HalfMaxPlus1.urem(Divisor).isOne()) {
    assert(!LL == !LH && "Expected both input halves or no input halves!");
    if (!LL)
      std::tie(LL, LH) = DAG.SplitScalar(N->getOperand(0), dl, HiLoVT, HiLoVT);

    // Shift the input by the number of TrailingZeros in the divisor. The
    // shifted out bits will be added to the remainder later.
    if (TrailingZeros) {
      // Save the shifted off bits if we need the remainder.
      if (Opcode != ISD::UDIV) {
        APInt Mask = APInt::getLowBitsSet(HBitWidth, TrailingZeros);
        PartialRem = DAG.getNode(ISD::AND, dl, HiLoVT, LL,
                                 DAG.getConstant(Mask, dl, HiLoVT));
      }

      LL = DAG.getNode(
          ISD::OR, dl, HiLoVT,
          DAG.getNode(ISD::SRL, dl, HiLoVT, LL,
                      DAG.getShiftAmountConstant(TrailingZeros, HiLoVT, dl)),
          DAG.getNode(ISD::SHL, dl, HiLoVT, LH,
                      DAG.getShiftAmountConstant(HBitWidth - TrailingZeros,
                                                 HiLoVT, dl)));
      LH = DAG.getNode(ISD::SRL, dl, HiLoVT, LH,
                       DAG.getShiftAmountConstant(TrailingZeros, HiLoVT, dl));
    }

    // Use uaddo_carry if we can, otherwise use a compare to detect overflow.
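    // (With HBitWidth = 32: LL + LH <= 2^33 - 2, so folding the carry bit
    // back in cannot overflow again, and Sum == LL + LH + carry == N
    // (mod Divisor) because 2^32 == 1 (mod Divisor).)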
    EVT SetCCType =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), HiLoVT);
    if (isOperationLegalOrCustom(ISD::UADDO_CARRY, HiLoVT)) {
      SDVTList VTList = DAG.getVTList(HiLoVT, SetCCType);
      Sum = DAG.getNode(ISD::UADDO, dl, VTList, LL, LH);
      Sum = DAG.getNode(ISD::UADDO_CARRY, dl, VTList, Sum,
                        DAG.getConstant(0, dl, HiLoVT), Sum.getValue(1));
    } else {
      Sum = DAG.getNode(ISD::ADD, dl, HiLoVT, LL, LH);
      SDValue Carry = DAG.getSetCC(dl, SetCCType, Sum, LL, ISD::SETULT);
      // If the boolean for the target is 0 or 1, we can add the setcc result
      // directly.
      if (getBooleanContents(HiLoVT) ==
          TargetLoweringBase::ZeroOrOneBooleanContent)
        Carry = DAG.getZExtOrTrunc(Carry, dl, HiLoVT);
      else
        Carry = DAG.getSelect(dl, HiLoVT, Carry, DAG.getConstant(1, dl, HiLoVT),
                              DAG.getConstant(0, dl, HiLoVT));
      Sum = DAG.getNode(ISD::ADD, dl, HiLoVT, Sum, Carry);
    }
  }

  // If we didn't find a sum, we can't do the expansion.
  if (!Sum)
    return false;

  // Perform a HiLoVT urem on the Sum using the truncated divisor.
  SDValue RemL =
      DAG.getNode(ISD::UREM, dl, HiLoVT, Sum,
                  DAG.getConstant(Divisor.trunc(HBitWidth), dl, HiLoVT));
  SDValue RemH = DAG.getConstant(0, dl, HiLoVT);

  if (Opcode != ISD::UREM) {
    // Subtract the remainder from the shifted dividend.
    SDValue Dividend = DAG.getNode(ISD::BUILD_PAIR, dl, VT, LL, LH);
    SDValue Rem = DAG.getNode(ISD::BUILD_PAIR, dl, VT, RemL, RemH);

    Dividend = DAG.getNode(ISD::SUB, dl, VT, Dividend, Rem);

    // Multiply by the multiplicative inverse of the divisor modulo
    // (1 << BitWidth).
    APInt Mod = APInt::getSignedMinValue(BitWidth + 1);
    APInt MulFactor = Divisor.zext(BitWidth + 1);
    MulFactor = MulFactor.multiplicativeInverse(Mod);
    MulFactor = MulFactor.trunc(BitWidth);

    SDValue Quotient = DAG.getNode(ISD::MUL, dl, VT, Dividend,
                                   DAG.getConstant(MulFactor, dl, VT));

    // Split the quotient into low and high parts.
    SDValue QuotL, QuotH;
    std::tie(QuotL, QuotH) = DAG.SplitScalar(Quotient, dl, HiLoVT, HiLoVT);
    Result.push_back(QuotL);
    Result.push_back(QuotH);
  }

  if (Opcode != ISD::UDIV) {
    // If we shifted the input, shift the remainder left and add the bits we
    // shifted off the input.
    if (TrailingZeros) {
      APInt Mask = APInt::getLowBitsSet(HBitWidth, TrailingZeros);
      RemL = DAG.getNode(ISD::SHL, dl, HiLoVT, RemL,
                         DAG.getShiftAmountConstant(TrailingZeros, HiLoVT, dl));
      RemL = DAG.getNode(ISD::ADD, dl, HiLoVT, RemL, PartialRem);
    }
    Result.push_back(RemL);
    Result.push_back(DAG.getConstant(0, dl, HiLoVT));
  }

  return true;
}

// Check that (every element of) Z is undef or not an exact multiple of BW.
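// (For example, with BW = 32 a constant amount of 40 qualifies since
// 40 % 32 == 8, while 64 does not and must take the generic path.)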
7537 static bool isNonZeroModBitWidthOrUndef(SDValue Z, unsigned BW) { 7538 return ISD::matchUnaryPredicate( 7539 Z, 7540 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 7541 true); 7542 } 7543 7544 static SDValue expandVPFunnelShift(SDNode *Node, SelectionDAG &DAG) { 7545 EVT VT = Node->getValueType(0); 7546 SDValue ShX, ShY; 7547 SDValue ShAmt, InvShAmt; 7548 SDValue X = Node->getOperand(0); 7549 SDValue Y = Node->getOperand(1); 7550 SDValue Z = Node->getOperand(2); 7551 SDValue Mask = Node->getOperand(3); 7552 SDValue VL = Node->getOperand(4); 7553 7554 unsigned BW = VT.getScalarSizeInBits(); 7555 bool IsFSHL = Node->getOpcode() == ISD::VP_FSHL; 7556 SDLoc DL(SDValue(Node, 0)); 7557 7558 EVT ShVT = Z.getValueType(); 7559 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 7560 // fshl: X << C | Y >> (BW - C) 7561 // fshr: X << (BW - C) | Y >> C 7562 // where C = Z % BW is not zero 7563 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7564 ShAmt = DAG.getNode(ISD::VP_UREM, DL, ShVT, Z, BitWidthC, Mask, VL); 7565 InvShAmt = DAG.getNode(ISD::VP_SUB, DL, ShVT, BitWidthC, ShAmt, Mask, VL); 7566 ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt, Mask, 7567 VL); 7568 ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt, Mask, 7569 VL); 7570 } else { 7571 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 7572 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 7573 SDValue BitMask = DAG.getConstant(BW - 1, DL, ShVT); 7574 if (isPowerOf2_32(BW)) { 7575 // Z % BW -> Z & (BW - 1) 7576 ShAmt = DAG.getNode(ISD::VP_AND, DL, ShVT, Z, BitMask, Mask, VL); 7577 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 7578 SDValue NotZ = DAG.getNode(ISD::VP_XOR, DL, ShVT, Z, 7579 DAG.getAllOnesConstant(DL, ShVT), Mask, VL); 7580 InvShAmt = DAG.getNode(ISD::VP_AND, DL, ShVT, NotZ, BitMask, Mask, VL); 7581 } else { 7582 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7583 ShAmt = DAG.getNode(ISD::VP_UREM, DL, ShVT, Z, BitWidthC, Mask, VL); 7584 InvShAmt = DAG.getNode(ISD::VP_SUB, DL, ShVT, BitMask, ShAmt, Mask, VL); 7585 } 7586 7587 SDValue One = DAG.getConstant(1, DL, ShVT); 7588 if (IsFSHL) { 7589 ShX = DAG.getNode(ISD::VP_SHL, DL, VT, X, ShAmt, Mask, VL); 7590 SDValue ShY1 = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, One, Mask, VL); 7591 ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, ShY1, InvShAmt, Mask, VL); 7592 } else { 7593 SDValue ShX1 = DAG.getNode(ISD::VP_SHL, DL, VT, X, One, Mask, VL); 7594 ShX = DAG.getNode(ISD::VP_SHL, DL, VT, ShX1, InvShAmt, Mask, VL); 7595 ShY = DAG.getNode(ISD::VP_LSHR, DL, VT, Y, ShAmt, Mask, VL); 7596 } 7597 } 7598 return DAG.getNode(ISD::VP_OR, DL, VT, ShX, ShY, Mask, VL); 7599 } 7600 7601 SDValue TargetLowering::expandFunnelShift(SDNode *Node, 7602 SelectionDAG &DAG) const { 7603 if (Node->isVPOpcode()) 7604 return expandVPFunnelShift(Node, DAG); 7605 7606 EVT VT = Node->getValueType(0); 7607 7608 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 7609 !isOperationLegalOrCustom(ISD::SRL, VT) || 7610 !isOperationLegalOrCustom(ISD::SUB, VT) || 7611 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 7612 return SDValue(); 7613 7614 SDValue X = Node->getOperand(0); 7615 SDValue Y = Node->getOperand(1); 7616 SDValue Z = Node->getOperand(2); 7617 7618 unsigned BW = VT.getScalarSizeInBits(); 7619 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 7620 SDLoc DL(SDValue(Node, 0)); 7621 7622 EVT ShVT = Z.getValueType(); 7623 7624 // If a funnel shift in the other direction is more supported, use it. 7625 unsigned RevOpcode = IsFSHL ? 
ISD::FSHR : ISD::FSHL; 7626 if (!isOperationLegalOrCustom(Node->getOpcode(), VT) && 7627 isOperationLegalOrCustom(RevOpcode, VT) && isPowerOf2_32(BW)) { 7628 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 7629 // fshl X, Y, Z -> fshr X, Y, -Z 7630 // fshr X, Y, Z -> fshl X, Y, -Z 7631 SDValue Zero = DAG.getConstant(0, DL, ShVT); 7632 Z = DAG.getNode(ISD::SUB, DL, VT, Zero, Z); 7633 } else { 7634 // fshl X, Y, Z -> fshr (srl X, 1), (fshr X, Y, 1), ~Z 7635 // fshr X, Y, Z -> fshl (fshl X, Y, 1), (shl Y, 1), ~Z 7636 SDValue One = DAG.getConstant(1, DL, ShVT); 7637 if (IsFSHL) { 7638 Y = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 7639 X = DAG.getNode(ISD::SRL, DL, VT, X, One); 7640 } else { 7641 X = DAG.getNode(RevOpcode, DL, VT, X, Y, One); 7642 Y = DAG.getNode(ISD::SHL, DL, VT, Y, One); 7643 } 7644 Z = DAG.getNOT(DL, Z, ShVT); 7645 } 7646 return DAG.getNode(RevOpcode, DL, VT, X, Y, Z); 7647 } 7648 7649 SDValue ShX, ShY; 7650 SDValue ShAmt, InvShAmt; 7651 if (isNonZeroModBitWidthOrUndef(Z, BW)) { 7652 // fshl: X << C | Y >> (BW - C) 7653 // fshr: X << (BW - C) | Y >> C 7654 // where C = Z % BW is not zero 7655 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7656 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 7657 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 7658 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 7659 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 7660 } else { 7661 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 7662 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 7663 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 7664 if (isPowerOf2_32(BW)) { 7665 // Z % BW -> Z & (BW - 1) 7666 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 7667 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 7668 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 7669 } else { 7670 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 7671 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 7672 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 7673 } 7674 7675 SDValue One = DAG.getConstant(1, DL, ShVT); 7676 if (IsFSHL) { 7677 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 7678 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 7679 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 7680 } else { 7681 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 7682 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 7683 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 7684 } 7685 } 7686 return DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 7687 } 7688 7689 // TODO: Merge with expandFunnelShift. 7690 SDValue TargetLowering::expandROT(SDNode *Node, bool AllowVectorOps, 7691 SelectionDAG &DAG) const { 7692 EVT VT = Node->getValueType(0); 7693 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 7694 bool IsLeft = Node->getOpcode() == ISD::ROTL; 7695 SDValue Op0 = Node->getOperand(0); 7696 SDValue Op1 = Node->getOperand(1); 7697 SDLoc DL(SDValue(Node, 0)); 7698 7699 EVT ShVT = Op1.getValueType(); 7700 SDValue Zero = DAG.getConstant(0, DL, ShVT); 7701 7702 // If a rotate in the other direction is more supported, use it. 7703 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL;
7704   if (!isOperationLegalOrCustom(Node->getOpcode(), VT) &&
7705       isOperationLegalOrCustom(RevRot, VT) && isPowerOf2_32(EltSizeInBits)) {
7706     SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
7707     return DAG.getNode(RevRot, DL, VT, Op0, Sub);
7708   }
7709
7710   if (!AllowVectorOps && VT.isVector() &&
7711       (!isOperationLegalOrCustom(ISD::SHL, VT) ||
7712        !isOperationLegalOrCustom(ISD::SRL, VT) ||
7713        !isOperationLegalOrCustom(ISD::SUB, VT) ||
7714        !isOperationLegalOrCustomOrPromote(ISD::OR, VT) ||
7715        !isOperationLegalOrCustomOrPromote(ISD::AND, VT)))
7716     return SDValue();
7717
7718   unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL;
7719   unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL;
7720   SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT);
7721   SDValue ShVal;
7722   SDValue HsVal;
7723   if (isPowerOf2_32(EltSizeInBits)) {
7724     // (rotl x, c) -> x << (c & (w - 1)) | x >> (-c & (w - 1))
7725     // (rotr x, c) -> x >> (c & (w - 1)) | x << (-c & (w - 1))
7726     SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1);
7727     SDValue ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC);
7728     ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
7729     SDValue HsAmt = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC);
7730     HsVal = DAG.getNode(HsOpc, DL, VT, Op0, HsAmt);
7731   } else {
7732     // (rotl x, c) -> x << (c % w) | x >> 1 >> (w - 1 - (c % w))
7733     // (rotr x, c) -> x >> (c % w) | x << 1 << (w - 1 - (c % w))
7734     SDValue BitWidthC = DAG.getConstant(EltSizeInBits, DL, ShVT);
7735     SDValue ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Op1, BitWidthC);
7736     ShVal = DAG.getNode(ShOpc, DL, VT, Op0, ShAmt);
7737     SDValue HsAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthMinusOneC, ShAmt);
7738     SDValue One = DAG.getConstant(1, DL, ShVT);
7739     HsVal =
7740         DAG.getNode(HsOpc, DL, VT, DAG.getNode(HsOpc, DL, VT, Op0, One), HsAmt);
7741   }
7742   return DAG.getNode(ISD::OR, DL, VT, ShVal, HsVal);
7743 }
7744
7745 void TargetLowering::expandShiftParts(SDNode *Node, SDValue &Lo, SDValue &Hi,
7746                                       SelectionDAG &DAG) const {
7747   assert(Node->getNumOperands() == 3 && "Not a double-shift!");
7748   EVT VT = Node->getValueType(0);
7749   unsigned VTBits = VT.getScalarSizeInBits();
7750   assert(isPowerOf2_32(VTBits) && "Power-of-two integer type expected");
7751
7752   bool IsSHL = Node->getOpcode() == ISD::SHL_PARTS;
7753   bool IsSRA = Node->getOpcode() == ISD::SRA_PARTS;
7754   SDValue ShOpLo = Node->getOperand(0);
7755   SDValue ShOpHi = Node->getOperand(1);
7756   SDValue ShAmt = Node->getOperand(2);
7757   EVT ShAmtVT = ShAmt.getValueType();
7758   EVT ShAmtCCVT =
7759       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ShAmtVT);
7760   SDLoc dl(Node);
7761
7762   // ISD::FSHL and ISD::FSHR have defined overflow behavior, but ISD::SHL and
7763   // ISD::SRA/SRL nodes don't. Insert an AND to be safe; it's usually optimized
7764   // away during isel.
7765   SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
7766                                   DAG.getConstant(VTBits - 1, dl, ShAmtVT));
7767   SDValue Tmp1 = IsSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
7768                                      DAG.getConstant(VTBits - 1, dl, ShAmtVT))
7769                        : DAG.getConstant(0, dl, VT);
7770
7771   SDValue Tmp2, Tmp3;
7772   if (IsSHL) {
7773     Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
7774     Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
7775   } else {
7776     Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
7777     Tmp3 = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
7778   }
7779
7780   // If the shift amount is larger than or equal to the width of a part, we
7781   // don't use the result from the FSHL/FSHR. Insert a test and select the
7782   // appropriate values for large shift amounts.
7783   SDValue AndNode = DAG.getNode(ISD::AND, dl, ShAmtVT, ShAmt,
7784                                 DAG.getConstant(VTBits, dl, ShAmtVT));
7785   SDValue Cond = DAG.getSetCC(dl, ShAmtCCVT, AndNode,
7786                               DAG.getConstant(0, dl, ShAmtVT), ISD::SETNE);
7787
7788   if (IsSHL) {
7789     Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
7790     Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
7791   } else {
7792     Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
7793     Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
7794   }
7795 }
7796
7797 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result,
7798                                       SelectionDAG &DAG) const {
7799   unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
7800   SDValue Src = Node->getOperand(OpNo);
7801   EVT SrcVT = Src.getValueType();
7802   EVT DstVT = Node->getValueType(0);
7803   SDLoc dl(SDValue(Node, 0));
7804
7805   // FIXME: Only f32 to i64 conversions are supported.
7806   if (SrcVT != MVT::f32 || DstVT != MVT::i64)
7807     return false;
7808
7809   if (Node->isStrictFPOpcode())
7810     // When a NaN is converted to an integer a trap is allowed. We can't
7811     // use this expansion here because it would eliminate that trap. Other
7812     // traps are also allowed and cannot be eliminated. See
7813     // IEEE 754-2008 sec 5.8.
7814     return false;
7815
7816   // Expand f32 -> i64 conversion
7817   // This algorithm comes from compiler-rt's implementation of fixsfdi:
7818   // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/builtins/fixsfdi.c
7819   unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
7820   EVT IntVT = SrcVT.changeTypeToInteger();
7821   EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());
7822
7823   SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
7824   SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
7825   SDValue Bias = DAG.getConstant(127, dl, IntVT);
7826   SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
7827   SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
7828   SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
7829
7830   SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);
7831
7832   SDValue ExponentBits = DAG.getNode(
7833       ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
7834       DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
7835   SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
7836
7837   SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
7838                              DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
7839                              DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
7840   Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);
7841
7842   SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
7843                           DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
7844                           DAG.getConstant(0x00800000, dl, IntVT));
7845
7846   R = DAG.getZExtOrTrunc(R, dl, DstVT);
7847
7848   R = DAG.getSelectCC(
7849       dl, Exponent, ExponentLoBit,
7850       DAG.getNode(ISD::SHL, dl, DstVT, R,
7851                   DAG.getZExtOrTrunc(
7852                       DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
7853                       dl, IntShVT)),
7854       DAG.getNode(ISD::SRL, dl, DstVT, R,
7855                   DAG.getZExtOrTrunc(
7856                       DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
7857                       dl, IntShVT)),
7858       ISD::SETGT);
7859
7860   SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
7861                             DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);
7862
7863   Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
7864                            DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
7865   return true;
7866 }
7867
7868 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
7869                                       SDValue &Chain,
7870                                       SelectionDAG &DAG) const {
7871   SDLoc dl(SDValue(Node, 0));
7872   unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
7873   SDValue Src = Node->getOperand(OpNo);
7874
7875   EVT SrcVT = Src.getValueType();
7876   EVT DstVT = Node->getValueType(0);
7877   EVT SetCCVT =
7878       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7879   EVT DstSetCCVT =
7880       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7881
7882   // Only expand vector types if we have the appropriate vector bit operations.
7883   unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
7884                                                    ISD::FP_TO_SINT;
7885   if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
7886                            !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
7887     return false;
7888
7889   // If the maximum float value is smaller than the signed integer range,
7890   // the destination signmask can't be represented by the float, so we can
7891   // just use FP_TO_SINT directly.
7892   const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT);
7893   APFloat APF(APFSem, APInt::getZero(SrcVT.getScalarSizeInBits()));
7894   APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits());
7895   if (APFloat::opOverflow &
7896       APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) {
7897     if (Node->isStrictFPOpcode()) {
7898       Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other },
7899                            { Node->getOperand(0), Src });
7900       Chain = Result.getValue(1);
7901     } else
7902       Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src);
7903     return true;
7904   }
7905
7906   // Don't expand it if there isn't a cheap FSUB instruction.
7907   if (!isOperationLegalOrCustom(
7908           Node->isStrictFPOpcode() ? ISD::STRICT_FSUB : ISD::FSUB, SrcVT))
7909     return false;
7910
7911   SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7912   SDValue Sel;
7913
7914   if (Node->isStrictFPOpcode()) {
7915     Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7916                        Node->getOperand(0), /*IsSignaling*/ true);
7917     Chain = Sel.getValue(1);
7918   } else {
7919     Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT);
7920   }
7921
7922   bool Strict = Node->isStrictFPOpcode() ||
7923                 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false);
7924
7925   if (Strict) {
7926     // Expand based on the maximum range of FP_TO_SINT; if the value exceeds
7927     // the signmask, offset it first (the result should be fully representable).
7928     // Sel = Src < 0x8000000000000000
7929     // FltOfs = select Sel, 0, 0x8000000000000000
7930     // IntOfs = select Sel, 0, 0x8000000000000000
7931     // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7932
7933     // TODO: Should any fast-math-flags be set for the FSUB?
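    // Worked example (illustrative, f64 -> u64): for Src = 2^63 + 2^52, which
    // is exactly representable, Sel is false, so FltOfs = 2^63 and
    // IntOfs = 0x8000000000000000; fp_to_sint(Src - 2^63) = 2^52, and
    // 2^52 ^ 0x8000000000000000 == 2^63 + 2^52, the desired unsigned result.
    // For Src < 2^63 both offsets are zero and this reduces to fp_to_sint(Src).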
7934 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 7935 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 7936 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7937 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 7938 DAG.getConstant(0, dl, DstVT), 7939 DAG.getConstant(SignMask, dl, DstVT)); 7940 SDValue SInt; 7941 if (Node->isStrictFPOpcode()) { 7942 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 7943 { Chain, Src, FltOfs }); 7944 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 7945 { Val.getValue(1), Val }); 7946 Chain = SInt.getValue(1); 7947 } else { 7948 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 7949 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 7950 } 7951 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 7952 } else { 7953 // Expand based on maximum range of FP_TO_SINT: 7954 // True = fp_to_sint(Src) 7955 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 7956 // Result = select (Src < 0x8000000000000000), True, False 7957 7958 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 7959 // TODO: Should any fast-math-flags be set for the FSUB? 7960 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 7961 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 7962 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 7963 DAG.getConstant(SignMask, dl, DstVT)); 7964 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 7965 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 7966 } 7967 return true; 7968 } 7969 7970 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 7971 SDValue &Chain, 7972 SelectionDAG &DAG) const { 7973 // This transform is not correct for converting 0 when rounding mode is set 7974 // to round toward negative infinity which will produce -0.0. So disable under 7975 // strictfp. 7976 if (Node->isStrictFPOpcode()) 7977 return false; 7978 7979 SDValue Src = Node->getOperand(0); 7980 EVT SrcVT = Src.getValueType(); 7981 EVT DstVT = Node->getValueType(0); 7982 7983 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 7984 return false; 7985 7986 // Only expand vector types if we have the appropriate vector bit operations. 7987 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 7988 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 7989 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 7990 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 7991 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 7992 return false; 7993 7994 SDLoc dl(SDValue(Node, 0)); 7995 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 7996 7997 // Implementation of unsigned i64 to f64 following the algorithm in 7998 // __floatundidf in compiler_rt. This implementation performs rounding 7999 // correctly in all rounding modes with the exception of converting 0 8000 // when rounding toward negative infinity. In that case the fsub will produce 8001 // -0.0. This will be added to +0.0 and produce -0.0 which is incorrect. 
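  // A minimal plain-C sketch of the same bit trick (illustrative only; the
  // helper name is hypothetical and this is not part of the lowering itself):
  //
  //   #include <stdint.h>
  //   #include <string.h>
  //   double U64ToF64(uint64_t X) {
  //     uint64_t LoBits = (X & 0xFFFFFFFF) | 0x4330000000000000; // 2^52 + lo
  //     uint64_t HiBits = (X >> 32) | 0x4530000000000000; // 2^84 + hi * 2^32
  //     double Lo, Hi;
  //     memcpy(&Lo, &LoBits, sizeof(double));
  //     memcpy(&Hi, &HiBits, sizeof(double));
  //     // 0x4530000000100000 is the double (2^84 + 2^52); the single
  //     // subtraction cancels both exponent offsets exactly, and the final
  //     // add rounds once, matching the FSUB/FADD pair emitted below.
  //     double TwoP84PlusTwoP52;
  //     uint64_t C = UINT64_C(0x4530000000100000);
  //     memcpy(&TwoP84PlusTwoP52, &C, sizeof(double));
  //     return (Hi - TwoP84PlusTwoP52) + Lo; // == (double)X
  //   }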
8002 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 8003 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 8004 llvm::bit_cast<double>(UINT64_C(0x4530000000100000)), dl, DstVT); 8005 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 8006 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 8007 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 8008 8009 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 8010 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 8011 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 8012 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 8013 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 8014 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 8015 SDValue HiSub = 8016 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 8017 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 8018 return true; 8019 } 8020 8021 SDValue 8022 TargetLowering::createSelectForFMINNUM_FMAXNUM(SDNode *Node, 8023 SelectionDAG &DAG) const { 8024 unsigned Opcode = Node->getOpcode(); 8025 assert((Opcode == ISD::FMINNUM || Opcode == ISD::FMAXNUM || 8026 Opcode == ISD::STRICT_FMINNUM || Opcode == ISD::STRICT_FMAXNUM) && 8027 "Wrong opcode"); 8028 8029 if (Node->getFlags().hasNoNaNs()) { 8030 ISD::CondCode Pred = Opcode == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 8031 SDValue Op1 = Node->getOperand(0); 8032 SDValue Op2 = Node->getOperand(1); 8033 SDValue SelCC = DAG.getSelectCC(SDLoc(Node), Op1, Op2, Op1, Op2, Pred); 8034 // Copy FMF flags, but always set the no-signed-zeros flag 8035 // as this is implied by the FMINNUM/FMAXNUM semantics. 8036 SDNodeFlags Flags = Node->getFlags(); 8037 Flags.setNoSignedZeros(true); 8038 SelCC->setFlags(Flags); 8039 return SelCC; 8040 } 8041 8042 return SDValue(); 8043 } 8044 8045 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 8046 SelectionDAG &DAG) const { 8047 SDLoc dl(Node); 8048 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 8049 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 8050 EVT VT = Node->getValueType(0); 8051 8052 if (VT.isScalableVector()) 8053 report_fatal_error( 8054 "Expanding fminnum/fmaxnum for scalable vectors is undefined."); 8055 8056 if (isOperationLegalOrCustom(NewOp, VT)) { 8057 SDValue Quiet0 = Node->getOperand(0); 8058 SDValue Quiet1 = Node->getOperand(1); 8059 8060 if (!Node->getFlags().hasNoNaNs()) { 8061 // Insert canonicalizes if it's possible we need to quiet to get correct 8062 // sNaN behavior. 8063 if (!DAG.isKnownNeverSNaN(Quiet0)) { 8064 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 8065 Node->getFlags()); 8066 } 8067 if (!DAG.isKnownNeverSNaN(Quiet1)) { 8068 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 8069 Node->getFlags()); 8070 } 8071 } 8072 8073 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 8074 } 8075 8076 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 8077 // instead if there are no NaNs and there can't be an incompatible zero 8078 // compare: at least one operand isn't +/-0, or there are no signed-zeros. 8079 if ((Node->getFlags().hasNoNaNs() || 8080 (DAG.isKnownNeverNaN(Node->getOperand(0)) && 8081 DAG.isKnownNeverNaN(Node->getOperand(1)))) && 8082 (Node->getFlags().hasNoSignedZeros() || 8083 DAG.isKnownNeverZeroFloat(Node->getOperand(0)) || 8084 DAG.isKnownNeverZeroFloat(Node->getOperand(1)))) { 8085 unsigned IEEE2018Op = 8086 Node->getOpcode() == ISD::FMINNUM ? 
ISD::FMINIMUM : ISD::FMAXIMUM;
8087     if (isOperationLegalOrCustom(IEEE2018Op, VT))
8088       return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0),
8089                          Node->getOperand(1), Node->getFlags());
8090   }
8091
8092   if (SDValue SelCC = createSelectForFMINNUM_FMAXNUM(Node, DAG))
8093     return SelCC;
8094
8095   return SDValue();
8096 }
8097
8098 /// Returns a true value if this FPClassTest can be performed with an ordered
8099 /// fcmp to 0, and a false value if it's an unordered fcmp to 0. Returns
8100 /// std::nullopt if it cannot be performed as a compare with 0.
8101 static std::optional<bool> isFCmpEqualZero(FPClassTest Test,
8102                                            const fltSemantics &Semantics,
8103                                            const MachineFunction &MF) {
8104   FPClassTest OrderedMask = Test & ~fcNan;
8105   FPClassTest NanTest = Test & fcNan;
8106   bool IsOrdered = NanTest == fcNone;
8107   bool IsUnordered = NanTest == fcNan;
8108
8109   // Skip cases that are testing for only a qnan or snan.
8110   if (!IsOrdered && !IsUnordered)
8111     return std::nullopt;
8112
8113   if (OrderedMask == fcZero &&
8114       MF.getDenormalMode(Semantics).Input == DenormalMode::IEEE)
8115     return IsOrdered;
8116   if (OrderedMask == (fcZero | fcSubnormal) &&
8117       MF.getDenormalMode(Semantics).inputsAreZero())
8118     return IsOrdered;
8119   return std::nullopt;
8120 }
8121
8122 SDValue TargetLowering::expandIS_FPCLASS(EVT ResultVT, SDValue Op,
8123                                          FPClassTest Test, SDNodeFlags Flags,
8124                                          const SDLoc &DL,
8125                                          SelectionDAG &DAG) const {
8126   EVT OperandVT = Op.getValueType();
8127   assert(OperandVT.isFloatingPoint());
8128
8129   // Degenerate cases.
8130   if (Test == fcNone)
8131     return DAG.getBoolConstant(false, DL, ResultVT, OperandVT);
8132   if ((Test & fcAllFlags) == fcAllFlags)
8133     return DAG.getBoolConstant(true, DL, ResultVT, OperandVT);
8134
8135   // PPC double-double is a pair of doubles, of which the higher part determines
8136   // the value class.
8137   if (OperandVT == MVT::ppcf128) {
8138     Op = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::f64, Op,
8139                      DAG.getConstant(1, DL, MVT::i32));
8140     OperandVT = MVT::f64;
8141   }
8142
8143   // Some checks may be represented as the inversion of a simpler check, for
8144   // example "inf|normal|subnormal|zero" => !"nan".
8145   bool IsInverted = false;
8146   if (FPClassTest InvertedCheck = invertFPClassTestIfSimpler(Test)) {
8147     IsInverted = true;
8148     Test = InvertedCheck;
8149   }
8150
8151   // Floating-point type properties.
8152   EVT ScalarFloatVT = OperandVT.getScalarType();
8153   const Type *FloatTy = ScalarFloatVT.getTypeForEVT(*DAG.getContext());
8154   const llvm::fltSemantics &Semantics = FloatTy->getFltSemantics();
8155   bool IsF80 = (ScalarFloatVT == MVT::f80);
8156
8157   // Some checks can be implemented using float comparisons, if floating-point
8158   // exceptions are ignored.
8159   if (Flags.hasNoFPExcept() &&
8160       isOperationLegalOrCustom(ISD::SETCC, OperandVT.getScalarType())) {
8161     ISD::CondCode OrderedCmpOpcode = IsInverted ? ISD::SETUNE : ISD::SETOEQ;
8162     ISD::CondCode UnorderedCmpOpcode = IsInverted ? ISD::SETONE : ISD::SETUEQ;
8163
8164     if (std::optional<bool> IsCmp0 =
8165             isFCmpEqualZero(Test, Semantics, DAG.getMachineFunction());
8166         IsCmp0 && (isCondCodeLegalOrCustom(
8167                       *IsCmp0 ? OrderedCmpOpcode : UnorderedCmpOpcode,
8168                       OperandVT.getScalarType().getSimpleVT()))) {
8169
8170       // If denormals could be implicitly treated as 0, this is not equivalent
8171       // to a compare with 0 since it will also be true for denormals.
8172       return DAG.getSetCC(DL, ResultVT, Op,
8173                           DAG.getConstantFP(0.0, DL, OperandVT),
8174                           *IsCmp0 ?
OrderedCmpOpcode : UnorderedCmpOpcode); 8175 } 8176 8177 if (Test == fcNan && 8178 isCondCodeLegalOrCustom(IsInverted ? ISD::SETO : ISD::SETUO, 8179 OperandVT.getScalarType().getSimpleVT())) { 8180 return DAG.getSetCC(DL, ResultVT, Op, Op, 8181 IsInverted ? ISD::SETO : ISD::SETUO); 8182 } 8183 8184 if (Test == fcInf && 8185 isCondCodeLegalOrCustom(IsInverted ? ISD::SETUNE : ISD::SETOEQ, 8186 OperandVT.getScalarType().getSimpleVT()) && 8187 isOperationLegalOrCustom(ISD::FABS, OperandVT.getScalarType())) { 8188 // isinf(x) --> fabs(x) == inf 8189 SDValue Abs = DAG.getNode(ISD::FABS, DL, OperandVT, Op); 8190 SDValue Inf = 8191 DAG.getConstantFP(APFloat::getInf(Semantics), DL, OperandVT); 8192 return DAG.getSetCC(DL, ResultVT, Abs, Inf, 8193 IsInverted ? ISD::SETUNE : ISD::SETOEQ); 8194 } 8195 } 8196 8197 // In the general case use integer operations. 8198 unsigned BitSize = OperandVT.getScalarSizeInBits(); 8199 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), BitSize); 8200 if (OperandVT.isVector()) 8201 IntVT = EVT::getVectorVT(*DAG.getContext(), IntVT, 8202 OperandVT.getVectorElementCount()); 8203 SDValue OpAsInt = DAG.getBitcast(IntVT, Op); 8204 8205 // Various masks. 8206 APInt SignBit = APInt::getSignMask(BitSize); 8207 APInt ValueMask = APInt::getSignedMaxValue(BitSize); // All bits but sign. 8208 APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit. 8209 const unsigned ExplicitIntBitInF80 = 63; 8210 APInt ExpMask = Inf; 8211 if (IsF80) 8212 ExpMask.clearBit(ExplicitIntBitInF80); 8213 APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf; 8214 APInt QNaNBitMask = 8215 APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1); 8216 APInt InvertionMask = APInt::getAllOnes(ResultVT.getScalarSizeInBits()); 8217 8218 SDValue ValueMaskV = DAG.getConstant(ValueMask, DL, IntVT); 8219 SDValue SignBitV = DAG.getConstant(SignBit, DL, IntVT); 8220 SDValue ExpMaskV = DAG.getConstant(ExpMask, DL, IntVT); 8221 SDValue ZeroV = DAG.getConstant(0, DL, IntVT); 8222 SDValue InfV = DAG.getConstant(Inf, DL, IntVT); 8223 SDValue ResultInvertionMask = DAG.getConstant(InvertionMask, DL, ResultVT); 8224 8225 SDValue Res; 8226 const auto appendResult = [&](SDValue PartialRes) { 8227 if (PartialRes) { 8228 if (Res) 8229 Res = DAG.getNode(ISD::OR, DL, ResultVT, Res, PartialRes); 8230 else 8231 Res = PartialRes; 8232 } 8233 }; 8234 8235 SDValue IntBitIsSetV; // Explicit integer bit in f80 mantissa is set. 8236 const auto getIntBitIsSet = [&]() -> SDValue { 8237 if (!IntBitIsSetV) { 8238 APInt IntBitMask(BitSize, 0); 8239 IntBitMask.setBit(ExplicitIntBitInF80); 8240 SDValue IntBitMaskV = DAG.getConstant(IntBitMask, DL, IntVT); 8241 SDValue IntBitV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, IntBitMaskV); 8242 IntBitIsSetV = DAG.getSetCC(DL, ResultVT, IntBitV, ZeroV, ISD::SETNE); 8243 } 8244 return IntBitIsSetV; 8245 }; 8246 8247 // Split the value into sign bit and absolute value. 8248 SDValue AbsV = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, ValueMaskV); 8249 SDValue SignV = DAG.getSetCC(DL, ResultVT, OpAsInt, 8250 DAG.getConstant(0.0, DL, IntVT), ISD::SETLT); 8251 8252 // Tests that involve more than one class should be processed first. 8253 SDValue PartialRes; 8254 8255 if (IsF80) 8256 ; // Detect finite numbers of f80 by checking individual classes because 8257 // they have different settings of the explicit integer bit. 
8258 else if ((Test & fcFinite) == fcFinite) { 8259 // finite(V) ==> abs(V) < exp_mask 8260 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT); 8261 Test &= ~fcFinite; 8262 } else if ((Test & fcFinite) == fcPosFinite) { 8263 // finite(V) && V > 0 ==> V < exp_mask 8264 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ExpMaskV, ISD::SETULT); 8265 Test &= ~fcPosFinite; 8266 } else if ((Test & fcFinite) == fcNegFinite) { 8267 // finite(V) && V < 0 ==> abs(V) < exp_mask && signbit == 1 8268 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ExpMaskV, ISD::SETLT); 8269 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 8270 Test &= ~fcNegFinite; 8271 } 8272 appendResult(PartialRes); 8273 8274 if (FPClassTest PartialCheck = Test & (fcZero | fcSubnormal)) { 8275 // fcZero | fcSubnormal => test all exponent bits are 0 8276 // TODO: Handle sign bit specific cases 8277 if (PartialCheck == (fcZero | fcSubnormal)) { 8278 SDValue ExpBits = DAG.getNode(ISD::AND, DL, IntVT, OpAsInt, ExpMaskV); 8279 SDValue ExpIsZero = 8280 DAG.getSetCC(DL, ResultVT, ExpBits, ZeroV, ISD::SETEQ); 8281 appendResult(ExpIsZero); 8282 Test &= ~PartialCheck & fcAllFlags; 8283 } 8284 } 8285 8286 // Check for individual classes. 8287 8288 if (unsigned PartialCheck = Test & fcZero) { 8289 if (PartialCheck == fcPosZero) 8290 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, ZeroV, ISD::SETEQ); 8291 else if (PartialCheck == fcZero) 8292 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, ZeroV, ISD::SETEQ); 8293 else // ISD::fcNegZero 8294 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, SignBitV, ISD::SETEQ); 8295 appendResult(PartialRes); 8296 } 8297 8298 if (unsigned PartialCheck = Test & fcSubnormal) { 8299 // issubnormal(V) ==> unsigned(abs(V) - 1) < (all mantissa bits set) 8300 // issubnormal(V) && V>0 ==> unsigned(V - 1) < (all mantissa bits set) 8301 SDValue V = (PartialCheck == fcPosSubnormal) ? OpAsInt : AbsV; 8302 SDValue MantissaV = DAG.getConstant(AllOneMantissa, DL, IntVT); 8303 SDValue VMinusOneV = 8304 DAG.getNode(ISD::SUB, DL, IntVT, V, DAG.getConstant(1, DL, IntVT)); 8305 PartialRes = DAG.getSetCC(DL, ResultVT, VMinusOneV, MantissaV, ISD::SETULT); 8306 if (PartialCheck == fcNegSubnormal) 8307 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 8308 appendResult(PartialRes); 8309 } 8310 8311 if (unsigned PartialCheck = Test & fcInf) { 8312 if (PartialCheck == fcPosInf) 8313 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, InfV, ISD::SETEQ); 8314 else if (PartialCheck == fcInf) 8315 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETEQ); 8316 else { // ISD::fcNegInf 8317 APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt(); 8318 SDValue NegInfV = DAG.getConstant(NegInf, DL, IntVT); 8319 PartialRes = DAG.getSetCC(DL, ResultVT, OpAsInt, NegInfV, ISD::SETEQ); 8320 } 8321 appendResult(PartialRes); 8322 } 8323 8324 if (unsigned PartialCheck = Test & fcNan) { 8325 APInt InfWithQnanBit = Inf | QNaNBitMask; 8326 SDValue InfWithQnanBitV = DAG.getConstant(InfWithQnanBit, DL, IntVT); 8327 if (PartialCheck == fcNan) { 8328 // isnan(V) ==> abs(V) > int(inf) 8329 PartialRes = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT); 8330 if (IsF80) { 8331 // Recognize unsupported values as NaNs for compatibility with glibc. 8332 // In them (exp(V)==0) == int_bit. 
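      // (That is, x87 "pseudo" encodings: a zero exponent with the explicit
      // integer bit set, or a nonzero exponent with it clear. No IEEE
      // arithmetic produces such values, and glibc's fpclassify reports them
      // as NaN.)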
8333 SDValue ExpBits = DAG.getNode(ISD::AND, DL, IntVT, AbsV, ExpMaskV); 8334 SDValue ExpIsZero = 8335 DAG.getSetCC(DL, ResultVT, ExpBits, ZeroV, ISD::SETEQ); 8336 SDValue IsPseudo = 8337 DAG.getSetCC(DL, ResultVT, getIntBitIsSet(), ExpIsZero, ISD::SETEQ); 8338 PartialRes = DAG.getNode(ISD::OR, DL, ResultVT, PartialRes, IsPseudo); 8339 } 8340 } else if (PartialCheck == fcQNan) { 8341 // isquiet(V) ==> abs(V) >= (unsigned(Inf) | quiet_bit) 8342 PartialRes = 8343 DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETGE); 8344 } else { // ISD::fcSNan 8345 // issignaling(V) ==> abs(V) > unsigned(Inf) && 8346 // abs(V) < (unsigned(Inf) | quiet_bit) 8347 SDValue IsNan = DAG.getSetCC(DL, ResultVT, AbsV, InfV, ISD::SETGT); 8348 SDValue IsNotQnan = 8349 DAG.getSetCC(DL, ResultVT, AbsV, InfWithQnanBitV, ISD::SETLT); 8350 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, IsNan, IsNotQnan); 8351 } 8352 appendResult(PartialRes); 8353 } 8354 8355 if (unsigned PartialCheck = Test & fcNormal) { 8356 // isnormal(V) ==> (0 < exp < max_exp) ==> (unsigned(exp-1) < (max_exp-1)) 8357 APInt ExpLSB = ExpMask & ~(ExpMask.shl(1)); 8358 SDValue ExpLSBV = DAG.getConstant(ExpLSB, DL, IntVT); 8359 SDValue ExpMinus1 = DAG.getNode(ISD::SUB, DL, IntVT, AbsV, ExpLSBV); 8360 APInt ExpLimit = ExpMask - ExpLSB; 8361 SDValue ExpLimitV = DAG.getConstant(ExpLimit, DL, IntVT); 8362 PartialRes = DAG.getSetCC(DL, ResultVT, ExpMinus1, ExpLimitV, ISD::SETULT); 8363 if (PartialCheck == fcNegNormal) 8364 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, SignV); 8365 else if (PartialCheck == fcPosNormal) { 8366 SDValue PosSignV = 8367 DAG.getNode(ISD::XOR, DL, ResultVT, SignV, ResultInvertionMask); 8368 PartialRes = DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, PosSignV); 8369 } 8370 if (IsF80) 8371 PartialRes = 8372 DAG.getNode(ISD::AND, DL, ResultVT, PartialRes, getIntBitIsSet()); 8373 appendResult(PartialRes); 8374 } 8375 8376 if (!Res) 8377 return DAG.getConstant(IsInverted, DL, ResultVT); 8378 if (IsInverted) 8379 Res = DAG.getNode(ISD::XOR, DL, ResultVT, Res, ResultInvertionMask); 8380 return Res; 8381 } 8382 8383 // Only expand vector types if we have the appropriate vector bit operations. 8384 static bool canExpandVectorCTPOP(const TargetLowering &TLI, EVT VT) { 8385 assert(VT.isVector() && "Expected vector type"); 8386 unsigned Len = VT.getScalarSizeInBits(); 8387 return TLI.isOperationLegalOrCustom(ISD::ADD, VT) && 8388 TLI.isOperationLegalOrCustom(ISD::SUB, VT) && 8389 TLI.isOperationLegalOrCustom(ISD::SRL, VT) && 8390 (Len == 8 || TLI.isOperationLegalOrCustom(ISD::MUL, VT)) && 8391 TLI.isOperationLegalOrCustomOrPromote(ISD::AND, VT); 8392 } 8393 8394 SDValue TargetLowering::expandCTPOP(SDNode *Node, SelectionDAG &DAG) const { 8395 SDLoc dl(Node); 8396 EVT VT = Node->getValueType(0); 8397 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8398 SDValue Op = Node->getOperand(0); 8399 unsigned Len = VT.getScalarSizeInBits(); 8400 assert(VT.isInteger() && "CTPOP not implemented for this type."); 8401 8402 // TODO: Add support for irregular type lengths. 8403 if (!(Len <= 128 && Len % 8 == 0)) 8404 return SDValue(); 8405 8406 // Only expand vector types if we have the appropriate vector bit operations. 
8407   if (VT.isVector() && !canExpandVectorCTPOP(*this, VT))
8408     return SDValue();
8409
8410   // This is the "best" algorithm from
8411   // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
8412   SDValue Mask55 =
8413       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
8414   SDValue Mask33 =
8415       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
8416   SDValue Mask0F =
8417       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
8418
8419   // v = v - ((v >> 1) & 0x55555555...)
8420   Op = DAG.getNode(ISD::SUB, dl, VT, Op,
8421                    DAG.getNode(ISD::AND, dl, VT,
8422                                DAG.getNode(ISD::SRL, dl, VT, Op,
8423                                            DAG.getConstant(1, dl, ShVT)),
8424                                Mask55));
8425   // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
8426   Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33),
8427                    DAG.getNode(ISD::AND, dl, VT,
8428                                DAG.getNode(ISD::SRL, dl, VT, Op,
8429                                            DAG.getConstant(2, dl, ShVT)),
8430                                Mask33));
8431   // v = (v + (v >> 4)) & 0x0F0F0F0F...
8432   Op = DAG.getNode(ISD::AND, dl, VT,
8433                    DAG.getNode(ISD::ADD, dl, VT, Op,
8434                                DAG.getNode(ISD::SRL, dl, VT, Op,
8435                                            DAG.getConstant(4, dl, ShVT))),
8436                    Mask0F);
8437
8438   if (Len <= 8)
8439     return Op;
8440
8441   // Avoid the multiply if we only have 2 bytes to add.
8442   // TODO: Only doing this for scalars because vectors weren't as obviously
8443   // improved.
8444   if (Len == 16 && !VT.isVector()) {
8445     // v = (v + (v >> 8)) & 0x00FF;
8446     return DAG.getNode(ISD::AND, dl, VT,
8447                        DAG.getNode(ISD::ADD, dl, VT, Op,
8448                                    DAG.getNode(ISD::SRL, dl, VT, Op,
8449                                                DAG.getConstant(8, dl, ShVT))),
8450                        DAG.getConstant(0xFF, dl, VT));
8451   }
8452
8453   // v = (v * 0x01010101...) >> (Len - 8)
8454   SDValue Mask01 =
8455       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
8456   return DAG.getNode(ISD::SRL, dl, VT,
8457                      DAG.getNode(ISD::MUL, dl, VT, Op, Mask01),
8458                      DAG.getConstant(Len - 8, dl, ShVT));
8459 }
8460
8461 SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
8462   SDLoc dl(Node);
8463   EVT VT = Node->getValueType(0);
8464   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
8465   SDValue Op = Node->getOperand(0);
8466   SDValue Mask = Node->getOperand(1);
8467   SDValue VL = Node->getOperand(2);
8468   unsigned Len = VT.getScalarSizeInBits();
8469   assert(VT.isInteger() && "VP_CTPOP not implemented for this type.");
8470
8471   // TODO: Add support for irregular type lengths.
8472   if (!(Len <= 128 && Len % 8 == 0))
8473     return SDValue();
8474
8475   // This is the same algorithm as in expandCTPOP, from
8476   // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
8477   SDValue Mask55 =
8478       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
8479   SDValue Mask33 =
8480       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
8481   SDValue Mask0F =
8482       DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
8483
8484   SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
8485
8486   // v = v - ((v >> 1) & 0x55555555...)
8487   Tmp1 = DAG.getNode(ISD::VP_AND, dl, VT,
8488                      DAG.getNode(ISD::VP_LSHR, dl, VT, Op,
8489                                  DAG.getConstant(1, dl, ShVT), Mask, VL),
8490                      Mask55, Mask, VL);
8491   Op = DAG.getNode(ISD::VP_SUB, dl, VT, Op, Tmp1, Mask, VL);
8492
8493   // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
8494 Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Op, Mask33, Mask, VL); 8495 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, 8496 DAG.getNode(ISD::VP_LSHR, dl, VT, Op, 8497 DAG.getConstant(2, dl, ShVT), Mask, VL), 8498 Mask33, Mask, VL); 8499 Op = DAG.getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL); 8500 8501 // v = (v + (v >> 4)) & 0x0F0F0F0F... 8502 Tmp4 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(4, dl, ShVT), 8503 Mask, VL), 8504 Tmp5 = DAG.getNode(ISD::VP_ADD, dl, VT, Op, Tmp4, Mask, VL); 8505 Op = DAG.getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL); 8506 8507 if (Len <= 8) 8508 return Op; 8509 8510 // v = (v * 0x01010101...) >> (Len - 8) 8511 SDValue Mask01 = 8512 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 8513 return DAG.getNode(ISD::VP_LSHR, dl, VT, 8514 DAG.getNode(ISD::VP_MUL, dl, VT, Op, Mask01, Mask, VL), 8515 DAG.getConstant(Len - 8, dl, ShVT), Mask, VL); 8516 } 8517 8518 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { 8519 SDLoc dl(Node); 8520 EVT VT = Node->getValueType(0); 8521 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8522 SDValue Op = Node->getOperand(0); 8523 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 8524 8525 // If the non-ZERO_UNDEF version is supported we can use that instead. 8526 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 8527 isOperationLegalOrCustom(ISD::CTLZ, VT)) 8528 return DAG.getNode(ISD::CTLZ, dl, VT, Op); 8529 8530 // If the ZERO_UNDEF version is supported use that and handle the zero case. 8531 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 8532 EVT SetCCVT = 8533 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8534 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 8535 SDValue Zero = DAG.getConstant(0, dl, VT); 8536 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 8537 return DAG.getSelect(dl, VT, SrcIsZero, 8538 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 8539 } 8540 8541 // Only expand vector types if we have the appropriate vector bit operations. 8542 // This includes the operations needed to expand CTPOP if it isn't supported. 8543 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 8544 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 8545 !canExpandVectorCTPOP(*this, VT)) || 8546 !isOperationLegalOrCustom(ISD::SRL, VT) || 8547 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 8548 return SDValue(); 8549 8550 // for now, we do this: 8551 // x = x | (x >> 1); 8552 // x = x | (x >> 2); 8553 // ... 8554 // x = x | (x >>16); 8555 // x = x | (x >>32); // for 64-bit input 8556 // return popcount(~x); 8557 // 8558 // Ref: "Hacker's Delight" by Henry Warren 8559 for (unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) { 8560 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 8561 Op = DAG.getNode(ISD::OR, dl, VT, Op, 8562 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 8563 } 8564 Op = DAG.getNOT(dl, Op, VT); 8565 return DAG.getNode(ISD::CTPOP, dl, VT, Op); 8566 } 8567 8568 SDValue TargetLowering::expandVPCTLZ(SDNode *Node, SelectionDAG &DAG) const { 8569 SDLoc dl(Node); 8570 EVT VT = Node->getValueType(0); 8571 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8572 SDValue Op = Node->getOperand(0); 8573 SDValue Mask = Node->getOperand(1); 8574 SDValue VL = Node->getOperand(2); 8575 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 8576 8577 // do this: 8578 // x = x | (x >> 1); 8579 // x = x | (x >> 2); 8580 // ... 
8581 // x = x | (x >>16); 8582 // x = x | (x >>32); // for 64-bit input 8583 // return popcount(~x); 8584 for (unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) { 8585 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 8586 Op = DAG.getNode(ISD::VP_OR, dl, VT, Op, 8587 DAG.getNode(ISD::VP_LSHR, dl, VT, Op, Tmp, Mask, VL), Mask, 8588 VL); 8589 } 8590 Op = DAG.getNode(ISD::VP_XOR, dl, VT, Op, DAG.getConstant(-1, dl, VT), Mask, 8591 VL); 8592 return DAG.getNode(ISD::VP_CTPOP, dl, VT, Op, Mask, VL); 8593 } 8594 8595 SDValue TargetLowering::CTTZTableLookup(SDNode *Node, SelectionDAG &DAG, 8596 const SDLoc &DL, EVT VT, SDValue Op, 8597 unsigned BitWidth) const { 8598 if (BitWidth != 32 && BitWidth != 64) 8599 return SDValue(); 8600 APInt DeBruijn = BitWidth == 32 ? APInt(32, 0x077CB531U) 8601 : APInt(64, 0x0218A392CD3D5DBFULL); 8602 const DataLayout &TD = DAG.getDataLayout(); 8603 MachinePointerInfo PtrInfo = 8604 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()); 8605 unsigned ShiftAmt = BitWidth - Log2_32(BitWidth); 8606 SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op); 8607 SDValue Lookup = DAG.getNode( 8608 ISD::SRL, DL, VT, 8609 DAG.getNode(ISD::MUL, DL, VT, DAG.getNode(ISD::AND, DL, VT, Op, Neg), 8610 DAG.getConstant(DeBruijn, DL, VT)), 8611 DAG.getConstant(ShiftAmt, DL, VT)); 8612 Lookup = DAG.getSExtOrTrunc(Lookup, DL, getPointerTy(TD)); 8613 8614 SmallVector<uint8_t> Table(BitWidth, 0); 8615 for (unsigned i = 0; i < BitWidth; i++) { 8616 APInt Shl = DeBruijn.shl(i); 8617 APInt Lshr = Shl.lshr(ShiftAmt); 8618 Table[Lshr.getZExtValue()] = i; 8619 } 8620 8621 // Create a ConstantArray in Constant Pool 8622 auto *CA = ConstantDataArray::get(*DAG.getContext(), Table); 8623 SDValue CPIdx = DAG.getConstantPool(CA, getPointerTy(TD), 8624 TD.getPrefTypeAlign(CA->getType())); 8625 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, DL, VT, DAG.getEntryNode(), 8626 DAG.getMemBasePlusOffset(CPIdx, Lookup, DL), 8627 PtrInfo, MVT::i8); 8628 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF) 8629 return ExtLoad; 8630 8631 EVT SetCCVT = 8632 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8633 SDValue Zero = DAG.getConstant(0, DL, VT); 8634 SDValue SrcIsZero = DAG.getSetCC(DL, SetCCVT, Op, Zero, ISD::SETEQ); 8635 return DAG.getSelect(DL, VT, SrcIsZero, 8636 DAG.getConstant(BitWidth, DL, VT), ExtLoad); 8637 } 8638 8639 SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { 8640 SDLoc dl(Node); 8641 EVT VT = Node->getValueType(0); 8642 SDValue Op = Node->getOperand(0); 8643 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 8644 8645 // If the non-ZERO_UNDEF version is supported we can use that instead. 8646 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 8647 isOperationLegalOrCustom(ISD::CTTZ, VT)) 8648 return DAG.getNode(ISD::CTTZ, dl, VT, Op); 8649 8650 // If the ZERO_UNDEF version is supported use that and handle the zero case. 8651 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 8652 EVT SetCCVT = 8653 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8654 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 8655 SDValue Zero = DAG.getConstant(0, dl, VT); 8656 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 8657 return DAG.getSelect(dl, VT, SrcIsZero, 8658 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 8659 } 8660 8661 // Only expand vector types if we have the appropriate vector bit operations. 8662 // This includes the operations needed to expand CTPOP if it isn't supported. 
8663 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 8664 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 8665 !isOperationLegalOrCustom(ISD::CTLZ, VT) && 8666 !canExpandVectorCTPOP(*this, VT)) || 8667 !isOperationLegalOrCustom(ISD::SUB, VT) || 8668 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 8669 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 8670 return SDValue(); 8671 8672 // Emit Table Lookup if ISD::CTLZ and ISD::CTPOP are not legal. 8673 if (!VT.isVector() && isOperationExpand(ISD::CTPOP, VT) && 8674 !isOperationLegal(ISD::CTLZ, VT)) 8675 if (SDValue V = CTTZTableLookup(Node, DAG, dl, VT, Op, NumBitsPerElt)) 8676 return V; 8677 8678 // for now, we use: { return popcount(~x & (x - 1)); } 8679 // unless the target has ctlz but not ctpop, in which case we use: 8680 // { return 32 - nlz(~x & (x-1)); } 8681 // Ref: "Hacker's Delight" by Henry Warren 8682 SDValue Tmp = DAG.getNode( 8683 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 8684 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 8685 8686 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 8687 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 8688 return DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 8689 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 8690 } 8691 8692 return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 8693 } 8694 8695 SDValue TargetLowering::expandVPCTTZ(SDNode *Node, SelectionDAG &DAG) const { 8696 SDValue Op = Node->getOperand(0); 8697 SDValue Mask = Node->getOperand(1); 8698 SDValue VL = Node->getOperand(2); 8699 SDLoc dl(Node); 8700 EVT VT = Node->getValueType(0); 8701 8702 // Same as the vector part of expandCTTZ, use: popcount(~x & (x - 1)) 8703 SDValue Not = DAG.getNode(ISD::VP_XOR, dl, VT, Op, 8704 DAG.getConstant(-1, dl, VT), Mask, VL); 8705 SDValue MinusOne = DAG.getNode(ISD::VP_SUB, dl, VT, Op, 8706 DAG.getConstant(1, dl, VT), Mask, VL); 8707 SDValue Tmp = DAG.getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL); 8708 return DAG.getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL); 8709 } 8710 8711 SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, 8712 bool IsNegative) const { 8713 SDLoc dl(N); 8714 EVT VT = N->getValueType(0); 8715 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8716 SDValue Op = N->getOperand(0); 8717 8718 // abs(x) -> smax(x,sub(0,x)) 8719 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 8720 isOperationLegal(ISD::SMAX, VT)) { 8721 SDValue Zero = DAG.getConstant(0, dl, VT); 8722 return DAG.getNode(ISD::SMAX, dl, VT, Op, 8723 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8724 } 8725 8726 // abs(x) -> umin(x,sub(0,x)) 8727 if (!IsNegative && isOperationLegal(ISD::SUB, VT) && 8728 isOperationLegal(ISD::UMIN, VT)) { 8729 SDValue Zero = DAG.getConstant(0, dl, VT); 8730 Op = DAG.getFreeze(Op); 8731 return DAG.getNode(ISD::UMIN, dl, VT, Op, 8732 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8733 } 8734 8735 // 0 - abs(x) -> smin(x, sub(0,x)) 8736 if (IsNegative && isOperationLegal(ISD::SUB, VT) && 8737 isOperationLegal(ISD::SMIN, VT)) { 8738 Op = DAG.getFreeze(Op); 8739 SDValue Zero = DAG.getConstant(0, dl, VT); 8740 return DAG.getNode(ISD::SMIN, dl, VT, Op, 8741 DAG.getNode(ISD::SUB, dl, VT, Zero, Op)); 8742 } 8743 8744 // Only expand vector types if we have the appropriate vector operations. 
8745 if (VT.isVector() && 8746 (!isOperationLegalOrCustom(ISD::SRA, VT) || 8747 (!IsNegative && !isOperationLegalOrCustom(ISD::ADD, VT)) || 8748 (IsNegative && !isOperationLegalOrCustom(ISD::SUB, VT)) || 8749 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 8750 return SDValue(); 8751 8752 Op = DAG.getFreeze(Op); 8753 SDValue Shift = 8754 DAG.getNode(ISD::SRA, dl, VT, Op, 8755 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 8756 SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, Op, Shift); 8757 8758 // abs(x) -> Y = sra (X, size(X)-1); sub (xor (X, Y), Y) 8759 if (!IsNegative) 8760 return DAG.getNode(ISD::SUB, dl, VT, Xor, Shift); 8761 8762 // 0 - abs(x) -> Y = sra (X, size(X)-1); sub (Y, xor (X, Y)) 8763 return DAG.getNode(ISD::SUB, dl, VT, Shift, Xor); 8764 } 8765 8766 SDValue TargetLowering::expandABD(SDNode *N, SelectionDAG &DAG) const { 8767 SDLoc dl(N); 8768 EVT VT = N->getValueType(0); 8769 SDValue LHS = DAG.getFreeze(N->getOperand(0)); 8770 SDValue RHS = DAG.getFreeze(N->getOperand(1)); 8771 bool IsSigned = N->getOpcode() == ISD::ABDS; 8772 8773 // abds(lhs, rhs) -> sub(smax(lhs,rhs), smin(lhs,rhs)) 8774 // abdu(lhs, rhs) -> sub(umax(lhs,rhs), umin(lhs,rhs)) 8775 unsigned MaxOpc = IsSigned ? ISD::SMAX : ISD::UMAX; 8776 unsigned MinOpc = IsSigned ? ISD::SMIN : ISD::UMIN; 8777 if (isOperationLegal(MaxOpc, VT) && isOperationLegal(MinOpc, VT)) { 8778 SDValue Max = DAG.getNode(MaxOpc, dl, VT, LHS, RHS); 8779 SDValue Min = DAG.getNode(MinOpc, dl, VT, LHS, RHS); 8780 return DAG.getNode(ISD::SUB, dl, VT, Max, Min); 8781 } 8782 8783 // abdu(lhs, rhs) -> or(usubsat(lhs,rhs), usubsat(rhs,lhs)) 8784 if (!IsSigned && isOperationLegal(ISD::USUBSAT, VT)) 8785 return DAG.getNode(ISD::OR, dl, VT, 8786 DAG.getNode(ISD::USUBSAT, dl, VT, LHS, RHS), 8787 DAG.getNode(ISD::USUBSAT, dl, VT, RHS, LHS)); 8788 8789 // abds(lhs, rhs) -> select(sgt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs)) 8790 // abdu(lhs, rhs) -> select(ugt(lhs,rhs), sub(lhs,rhs), sub(rhs,lhs)) 8791 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 8792 ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT; 8793 SDValue Cmp = DAG.getSetCC(dl, CCVT, LHS, RHS, CC); 8794 return DAG.getSelect(dl, VT, Cmp, DAG.getNode(ISD::SUB, dl, VT, LHS, RHS), 8795 DAG.getNode(ISD::SUB, dl, VT, RHS, LHS)); 8796 } 8797 8798 SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const { 8799 SDLoc dl(N); 8800 EVT VT = N->getValueType(0); 8801 SDValue Op = N->getOperand(0); 8802 8803 if (!VT.isSimple()) 8804 return SDValue(); 8805 8806 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8807 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 8808 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 8809 default: 8810 return SDValue(); 8811 case MVT::i16: 8812 // Use a rotate by 8. This can be further expanded if necessary. 
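    // (E.g. an i16 bswap of 0xAABB is rotl(0xAABB, 8) == 0xBBAA.)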
8813 return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8814 case MVT::i32: 8815 Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8816 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Op, 8817 DAG.getConstant(0xFF00, dl, VT)); 8818 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(8, dl, SHVT)); 8819 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8820 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, dl, VT)); 8821 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8822 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 8823 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 8824 return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 8825 case MVT::i64: 8826 Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 8827 Tmp7 = DAG.getNode(ISD::AND, dl, VT, Op, 8828 DAG.getConstant(255ULL<<8, dl, VT)); 8829 Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Tmp7, DAG.getConstant(40, dl, SHVT)); 8830 Tmp6 = DAG.getNode(ISD::AND, dl, VT, Op, 8831 DAG.getConstant(255ULL<<16, dl, VT)); 8832 Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Tmp6, DAG.getConstant(24, dl, SHVT)); 8833 Tmp5 = DAG.getNode(ISD::AND, dl, VT, Op, 8834 DAG.getConstant(255ULL<<24, dl, VT)); 8835 Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Tmp5, DAG.getConstant(8, dl, SHVT)); 8836 Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, dl, SHVT)); 8837 Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, 8838 DAG.getConstant(255ULL<<24, dl, VT)); 8839 Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, dl, SHVT)); 8840 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, 8841 DAG.getConstant(255ULL<<16, dl, VT)); 8842 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, dl, SHVT)); 8843 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, 8844 DAG.getConstant(255ULL<<8, dl, VT)); 8845 Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, dl, SHVT)); 8846 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7); 8847 Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5); 8848 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3); 8849 Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1); 8850 Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6); 8851 Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2); 8852 return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4); 8853 } 8854 } 8855 8856 SDValue TargetLowering::expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const { 8857 SDLoc dl(N); 8858 EVT VT = N->getValueType(0); 8859 SDValue Op = N->getOperand(0); 8860 SDValue Mask = N->getOperand(1); 8861 SDValue EVL = N->getOperand(2); 8862 8863 if (!VT.isSimple()) 8864 return SDValue(); 8865 8866 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8867 SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; 8868 switch (VT.getSimpleVT().getScalarType().SimpleTy) { 8869 default: 8870 return SDValue(); 8871 case MVT::i16: 8872 Tmp1 = DAG.getNode(ISD::VP_SHL, dl, VT, Op, DAG.getConstant(8, dl, SHVT), 8873 Mask, EVL); 8874 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT), 8875 Mask, EVL); 8876 return DAG.getNode(ISD::VP_OR, dl, VT, Tmp1, Tmp2, Mask, EVL); 8877 case MVT::i32: 8878 Tmp4 = DAG.getNode(ISD::VP_SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT), 8879 Mask, EVL); 8880 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Op, DAG.getConstant(0xFF00, dl, VT), 8881 Mask, EVL); 8882 Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(8, dl, SHVT), 8883 Mask, EVL); 8884 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT), 8885 Mask, EVL); 8886 Tmp2 = 
DAG.getNode(ISD::VP_AND, dl, VT, Tmp2, 8887 DAG.getConstant(0xFF00, dl, VT), Mask, EVL); 8888 Tmp1 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(24, dl, SHVT), 8889 Mask, EVL); 8890 Tmp4 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL); 8891 Tmp2 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL); 8892 return DAG.getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL); 8893 case MVT::i64: 8894 Tmp8 = DAG.getNode(ISD::VP_SHL, dl, VT, Op, DAG.getConstant(56, dl, SHVT), 8895 Mask, EVL); 8896 Tmp7 = DAG.getNode(ISD::VP_AND, dl, VT, Op, 8897 DAG.getConstant(255ULL << 8, dl, VT), Mask, EVL); 8898 Tmp7 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp7, DAG.getConstant(40, dl, SHVT), 8899 Mask, EVL); 8900 Tmp6 = DAG.getNode(ISD::VP_AND, dl, VT, Op, 8901 DAG.getConstant(255ULL << 16, dl, VT), Mask, EVL); 8902 Tmp6 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp6, DAG.getConstant(24, dl, SHVT), 8903 Mask, EVL); 8904 Tmp5 = DAG.getNode(ISD::VP_AND, dl, VT, Op, 8905 DAG.getConstant(255ULL << 24, dl, VT), Mask, EVL); 8906 Tmp5 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp5, DAG.getConstant(8, dl, SHVT), 8907 Mask, EVL); 8908 Tmp4 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(8, dl, SHVT), 8909 Mask, EVL); 8910 Tmp4 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp4, 8911 DAG.getConstant(255ULL << 24, dl, VT), Mask, EVL); 8912 Tmp3 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(24, dl, SHVT), 8913 Mask, EVL); 8914 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp3, 8915 DAG.getConstant(255ULL << 16, dl, VT), Mask, EVL); 8916 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(40, dl, SHVT), 8917 Mask, EVL); 8918 Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2, 8919 DAG.getConstant(255ULL << 8, dl, VT), Mask, EVL); 8920 Tmp1 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(56, dl, SHVT), 8921 Mask, EVL); 8922 Tmp8 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp7, Mask, EVL); 8923 Tmp6 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp6, Tmp5, Mask, EVL); 8924 Tmp4 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp3, Mask, EVL); 8925 Tmp2 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp1, Mask, EVL); 8926 Tmp8 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp6, Mask, EVL); 8927 Tmp4 = DAG.getNode(ISD::VP_OR, dl, VT, Tmp4, Tmp2, Mask, EVL); 8928 return DAG.getNode(ISD::VP_OR, dl, VT, Tmp8, Tmp4, Mask, EVL); 8929 } 8930 } 8931 8932 SDValue TargetLowering::expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const { 8933 SDLoc dl(N); 8934 EVT VT = N->getValueType(0); 8935 SDValue Op = N->getOperand(0); 8936 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 8937 unsigned Sz = VT.getScalarSizeInBits(); 8938 8939 SDValue Tmp, Tmp2, Tmp3; 8940 8941 // If we can, perform BSWAP first and then the mask+swap the i4, then i2 8942 // and finally the i1 pairs. 8943 // TODO: We can easily support i4/i2 legal types if any target ever does. 8944 if (Sz >= 8 && isPowerOf2_32(Sz)) { 8945 // Create the masks - repeating the pattern every byte. 8946 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F)); 8947 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33)); 8948 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55)); 8949 8950 // BSWAP if the type is wider than a single byte. 8951 Tmp = (Sz > 8 ? 
DAG.getNode(ISD::BSWAP, dl, VT, Op) : Op); 8952 8953 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4) 8954 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT)); 8955 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask4, dl, VT)); 8956 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT)); 8957 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT)); 8958 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8959 8960 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2) 8961 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT)); 8962 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask2, dl, VT)); 8963 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT)); 8964 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT)); 8965 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8966 8967 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1) 8968 Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT)); 8969 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Mask1, dl, VT)); 8970 Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT)); 8971 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT)); 8972 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8973 return Tmp; 8974 } 8975 8976 Tmp = DAG.getConstant(0, dl, VT); 8977 for (unsigned I = 0, J = Sz-1; I < Sz; ++I, --J) { 8978 if (I < J) 8979 Tmp2 = 8980 DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(J - I, dl, SHVT)); 8981 else 8982 Tmp2 = 8983 DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT)); 8984 8985 APInt Shift = APInt::getOneBitSet(Sz, J); 8986 Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT)); 8987 Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2); 8988 } 8989 8990 return Tmp; 8991 } 8992 8993 SDValue TargetLowering::expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const { 8994 assert(N->getOpcode() == ISD::VP_BITREVERSE); 8995 8996 SDLoc dl(N); 8997 EVT VT = N->getValueType(0); 8998 SDValue Op = N->getOperand(0); 8999 SDValue Mask = N->getOperand(1); 9000 SDValue EVL = N->getOperand(2); 9001 EVT SHVT = getShiftAmountTy(VT, DAG.getDataLayout()); 9002 unsigned Sz = VT.getScalarSizeInBits(); 9003 9004 SDValue Tmp, Tmp2, Tmp3; 9005 9006 // If we can, perform BSWAP first and then the mask+swap the i4, then i2 9007 // and finally the i1 pairs. 9008 // TODO: We can easily support i4/i2 legal types if any target ever does. 9009 if (Sz >= 8 && isPowerOf2_32(Sz)) { 9010 // Create the masks - repeating the pattern every byte. 9011 APInt Mask4 = APInt::getSplat(Sz, APInt(8, 0x0F)); 9012 APInt Mask2 = APInt::getSplat(Sz, APInt(8, 0x33)); 9013 APInt Mask1 = APInt::getSplat(Sz, APInt(8, 0x55)); 9014 9015 // BSWAP if the type is wider than a single byte. 9016 Tmp = (Sz > 8 ? 
DAG.getNode(ISD::VP_BSWAP, dl, VT, Op, Mask, EVL) : Op); 9017 9018 // swap i4: ((V >> 4) & 0x0F) | ((V & 0x0F) << 4) 9019 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(4, dl, SHVT), 9020 Mask, EVL); 9021 Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2, 9022 DAG.getConstant(Mask4, dl, VT), Mask, EVL); 9023 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask4, dl, VT), 9024 Mask, EVL); 9025 Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(4, dl, SHVT), 9026 Mask, EVL); 9027 Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL); 9028 9029 // swap i2: ((V >> 2) & 0x33) | ((V & 0x33) << 2) 9030 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(2, dl, SHVT), 9031 Mask, EVL); 9032 Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2, 9033 DAG.getConstant(Mask2, dl, VT), Mask, EVL); 9034 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask2, dl, VT), 9035 Mask, EVL); 9036 Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(2, dl, SHVT), 9037 Mask, EVL); 9038 Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL); 9039 9040 // swap i1: ((V >> 1) & 0x55) | ((V & 0x55) << 1) 9041 Tmp2 = DAG.getNode(ISD::VP_LSHR, dl, VT, Tmp, DAG.getConstant(1, dl, SHVT), 9042 Mask, EVL); 9043 Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp2, 9044 DAG.getConstant(Mask1, dl, VT), Mask, EVL); 9045 Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT, Tmp, DAG.getConstant(Mask1, dl, VT), 9046 Mask, EVL); 9047 Tmp3 = DAG.getNode(ISD::VP_SHL, dl, VT, Tmp3, DAG.getConstant(1, dl, SHVT), 9048 Mask, EVL); 9049 Tmp = DAG.getNode(ISD::VP_OR, dl, VT, Tmp2, Tmp3, Mask, EVL); 9050 return Tmp; 9051 } 9052 return SDValue(); 9053 } 9054 9055 std::pair<SDValue, SDValue> 9056 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 9057 SelectionDAG &DAG) const { 9058 SDLoc SL(LD); 9059 SDValue Chain = LD->getChain(); 9060 SDValue BasePTR = LD->getBasePtr(); 9061 EVT SrcVT = LD->getMemoryVT(); 9062 EVT DstVT = LD->getValueType(0); 9063 ISD::LoadExtType ExtType = LD->getExtensionType(); 9064 9065 if (SrcVT.isScalableVector()) 9066 report_fatal_error("Cannot scalarize scalable vector loads"); 9067 9068 unsigned NumElem = SrcVT.getVectorNumElements(); 9069 9070 EVT SrcEltVT = SrcVT.getScalarType(); 9071 EVT DstEltVT = DstVT.getScalarType(); 9072 9073 // A vector must always be stored in memory as-is, i.e. without any padding 9074 // between the elements, since various code depend on it, e.g. in the 9075 // handling of a bitcast of a vector type to int, which may be done with a 9076 // vector store followed by an integer load. A vector that does not have 9077 // elements that are byte-sized must therefore be stored as an integer 9078 // built out of the extracted vector elements. 9079 if (!SrcEltVT.isByteSized()) { 9080 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 9081 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 9082 9083 unsigned NumSrcBits = SrcVT.getSizeInBits(); 9084 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 9085 9086 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 9087 SDValue SrcEltBitMask = DAG.getConstant( 9088 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 9089 9090 // Load the whole vector and avoid masking off the top bits as it makes 9091 // the codegen worse. 
9092 SDValue Load = 9093 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 9094 LD->getPointerInfo(), SrcIntVT, LD->getOriginalAlign(), 9095 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 9096 9097 SmallVector<SDValue, 8> Vals; 9098 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 9099 unsigned ShiftIntoIdx = 9100 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 9101 SDValue ShiftAmount = 9102 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 9103 LoadVT, SL, /*LegalTypes=*/false); 9104 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 9105 SDValue Elt = 9106 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 9107 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 9108 9109 if (ExtType != ISD::NON_EXTLOAD) { 9110 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 9111 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 9112 } 9113 9114 Vals.push_back(Scalar); 9115 } 9116 9117 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 9118 return std::make_pair(Value, Load.getValue(1)); 9119 } 9120 9121 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 9122 assert(SrcEltVT.isByteSized()); 9123 9124 SmallVector<SDValue, 8> Vals; 9125 SmallVector<SDValue, 8> LoadChains; 9126 9127 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 9128 SDValue ScalarLoad = 9129 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 9130 LD->getPointerInfo().getWithOffset(Idx * Stride), 9131 SrcEltVT, LD->getOriginalAlign(), 9132 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 9133 9134 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, TypeSize::Fixed(Stride)); 9135 9136 Vals.push_back(ScalarLoad.getValue(0)); 9137 LoadChains.push_back(ScalarLoad.getValue(1)); 9138 } 9139 9140 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 9141 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 9142 9143 return std::make_pair(Value, NewChain); 9144 } 9145 9146 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 9147 SelectionDAG &DAG) const { 9148 SDLoc SL(ST); 9149 9150 SDValue Chain = ST->getChain(); 9151 SDValue BasePtr = ST->getBasePtr(); 9152 SDValue Value = ST->getValue(); 9153 EVT StVT = ST->getMemoryVT(); 9154 9155 if (StVT.isScalableVector()) 9156 report_fatal_error("Cannot scalarize scalable vector stores"); 9157 9158 // The type of the data we want to save 9159 EVT RegVT = Value.getValueType(); 9160 EVT RegSclVT = RegVT.getScalarType(); 9161 9162 // The type of data as saved in memory. 9163 EVT MemSclVT = StVT.getScalarType(); 9164 9165 unsigned NumElem = StVT.getVectorNumElements(); 9166 9167 // A vector must always be stored in memory as-is, i.e. without any padding 9168 // between the elements, since various code depend on it, e.g. in the 9169 // handling of a bitcast of a vector type to int, which may be done with a 9170 // vector store followed by an integer load. A vector that does not have 9171 // elements that are byte-sized must therefore be stored as an integer 9172 // built out of the extracted vector elements. 
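  //
  // For illustration (example values, not from the surrounding code):
  // storing <4 x i1> <1, 0, 1, 1> packs element 0 into bit 0 on a
  // little-endian target, building the i4 integer 0b1101; on a big-endian
  // target ShiftIntoIdx mirrors the bit positions, building 0b1011.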
9173 if (!MemSclVT.isByteSized()) { 9174 unsigned NumBits = StVT.getSizeInBits(); 9175 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 9176 9177 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 9178 9179 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 9180 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 9181 DAG.getVectorIdxConstant(Idx, SL)); 9182 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 9183 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 9184 unsigned ShiftIntoIdx = 9185 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 9186 SDValue ShiftAmount = 9187 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 9188 SDValue ShiftedElt = 9189 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 9190 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 9191 } 9192 9193 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 9194 ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 9195 ST->getAAInfo()); 9196 } 9197 9198 // Store Stride in bytes 9199 unsigned Stride = MemSclVT.getSizeInBits() / 8; 9200 assert(Stride && "Zero stride!"); 9201 // Extract each of the elements from the original vector and save them into 9202 // memory individually. 9203 SmallVector<SDValue, 8> Stores; 9204 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 9205 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 9206 DAG.getVectorIdxConstant(Idx, SL)); 9207 9208 SDValue Ptr = 9209 DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Idx * Stride)); 9210 9211 // This scalar TruncStore may be illegal, but we legalize it later. 9212 SDValue Store = DAG.getTruncStore( 9213 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 9214 MemSclVT, ST->getOriginalAlign(), ST->getMemOperand()->getFlags(), 9215 ST->getAAInfo()); 9216 9217 Stores.push_back(Store); 9218 } 9219 9220 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 9221 } 9222 9223 std::pair<SDValue, SDValue> 9224 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 9225 assert(LD->getAddressingMode() == ISD::UNINDEXED && 9226 "unaligned indexed loads not implemented!"); 9227 SDValue Chain = LD->getChain(); 9228 SDValue Ptr = LD->getBasePtr(); 9229 EVT VT = LD->getValueType(0); 9230 EVT LoadedVT = LD->getMemoryVT(); 9231 SDLoc dl(LD); 9232 auto &MF = DAG.getMachineFunction(); 9233 9234 if (VT.isFloatingPoint() || VT.isVector()) { 9235 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 9236 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 9237 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 9238 LoadedVT.isVector()) { 9239 // Scalarize the load and let the individual components be handled. 9240 return scalarizeVectorLoad(LD, DAG); 9241 } 9242 9243 // Expand to a (misaligned) integer load of the same size, 9244 // then bitconvert to floating point or vector. 9245 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 9246 LD->getMemOperand()); 9247 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 9248 if (LoadedVT != VT) 9249 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 9250 ISD::ANY_EXTEND, dl, VT, Result); 9251 9252 return std::make_pair(Result, newLoad.getValue(1)); 9253 } 9254 9255 // Copy the value to a (aligned) stack slot using (unaligned) integer 9256 // loads and stores, then do a (aligned) load from the stack slot. 
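    // (Illustrative walk-through, assuming a hypothetical target whose only
    // legal integer registers are 32-bit: a misaligned f64 load becomes two
    // unaligned i32 loads, two i32 stores into an aligned 8-byte stack
    // slot, and finally one aligned f64 load from that slot.)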
9257    MVT RegVT = getRegisterType(*DAG.getContext(), intVT);
9258    unsigned LoadedBytes = LoadedVT.getStoreSize();
9259    unsigned RegBytes = RegVT.getSizeInBits() / 8;
9260    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;
9261
9262    // Make sure the stack slot is also aligned for the register type.
9263    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);
9264    auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex();
9265    SmallVector<SDValue, 8> Stores;
9266    SDValue StackPtr = StackBase;
9267    unsigned Offset = 0;
9268
9269    EVT PtrVT = Ptr.getValueType();
9270    EVT StackPtrVT = StackPtr.getValueType();
9271
9272    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
9273    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
9274
9275    // Do all but one of the copies using the full register width.
9276    for (unsigned i = 1; i < NumRegs; i++) {
9277      // Load one integer register's worth from the original location.
9278      SDValue Load = DAG.getLoad(
9279          RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset),
9280          LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9281          LD->getAAInfo());
9282      // Follow the load with a store to the stack slot. Remember the store.
9283      Stores.push_back(DAG.getStore(
9284          Load.getValue(1), dl, Load, StackPtr,
9285          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)));
9286      // Increment the pointers.
9287      Offset += RegBytes;
9288
9289      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
9290      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
9291    }
9292
9293    // The last copy may be partial. Do an extending load.
9294    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
9295                                  8 * (LoadedBytes - Offset));
9296    SDValue Load =
9297        DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
9298                       LD->getPointerInfo().getWithOffset(Offset), MemVT,
9299                       LD->getOriginalAlign(), LD->getMemOperand()->getFlags(),
9300                       LD->getAAInfo());
9301    // Follow the load with a store to the stack slot. Remember the store.
9302    // On big-endian machines this requires a truncating store to ensure
9303    // that the bits end up in the right place.
9304    Stores.push_back(DAG.getTruncStore(
9305        Load.getValue(1), dl, Load, StackPtr,
9306        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT));
9307
9308    // The order of the stores doesn't matter - say it with a TokenFactor.
9309    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9310
9311    // Finally, perform the original load, only redirected to the stack slot.
9312    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
9313                          MachinePointerInfo::getFixedStack(MF, FrameIndex, 0),
9314                          LoadedVT);
9315
9316    // Callers expect a MERGE_VALUES node.
9317    return std::make_pair(Load, TF);
9318  }
9319
9320  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
9321         "Unaligned load of unsupported type.");
9322
9323  // Compute the new VT that is half the size of the old one. This is an
9324  // integer MVT.
9325  unsigned NumBits = LoadedVT.getSizeInBits();
9326  EVT NewLoadedVT;
9327  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
9328  NumBits >>= 1;
9329
9330  Align Alignment = LD->getOriginalAlign();
9331  unsigned IncrementSize = NumBits / 8;
9332  ISD::LoadExtType HiExtType = LD->getExtensionType();
9333
9334  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
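  //
  // Worked example (little-endian): an unaligned i16 load of 0xABCD, held
  // in memory as the bytes CD AB, becomes Lo = zextload i8 @ Ptr = 0x00CD
  // and Hi = zextload i8 @ Ptr+1 = 0x00AB; the code below rebuilds
  // (Hi << 8) | Lo == 0xABCD. Lo is always zero-extended so that the OR
  // cannot pick up stray high bits.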
9335 if (HiExtType == ISD::NON_EXTLOAD) 9336 HiExtType = ISD::ZEXTLOAD; 9337 9338 // Load the value in two parts 9339 SDValue Lo, Hi; 9340 if (DAG.getDataLayout().isLittleEndian()) { 9341 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 9342 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 9343 LD->getAAInfo()); 9344 9345 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 9346 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 9347 LD->getPointerInfo().getWithOffset(IncrementSize), 9348 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 9349 LD->getAAInfo()); 9350 } else { 9351 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 9352 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 9353 LD->getAAInfo()); 9354 9355 Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize)); 9356 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 9357 LD->getPointerInfo().getWithOffset(IncrementSize), 9358 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 9359 LD->getAAInfo()); 9360 } 9361 9362 // aggregate the two parts 9363 SDValue ShiftAmount = 9364 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 9365 DAG.getDataLayout())); 9366 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 9367 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 9368 9369 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 9370 Hi.getValue(1)); 9371 9372 return std::make_pair(Result, TF); 9373 } 9374 9375 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 9376 SelectionDAG &DAG) const { 9377 assert(ST->getAddressingMode() == ISD::UNINDEXED && 9378 "unaligned indexed stores not implemented!"); 9379 SDValue Chain = ST->getChain(); 9380 SDValue Ptr = ST->getBasePtr(); 9381 SDValue Val = ST->getValue(); 9382 EVT VT = Val.getValueType(); 9383 Align Alignment = ST->getOriginalAlign(); 9384 auto &MF = DAG.getMachineFunction(); 9385 EVT StoreMemVT = ST->getMemoryVT(); 9386 9387 SDLoc dl(ST); 9388 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 9389 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 9390 if (isTypeLegal(intVT)) { 9391 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 9392 StoreMemVT.isVector()) { 9393 // Scalarize the store and let the individual components be handled. 9394 SDValue Result = scalarizeVectorStore(ST, DAG); 9395 return Result; 9396 } 9397 // Expand to a bitconvert of the value to the integer type of the 9398 // same size, then a (misaligned) int store. 9399 // FIXME: Does not handle truncating floating point stores! 9400 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 9401 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 9402 Alignment, ST->getMemOperand()->getFlags()); 9403 return Result; 9404 } 9405 // Do a (aligned) store to a stack slot, then copy from the stack slot 9406 // to the final destination using (unaligned) integer loads and stores. 9407 MVT RegVT = getRegisterType( 9408 *DAG.getContext(), 9409 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 9410 EVT PtrVT = Ptr.getValueType(); 9411 unsigned StoredBytes = StoreMemVT.getStoreSize(); 9412 unsigned RegBytes = RegVT.getSizeInBits() / 8; 9413 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 9414 9415 // Make sure the stack slot is also aligned for the register type. 
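    // (This mirrors the unaligned-load expansion above: on the same
    // hypothetical 32-bit-register target, a misaligned f64 store becomes
    // one aligned f64 store into the slot followed by two i32 load/store
    // pairs that copy the bytes out to the real destination.)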
9416    SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT);
9417    auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
9418
9419    // Perform the original store, only redirected to the stack slot.
9420    SDValue Store = DAG.getTruncStore(
9421        Chain, dl, Val, StackPtr,
9422        MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT);
9423
9424    EVT StackPtrVT = StackPtr.getValueType();
9425
9426    SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT);
9427    SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT);
9428    SmallVector<SDValue, 8> Stores;
9429    unsigned Offset = 0;
9430
9431    // Do all but one of the copies using the full register width.
9432    for (unsigned i = 1; i < NumRegs; i++) {
9433      // Load one integer register's worth from the stack slot.
9434      SDValue Load = DAG.getLoad(
9435          RegVT, dl, Store, StackPtr,
9436          MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset));
9437      // Store it to the final location. Remember the store.
9438      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
9439                                    ST->getPointerInfo().getWithOffset(Offset),
9440                                    ST->getOriginalAlign(),
9441                                    ST->getMemOperand()->getFlags()));
9442      // Increment the pointers.
9443      Offset += RegBytes;
9444      StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement);
9445      Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement);
9446    }
9447
9448    // The last store may be partial. Do a truncating store. On big-endian
9449    // machines this requires an extending load from the stack slot to ensure
9450    // that the bits are in the right place.
9451    EVT LoadMemVT =
9452        EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset));
9453
9454    // Load from the stack slot.
9455    SDValue Load = DAG.getExtLoad(
9456        ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
9457        MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT);
9458
9459    Stores.push_back(
9460        DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
9461                          ST->getPointerInfo().getWithOffset(Offset), LoadMemVT,
9462                          ST->getOriginalAlign(),
9463                          ST->getMemOperand()->getFlags(), ST->getAAInfo()));
9464    // The order of the stores doesn't matter - say it with a TokenFactor.
9465    SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
9466    return Result;
9467  }
9468
9469  assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() &&
9470         "Unaligned store of unknown type.");
9471  // Get the half-size VT.
9472  EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext());
9473  unsigned NumBits = NewStoredVT.getFixedSizeInBits();
9474  unsigned IncrementSize = NumBits / 8;
9475
9476  // Divide the stored value into two parts.
9477  SDValue ShiftAmount = DAG.getConstant(
9478      NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout()));
9479  SDValue Lo = Val;
9480  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);
9481
9482  // Store the two parts.
9483  SDValue Store1, Store2;
9484  Store1 = DAG.getTruncStore(Chain, dl,
9485                             DAG.getDataLayout().isLittleEndian() ? Lo : Hi,
9486                             Ptr, ST->getPointerInfo(), NewStoredVT, Alignment,
9487                             ST->getMemOperand()->getFlags());
9488
9489  Ptr = DAG.getObjectPtrOffset(dl, Ptr, TypeSize::Fixed(IncrementSize));
9490  Store2 = DAG.getTruncStore(
9491      Chain, dl, DAG.getDataLayout().isLittleEndian() ?
          Hi : Lo, Ptr,
9492      ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment,
9493      ST->getMemOperand()->getFlags(), ST->getAAInfo());
9494
9495  SDValue Result =
9496      DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
9497  return Result;
9498 }
9499
9500 SDValue
9501 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask,
9502                                        const SDLoc &DL, EVT DataVT,
9503                                        SelectionDAG &DAG,
9504                                        bool IsCompressedMemory) const {
9505   SDValue Increment;
9506   EVT AddrVT = Addr.getValueType();
9507   EVT MaskVT = Mask.getValueType();
9508   assert(DataVT.getVectorElementCount() == MaskVT.getVectorElementCount() &&
9509          "Incompatible types of Data and Mask");
9510   if (IsCompressedMemory) {
9511     if (DataVT.isScalableVector())
9512       report_fatal_error(
9513           "Cannot currently handle compressed memory with scalable vectors");
9514     // Increment the pointer according to the number of '1's in the mask.
9515     EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits());
9516     SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask);
9517     if (MaskIntVT.getSizeInBits() < 32) {
9518       MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg);
9519       MaskIntVT = MVT::i32;
9520     }
9521
9522     // Count '1's with POPCNT.
9523     Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg);
9524     Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT);
9525     // Scale is the element size in bytes.
9526     SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL,
9527                                     AddrVT);
9528     Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale);
9529   } else if (DataVT.isScalableVector()) {
9530     Increment = DAG.getVScale(DL, AddrVT,
9531                               APInt(AddrVT.getFixedSizeInBits(),
9532                                     DataVT.getStoreSize().getKnownMinValue()));
9533   } else
9534     Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
9535
9536   return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment);
9537 }
9538
9539 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, SDValue Idx,
9540                                        EVT VecVT, const SDLoc &dl,
9541                                        ElementCount SubEC) {
9542   assert(!(SubEC.isScalable() && VecVT.isFixedLengthVector()) &&
9543          "Cannot index a scalable vector within a fixed-width vector");
9544
9545   unsigned NElts = VecVT.getVectorMinNumElements();
9546   unsigned NumSubElts = SubEC.getKnownMinValue();
9547   EVT IdxVT = Idx.getValueType();
9548
9549   if (VecVT.isScalableVector() && !SubEC.isScalable()) {
9550     // If this is a constant index and we know that the value plus the number
9551     // of elements in the subvector minus one is less than the minimum number
9552     // of elements, then it's safe to return Idx.
9553     if (auto *IdxCst = dyn_cast<ConstantSDNode>(Idx))
9554       if (IdxCst->getZExtValue() + (NumSubElts - 1) < NElts)
9555         return Idx;
9556     SDValue VS =
9557         DAG.getVScale(dl, IdxVT, APInt(IdxVT.getFixedSizeInBits(), NElts));
9558     unsigned SubOpcode = NumSubElts <= NElts ? ISD::SUB : ISD::USUBSAT;
9559     SDValue Sub = DAG.getNode(SubOpcode, dl, IdxVT, VS,
9560                               DAG.getConstant(NumSubElts, dl, IdxVT));
9561     return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, Sub);
9562   }
9563   if (isPowerOf2_32(NElts) && NumSubElts == 1) {
9564     APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), Log2_32(NElts));
9565     return DAG.getNode(ISD::AND, dl, IdxVT, Idx,
9566                        DAG.getConstant(Imm, dl, IdxVT));
9567   }
9568   unsigned MaxIndex = NumSubElts < NElts ?
      NElts - NumSubElts : 0;
9569   return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx,
9570                      DAG.getConstant(MaxIndex, dl, IdxVT));
9571 }
9572
9573 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG,
9574                                                 SDValue VecPtr, EVT VecVT,
9575                                                 SDValue Index) const {
9576   return getVectorSubVecPointer(
9577       DAG, VecPtr, VecVT,
9578       EVT::getVectorVT(*DAG.getContext(), VecVT.getVectorElementType(), 1),
9579       Index);
9580 }
9581
9582 SDValue TargetLowering::getVectorSubVecPointer(SelectionDAG &DAG,
9583                                                SDValue VecPtr, EVT VecVT,
9584                                                EVT SubVecVT,
9585                                                SDValue Index) const {
9586   SDLoc dl(Index);
9587   // Make sure the index type is big enough to compute in.
9588   Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType());
9589
9590   EVT EltVT = VecVT.getVectorElementType();
9591
9592   // Calculate the element offset and add it to the pointer.
9593   unsigned EltSize = EltVT.getFixedSizeInBits() / 8; // FIXME: should be ABI size.
9594   assert(EltSize * 8 == EltVT.getFixedSizeInBits() &&
9595          "Converting bits to bytes lost precision");
9596   assert(SubVecVT.getVectorElementType() == EltVT &&
9597          "Sub-vector must be a vector with matching element type");
9598   Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl,
9599                                   SubVecVT.getVectorElementCount());
9600
9601   EVT IdxVT = Index.getValueType();
9602   if (SubVecVT.isScalableVector())
9603     Index =
9604         DAG.getNode(ISD::MUL, dl, IdxVT, Index,
9605                     DAG.getVScale(dl, IdxVT, APInt(IdxVT.getSizeInBits(), 1)));
9606
9607   Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
9608                       DAG.getConstant(EltSize, dl, IdxVT));
9609   return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
9610 }
9611
9612 //===----------------------------------------------------------------------===//
9613 // Implementation of Emulated TLS Model
9614 //===----------------------------------------------------------------------===//
9615
9616 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
9617                                                 SelectionDAG &DAG) const {
9618   // Access to the address of TLS variable xyz is lowered to a function call:
9619   // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
9620   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9621   PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
9622   SDLoc dl(GA);
9623
9624   ArgListTy Args;
9625   ArgListEntry Entry;
9626   std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
9627   Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
9628   StringRef EmuTlsVarName(NameString);
9629   GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
9630   assert(EmuTlsVar && "Cannot find EmuTlsVar ");
9631   Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
9632   Entry.Ty = VoidPtrType;
9633   Args.push_back(Entry);
9634
9635   SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
9636
9637   TargetLowering::CallLoweringInfo CLI(DAG);
9638   CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
9639   CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
9640   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
9641
9642   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
9643   // calls. At least for X86 targets; maybe good for other targets too?
9644   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9645   MFI.setAdjustsStack(true); // Is this only for the X86 target?
9646 MFI.setHasCalls(true); 9647 9648 assert((GA->getOffset() == 0) && 9649 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 9650 return CallResult.first; 9651 } 9652 9653 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 9654 SelectionDAG &DAG) const { 9655 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 9656 if (!isCtlzFast()) 9657 return SDValue(); 9658 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 9659 SDLoc dl(Op); 9660 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 9661 if (C->isZero() && CC == ISD::SETEQ) { 9662 EVT VT = Op.getOperand(0).getValueType(); 9663 SDValue Zext = Op.getOperand(0); 9664 if (VT.bitsLT(MVT::i32)) { 9665 VT = MVT::i32; 9666 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 9667 } 9668 unsigned Log2b = Log2_32(VT.getSizeInBits()); 9669 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 9670 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 9671 DAG.getConstant(Log2b, dl, MVT::i32)); 9672 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 9673 } 9674 } 9675 return SDValue(); 9676 } 9677 9678 SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const { 9679 SDValue Op0 = Node->getOperand(0); 9680 SDValue Op1 = Node->getOperand(1); 9681 EVT VT = Op0.getValueType(); 9682 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 9683 unsigned Opcode = Node->getOpcode(); 9684 SDLoc DL(Node); 9685 9686 // umax(x,1) --> sub(x,cmpeq(x,0)) iff cmp result is allbits 9687 if (Opcode == ISD::UMAX && llvm::isOneOrOneSplat(Op1, true) && BoolVT == VT && 9688 getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 9689 Op0 = DAG.getFreeze(Op0); 9690 SDValue Zero = DAG.getConstant(0, DL, VT); 9691 return DAG.getNode(ISD::SUB, DL, VT, Op0, 9692 DAG.getSetCC(DL, VT, Op0, Zero, ISD::SETEQ)); 9693 } 9694 9695 // umin(x,y) -> sub(x,usubsat(x,y)) 9696 // TODO: Missing freeze(Op0)? 9697 if (Opcode == ISD::UMIN && isOperationLegal(ISD::SUB, VT) && 9698 isOperationLegal(ISD::USUBSAT, VT)) { 9699 return DAG.getNode(ISD::SUB, DL, VT, Op0, 9700 DAG.getNode(ISD::USUBSAT, DL, VT, Op0, Op1)); 9701 } 9702 9703 // umax(x,y) -> add(x,usubsat(y,x)) 9704 // TODO: Missing freeze(Op0)? 9705 if (Opcode == ISD::UMAX && isOperationLegal(ISD::ADD, VT) && 9706 isOperationLegal(ISD::USUBSAT, VT)) { 9707 return DAG.getNode(ISD::ADD, DL, VT, Op0, 9708 DAG.getNode(ISD::USUBSAT, DL, VT, Op1, Op0)); 9709 } 9710 9711 // FIXME: Should really try to split the vector in case it's legal on a 9712 // subvector. 9713 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 9714 return DAG.UnrollVectorOp(Node); 9715 9716 // Attempt to find an existing SETCC node that we can reuse. 9717 // TODO: Do we need a generic doesSETCCNodeExist? 9718 // TODO: Missing freeze(Op0)/freeze(Op1)? 
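  //
  // For example, if the DAG already contains setcc(A, B, setlt), an SMAX is
  // expanded as select(setlt(A, B), B, A), reusing that node through the
  // commuted predicate instead of introducing a fresh setgt node.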
9719 auto buildMinMax = [&](ISD::CondCode PrefCC, ISD::CondCode AltCC, 9720 ISD::CondCode PrefCommuteCC, 9721 ISD::CondCode AltCommuteCC) { 9722 SDVTList BoolVTList = DAG.getVTList(BoolVT); 9723 for (ISD::CondCode CC : {PrefCC, AltCC}) { 9724 if (DAG.doesNodeExist(ISD::SETCC, BoolVTList, 9725 {Op0, Op1, DAG.getCondCode(CC)})) { 9726 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 9727 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 9728 } 9729 } 9730 for (ISD::CondCode CC : {PrefCommuteCC, AltCommuteCC}) { 9731 if (DAG.doesNodeExist(ISD::SETCC, BoolVTList, 9732 {Op0, Op1, DAG.getCondCode(CC)})) { 9733 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, CC); 9734 return DAG.getSelect(DL, VT, Cond, Op1, Op0); 9735 } 9736 } 9737 SDValue Cond = DAG.getSetCC(DL, BoolVT, Op0, Op1, PrefCC); 9738 return DAG.getSelect(DL, VT, Cond, Op0, Op1); 9739 }; 9740 9741 // Expand Y = MAX(A, B) -> Y = (A > B) ? A : B 9742 // -> Y = (A < B) ? B : A 9743 // -> Y = (A >= B) ? A : B 9744 // -> Y = (A <= B) ? B : A 9745 switch (Opcode) { 9746 case ISD::SMAX: 9747 return buildMinMax(ISD::SETGT, ISD::SETGE, ISD::SETLT, ISD::SETLE); 9748 case ISD::SMIN: 9749 return buildMinMax(ISD::SETLT, ISD::SETLE, ISD::SETGT, ISD::SETGE); 9750 case ISD::UMAX: 9751 return buildMinMax(ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE); 9752 case ISD::UMIN: 9753 return buildMinMax(ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE); 9754 } 9755 9756 llvm_unreachable("How did we get here?"); 9757 } 9758 9759 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 9760 unsigned Opcode = Node->getOpcode(); 9761 SDValue LHS = Node->getOperand(0); 9762 SDValue RHS = Node->getOperand(1); 9763 EVT VT = LHS.getValueType(); 9764 SDLoc dl(Node); 9765 9766 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 9767 assert(VT.isInteger() && "Expected operands to be integers"); 9768 9769 // usub.sat(a, b) -> umax(a, b) - b 9770 if (Opcode == ISD::USUBSAT && isOperationLegal(ISD::UMAX, VT)) { 9771 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 9772 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 9773 } 9774 9775 // uadd.sat(a, b) -> umin(a, ~b) + b 9776 if (Opcode == ISD::UADDSAT && isOperationLegal(ISD::UMIN, VT)) { 9777 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 9778 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 9779 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 9780 } 9781 9782 unsigned OverflowOp; 9783 switch (Opcode) { 9784 case ISD::SADDSAT: 9785 OverflowOp = ISD::SADDO; 9786 break; 9787 case ISD::UADDSAT: 9788 OverflowOp = ISD::UADDO; 9789 break; 9790 case ISD::SSUBSAT: 9791 OverflowOp = ISD::SSUBO; 9792 break; 9793 case ISD::USUBSAT: 9794 OverflowOp = ISD::USUBO; 9795 break; 9796 default: 9797 llvm_unreachable("Expected method to receive signed or unsigned saturation " 9798 "addition or subtraction node."); 9799 } 9800 9801 // FIXME: Should really try to split the vector in case it's legal on a 9802 // subvector. 
9803 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 9804 return DAG.UnrollVectorOp(Node); 9805 9806 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 9807 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 9808 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 9809 SDValue SumDiff = Result.getValue(0); 9810 SDValue Overflow = Result.getValue(1); 9811 SDValue Zero = DAG.getConstant(0, dl, VT); 9812 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 9813 9814 if (Opcode == ISD::UADDSAT) { 9815 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 9816 // (LHS + RHS) | OverflowMask 9817 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 9818 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 9819 } 9820 // Overflow ? 0xffff.... : (LHS + RHS) 9821 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 9822 } 9823 9824 if (Opcode == ISD::USUBSAT) { 9825 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 9826 // (LHS - RHS) & ~OverflowMask 9827 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 9828 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 9829 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 9830 } 9831 // Overflow ? 0 : (LHS - RHS) 9832 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 9833 } 9834 9835 if (Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) { 9836 APInt MinVal = APInt::getSignedMinValue(BitWidth); 9837 APInt MaxVal = APInt::getSignedMaxValue(BitWidth); 9838 9839 KnownBits KnownLHS = DAG.computeKnownBits(LHS); 9840 KnownBits KnownRHS = DAG.computeKnownBits(RHS); 9841 9842 // If either of the operand signs are known, then they are guaranteed to 9843 // only saturate in one direction. If non-negative they will saturate 9844 // towards SIGNED_MAX, if negative they will saturate towards SIGNED_MIN. 9845 // 9846 // In the case of ISD::SSUBSAT, 'x - y' is equivalent to 'x + (-y)', so the 9847 // sign of 'y' has to be flipped. 9848 9849 bool LHSIsNonNegative = KnownLHS.isNonNegative(); 9850 bool RHSIsNonNegative = Opcode == ISD::SADDSAT ? KnownRHS.isNonNegative() 9851 : KnownRHS.isNegative(); 9852 if (LHSIsNonNegative || RHSIsNonNegative) { 9853 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 9854 return DAG.getSelect(dl, VT, Overflow, SatMax, SumDiff); 9855 } 9856 9857 bool LHSIsNegative = KnownLHS.isNegative(); 9858 bool RHSIsNegative = Opcode == ISD::SADDSAT ? KnownRHS.isNegative() 9859 : KnownRHS.isNonNegative(); 9860 if (LHSIsNegative || RHSIsNegative) { 9861 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 9862 return DAG.getSelect(dl, VT, Overflow, SatMin, SumDiff); 9863 } 9864 } 9865 9866 // Overflow ? 
(SumDiff >> BW) ^ MinVal : SumDiff 9867 APInt MinVal = APInt::getSignedMinValue(BitWidth); 9868 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 9869 SDValue Shift = DAG.getNode(ISD::SRA, dl, VT, SumDiff, 9870 DAG.getConstant(BitWidth - 1, dl, VT)); 9871 Result = DAG.getNode(ISD::XOR, dl, VT, Shift, SatMin); 9872 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 9873 } 9874 9875 SDValue TargetLowering::expandShlSat(SDNode *Node, SelectionDAG &DAG) const { 9876 unsigned Opcode = Node->getOpcode(); 9877 bool IsSigned = Opcode == ISD::SSHLSAT; 9878 SDValue LHS = Node->getOperand(0); 9879 SDValue RHS = Node->getOperand(1); 9880 EVT VT = LHS.getValueType(); 9881 SDLoc dl(Node); 9882 9883 assert((Node->getOpcode() == ISD::SSHLSAT || 9884 Node->getOpcode() == ISD::USHLSAT) && 9885 "Expected a SHLSAT opcode"); 9886 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 9887 assert(VT.isInteger() && "Expected operands to be integers"); 9888 9889 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 9890 return DAG.UnrollVectorOp(Node); 9891 9892 // If LHS != (LHS << RHS) >> RHS, we have overflow and must saturate. 9893 9894 unsigned BW = VT.getScalarSizeInBits(); 9895 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 9896 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, LHS, RHS); 9897 SDValue Orig = 9898 DAG.getNode(IsSigned ? ISD::SRA : ISD::SRL, dl, VT, Result, RHS); 9899 9900 SDValue SatVal; 9901 if (IsSigned) { 9902 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(BW), dl, VT); 9903 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(BW), dl, VT); 9904 SDValue Cond = 9905 DAG.getSetCC(dl, BoolVT, LHS, DAG.getConstant(0, dl, VT), ISD::SETLT); 9906 SatVal = DAG.getSelect(dl, VT, Cond, SatMin, SatMax); 9907 } else { 9908 SatVal = DAG.getConstant(APInt::getMaxValue(BW), dl, VT); 9909 } 9910 SDValue Cond = DAG.getSetCC(dl, BoolVT, LHS, Orig, ISD::SETNE); 9911 return DAG.getSelect(dl, VT, Cond, SatVal, Result); 9912 } 9913 9914 SDValue 9915 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 9916 assert((Node->getOpcode() == ISD::SMULFIX || 9917 Node->getOpcode() == ISD::UMULFIX || 9918 Node->getOpcode() == ISD::SMULFIXSAT || 9919 Node->getOpcode() == ISD::UMULFIXSAT) && 9920 "Expected a fixed point multiplication opcode"); 9921 9922 SDLoc dl(Node); 9923 SDValue LHS = Node->getOperand(0); 9924 SDValue RHS = Node->getOperand(1); 9925 EVT VT = LHS.getValueType(); 9926 unsigned Scale = Node->getConstantOperandVal(2); 9927 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 9928 Node->getOpcode() == ISD::UMULFIXSAT); 9929 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 9930 Node->getOpcode() == ISD::SMULFIXSAT); 9931 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 9932 unsigned VTSize = VT.getScalarSizeInBits(); 9933 9934 if (!Scale) { 9935 // [us]mul.fix(a, b, 0) -> mul(a, b) 9936 if (!Saturating) { 9937 if (isOperationLegalOrCustom(ISD::MUL, VT)) 9938 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 9939 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 9940 SDValue Result = 9941 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 9942 SDValue Product = Result.getValue(0); 9943 SDValue Overflow = Result.getValue(1); 9944 SDValue Zero = DAG.getConstant(0, dl, VT); 9945 9946 APInt MinVal = APInt::getSignedMinValue(VTSize); 9947 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 9948 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 
9949      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
9950      // Xor the inputs; if the resulting sign bit is 0 the product will be
9951      // positive, else negative.
9952      SDValue Xor = DAG.getNode(ISD::XOR, dl, VT, LHS, RHS);
9953      SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Xor, Zero, ISD::SETLT);
9954      Result = DAG.getSelect(dl, VT, ProdNeg, SatMin, SatMax);
9955      return DAG.getSelect(dl, VT, Overflow, Result, Product);
9956    } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) {
9957      SDValue Result =
9958          DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS);
9959      SDValue Product = Result.getValue(0);
9960      SDValue Overflow = Result.getValue(1);
9961
9962      APInt MaxVal = APInt::getMaxValue(VTSize);
9963      SDValue SatMax = DAG.getConstant(MaxVal, dl, VT);
9964      return DAG.getSelect(dl, VT, Overflow, SatMax, Product);
9965    }
9966  }
9967
9968  assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) &&
9969         "Expected scale to be less than the number of bits if signed or at "
9970         "most the number of bits if unsigned.");
9971  assert(LHS.getValueType() == RHS.getValueType() &&
9972         "Expected both operands to be the same type");
9973
9974  // Get the upper and lower bits of the result.
9975  SDValue Lo, Hi;
9976  unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI;
9977  unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU;
9978  if (isOperationLegalOrCustom(LoHiOp, VT)) {
9979    SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS);
9980    Lo = Result.getValue(0);
9981    Hi = Result.getValue(1);
9982  } else if (isOperationLegalOrCustom(HiOp, VT)) {
9983    Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
9984    Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS);
9985  } else if (VT.isVector()) {
9986    return SDValue();
9987  } else {
9988    report_fatal_error("Unable to expand fixed point multiplication.");
9989  }
9990
9991  if (Scale == VTSize)
9992    // Result is just the top half since we'd be shifting by the width of the
9993    // operand. Overflow is impossible, so this works for both UMULFIX and
9994    // UMULFIXSAT.
9995    return Hi;
9996
9997  // The result will need to be shifted right by the scale since both operands
9998  // are scaled. The result is given to us in 2 halves, so we only want part of
9999  // both in the result.
10000  EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
10001  SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo,
10002                               DAG.getConstant(Scale, dl, ShiftTy));
10003  if (!Saturating)
10004    return Result;
10005
10006  if (!Signed) {
10007    // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the
10008    // widened multiplication) aren't all zeroes.
10009
10010    // Saturate to max if ((Hi >> Scale) != 0),
10011    // which is the same as if (Hi > ((1 << Scale) - 1))
10012    APInt MaxVal = APInt::getMaxValue(VTSize);
10013    SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale),
10014                                      dl, VT);
10015    Result = DAG.getSelectCC(dl, Hi, LowMask,
10016                             DAG.getConstant(MaxVal, dl, VT), Result,
10017                             ISD::SETUGT);
10018
10019    return Result;
10020  }
10021
10022  // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the
10023  // widened multiplication) aren't all ones or all zeroes.
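  //
  // Worked example: i8 smul.fix.sat with Scale == 4 (Q4.4 format). 4.0 * 4.0
  // is 0x40 * 0x40 = 0x1000, so Hi = 0x10 and Lo = 0x00. The funnel shift
  // above produced 0x00, but Hi exceeds the low-bits mask, so the checks
  // below saturate the result to 0x7F (+7.9375).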
10024
10025  SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT);
10026  SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT);
10027
10028  if (Scale == 0) {
10029    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo,
10030                               DAG.getConstant(VTSize - 1, dl, ShiftTy));
10031    SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE);
10032    // Saturate to SatMin if the wide product is negative, and to SatMax if
10033    // the wide product is positive ...
10034    SDValue Zero = DAG.getConstant(0, dl, VT);
10035    SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax,
10036                                               ISD::SETLT);
10037    // ... but only if we overflowed.
10038    return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result);
10039  }
10040
10041  // We handled Scale==0 above, so all the bits to examine are in Hi.
10042
10043  // Saturate to max if ((Hi >> (Scale - 1)) > 0),
10044  // which is the same as if (Hi > (1 << (Scale - 1)) - 1)
10045  SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1),
10046                                    dl, VT);
10047  Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT);
10048  // Saturate to min if ((Hi >> (Scale - 1)) < -1),
10049  // which is the same as if (Hi < (-1 << (Scale - 1)))
10050  SDValue HighMask =
10051      DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1),
10052                      dl, VT);
10053  Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT);
10054  return Result;
10055 }
10056
10057 SDValue
10058 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
10059                                     SDValue LHS, SDValue RHS,
10060                                     unsigned Scale, SelectionDAG &DAG) const {
10061   assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT ||
10062           Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) &&
10063          "Expected a fixed point division opcode");
10064
10065   EVT VT = LHS.getValueType();
10066   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
10067   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
10068   EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
10069
10070   // If there is enough room in the type to upscale the LHS or downscale the
10071   // RHS before the division, we can perform it in this type without having to
10072   // resize. For signed operations, the LHS headroom is the number of
10073   // redundant sign bits, and for unsigned ones it is the number of leading
10074   // zeroes. The headroom for the RHS is the number of trailing zeroes.
10075   unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1
10076                             : DAG.computeKnownBits(LHS).countMinLeadingZeros();
10077   unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros();
10078
10079   // For signed saturating operations, we need to be able to detect true
10080   // integer division overflow; that is, when you have MIN / -EPS. However,
10081   // this is undefined behavior and if we emit divisions that could take such
10082   // values it may cause undesired behavior (arithmetic exceptions on x86, for
10083   // example).
10084   // Avoid this by requiring an extra bit so that we never get this case.
10085   // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale
10086   // signed saturating division, we need to emit a whopping 32-bit division.
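  //
  // Worked example: for an i16 udiv.fix with Scale == 7, if the LHS is known
  // to have at least 8 leading zeroes and the RHS at least 2 trailing
  // zeroes, then LHSLead + RHSTrail == 10 >= Scale, so the LHS is simply
  // shifted up by 7, the RHS is left alone, and a plain UDIV suffices.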
10087   if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed))
10088     return SDValue();
10089
10090   unsigned LHSShift = std::min(LHSLead, Scale);
10091   unsigned RHSShift = Scale - LHSShift;
10092
10093   // At this point, we know that if we shift the LHS up by LHSShift and the
10094   // RHS down by RHSShift, we can emit a regular division with a final scaling
10095   // factor of Scale.
10096
10097   EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout());
10098   if (LHSShift)
10099     LHS = DAG.getNode(ISD::SHL, dl, VT, LHS,
10100                       DAG.getConstant(LHSShift, dl, ShiftTy));
10101   if (RHSShift)
10102     RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS,
10103                       DAG.getConstant(RHSShift, dl, ShiftTy));
10104
10105   SDValue Quot;
10106   if (Signed) {
10107     // For signed operations, if the resulting quotient is negative and the
10108     // remainder is nonzero, subtract 1 from the quotient to round towards
10109     // negative infinity.
10110     SDValue Rem;
10111     // FIXME: Ideally we would always produce an SDIVREM here, but if the
10112     // type isn't legal, SDIVREM cannot be expanded. There is no reason why
10113     // we couldn't just form a libcall, but the type legalizer doesn't do it.
10114     if (isTypeLegal(VT) &&
10115         isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
10116       Quot = DAG.getNode(ISD::SDIVREM, dl,
10117                          DAG.getVTList(VT, VT),
10118                          LHS, RHS);
10119       Rem = Quot.getValue(1);
10120       Quot = Quot.getValue(0);
10121     } else {
10122       Quot = DAG.getNode(ISD::SDIV, dl, VT,
10123                          LHS, RHS);
10124       Rem = DAG.getNode(ISD::SREM, dl, VT,
10125                         LHS, RHS);
10126     }
10127     SDValue Zero = DAG.getConstant(0, dl, VT);
10128     SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
10129     SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
10130     SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
10131     SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
10132     SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
10133                                DAG.getConstant(1, dl, VT));
10134     Quot = DAG.getSelect(dl, VT,
10135                          DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
10136                          Sub1, Quot);
10137   } else
10138     Quot = DAG.getNode(ISD::UDIV, dl, VT,
10139                        LHS, RHS);
10140
10141   return Quot;
10142 }
10143
10144 void TargetLowering::expandUADDSUBO(
10145     SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
10146   SDLoc dl(Node);
10147   SDValue LHS = Node->getOperand(0);
10148   SDValue RHS = Node->getOperand(1);
10149   bool IsAdd = Node->getOpcode() == ISD::UADDO;
10150
10151   // If UADDO_CARRY/USUBO_CARRY is legal, use that instead.
10152   unsigned OpcCarry = IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY;
10153   if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
10154     SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
10155     SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
10156                                     { LHS, RHS, CarryIn });
10157     Result = SDValue(NodeCarry.getNode(), 0);
10158     Overflow = SDValue(NodeCarry.getNode(), 1);
10159     return;
10160   }
10161
10162   Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
10163                        LHS.getValueType(), LHS, RHS);
10164
10165   EVT ResultType = Node->getValueType(1);
10166   EVT SetCCType = getSetCCResultType(
10167       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
10168   SDValue SetCC;
10169   if (IsAdd && isOneConstant(RHS)) {
10170     // Special case: uaddo X, 1 overflowed if X+1 is 0. This potentially
10171     // reduces the live range of X. We assume comparing with 0 is cheap.
10172 // The general case (X + C) < C is not necessarily beneficial. Although we 10173 // reduce the live range of X, we may introduce the materialization of 10174 // constant C. 10175 SetCC = 10176 DAG.getSetCC(dl, SetCCType, Result, 10177 DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ); 10178 } else if (IsAdd && isAllOnesConstant(RHS)) { 10179 // Special case: uaddo X, -1 overflows if X != 0. 10180 SetCC = 10181 DAG.getSetCC(dl, SetCCType, LHS, 10182 DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETNE); 10183 } else { 10184 ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT; 10185 SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC); 10186 } 10187 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 10188 } 10189 10190 void TargetLowering::expandSADDSUBO( 10191 SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const { 10192 SDLoc dl(Node); 10193 SDValue LHS = Node->getOperand(0); 10194 SDValue RHS = Node->getOperand(1); 10195 bool IsAdd = Node->getOpcode() == ISD::SADDO; 10196 10197 Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl, 10198 LHS.getValueType(), LHS, RHS); 10199 10200 EVT ResultType = Node->getValueType(1); 10201 EVT OType = getSetCCResultType( 10202 DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0)); 10203 10204 // If SADDSAT/SSUBSAT is legal, compare results to detect overflow. 10205 unsigned OpcSat = IsAdd ? ISD::SADDSAT : ISD::SSUBSAT; 10206 if (isOperationLegal(OpcSat, LHS.getValueType())) { 10207 SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS); 10208 SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE); 10209 Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType); 10210 return; 10211 } 10212 10213 SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType()); 10214 10215 // For an addition, the result should be less than one of the operands (LHS) 10216 // if and only if the other operand (RHS) is negative, otherwise there will 10217 // be overflow. 10218 // For a subtraction, the result should be less than one of the operands 10219 // (LHS) if and only if the other operand (RHS) is (non-zero) positive, 10220 // otherwise there will be overflow. 10221 SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT); 10222 SDValue ConditionRHS = 10223 DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT); 10224 10225 Overflow = DAG.getBoolExtOrTrunc( 10226 DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl, 10227 ResultType, ResultType); 10228 } 10229 10230 bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result, 10231 SDValue &Overflow, SelectionDAG &DAG) const { 10232 SDLoc dl(Node); 10233 EVT VT = Node->getValueType(0); 10234 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 10235 SDValue LHS = Node->getOperand(0); 10236 SDValue RHS = Node->getOperand(1); 10237 bool isSigned = Node->getOpcode() == ISD::SMULO; 10238 10239 // For power-of-two multiplications we can use a simpler shift expansion. 10240 if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) { 10241 const APInt &C = RHSC->getAPIntValue(); 10242 // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X } 10243 if (C.isPowerOf2()) { 10244 // smulo(x, signed_min) is same as umulo(x, signed_min). 
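      // (For instance, umulo(X, 8) on i8 becomes Result = X << 3 with
      // Overflow = ((X << 3) >> 3) != X; for smulo and X = 48, the shift
      // gives 0x80, and the arithmetic shift back yields -16 != 48, so
      // overflow is reported.)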
10245 bool UseArithShift = isSigned && !C.isMinSignedValue(); 10246 EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout()); 10247 SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy); 10248 Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt); 10249 Overflow = DAG.getSetCC(dl, SetCCVT, 10250 DAG.getNode(UseArithShift ? ISD::SRA : ISD::SRL, 10251 dl, VT, Result, ShiftAmt), 10252 LHS, ISD::SETNE); 10253 return true; 10254 } 10255 } 10256 10257 EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2); 10258 if (VT.isVector()) 10259 WideVT = 10260 EVT::getVectorVT(*DAG.getContext(), WideVT, VT.getVectorElementCount()); 10261 10262 SDValue BottomHalf; 10263 SDValue TopHalf; 10264 static const unsigned Ops[2][3] = 10265 { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, 10266 { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; 10267 if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) { 10268 BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 10269 TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS); 10270 } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) { 10271 BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS, 10272 RHS); 10273 TopHalf = BottomHalf.getValue(1); 10274 } else if (isTypeLegal(WideVT)) { 10275 LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS); 10276 RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS); 10277 SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS); 10278 BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul); 10279 SDValue ShiftAmt = DAG.getConstant(VT.getScalarSizeInBits(), dl, 10280 getShiftAmountTy(WideVT, DAG.getDataLayout())); 10281 TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, 10282 DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt)); 10283 } else { 10284 if (VT.isVector()) 10285 return false; 10286 10287 // We can fall back to a libcall with an illegal type for the MUL if we 10288 // have a libcall big enough. 10289 // Also, we can fall back to a division in some cases, but that's a big 10290 // performance hit in the general case. 10291 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; 10292 if (WideVT == MVT::i16) 10293 LC = RTLIB::MUL_I16; 10294 else if (WideVT == MVT::i32) 10295 LC = RTLIB::MUL_I32; 10296 else if (WideVT == MVT::i64) 10297 LC = RTLIB::MUL_I64; 10298 else if (WideVT == MVT::i128) 10299 LC = RTLIB::MUL_I128; 10300 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!"); 10301 10302 SDValue HiLHS; 10303 SDValue HiRHS; 10304 if (isSigned) { 10305 // The high part is obtained by SRA'ing all but one of the bits of low 10306 // part. 10307 unsigned LoSize = VT.getFixedSizeInBits(); 10308 HiLHS = 10309 DAG.getNode(ISD::SRA, dl, VT, LHS, 10310 DAG.getConstant(LoSize - 1, dl, 10311 getPointerTy(DAG.getDataLayout()))); 10312 HiRHS = 10313 DAG.getNode(ISD::SRA, dl, VT, RHS, 10314 DAG.getConstant(LoSize - 1, dl, 10315 getPointerTy(DAG.getDataLayout()))); 10316 } else { 10317 HiLHS = DAG.getConstant(0, dl, VT); 10318 HiRHS = DAG.getConstant(0, dl, VT); 10319 } 10320 10321 // Here we're passing the 2 arguments explicitly as 4 arguments that are 10322 // pre-lowered to the correct types. This all depends upon WideVT not 10323 // being a legal type for the architecture and thus has to be split to 10324 // two arguments. 
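    // (Concretely: for an i32 [us]mulo reaching this path, LC is
    // RTLIB::MUL_I64 and each i64 multiplicand is passed as a {Lo, Hi}
    // pair of i32 values, with the ordering below chosen to match how the
    // target packs an i64 into a register pair.)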
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
      SDValue Args[] = {LHS, HiLHS, RHS, HiRHS};
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = {HiLHS, LHS, HiRHS, RHS};
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // Same as above.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, DAG.getConstant(0, dl, VT),
                            ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.bitsLT(Overflow.getValueType()))
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());
  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  // Try to use a shuffle reduction for power-of-two vectors.
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // The result type may be wider than the element type.
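  // (For example, a v4i8 VECREDUCE_ADD may be asked to produce an i32; only
  // the low 8 bits of that result are defined by the operation, so an
  // any-extend of the scalarized i8 chain is sufficient.)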
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}

SDValue TargetLowering::expandVecReduceSeq(SDNode *Node,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue AccOp = Node->getOperand(0);
  SDValue VecOp = Node->getOperand(1);
  SDNodeFlags Flags = Node->getFlags();

  EVT VT = VecOp.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(VecOp, Ops, 0, NumElts);

  unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Node->getOpcode());

  SDValue Res = AccOp;
  for (unsigned i = 0; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Flags);

  return Res;
}

bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  }
  if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - X/Y*Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}

SDValue TargetLowering::expandFP_TO_INT_SAT(SDNode *Node,
                                            SelectionDAG &DAG) const {
  bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
  SDLoc dl(SDValue(Node, 0));
  SDValue Src = Node->getOperand(0);

  // DstVT is the result type, while SatVT is the size to which we saturate.
  EVT SrcVT = Src.getValueType();
  EVT DstVT = Node->getValueType(0);

  EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
  unsigned SatWidth = SatVT.getScalarSizeInBits();
  unsigned DstWidth = DstVT.getScalarSizeInBits();
  assert(SatWidth <= DstWidth &&
         "Expected saturation width no wider than result width");

  // Determine the minimum and maximum integer values and their corresponding
  // floating-point values.
  APInt MinInt, MaxInt;
  if (IsSigned) {
    MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
    MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
  } else {
    MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
    MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
  }

  // We cannot risk emitting FP_TO_XINT nodes with a source VT of f16, as
  // libcall emission cannot handle this; large result types will fail.
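  // (Sketch of the rewrite: fptosi.sat.i32.f16(x) proceeds as
  //  fptosi.sat.i32.f32(fpext(x)), which is exact because every f16 value is
  //  representable in f32.)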
  if (SrcVT == MVT::f16) {
    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Src);
    SrcVT = Src.getValueType();
  }

  APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
  APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));

  APFloat::opStatus MinStatus =
      MinFloat.convertFromAPInt(MinInt, IsSigned, APFloat::rmTowardZero);
  APFloat::opStatus MaxStatus =
      MaxFloat.convertFromAPInt(MaxInt, IsSigned, APFloat::rmTowardZero);
  bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact) &&
                             !(MaxStatus & APFloat::opStatus::opInexact);

  SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
  SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);

  // If the integer bounds are exactly representable as floats and min/max are
  // legal, emit a min+max+fptoi sequence. Otherwise we have to use a sequence
  // of comparisons and selects.
  bool MinMaxLegal = isOperationLegal(ISD::FMINNUM, SrcVT) &&
                     isOperationLegal(ISD::FMAXNUM, SrcVT);
  if (AreExactFloatBounds && MinMaxLegal) {
    SDValue Clamped = Src;

    // Clamp Src by MinFloat from below. If Src is NaN, the result is MinFloat.
    Clamped = DAG.getNode(ISD::FMAXNUM, dl, SrcVT, Clamped, MinFloatNode);
    // Clamp by MaxFloat from above. NaN can no longer occur.
    Clamped = DAG.getNode(ISD::FMINNUM, dl, SrcVT, Clamped, MaxFloatNode);
    // Convert the clamped value to an integer.
    SDValue FpToInt = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT,
                                  dl, DstVT, Clamped);

    // In the unsigned case we're done, because we mapped NaN to MinFloat,
    // which will cast to zero.
    if (!IsSigned)
      return FpToInt;

    // Otherwise, select 0 if Src is NaN.
    SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
    SDValue IsNan = DAG.getSetCC(dl, SetCCVT, Src, Src, ISD::CondCode::SETUO);
    return DAG.getSelect(dl, DstVT, IsNan, ZeroInt, FpToInt);
  }

  SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
  SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);

  // Result of the direct conversion. The assumption here is that the operation
  // is non-trapping and it's fine to apply it to an out-of-range value if we
  // select it away later.
  SDValue FpToInt =
      DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, DstVT, Src);

  SDValue Select = FpToInt;

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);

  // If Src ULT MinFloat, select MinInt. In particular, this also selects
  // MinInt if Src is NaN.
  SDValue ULT = DAG.getSetCC(dl, SetCCVT, Src, MinFloatNode, ISD::SETULT);
  Select = DAG.getSelect(dl, DstVT, ULT, MinIntNode, Select);
  // If Src OGT MaxFloat, select MaxInt.
  SDValue OGT = DAG.getSetCC(dl, SetCCVT, Src, MaxFloatNode, ISD::SETOGT);
  Select = DAG.getSelect(dl, DstVT, OGT, MaxIntNode, Select);

  // In the unsigned case we are done, because we mapped NaN to MinInt, which
  // is already zero.
  if (!IsSigned)
    return Select;

  // Otherwise, select 0 if Src is NaN.
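  // (Worked i8 example with SatVT == i8: 300.0 takes the OGT arm above and
  //  yields 127; NaN compares true for the ULT arm and would yield MinInt,
  //  which is why the signed case needs this extra select to return 0.)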
  SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
  SDValue IsNan = DAG.getSetCC(dl, SetCCVT, Src, Src, ISD::CondCode::SETUO);
  return DAG.getSelect(dl, DstVT, IsNan, ZeroInt, Select);
}

SDValue TargetLowering::expandVectorSplice(SDNode *Node,
                                           SelectionDAG &DAG) const {
  assert(Node->getOpcode() == ISD::VECTOR_SPLICE && "Unexpected opcode!");
  assert(Node->getValueType(0).isScalableVector() &&
         "Fixed length vector types expected to use SHUFFLE_VECTOR!");

  EVT VT = Node->getValueType(0);
  SDValue V1 = Node->getOperand(0);
  SDValue V2 = Node->getOperand(1);
  int64_t Imm = cast<ConstantSDNode>(Node->getOperand(2))->getSExtValue();
  SDLoc DL(Node);

  // Expand through memory as follows:
  //   Alloca CONCAT_VECTORS_TYPES(V1, V2) Ptr
  //   Store V1, Ptr
  //   Store V2, Ptr + sizeof(V1)
  //   If (Imm < 0)
  //     TrailingElts = -Imm
  //     Ptr = Ptr + sizeof(V1) - (TrailingElts * sizeof(VT.Elt))
  //   else
  //     Ptr = Ptr + (Imm * sizeof(VT.Elt))
  //   Res = Load Ptr

  Align Alignment = DAG.getReducedAlign(VT, /*UseABI=*/false);

  EVT MemVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
                               VT.getVectorElementCount() * 2);
  SDValue StackPtr = DAG.CreateStackTemporary(MemVT.getStoreSize(), Alignment);
  EVT PtrVT = StackPtr.getValueType();
  auto &MF = DAG.getMachineFunction();
  auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);

  // Store the lo part of CONCAT_VECTORS(V1, V2).
  SDValue StoreV1 = DAG.getStore(DAG.getEntryNode(), DL, V1, StackPtr, PtrInfo);
  // Store the hi part of CONCAT_VECTORS(V1, V2).
  SDValue OffsetToV2 = DAG.getVScale(
      DL, PtrVT,
      APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinValue()));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2);
  SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo);

  if (Imm >= 0) {
    // Load back the required element. getVectorElementPointer takes care of
    // clamping the index if it's out-of-bounds.
    StackPtr = getVectorElementPointer(DAG, StackPtr, VT, Node->getOperand(2));
    // Load the spliced result.
    return DAG.getLoad(VT, DL, StoreV2, StackPtr,
                       MachinePointerInfo::getUnknownStack(MF));
  }

  uint64_t TrailingElts = -Imm;

  // NOTE: TrailingElts must be clamped so as not to read outside of V1:V2.
  TypeSize EltByteSize = VT.getVectorElementType().getStoreSize();
  SDValue TrailingBytes =
      DAG.getConstant(TrailingElts * EltByteSize, DL, PtrVT);

  if (TrailingElts > VT.getVectorMinNumElements()) {
    SDValue VLBytes =
        DAG.getVScale(DL, PtrVT,
                      APInt(PtrVT.getFixedSizeInBits(),
                            VT.getStoreSize().getKnownMinValue()));
    TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes);
  }

  // Calculate the start address of the spliced result.
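  // (e.g. splicing <vscale x 4 x i32> vectors with Imm == -2: V1 and V2 sit
  //  contiguously in the temporary, TrailingBytes == 8, and the load begins 8
  //  bytes before the end of V1's slot, so the result is the last two elements
  //  of V1 followed by the leading elements of V2.)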
  StackPtr2 = DAG.getNode(ISD::SUB, DL, PtrVT, StackPtr2, TrailingBytes);

  // Load the spliced result.
  return DAG.getLoad(VT, DL, StoreV2, StackPtr2,
                     MachinePointerInfo::getUnknownStack(MF));
}

bool TargetLowering::LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT,
                                           SDValue &LHS, SDValue &RHS,
                                           SDValue &CC, SDValue Mask,
                                           SDValue EVL, bool &NeedInvert,
                                           const SDLoc &dl, SDValue &Chain,
                                           bool IsSignaling) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT OpVT = LHS.getSimpleValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  NeedInvert = false;
  assert(!EVL == !Mask && "VP Mask and EVL must either both be set or unset");
  bool IsNonVP = !EVL;
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default:
    llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    ISD::CondCode InvCC = ISD::getSetCCSwappedOperands(CCCode);
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(InvCC);
      return true;
    }
    // Swapping operands didn't work. Try inverting the condition.
    bool NeedSwap = false;
    InvCC = getSetCCInverse(CCCode, OpVT);
    if (!TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      // If inverting the condition is not enough, try swapping operands
      // on top of it.
      InvCC = ISD::getSetCCSwappedOperands(InvCC);
      NeedSwap = true;
    }
    if (TLI.isCondCodeLegalOrCustom(InvCC, OpVT)) {
      CC = DAG.getCondCode(InvCC);
      NeedInvert = true;
      if (NeedSwap)
        std::swap(LHS, RHS);
      return true;
    }

    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default:
      llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETUO:
      if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
        CC1 = ISD::SETUNE;
        CC2 = ISD::SETUNE;
        Opc = ISD::OR;
        break;
      }
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
      NeedInvert = true;
      [[fallthrough]];
    case ISD::SETO:
      assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
             "If SETO is expanded, SETOEQ must be legal!");
      CC1 = ISD::SETOEQ;
      CC2 = ISD::SETOEQ;
      Opc = ISD::AND;
      break;
    case ISD::SETONE:
    case ISD::SETUEQ:
      // If the SETUO or SETO CC isn't legal, we might be able to use
      // SETOGT || SETOLT, inverting the result for SETUEQ. We only need one
      // of SETOGT/SETOLT to be legal; the other can be emulated by swapping
      // the operands.
      CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
      if (!TLI.isCondCodeLegal(CC2, OpVT) &&
          (TLI.isCondCodeLegal(ISD::SETOGT, OpVT) ||
           TLI.isCondCodeLegal(ISD::SETOLT, OpVT))) {
        CC1 = ISD::SETOGT;
        CC2 = ISD::SETOLT;
        Opc = ISD::OR;
        NeedInvert = ((unsigned)CCCode & 0x8U);
        break;
      }
      [[fallthrough]];
    case ISD::SETOEQ:
    case ISD::SETOGT:
    case ISD::SETOGE:
    case ISD::SETOLT:
    case ISD::SETOLE:
    case ISD::SETUNE:
    case ISD::SETUGT:
    case ISD::SETUGE:
    case ISD::SETULT:
    case ISD::SETULE:
      // If the operands are floating point, assign and break; otherwise fall
      // through.
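      // (The bit layout of ISD::CondCode makes the assignment below work:
      //  bit 3 distinguishes the unordered from the ordered form, and setting
      //  bit 4 yields the "don't care about NaN" integer-style code. So, for
      //  example, SETUGT expands to (SETGT LHS, RHS) OR (SETUO LHS, RHS), and
      //  SETOGT to (SETGT LHS, RHS) AND (SETO LHS, RHS).)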
      if (!OpVT.isInteger()) {
        // We can use the 4th bit to tell whether this is the unordered or
        // the ordered version of the opcode.
        CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO;
        Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND;
        CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10);
        break;
      }
      // Fall through if the operands are unsigned integers.
      [[fallthrough]];
    case ISD::SETLE:
    case ISD::SETGT:
    case ISD::SETGE:
    case ISD::SETLT:
    case ISD::SETNE:
    case ISD::SETEQ:
      // If all combinations of inverting the condition and swapping operands
      // didn't work, then we have no means to expand the condition.
      llvm_unreachable("Don't know how to expand this condition!");
    }

    SDValue SetCC1, SetCC2;
    if (CCCode != ISD::SETO && CCCode != ISD::SETUO) {
      // If we aren't the ordered or unordered operation,
      // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, LHS, RHS, CC2, Mask, EVL);
      }
    } else {
      // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS).
      if (IsNonVP) {
        SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1, Chain, IsSignaling);
        SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2, Chain, IsSignaling);
      } else {
        SetCC1 = DAG.getSetCCVP(dl, VT, LHS, LHS, CC1, Mask, EVL);
        SetCC2 = DAG.getSetCCVP(dl, VT, RHS, RHS, CC2, Mask, EVL);
      }
    }
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, SetCC1.getValue(1),
                          SetCC2.getValue(1));
    if (IsNonVP)
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    else {
      // Transform the binary opcode to the VP equivalent.
      assert((Opc == ISD::OR || Opc == ISD::AND) && "Unexpected opcode");
      Opc = Opc == ISD::OR ? ISD::VP_OR : ISD::VP_AND;
      LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2, Mask, EVL);
    }
    RHS = SDValue();
    CC = SDValue();
    return true;
  }
  }
  return false;
}
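// A sample expansion through the Expand path above (assuming a target where
// SETUO is illegal but SETOGT/SETOLT are legal): setcc %a, %b, setueq becomes
// the inverse of (setcc %a, %b, setogt) OR (setcc %a, %b, setolt), with
// NeedInvert reported back to the caller.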