//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <stdint.h>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
    cl::desc("Enable setting constant bits to reduce size of mask immediates"),
    cl::Hidden);

static cl::opt<bool> EnablePromoteAnyextLoad(
    "x86-promote-anyext-load", cl::init(true),
    cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);

extern cl::opt<bool> IndirectBranchTracking;

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValues instead of register
  /// numbers for the leaves of the matched tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    Align Alignment;            // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), SymbolFlags(X86II::MO_NO_FLAG) {}

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
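    /// On x86-64 this is modeled as a RegBase whose Base_Reg is the
    /// architectural RIP register, as matchWrapper sets up for accesses
    /// like "leaq sym(%rip), %rax" (illustrative example).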
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (NegateIndex)
        dbgs() << "negate ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}

    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                             "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void emitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
                            bool AllowSegmentRegForX32 = false);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
                          SDValue ScaleOp, SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp, SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                          SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp,
                          SDValue &Segment);

    bool isProfitableToFormMaskedOp(SDNode *N) const;

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);

      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
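      // For example (illustrative): "movq sym+8(%rip), %rax" encodes
      // sym+8 in a signed 32-bit displacement field even though the
      // address computation itself is 64-bit.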
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
                                             AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym.");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!CurDAG->shouldOptForSize())
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above).
        // Those instructions won't match in ISel, for now, and would
        // be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this is a sign-extended 8-bit integer immediate used in an ALU
        // instruction, there is probably an opcode encoding to save space.
        auto *C = dyn_cast<ConstantSDNode>(N);
        if (C && isInt<8>(C->getSExtValue()))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
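          // (For instance, the immediate in an "addl $16, %esp" stack
          // adjustment folds into the instruction anyway, so counting it
          // as a reuse would skew the heuristic; illustrative example.)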
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                 OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than one use, then recommend hoisting.
      return (UseCount > 1);
    }

    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    // Helper to detect unneeded AND instructions on shift amounts. Called
    // from PatFrags in tablegen.
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();

      if (Val.countTrailingOnes() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countTrailingOnes() >= Width;
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, cast to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, cast to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    // Indicates we should prefer to use a non-temporal load for this load.
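    // For example (illustrative), a 16-byte aligned non-temporal load can
    // select MOVNTDQA when SSE4.1 is available; the size/feature checks
    // below encode the exact rules.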
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlignment() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTERNLOG(SDNode *N);
    bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentBC,
                        SDValue A, SDValue B, SDValue C, uint8_t Imm);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
    bool tryMatchBitSelect(SDNode *N);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
}


// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
      Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
      Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
    // second operand.
    if (Opcode == X86ISD::STRICT_CMPM)
      OpVT = N->getOperand(1).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_SAE)
    return true;

  return false;
}

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
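  // For example (illustrative), (and (X86ISD::CMPM ...), Y) keeps the bits
  // above the compared element count zero, since ANDing anything with the
  // compare's zero high bits leaves them zero.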
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::ADC:
    case X86ISD::SUB:
    case X86ISD::SBB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDCARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32 bits,
        // prefer using the smaller and over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;

        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;

        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign-extended 8-bit immediate.
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;

        if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8) &&
            hasNoCarryFlagUses(SDValue(U, 1)))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg
  // or a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

// Indicates it is profitable to form an AVX512 masked operation. Returning
// false will favor a register-register masked move or vblendm, and the
// operation will be selected separately.
bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
  assert(
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
      "Unexpected opcode!");

  // If the operation has additional users, the operation will be duplicated.
  // Check the use count to prevent that.
  // FIXME: Are there cheap opcodes we might want to duplicate?
  return N->getOperand(1).hasOneUse();
}

/// Replace the original chain operand of the call with the load's chain
/// operand and move the load below the call's chain operand.
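/// Schematically (illustrative):
///   before: the load's chain output feeds OrigChain (possibly through a
///           TokenFactor), and OrigChain feeds the call.
///   after:  OrigChain consumes the load's incoming chain and the call
///           consumes the load's chain output, so the load sits directly
///           below the call and can be folded into it.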
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

static bool isEndbrImm64(uint64_t Imm) {
  // There may be some other prefix bytes between 0xF3 and 0x0F1EFA.
  // e.g. 0xF3660F1EFA, 0xF3670F1EFA
  if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
    return false;

  uint8_t OptionalPrefixBytes [] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
                                    0x65, 0x66, 0x67, 0xf0, 0xf2};
  int i = 24; // The low 24 bits (0x0F1EFA) have already matched.
  while (i < 64) {
    uint8_t Byte = (Imm >> i) & 0xFF;
    if (Byte == 0xF3)
      return true;
    if (!llvm::is_contained(OptionalPrefixBytes, Byte))
      return false;
    i += 8;
  }

  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  bool MadeChange = false;
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // This is for CET enhancement.
    //
    // ENDBR32 and ENDBR64 have specific opcodes:
    // ENDBR32: F3 0F 1E FB
    // ENDBR64: F3 0F 1E FA
    // We want to ensure that attackers cannot find unintended ENDBR32/64
    // opcode matches in the binary.
    // Here's an example:
    // If the compiler had to generate asm for the following code:
    //   a = 0xF30F1EFA
    // it could, for example, generate:
    //   mov dword ptr [a], 0xF30F1EFA
    // In such a case, the binary would include a gadget that starts with a
    // fake ENDBR64 opcode. Therefore, we split such generation into multiple
    // operations so that the immediate does not appear in the binary.
    if (N->getOpcode() == ISD::Constant) {
      MVT VT = N->getSimpleValueType(0);
      int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
      int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
      if (Imm == EndbrImm || isEndbrImm64(Imm)) {
        // Check that the cf-protection-branch is enabled.
        Metadata *CFProtectionBranch =
          MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
        if (CFProtectionBranch || IndirectBranchTracking) {
          SDLoc dl(N);
          SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
          Complement = CurDAG->getNOT(dl, Complement, VT);
          --I;
          CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
          ++I;
          MadeChange = true;
          continue;
        }
      }
    }

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }

    /// Convert vector increment or decrement to sub/add with an all-ones
    /// constant:
    /// add X, <1, 1...> --> sub X, <-1, -1...>
    /// sub X, <1, 1...> --> add X, <-1, -1...>
    /// The all-ones vector constant can be materialized using a pcmpeq
    /// instruction that is commonly recognized as an idiom (has no register
    /// dependency), so that's better/smaller than loading a splat 1 constant.
    if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        N->getSimpleValueType(0).isVector()) {

      APInt SplatVal;
      if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
          SplatVal.isOneValue()) {
        SDLoc DL(N);

        MVT VT = N->getSimpleValueType(0);
        unsigned NumElts = VT.getSizeInBits() / 32;
        SDValue AllOnes =
            CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
        AllOnes = CurDAG->getBitcast(VT, AllOnes);

        unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
        SDValue Res =
            CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }
    }

    switch (N->getOpcode()) {
    case X86ISD::VBROADCAST: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        SDLoc dl(N);
        SDValue NarrowBCast =
            CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case X86ISD::VBROADCAST_LOAD: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        auto *MemNode = cast<MemSDNode>(N);
        SDLoc dl(N);
        SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
        SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
        SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
            MemNode->getMemOperand());
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        SDValue To[] = {Res, NarrowBCast.getValue(1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case ISD::VSELECT: {
      // Replace VSELECT with non-mask conditions with BLENDV.
      if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
        break;

      assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
      SDValue Blendv =
          CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
                          N->getOperand(0), N->getOperand(1), N->getOperand(2));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Blendv.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FP_ROUND:
    case ISD::STRICT_FP_ROUND:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
    case ISD::STRICT_FP_TO_SINT:
    case ISD::STRICT_FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
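      // For example (illustrative), (fp_to_sint v4f32) becomes
      // (X86ISD::CVTTP2SI v4f32), which a single CVTTPS2DQ pattern matches.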
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_ROUND:          NewOpc = X86ISD::VFPROUND;        break;
      case ISD::STRICT_FP_ROUND:   NewOpc = X86ISD::STRICT_VFPROUND; break;
      case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
      case ISD::FP_TO_SINT:        NewOpc = X86ISD::CVTTP2SI;        break;
      case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
      case ISD::FP_TO_UINT:        NewOpc = X86ISD::CVTTP2UI;        break;
      }
      SDValue Res;
      if (N->isStrictFPOpcode())
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
                            {N->getOperand(0), N->getOperand(1)});
      else
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                            N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we
      // don't need 2 sets of patterns. For vXi1 sources, sign extend is
      // used instead.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
        assert(N->getOpcode() == ISD::ANY_EXTEND &&
               "Unexpected opcode for mask vector!");
        NewOpc = ISD::SIGN_EXTEND;
      } else {
        NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                     ? ISD::ZERO_EXTEND
                     : ISD::ZERO_EXTEND_VECTOR_INREG;
      }

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FCEIL:
    case ISD::STRICT_FCEIL:
    case ISD::FFLOOR:
    case ISD::STRICT_FFLOOR:
    case ISD::FTRUNC:
    case ISD::STRICT_FTRUNC:
    case ISD::FROUNDEVEN:
    case ISD::STRICT_FROUNDEVEN:
    case ISD::FNEARBYINT:
    case ISD::STRICT_FNEARBYINT:
    case ISD::FRINT:
    case ISD::STRICT_FRINT: {
      // Replace fp rounding nodes with their X86 specific equivalents so we
      // don't need 2 sets of patterns.
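      // For example (illustrative), (fceil x) becomes
      // (X86ISD::VRNDSCALE x, 0xA), where the immediate selects the
      // rounding behavior of ROUNDP[SD]/VRNDSCALE.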
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::STRICT_FCEIL:
      case ISD::FCEIL:      Imm = 0xA; break;
      case ISD::STRICT_FFLOOR:
      case ISD::FFLOOR:     Imm = 0x9; break;
      case ISD::STRICT_FTRUNC:
      case ISD::FTRUNC:     Imm = 0xB; break;
      case ISD::STRICT_FROUNDEVEN:
      case ISD::FROUNDEVEN: Imm = 0x8; break;
      case ISD::STRICT_FNEARBYINT:
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::STRICT_FRINT:
      case ISD::FRINT:      Imm = 0x4; break;
      }
      SDLoc dl(N);
      bool IsStrict = N->isStrictFPOpcode();
      SDValue Res;
      if (IsStrict)
        Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
                              {N->getValueType(0), MVT::Other},
                              {N->getOperand(0), N->getOperand(1),
                               CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
      else
        Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
                              N->getOperand(0),
                              CurDAG->getTargetConstant(Imm, dl, MVT::i32));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND:  Opc = ISD::AND;      break;
        case X86ISD::FOR:   Opc = ISD::OR;       break;
        case X86ISD::FXOR:  Opc = ISD::XOR;      break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useIndirectThunkCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      MadeChange = true;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store
    // and load through the stack. This is a gross hack. We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass. We would like DAG combine to be able to hack on these
    // between the call expansion and the node legalization. As such this pass
    // basically does "really late" legalization of these inline with the X86
    // isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(
          CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MPI, MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havoc on the DAG because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }

    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      // Since the operation is StrictFP, use the preexisting chain.
      SDValue Store, Result;
      if (!SrcIsSSE) {
        SDVTList VTs = CurDAG->getVTList(MVT::Other);
        SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
        Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
                                            MPI, /*Align*/ None,
                                            MachineMemOperand::MOStore);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Store->getFlags();
          Flags.setNoFPExcept(true);
          Store->setFlags(Flags);
        }
      } else {
        assert(SrcVT == MemVT && "Unexpected VT!");
        Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
                                 MPI);
      }

      if (!DstIsSSE) {
        SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
        SDValue Ops[] = {Store, MemTmp};
        Result = CurDAG->getMemIntrinsicNode(
            X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
            /*Align*/ None, MachineMemOperand::MOLoad);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Result->getFlags();
          Flags.setNoFPExcept(true);
          Result->setFlags(Flags);
        }
      } else {
        assert(DstVT == MemVT && "Unexpected VT!");
        Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
      }

      // We're about to replace all uses of the STRICT_FP_ROUND/STRICT_FP_EXTEND
      // with the load we created. This will cause general havoc on the DAG
      // because anything below the conversion could be folded into other
      // existing nodes. To avoid invalidating 'I', back it up to the convert
      // node.
      --I;
      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
      break;
    }
    }


    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    MadeChange = true;
  }

  // Remove any dead nodes that may have been left behind.
  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the lower bit of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits, we still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // OK, we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}

void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }

    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
        unsigned NewOpc;
        switch (N0Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::AND8rm:  NewOpc = X86::TEST8mr;  break;
        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
        }

        // Need to swap the memory and register operands.
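        // AND?rm operands are (reg, mem...) with the chain last, while
        // TEST?mr expects the five memory operands first, then the register,
        // then the chain, so rebuild the operand list in that order.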
        SDValue Ops[] = { And.getOperand(1),
                          And.getOperand(2),
                          And.getOperand(3),
                          And.getOperand(4),
                          And.getOperand(5),
                          And.getOperand(0),
                          And.getOperand(6)  /* Chain */ };
        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                     MVT::i32, MVT::Other, Ops);
        CurDAG->setNodeMemRefs(
            Test, cast<MachineSDNode>(And.getNode())->memoperands());
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
    }

    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
    // used. We're doing this late so we can prefer to fold the AND into masked
    // comparisons. Doing that can be better for the live range of the mask
    // register.
    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode() &&
        onlyUsesZeroFlag(SDValue(N, 0))) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
      // KAND instructions and KTEST use the same ISA feature.
      if (N0Opc == X86::KANDBrr ||
          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
        unsigned NewOpc;
        switch (Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
        }
        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                      MVT::i32,
                                                      And.getOperand(0),
                                                      And.getOperand(1));
        ReplaceUses(N, KTest);
        MadeChange = true;
        continue;
      }
    }

    // Attempt to remove vector moves that were inserted to zero upper bits.
    if (Opc != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure it's one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // Make sure the instruction has a VEX, XOP, or EVEX prefix. This excludes
    // the SHA instructions, which use a legacy encoding.
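    // Only VEX/EVEX/XOP encoded producers implicitly zero the upper bits of
    // the destination register; a legacy encoded producer leaves them
    // unchanged, so the zeroing move must be kept in that case.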
    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        (TSFlags & X86II::EncodingMask) != X86II::XOP)
      continue;

    // The producing instruction is another vector instruction. We can drop
    // the move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
    MadeChange = true;
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}


/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}

void X86DAGToDAGISel::emitFunctionEntryCode() {
  // If this is main, emit special code for main.
  const Function &F = MF->getFunction();
  if (F.hasExternalLinkage() && F.getName() == "main")
    emitSpecialCodeForMain();
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // We may have already matched a displacement and the caller just added the
  // symbolic displacement. So we still need to do the checks even if Offset
  // is zero.

  int64_t Val = AM.Disp + Offset;

  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (Val != 0 && (AM.ES || AM.MCSym))
    return true;

  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (Val != 0 &&
        !X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
                                         bool AllowSegmentRegForX32) {
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is generally valid because the GNU TLS model defines
  // that gs:0 (or fs:0 on X86-64) contains its own address.
However, for X86-64 mode
  // with 32-bit registers, as we get in ILP32 mode, those registers are first
  // zero-extended to 64 bits and then added to the base address, which gives
  // unwanted results when the register holds a negative value.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address)) {
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        !IndirectTlsSegRefs &&
        (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
         Subtarget->isTargetFuchsia())) {
      if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
        return true;
      switch (N->getPointerInfo().getAddrSpace()) {
      case X86AS::GS:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case X86AS::FS:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space X86AS::SS is not handled here, because it is not used to
      // address TLS areas.
      }
    }
  }

  return true;
}

/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
/// mode. These wrap things that will resolve down into a symbol reference.
/// If no match is possible, this returns true, otherwise it returns false.
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  bool IsRIPRelTLS = false;
  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
  if (IsRIPRel) {
    SDValue Val = N.getOperand(0);
    if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
      IsRIPRelTLS = true;
  }

  // We can't use an addressing mode in the 64-bit large code model.
  // Global TLS addressing is an exception. In the medium code model,
  // we can use such a mode when RIP wrappers are present.
  // That signifies access to globals that are known to be "near",
  // such as the GOT itself.
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit() &&
      ((M == CodeModel::Large && !IsRIPRelTLS) ||
       (M == CodeModel::Medium && !IsRIPRel)))
    return true;

  // Base and index reg must be 0 in order to use %rip as base.
  if (IsRIPRel && AM.hasBaseOrIndexReg())
    return true;

  // Make a local copy in case we can't do this fold.
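  // (Following the convention of the match* helpers, the folds below signal
  // failure by returning true, at which point AM is restored from Backup.)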
1686 X86ISelAddressMode Backup = AM; 1687 1688 int64_t Offset = 0; 1689 SDValue N0 = N.getOperand(0); 1690 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) { 1691 AM.GV = G->getGlobal(); 1692 AM.SymbolFlags = G->getTargetFlags(); 1693 Offset = G->getOffset(); 1694 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) { 1695 AM.CP = CP->getConstVal(); 1696 AM.Alignment = CP->getAlign(); 1697 AM.SymbolFlags = CP->getTargetFlags(); 1698 Offset = CP->getOffset(); 1699 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) { 1700 AM.ES = S->getSymbol(); 1701 AM.SymbolFlags = S->getTargetFlags(); 1702 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) { 1703 AM.MCSym = S->getMCSymbol(); 1704 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) { 1705 AM.JT = J->getIndex(); 1706 AM.SymbolFlags = J->getTargetFlags(); 1707 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) { 1708 AM.BlockAddr = BA->getBlockAddress(); 1709 AM.SymbolFlags = BA->getTargetFlags(); 1710 Offset = BA->getOffset(); 1711 } else 1712 llvm_unreachable("Unhandled symbol reference node."); 1713 1714 if (foldOffsetIntoAddress(Offset, AM)) { 1715 AM = Backup; 1716 return true; 1717 } 1718 1719 if (IsRIPRel) 1720 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64)); 1721 1722 // Commit the changes now that we know this fold is safe. 1723 return false; 1724 } 1725 1726 /// Add the specified node to the specified addressing mode, returning true if 1727 /// it cannot be done. This just pattern matches for the addressing mode. 1728 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) { 1729 if (matchAddressRecursively(N, AM, 0)) 1730 return true; 1731 1732 // Post-processing: Make a second attempt to fold a load, if we now know 1733 // that there will not be any other register. This is only performed for 1734 // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded 1735 // any foldable load the first time. 1736 if (Subtarget->isTarget64BitILP32() && 1737 AM.BaseType == X86ISelAddressMode::RegBase && 1738 AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) { 1739 SDValue Save_Base_Reg = AM.Base_Reg; 1740 if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) { 1741 AM.Base_Reg = SDValue(); 1742 if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true)) 1743 AM.Base_Reg = Save_Base_Reg; 1744 } 1745 } 1746 1747 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has 1748 // a smaller encoding and avoids a scaled-index. 1749 if (AM.Scale == 2 && 1750 AM.BaseType == X86ISelAddressMode::RegBase && 1751 AM.Base_Reg.getNode() == nullptr) { 1752 AM.Base_Reg = AM.IndexReg; 1753 AM.Scale = 1; 1754 } 1755 1756 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode, 1757 // because it has a smaller encoding. 1758 // TODO: Which other code models can use this? 
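  // (Encoding note: in 64-bit mode an absolute [disp32] operand requires a
  // SIB byte, while [%rip + disp32] does not, so the RIP-relative form is a
  // byte shorter per reference.)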
  switch (TM.getCodeModel()) {
  default: break;
  case CodeModel::Small:
  case CodeModel::Kernel:
    if (Subtarget->is64Bit() &&
        AM.Scale == 1 &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr &&
        AM.SymbolFlags == X86II::MO_NO_FLAG &&
        AM.hasSymbolicDisplacement())
      AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
    break;
  }

  return false;
}

bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
                               Depth + 1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    AM.Scale = 1;
    return false;
  }
  N = Handle.getValue();
  return true;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
    DAG.RepositionNode(Pos->getIterator(), N.getNode());
    // Mark Node as invalid for pruning as after this it may be a successor to
    // a selected node but otherwise be in the same position as Pos.
    // Conservatively mark it with the same -abs(Id) to ensure the node id
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}

// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and AND into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
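// For example, with C1 == 2:
//   (x >> 6) & (0xff << 2)  -->  ((x >> 8) & 0xff) << 2
// where the new AND is an h-register extract and the trailing SHL folds into
// a scale-by-4 index.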
1835 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N, 1836 uint64_t Mask, 1837 SDValue Shift, SDValue X, 1838 X86ISelAddressMode &AM) { 1839 if (Shift.getOpcode() != ISD::SRL || 1840 !isa<ConstantSDNode>(Shift.getOperand(1)) || 1841 !Shift.hasOneUse()) 1842 return true; 1843 1844 int ScaleLog = 8 - Shift.getConstantOperandVal(1); 1845 if (ScaleLog <= 0 || ScaleLog >= 4 || 1846 Mask != (0xffu << ScaleLog)) 1847 return true; 1848 1849 MVT VT = N.getSimpleValueType(); 1850 SDLoc DL(N); 1851 SDValue Eight = DAG.getConstant(8, DL, MVT::i8); 1852 SDValue NewMask = DAG.getConstant(0xff, DL, VT); 1853 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight); 1854 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask); 1855 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8); 1856 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount); 1857 1858 // Insert the new nodes into the topological ordering. We must do this in 1859 // a valid topological ordering as nothing is going to go back and re-sort 1860 // these nodes. We continually insert before 'N' in sequence as this is 1861 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 1862 // hierarchy left to express. 1863 insertDAGNode(DAG, N, Eight); 1864 insertDAGNode(DAG, N, Srl); 1865 insertDAGNode(DAG, N, NewMask); 1866 insertDAGNode(DAG, N, And); 1867 insertDAGNode(DAG, N, ShlCount); 1868 insertDAGNode(DAG, N, Shl); 1869 DAG.ReplaceAllUsesWith(N, Shl); 1870 DAG.RemoveDeadNode(N.getNode()); 1871 AM.IndexReg = And; 1872 AM.Scale = (1 << ScaleLog); 1873 return false; 1874 } 1875 1876 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this 1877 // allows us to fold the shift into this addressing mode. Returns false if the 1878 // transform succeeded. 1879 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N, 1880 X86ISelAddressMode &AM) { 1881 SDValue Shift = N.getOperand(0); 1882 1883 // Use a signed mask so that shifting right will insert sign bits. These 1884 // bits will be removed when we shift the result left so it doesn't matter 1885 // what we use. This might allow a smaller immediate encoding. 1886 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue(); 1887 1888 // If we have an any_extend feeding the AND, look through it to see if there 1889 // is a shift behind it. But only if the AND doesn't use the extended bits. 1890 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64? 1891 bool FoundAnyExtend = false; 1892 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() && 1893 Shift.getOperand(0).getSimpleValueType() == MVT::i32 && 1894 isUInt<32>(Mask)) { 1895 FoundAnyExtend = true; 1896 Shift = Shift.getOperand(0); 1897 } 1898 1899 if (Shift.getOpcode() != ISD::SHL || 1900 !isa<ConstantSDNode>(Shift.getOperand(1))) 1901 return true; 1902 1903 SDValue X = Shift.getOperand(0); 1904 1905 // Not likely to be profitable if either the AND or SHIFT node has more 1906 // than one use (unless all uses are for address computation). Besides, 1907 // isel mechanism requires their node ids to be reused. 1908 if (!N.hasOneUse() || !Shift.hasOneUse()) 1909 return true; 1910 1911 // Verify that the shift amount is something we can fold. 
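  // (Only 1, 2 and 3 are foldable: the SIB byte encodes index scales of 2, 4
  // and 8, i.e. 1 << ShiftAmt.)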
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  if (FoundAnyExtend) {
    SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }

  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);
  DAG.RemoveDeadNode(N.getNode());

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
1996 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt; 1997 if (MaskLZ < ScaleDown) 1998 return true; 1999 MaskLZ -= ScaleDown; 2000 2001 // The final check is to ensure that any masked out high bits of X are 2002 // already known to be zero. Otherwise, the mask has a semantic impact 2003 // other than masking out a couple of low bits. Unfortunately, because of 2004 // the mask, zero extensions will be removed from operands in some cases. 2005 // This code works extra hard to look through extensions because we can 2006 // replace them with zero extensions cheaply if necessary. 2007 bool ReplacingAnyExtend = false; 2008 if (X.getOpcode() == ISD::ANY_EXTEND) { 2009 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() - 2010 X.getOperand(0).getSimpleValueType().getSizeInBits(); 2011 // Assume that we'll replace the any-extend with a zero-extend, and 2012 // narrow the search to the extended value. 2013 X = X.getOperand(0); 2014 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits; 2015 ReplacingAnyExtend = true; 2016 } 2017 APInt MaskedHighBits = 2018 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ); 2019 KnownBits Known = DAG.computeKnownBits(X); 2020 if (MaskedHighBits != Known.Zero) return true; 2021 2022 // We've identified a pattern that can be transformed into a single shift 2023 // and an addressing mode. Make it so. 2024 MVT VT = N.getSimpleValueType(); 2025 if (ReplacingAnyExtend) { 2026 assert(X.getValueType() != VT); 2027 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND. 2028 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X); 2029 insertDAGNode(DAG, N, NewX); 2030 X = NewX; 2031 } 2032 SDLoc DL(N); 2033 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8); 2034 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt); 2035 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8); 2036 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt); 2037 2038 // Insert the new nodes into the topological ordering. We must do this in 2039 // a valid topological ordering as nothing is going to go back and re-sort 2040 // these nodes. We continually insert before 'N' in sequence as this is 2041 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 2042 // hierarchy left to express. 2043 insertDAGNode(DAG, N, NewSRLAmt); 2044 insertDAGNode(DAG, N, NewSRL); 2045 insertDAGNode(DAG, N, NewSHLAmt); 2046 insertDAGNode(DAG, N, NewSHL); 2047 DAG.ReplaceAllUsesWith(N, NewSHL); 2048 DAG.RemoveDeadNode(N.getNode()); 2049 2050 AM.Scale = 1 << AMShiftAmt; 2051 AM.IndexReg = NewSRL; 2052 return false; 2053 } 2054 2055 // Transform "(X >> SHIFT) & (MASK << C1)" to 2056 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be 2057 // matched to a BEXTR later. Returns false if the simplification is performed. 2058 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N, 2059 uint64_t Mask, 2060 SDValue Shift, SDValue X, 2061 X86ISelAddressMode &AM, 2062 const X86Subtarget &Subtarget) { 2063 if (Shift.getOpcode() != ISD::SRL || 2064 !isa<ConstantSDNode>(Shift.getOperand(1)) || 2065 !Shift.hasOneUse() || !N.hasOneUse()) 2066 return true; 2067 2068 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm. 2069 if (!Subtarget.hasTBM() && 2070 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR())) 2071 return true; 2072 2073 // We need to ensure that mask is a continuous run of bits. 
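  // (For example, 0x0ff0 is such a run; 0x0f0f is not.)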
2074 if (!isShiftedMask_64(Mask)) return true; 2075 2076 unsigned ShiftAmt = Shift.getConstantOperandVal(1); 2077 2078 // The amount of shift we're trying to fit into the addressing mode is taken 2079 // from the trailing zeros of the mask. 2080 unsigned AMShiftAmt = countTrailingZeros(Mask); 2081 2082 // There is nothing we can do here unless the mask is removing some bits. 2083 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits. 2084 if (AMShiftAmt == 0 || AMShiftAmt > 3) return true; 2085 2086 MVT VT = N.getSimpleValueType(); 2087 SDLoc DL(N); 2088 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8); 2089 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt); 2090 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT); 2091 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask); 2092 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8); 2093 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt); 2094 2095 // Insert the new nodes into the topological ordering. We must do this in 2096 // a valid topological ordering as nothing is going to go back and re-sort 2097 // these nodes. We continually insert before 'N' in sequence as this is 2098 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no 2099 // hierarchy left to express. 2100 insertDAGNode(DAG, N, NewSRLAmt); 2101 insertDAGNode(DAG, N, NewSRL); 2102 insertDAGNode(DAG, N, NewMask); 2103 insertDAGNode(DAG, N, NewAnd); 2104 insertDAGNode(DAG, N, NewSHLAmt); 2105 insertDAGNode(DAG, N, NewSHL); 2106 DAG.ReplaceAllUsesWith(N, NewSHL); 2107 DAG.RemoveDeadNode(N.getNode()); 2108 2109 AM.Scale = 1 << AMShiftAmt; 2110 AM.IndexReg = NewAnd; 2111 return false; 2112 } 2113 2114 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 2115 unsigned Depth) { 2116 SDLoc dl(N); 2117 LLVM_DEBUG({ 2118 dbgs() << "MatchAddress: "; 2119 AM.dump(CurDAG); 2120 }); 2121 // Limit recursion. 2122 if (Depth > 5) 2123 return matchAddressBase(N, AM); 2124 2125 // If this is already a %rip relative address, we can only merge immediates 2126 // into it. Instead of handling this in every case, we handle it here. 2127 // RIP relative addressing: %rip + 32-bit displacement! 2128 if (AM.isRIPRelative()) { 2129 // FIXME: JumpTable and ExternalSymbol address currently don't like 2130 // displacements. It isn't very important, but this should be fixed for 2131 // consistency. 2132 if (!(AM.ES || AM.MCSym) && AM.JT != -1) 2133 return true; 2134 2135 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) 2136 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM)) 2137 return false; 2138 return true; 2139 } 2140 2141 switch (N.getOpcode()) { 2142 default: break; 2143 case ISD::LOCAL_RECOVER: { 2144 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0) 2145 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) { 2146 // Use the symbol and don't prefix it. 
2147 AM.MCSym = ESNode->getMCSymbol(); 2148 return false; 2149 } 2150 break; 2151 } 2152 case ISD::Constant: { 2153 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue(); 2154 if (!foldOffsetIntoAddress(Val, AM)) 2155 return false; 2156 break; 2157 } 2158 2159 case X86ISD::Wrapper: 2160 case X86ISD::WrapperRIP: 2161 if (!matchWrapper(N, AM)) 2162 return false; 2163 break; 2164 2165 case ISD::LOAD: 2166 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM)) 2167 return false; 2168 break; 2169 2170 case ISD::FrameIndex: 2171 if (AM.BaseType == X86ISelAddressMode::RegBase && 2172 AM.Base_Reg.getNode() == nullptr && 2173 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) { 2174 AM.BaseType = X86ISelAddressMode::FrameIndexBase; 2175 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex(); 2176 return false; 2177 } 2178 break; 2179 2180 case ISD::SHL: 2181 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) 2182 break; 2183 2184 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) { 2185 unsigned Val = CN->getZExtValue(); 2186 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so 2187 // that the base operand remains free for further matching. If 2188 // the base doesn't end up getting used, a post-processing step 2189 // in MatchAddress turns (,x,2) into (x,x), which is cheaper. 2190 if (Val == 1 || Val == 2 || Val == 3) { 2191 AM.Scale = 1 << Val; 2192 SDValue ShVal = N.getOperand(0); 2193 2194 // Okay, we know that we have a scale by now. However, if the scaled 2195 // value is an add of something and a constant, we can fold the 2196 // constant into the disp field here. 2197 if (CurDAG->isBaseWithConstantOffset(ShVal)) { 2198 AM.IndexReg = ShVal.getOperand(0); 2199 ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1)); 2200 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val; 2201 if (!foldOffsetIntoAddress(Disp, AM)) 2202 return false; 2203 } 2204 2205 AM.IndexReg = ShVal; 2206 return false; 2207 } 2208 } 2209 break; 2210 2211 case ISD::SRL: { 2212 // Scale must not be used already. 2213 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; 2214 2215 // We only handle up to 64-bit values here as those are what matter for 2216 // addressing mode optimizations. 2217 assert(N.getSimpleValueType().getSizeInBits() <= 64 && 2218 "Unexpected value size!"); 2219 2220 SDValue And = N.getOperand(0); 2221 if (And.getOpcode() != ISD::AND) break; 2222 SDValue X = And.getOperand(0); 2223 2224 // The mask used for the transform is expected to be post-shift, but we 2225 // found the shift first so just apply the shift to the mask before passing 2226 // it down. 2227 if (!isa<ConstantSDNode>(N.getOperand(1)) || 2228 !isa<ConstantSDNode>(And.getOperand(1))) 2229 break; 2230 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1); 2231 2232 // Try to fold the mask and shift into the scale, and return false if we 2233 // succeed. 2234 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM)) 2235 return false; 2236 break; 2237 } 2238 2239 case ISD::SMUL_LOHI: 2240 case ISD::UMUL_LOHI: 2241 // A mul_lohi where we need the low part can be folded as a plain multiply. 
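    // (Result 0 of [SU]MUL_LOHI is the low half of the product, the only
    // result that is equivalent to a plain ISD::MUL.)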
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getOperand(1))) {
            Reg = MulVal.getOperand(0);
            ConstantSDNode *AddVal =
                cast<ConstantSDNode>(MulVal.getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getOperand(0);
          } else {
            Reg = N.getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, with the
    // index field left unused, use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if the
    // index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
      N = Handle.getValue();
      AM = Backup;
      break;
    }
    N = Handle.getValue();
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = N.getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
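    // (Only a strictly negative Cost means the mov implied by negating the
    // index is expected to be paid for by the folds above.)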
2330 if (Cost >= 0) { 2331 AM = Backup; 2332 break; 2333 } 2334 2335 // Ok, the transformation is legal and appears profitable. Go for it. 2336 // Negation will be emitted later to avoid creating dangling nodes if this 2337 // was an unprofitable LEA. 2338 AM.IndexReg = RHS; 2339 AM.NegateIndex = true; 2340 AM.Scale = 1; 2341 return false; 2342 } 2343 2344 case ISD::ADD: 2345 if (!matchAdd(N, AM, Depth)) 2346 return false; 2347 break; 2348 2349 case ISD::OR: 2350 // We want to look through a transform in InstCombine and DAGCombiner that 2351 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'. 2352 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3)) 2353 // An 'lea' can then be used to match the shift (multiply) and add: 2354 // and $1, %esi 2355 // lea (%rsi, %rdi, 8), %rax 2356 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) && 2357 !matchAdd(N, AM, Depth)) 2358 return false; 2359 break; 2360 2361 case ISD::AND: { 2362 // Perform some heroic transforms on an and of a constant-count shift 2363 // with a constant to enable use of the scaled offset field. 2364 2365 // Scale must not be used already. 2366 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; 2367 2368 // We only handle up to 64-bit values here as those are what matter for 2369 // addressing mode optimizations. 2370 assert(N.getSimpleValueType().getSizeInBits() <= 64 && 2371 "Unexpected value size!"); 2372 2373 if (!isa<ConstantSDNode>(N.getOperand(1))) 2374 break; 2375 2376 if (N.getOperand(0).getOpcode() == ISD::SRL) { 2377 SDValue Shift = N.getOperand(0); 2378 SDValue X = Shift.getOperand(0); 2379 2380 uint64_t Mask = N.getConstantOperandVal(1); 2381 2382 // Try to fold the mask and shift into an extract and scale. 2383 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM)) 2384 return false; 2385 2386 // Try to fold the mask and shift directly into the scale. 2387 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM)) 2388 return false; 2389 2390 // Try to fold the mask and shift into BEXTR and scale. 2391 if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget)) 2392 return false; 2393 } 2394 2395 // Try to swap the mask and shift to place shifts which can be done as 2396 // a scale on the outside of the mask. 2397 if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM)) 2398 return false; 2399 2400 break; 2401 } 2402 case ISD::ZERO_EXTEND: { 2403 // Try to widen a zexted shift left to the same size as its use, so we can 2404 // match the shift as a scale factor. 2405 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) 2406 break; 2407 if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse()) 2408 break; 2409 2410 // Give up if the shift is not a valid scale factor [1,2,3]. 2411 SDValue Shl = N.getOperand(0); 2412 auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1)); 2413 if (!ShAmtC || ShAmtC->getZExtValue() > 3) 2414 break; 2415 2416 // The narrow shift must only shift out zero bits (it must be 'nuw'). 2417 // That makes it safe to widen to the destination type. 
2418 APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(), 2419 ShAmtC->getZExtValue()); 2420 if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros)) 2421 break; 2422 2423 // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C) 2424 MVT VT = N.getSimpleValueType(); 2425 SDLoc DL(N); 2426 SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0)); 2427 SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1)); 2428 2429 // Convert the shift to scale factor. 2430 AM.Scale = 1 << ShAmtC->getZExtValue(); 2431 AM.IndexReg = Zext; 2432 2433 insertDAGNode(*CurDAG, N, Zext); 2434 insertDAGNode(*CurDAG, N, NewShl); 2435 CurDAG->ReplaceAllUsesWith(N, NewShl); 2436 CurDAG->RemoveDeadNode(N.getNode()); 2437 return false; 2438 } 2439 } 2440 2441 return matchAddressBase(N, AM); 2442 } 2443 2444 /// Helper for MatchAddress. Add the specified node to the 2445 /// specified addressing mode without any further recursion. 2446 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) { 2447 // Is the base register already occupied? 2448 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) { 2449 // If so, check to see if the scale index register is set. 2450 if (!AM.IndexReg.getNode()) { 2451 AM.IndexReg = N; 2452 AM.Scale = 1; 2453 return false; 2454 } 2455 2456 // Otherwise, we cannot select it. 2457 return true; 2458 } 2459 2460 // Default, generate it as a register. 2461 AM.BaseType = X86ISelAddressMode::RegBase; 2462 AM.Base_Reg = N; 2463 return false; 2464 } 2465 2466 /// Helper for selectVectorAddr. Handles things that can be folded into a 2467 /// gather scatter address. The index register and scale should have already 2468 /// been handled. 2469 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) { 2470 // TODO: Support other operations. 2471 switch (N.getOpcode()) { 2472 case ISD::Constant: { 2473 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue(); 2474 if (!foldOffsetIntoAddress(Val, AM)) 2475 return false; 2476 break; 2477 } 2478 case X86ISD::Wrapper: 2479 if (!matchWrapper(N, AM)) 2480 return false; 2481 break; 2482 } 2483 2484 return matchAddressBase(N, AM); 2485 } 2486 2487 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, 2488 SDValue IndexOp, SDValue ScaleOp, 2489 SDValue &Base, SDValue &Scale, 2490 SDValue &Index, SDValue &Disp, 2491 SDValue &Segment) { 2492 X86ISelAddressMode AM; 2493 AM.IndexReg = IndexOp; 2494 AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue(); 2495 2496 unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace(); 2497 if (AddrSpace == X86AS::GS) 2498 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16); 2499 if (AddrSpace == X86AS::FS) 2500 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16); 2501 if (AddrSpace == X86AS::SS) 2502 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16); 2503 2504 SDLoc DL(BasePtr); 2505 MVT VT = BasePtr.getSimpleValueType(); 2506 2507 // Try to match into the base and displacement fields. 2508 if (matchVectorAddress(BasePtr, AM)) 2509 return false; 2510 2511 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment); 2512 return true; 2513 } 2514 2515 /// Returns true if it is able to pattern match an addressing mode. 2516 /// It returns the operands which make up the maximal addressing mode it can 2517 /// match by reference. 2518 /// 2519 /// Parent is the parent node of the addr operand that is being matched. It 2520 /// is always a load, store, atomic node, or null. 
It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // These opcodes are all the nodes that have an "addr:$ptr" operand but
      // are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
        cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    if (AddrSpace == X86AS::GS)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == X86AS::FS)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == X86AS::SS)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  // Save the DL and VT before calling matchAddress; it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  if (matchAddress(N, AM))
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'.
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  N = N.getOperand(0);

  // At least GNU as does not accept 'movl' for TPOFF relocations.
  // FIXME: We could use 'movl' when we know we are targeting MC.
  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
    return false;

  Imm = N;
  if (N->getOpcode() != ISD::TargetGlobalAddress)
    return TM.getCodeModel() == CodeModel::Small;

  Optional<ConstantRange> CR =
      cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
  if (!CR)
    return TM.getCodeModel() == CodeModel::Small;

  return CR->getUnsignedMax().ult(1ull << 32);
}

bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
  SDLoc DL(N);

  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
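    // (The 32-bit value is inserted into the low half of a fresh 64-bit
    // register; the upper half is left undefined. That is fine here because
    // LEA64_32r truncates its result to 32 bits, and (a + b) mod 2^32 depends
    // only on the low 32 bits of the inputs.)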
2597 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL, 2598 MVT::i64), 0); 2599 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef, 2600 Base); 2601 } 2602 2603 RN = dyn_cast<RegisterSDNode>(Index); 2604 if (RN && RN->getReg() == 0) 2605 Index = CurDAG->getRegister(0, MVT::i64); 2606 else { 2607 assert(Index.getValueType() == MVT::i32 && 2608 "Expect to be extending 32-bit registers for use in LEA"); 2609 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL, 2610 MVT::i64), 0); 2611 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef, 2612 Index); 2613 } 2614 2615 return true; 2616 } 2617 2618 /// Calls SelectAddr and determines if the maximal addressing 2619 /// mode it matches can be cost effectively emitted as an LEA instruction. 2620 bool X86DAGToDAGISel::selectLEAAddr(SDValue N, 2621 SDValue &Base, SDValue &Scale, 2622 SDValue &Index, SDValue &Disp, 2623 SDValue &Segment) { 2624 X86ISelAddressMode AM; 2625 2626 // Save the DL and VT before calling matchAddress, it can invalidate N. 2627 SDLoc DL(N); 2628 MVT VT = N.getSimpleValueType(); 2629 2630 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support 2631 // segments. 2632 SDValue Copy = AM.Segment; 2633 SDValue T = CurDAG->getRegister(0, MVT::i32); 2634 AM.Segment = T; 2635 if (matchAddress(N, AM)) 2636 return false; 2637 assert (T == AM.Segment); 2638 AM.Segment = Copy; 2639 2640 unsigned Complexity = 0; 2641 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode()) 2642 Complexity = 1; 2643 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase) 2644 Complexity = 4; 2645 2646 if (AM.IndexReg.getNode()) 2647 Complexity++; 2648 2649 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with 2650 // a simple shift. 2651 if (AM.Scale > 1) 2652 Complexity++; 2653 2654 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA 2655 // to a LEA. This is determined with some experimentation but is by no means 2656 // optimal (especially for code size consideration). LEA is nice because of 2657 // its three-address nature. Tweak the cost function again when we can run 2658 // convertToThreeAddress() at register allocation time. 2659 if (AM.hasSymbolicDisplacement()) { 2660 // For X86-64, always use LEA to materialize RIP-relative addresses. 2661 if (Subtarget->is64Bit()) 2662 Complexity = 4; 2663 else 2664 Complexity += 2; 2665 } 2666 2667 // Heuristic: try harder to form an LEA from ADD if the operands set flags. 2668 // Unlike ADD, LEA does not affect flags, so we will be less likely to require 2669 // duplicating flag-producing instructions later in the pipeline. 2670 if (N.getOpcode() == ISD::ADD) { 2671 auto isMathWithFlags = [](SDValue V) { 2672 switch (V.getOpcode()) { 2673 case X86ISD::ADD: 2674 case X86ISD::SUB: 2675 case X86ISD::ADC: 2676 case X86ISD::SBB: 2677 /* TODO: These opcodes can be added safely, but we may want to justify 2678 their inclusion for different reasons (better for reg-alloc). 2679 case X86ISD::SMUL: 2680 case X86ISD::UMUL: 2681 case X86ISD::OR: 2682 case X86ISD::XOR: 2683 case X86ISD::AND: 2684 */ 2685 // Value 1 is the flag output of the node - verify it's not dead. 2686 return !SDValue(V.getNode(), 1).use_empty(); 2687 default: 2688 return false; 2689 } 2690 }; 2691 // TODO: This could be an 'or' rather than 'and' to make the transform more 2692 // likely to happen. 
We might want to factor in whether there's a 2693 // load folding opportunity for the math op that disappears with LEA. 2694 if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1))) 2695 Complexity++; 2696 } 2697 2698 if (AM.Disp) 2699 Complexity++; 2700 2701 // If it isn't worth using an LEA, reject it. 2702 if (Complexity <= 2) 2703 return false; 2704 2705 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment); 2706 return true; 2707 } 2708 2709 /// This is only run on TargetGlobalTLSAddress nodes. 2710 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base, 2711 SDValue &Scale, SDValue &Index, 2712 SDValue &Disp, SDValue &Segment) { 2713 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress); 2714 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 2715 2716 X86ISelAddressMode AM; 2717 AM.GV = GA->getGlobal(); 2718 AM.Disp += GA->getOffset(); 2719 AM.SymbolFlags = GA->getTargetFlags(); 2720 2721 if (Subtarget->is32Bit()) { 2722 AM.Scale = 1; 2723 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32); 2724 } 2725 2726 MVT VT = N.getSimpleValueType(); 2727 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment); 2728 return true; 2729 } 2730 2731 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) { 2732 // Keep track of the original value type and whether this value was 2733 // truncated. If we see a truncation from pointer type to VT that truncates 2734 // bits that are known to be zero, we can use a narrow reference. 2735 EVT VT = N.getValueType(); 2736 bool WasTruncated = false; 2737 if (N.getOpcode() == ISD::TRUNCATE) { 2738 WasTruncated = true; 2739 N = N.getOperand(0); 2740 } 2741 2742 if (N.getOpcode() != X86ISD::Wrapper) 2743 return false; 2744 2745 // We can only use non-GlobalValues as immediates if they were not truncated, 2746 // as we do not have any range information. If we have a GlobalValue and the 2747 // address was not truncated, we can select it as an operand directly. 2748 unsigned Opc = N.getOperand(0)->getOpcode(); 2749 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) { 2750 Op = N.getOperand(0); 2751 // We can only select the operand directly if we didn't have to look past a 2752 // truncate. 2753 return !WasTruncated; 2754 } 2755 2756 // Check that the global's range fits into VT. 2757 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0)); 2758 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange(); 2759 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits())) 2760 return false; 2761 2762 // Okay, we can use a narrow reference. 
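  // (The !absolute_symbol range proved the symbol's value fits in VT, so the
  // narrower target-global-address node loses nothing.)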
2763 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT, 2764 GA->getOffset(), GA->getTargetFlags()); 2765 return true; 2766 } 2767 2768 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N, 2769 SDValue &Base, SDValue &Scale, 2770 SDValue &Index, SDValue &Disp, 2771 SDValue &Segment) { 2772 assert(Root && P && "Unknown root/parent nodes"); 2773 if (!ISD::isNON_EXTLoad(N.getNode()) || 2774 !IsProfitableToFold(N, P, Root) || 2775 !IsLegalToFold(N, P, Root, OptLevel)) 2776 return false; 2777 2778 return selectAddr(N.getNode(), 2779 N.getOperand(1), Base, Scale, Index, Disp, Segment); 2780 } 2781 2782 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N, 2783 SDValue &Base, SDValue &Scale, 2784 SDValue &Index, SDValue &Disp, 2785 SDValue &Segment) { 2786 assert(Root && P && "Unknown root/parent nodes"); 2787 if (N->getOpcode() != X86ISD::VBROADCAST_LOAD || 2788 !IsProfitableToFold(N, P, Root) || 2789 !IsLegalToFold(N, P, Root, OptLevel)) 2790 return false; 2791 2792 return selectAddr(N.getNode(), 2793 N.getOperand(1), Base, Scale, Index, Disp, Segment); 2794 } 2795 2796 /// Return an SDNode that returns the value of the global base register. 2797 /// Output instructions required to initialize the global base register, 2798 /// if necessary. 2799 SDNode *X86DAGToDAGISel::getGlobalBaseReg() { 2800 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); 2801 auto &DL = MF->getDataLayout(); 2802 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode(); 2803 } 2804 2805 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const { 2806 if (N->getOpcode() == ISD::TRUNCATE) 2807 N = N->getOperand(0).getNode(); 2808 if (N->getOpcode() != X86ISD::Wrapper) 2809 return false; 2810 2811 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0)); 2812 if (!GA) 2813 return false; 2814 2815 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange(); 2816 if (!CR) 2817 return Width == 32 && TM.getCodeModel() == CodeModel::Small; 2818 2819 return CR->getSignedMin().sge(-1ull << Width) && 2820 CR->getSignedMax().slt(1ull << Width); 2821 } 2822 2823 static X86::CondCode getCondFromNode(SDNode *N) { 2824 assert(N->isMachineOpcode() && "Unexpected node"); 2825 X86::CondCode CC = X86::COND_INVALID; 2826 unsigned Opc = N->getMachineOpcode(); 2827 if (Opc == X86::JCC_1) 2828 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1)); 2829 else if (Opc == X86::SETCCr) 2830 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0)); 2831 else if (Opc == X86::SETCCm) 2832 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5)); 2833 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr || 2834 Opc == X86::CMOV64rr) 2835 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2)); 2836 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm || 2837 Opc == X86::CMOV64rm) 2838 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6)); 2839 2840 return CC; 2841 } 2842 2843 /// Test whether the given X86ISD::CMP node has any users that use a flag 2844 /// other than ZF. 2845 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const { 2846 // Examine each user of the node. 2847 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end(); 2848 UI != UE; ++UI) { 2849 // Only check things that use the flags. 2850 if (UI.getUse().getResNo() != Flags.getResNo()) 2851 continue; 2852 // Only examine CopyToReg uses that copy to EFLAGS. 
2853 if (UI->getOpcode() != ISD::CopyToReg || 2854 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS) 2855 return false; 2856 // Examine each user of the CopyToReg use. 2857 for (SDNode::use_iterator FlagUI = UI->use_begin(), 2858 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 2859 // Only examine the Flag result. 2860 if (FlagUI.getUse().getResNo() != 1) continue; 2861 // Anything unusual: assume conservatively. 2862 if (!FlagUI->isMachineOpcode()) return false; 2863 // Examine the condition code of the user. 2864 X86::CondCode CC = getCondFromNode(*FlagUI); 2865 2866 switch (CC) { 2867 // Comparisons which only use the zero flag. 2868 case X86::COND_E: case X86::COND_NE: 2869 continue; 2870 // Anything else: assume conservatively. 2871 default: 2872 return false; 2873 } 2874 } 2875 } 2876 return true; 2877 } 2878 2879 /// Test whether the given X86ISD::CMP node has any uses which require the SF 2880 /// flag to be accurate. 2881 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const { 2882 // Examine each user of the node. 2883 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end(); 2884 UI != UE; ++UI) { 2885 // Only check things that use the flags. 2886 if (UI.getUse().getResNo() != Flags.getResNo()) 2887 continue; 2888 // Only examine CopyToReg uses that copy to EFLAGS. 2889 if (UI->getOpcode() != ISD::CopyToReg || 2890 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS) 2891 return false; 2892 // Examine each user of the CopyToReg use. 2893 for (SDNode::use_iterator FlagUI = UI->use_begin(), 2894 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 2895 // Only examine the Flag result. 2896 if (FlagUI.getUse().getResNo() != 1) continue; 2897 // Anything unusual: assume conservatively. 2898 if (!FlagUI->isMachineOpcode()) return false; 2899 // Examine the condition code of the user. 2900 X86::CondCode CC = getCondFromNode(*FlagUI); 2901 2902 switch (CC) { 2903 // Comparisons which don't examine the SF flag. 2904 case X86::COND_A: case X86::COND_AE: 2905 case X86::COND_B: case X86::COND_BE: 2906 case X86::COND_E: case X86::COND_NE: 2907 case X86::COND_O: case X86::COND_NO: 2908 case X86::COND_P: case X86::COND_NP: 2909 continue; 2910 // Anything else: assume conservatively. 2911 default: 2912 return false; 2913 } 2914 } 2915 } 2916 return true; 2917 } 2918 2919 static bool mayUseCarryFlag(X86::CondCode CC) { 2920 switch (CC) { 2921 // Comparisons which don't examine the CF flag. 2922 case X86::COND_O: case X86::COND_NO: 2923 case X86::COND_E: case X86::COND_NE: 2924 case X86::COND_S: case X86::COND_NS: 2925 case X86::COND_P: case X86::COND_NP: 2926 case X86::COND_L: case X86::COND_GE: 2927 case X86::COND_G: case X86::COND_LE: 2928 return false; 2929 // Anything else: assume conservatively. 2930 default: 2931 return true; 2932 } 2933 } 2934 2935 /// Test whether the given node which sets flags has any uses which require the 2936 /// CF flag to be accurate. 2937 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const { 2938 // Examine each user of the node. 2939 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end(); 2940 UI != UE; ++UI) { 2941 // Only check things that use the flags. 2942 if (UI.getUse().getResNo() != Flags.getResNo()) 2943 continue; 2944 2945 unsigned UIOpc = UI->getOpcode(); 2946 2947 if (UIOpc == ISD::CopyToReg) { 2948 // Only examine CopyToReg uses that copy to EFLAGS. 
2949 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS) 2950 return false; 2951 // Examine each user of the CopyToReg use. 2952 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end(); 2953 FlagUI != FlagUE; ++FlagUI) { 2954 // Only examine the Flag result. 2955 if (FlagUI.getUse().getResNo() != 1) 2956 continue; 2957 // Anything unusual: assume conservatively. 2958 if (!FlagUI->isMachineOpcode()) 2959 return false; 2960 // Examine the condition code of the user. 2961 X86::CondCode CC = getCondFromNode(*FlagUI); 2962 2963 if (mayUseCarryFlag(CC)) 2964 return false; 2965 } 2966 2967 // This CopyToReg is ok. Move on to the next user. 2968 continue; 2969 } 2970 2971 // This might be an unselected node. So look for the pre-isel opcodes that 2972 // use flags. 2973 unsigned CCOpNo; 2974 switch (UIOpc) { 2975 default: 2976 // Something unusual. Be conservative. 2977 return false; 2978 case X86ISD::SETCC: CCOpNo = 0; break; 2979 case X86ISD::SETCC_CARRY: CCOpNo = 0; break; 2980 case X86ISD::CMOV: CCOpNo = 2; break; 2981 case X86ISD::BRCOND: CCOpNo = 2; break; 2982 } 2983 2984 X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo); 2985 if (mayUseCarryFlag(CC)) 2986 return false; 2987 } 2988 return true; 2989 } 2990 2991 /// Check whether or not the chain ending in StoreNode is suitable for doing 2992 /// the {load; op; store} to modify transformation. 2993 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode, 2994 SDValue StoredVal, SelectionDAG *CurDAG, 2995 unsigned LoadOpNo, 2996 LoadSDNode *&LoadNode, 2997 SDValue &InputChain) { 2998 // Is the stored value result 0 of the operation? 2999 if (StoredVal.getResNo() != 0) return false; 3000 3001 // Are there other uses of the operation other than the store? 3002 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false; 3003 3004 // Is the store non-extending and non-indexed? 3005 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal()) 3006 return false; 3007 3008 SDValue Load = StoredVal->getOperand(LoadOpNo); 3009 // Is the stored value a non-extending and non-indexed load? 3010 if (!ISD::isNormalLoad(Load.getNode())) return false; 3011 3012 // Return LoadNode by reference. 3013 LoadNode = cast<LoadSDNode>(Load); 3014 3015 // Is store the only read of the loaded value? 3016 if (!Load.hasOneUse()) 3017 return false; 3018 3019 // Is the address of the store the same as the load? 3020 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() || 3021 LoadNode->getOffset() != StoreNode->getOffset()) 3022 return false; 3023 3024 bool FoundLoad = false; 3025 SmallVector<SDValue, 4> ChainOps; 3026 SmallVector<const SDNode *, 4> LoopWorklist; 3027 SmallPtrSet<const SDNode *, 16> Visited; 3028 const unsigned int Max = 1024; 3029 3030 // Visualization of Load-Op-Store fusion: 3031 // ------------------------- 3032 // Legend: 3033 // *-lines = Chain operand dependencies. 3034 // |-lines = Normal operand dependencies. 3035 // Dependencies flow down and right. n-suffix references multiple nodes. 3036 // 3037 // C Xn C 3038 // * * * 3039 // * * * 3040 // Xn A-LD Yn TF Yn 3041 // * * \ | * | 3042 // * * \ | * | 3043 // * * \ | => A--LD_OP_ST 3044 // * * \| \ 3045 // TF OP \ 3046 // * | \ Zn 3047 // * | \ 3048 // A-ST Zn 3049 // 3050 3051 // This merge induced dependences from: #1: Xn -> LD, OP, Zn 3052 // #2: Yn -> LD 3053 // #3: ST -> Zn 3054 3055 // Ensure the transform is safe by checking for the dual 3056 // dependencies to make sure we do not induce a loop. 

  // As LD is a predecessor to both OP and ST we can do this by checking:
  //  a). if LD is a predecessor to a member of Xn or Yn.
  //  b). if a Zn is a predecessor to ST.

  // However, (b) can only occur through being a chain predecessor to
  // ST, which is the same as Zn being a member or predecessor of Xn,
  // which is a subset of LD being a predecessor of Xn. So it's
  // subsumed by check (a).

  SDValue Chain = StoreNode->getChain();

  // Gather X elements in ChainOps.
  if (Chain == Load.getValue(1)) {
    FoundLoad = true;
    ChainOps.push_back(Load.getOperand(0));
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        FoundLoad = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }
      LoopWorklist.push_back(Op.getNode());
      ChainOps.push_back(Op);
    }
  }

  if (!FoundLoad)
    return false;

  // Worklist is currently Xn. Add Yn to worklist.
  for (SDValue Op : StoredVal->ops())
    if (Op.getNode() != LoadNode)
      LoopWorklist.push_back(Op.getNode());

  // Check (a) if Load is a predecessor to Xn + Yn.
  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
                                   true))
    return false;

  InputChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
  return true;
}

// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen memory operand pattern is currently not able to match the
// case where the EFLAGS on the original operation are used.
//
// To move this to tablegen, we'll need to improve tablegen to allow flags to
// be transferred from a node in the pattern to the result node, probably with
// a new keyword. For example, we have this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                    (implicit EFLAGS)]>;
// but maybe need something like this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                    (transferrable EFLAGS)]>;
//
// Until then, we manually fold these and instruction select the operation
// here.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is a memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
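  // (As a sketch, a successful fold here turns, e.g.,
  //   t1 = X86ISD::ADD (load [m]), %r ; store t1, [m]
  // into a single ADD64mr machine node whose flag result stands in for the
  // EFLAGS produced by the original arithmetic node.)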
3133   EVT MemVT = StoreNode->getMemoryVT();
3134   if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3135       MemVT != MVT::i8)
3136     return false;
3137
3138   bool IsCommutable = false;
3139   bool IsNegate = false;
3140   switch (Opc) {
3141   default:
3142     return false;
3143   case X86ISD::SUB:
3144     IsNegate = isNullConstant(StoredVal.getOperand(0));
3145     break;
3146   case X86ISD::SBB:
3147     break;
3148   case X86ISD::ADD:
3149   case X86ISD::ADC:
3150   case X86ISD::AND:
3151   case X86ISD::OR:
3152   case X86ISD::XOR:
3153     IsCommutable = true;
3154     break;
3155   }
3156
3157   unsigned LoadOpNo = IsNegate ? 1 : 0;
3158   LoadSDNode *LoadNode = nullptr;
3159   SDValue InputChain;
3160   if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3161                                    LoadNode, InputChain)) {
3162     if (!IsCommutable)
3163       return false;
3164
3165     // This operation is commutable, try the other operand.
3166     LoadOpNo = 1;
3167     if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3168                                      LoadNode, InputChain))
3169       return false;
3170   }
3171
3172   SDValue Base, Scale, Index, Disp, Segment;
3173   if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3174                   Segment))
3175     return false;
3176
3177   auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3178                           unsigned Opc8) {
3179     switch (MemVT.getSimpleVT().SimpleTy) {
3180     case MVT::i64:
3181       return Opc64;
3182     case MVT::i32:
3183       return Opc32;
3184     case MVT::i16:
3185       return Opc16;
3186     case MVT::i8:
3187       return Opc8;
3188     default:
3189       llvm_unreachable("Invalid size!");
3190     }
3191   };
3192
3193   MachineSDNode *Result;
3194   switch (Opc) {
3195   case X86ISD::SUB:
3196     // Handle negate.
3197     if (IsNegate) {
3198       unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3199                                      X86::NEG8m);
3200       const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3201       Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3202                                       MVT::Other, Ops);
3203       break;
3204     }
3205     LLVM_FALLTHROUGH;
3206   case X86ISD::ADD:
3207     // Try to match inc/dec.
3208     if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3209       bool IsOne = isOneConstant(StoredVal.getOperand(1));
3210       bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3211       // An ADD/SUB with 1/-1 where the carry flag is unused can use INC/DEC.
3212       if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3213         unsigned NewOpc =
3214             ((Opc == X86ISD::ADD) == IsOne)
3215                 ?
SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m) 3216 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m); 3217 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain}; 3218 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, 3219 MVT::Other, Ops); 3220 break; 3221 } 3222 } 3223 LLVM_FALLTHROUGH; 3224 case X86ISD::ADC: 3225 case X86ISD::SBB: 3226 case X86ISD::AND: 3227 case X86ISD::OR: 3228 case X86ISD::XOR: { 3229 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) { 3230 switch (Opc) { 3231 case X86ISD::ADD: 3232 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr, 3233 X86::ADD8mr); 3234 case X86ISD::ADC: 3235 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr, 3236 X86::ADC8mr); 3237 case X86ISD::SUB: 3238 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr, 3239 X86::SUB8mr); 3240 case X86ISD::SBB: 3241 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr, 3242 X86::SBB8mr); 3243 case X86ISD::AND: 3244 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr, 3245 X86::AND8mr); 3246 case X86ISD::OR: 3247 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr); 3248 case X86ISD::XOR: 3249 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr, 3250 X86::XOR8mr); 3251 default: 3252 llvm_unreachable("Invalid opcode!"); 3253 } 3254 }; 3255 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) { 3256 switch (Opc) { 3257 case X86ISD::ADD: 3258 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0); 3259 case X86ISD::ADC: 3260 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0); 3261 case X86ISD::SUB: 3262 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0); 3263 case X86ISD::SBB: 3264 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0); 3265 case X86ISD::AND: 3266 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0); 3267 case X86ISD::OR: 3268 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0); 3269 case X86ISD::XOR: 3270 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0); 3271 default: 3272 llvm_unreachable("Invalid opcode!"); 3273 } 3274 }; 3275 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) { 3276 switch (Opc) { 3277 case X86ISD::ADD: 3278 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi, 3279 X86::ADD8mi); 3280 case X86ISD::ADC: 3281 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi, 3282 X86::ADC8mi); 3283 case X86ISD::SUB: 3284 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi, 3285 X86::SUB8mi); 3286 case X86ISD::SBB: 3287 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi, 3288 X86::SBB8mi); 3289 case X86ISD::AND: 3290 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi, 3291 X86::AND8mi); 3292 case X86ISD::OR: 3293 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi, 3294 X86::OR8mi); 3295 case X86ISD::XOR: 3296 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi, 3297 X86::XOR8mi); 3298 default: 3299 llvm_unreachable("Invalid opcode!"); 3300 } 3301 }; 3302 3303 unsigned NewOpc = SelectRegOpcode(Opc); 3304 SDValue Operand = StoredVal->getOperand(1-LoadOpNo); 3305 3306 // See if the operand is a constant that we can fold into an immediate 3307 // operand. 
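  // For example (a sketch): storing (X86ISD::ADD (load p), 128) to p on i32
  // cannot use an imm8, since 128 is not a signed 8-bit value, but when the
  // carry flag is unused the code below negates the constant and switches
  // ADD to SUB, so a SUB32mi8 with -128 is emitted instead.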
3308     if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3309       int64_t OperandV = OperandC->getSExtValue();
3310
3311       // Check if we can shrink the operand enough to fit in an immediate (or
3312       // fit into a smaller immediate) by negating it and switching the
3313       // operation.
3314       if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3315           ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3316            (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3317             isInt<32>(-OperandV))) &&
3318           hasNoCarryFlagUses(StoredVal.getValue(1))) {
3319         OperandV = -OperandV;
3320         Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3321       }
3322
3323       // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3324       // the larger immediate operand.
3325       if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3326         Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3327         NewOpc = SelectImm8Opcode(Opc);
3328       } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3329         Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3330         NewOpc = SelectImmOpcode(Opc);
3331       }
3332     }
3333
3334     if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3335       SDValue CopyTo =
3336           CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3337                                StoredVal.getOperand(2), SDValue());
3338
3339       const SDValue Ops[] = {Base, Scale, Index, Disp,
3340                              Segment, Operand, CopyTo, CopyTo.getValue(1)};
3341       Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3342                                       Ops);
3343     } else {
3344       const SDValue Ops[] = {Base, Scale, Index, Disp,
3345                              Segment, Operand, InputChain};
3346       Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3347                                       Ops);
3348     }
3349     break;
3350   }
3351   default:
3352     llvm_unreachable("Invalid opcode!");
3353   }
3354
3355   MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3356                                  LoadNode->getMemOperand()};
3357   CurDAG->setNodeMemRefs(Result, MemOps);
3358
3359   // Update Load Chain uses as well.
3360   ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3361   ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3362   ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3363   CurDAG->RemoveDeadNode(Node);
3364   return true;
3365 }
3366
3367 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3368 // Where Mask is one of the following patterns:
3369 //   a) x &  (1 << nbits) - 1
3370 //   b) x & ~(-1 << nbits)
3371 //   c) x &  (-1 >> (32 - y))
3372 //   d) x << (32 - y) >> (32 - y)
3373 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3374   assert(
3375       (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3376       "Should be either an and-mask, or right-shift after clearing high bits.");
3377
3378   // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least one.
3379   if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3380     return false;
3381
3382   MVT NVT = Node->getSimpleValueType(0);
3383
3384   // Only supported for 32 and 64 bits.
3385   if (NVT != MVT::i32 && NVT != MVT::i64)
3386     return false;
3387
3388   SDValue NBits;
3389
3390   // If we have BMI2's BZHI, we are ok with multi-use patterns.
3391   // Else, if we only have BMI1's BEXTR, we require one-use.
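  // For instance (a sketch of pattern a above): given i32 values
  //   %m = add (shl 1, %n), -1    ; i.e. (1 << %n) - 1
  //   %r = and %x, %m
  // the matchers below set NBits = %n, and with BMI2 this selects to
  // (X86ISD::BZHI %x, %n).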
3392 const bool CanHaveExtraUses = Subtarget->hasBMI2(); 3393 auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) { 3394 return CanHaveExtraUses || 3395 Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo()); 3396 }; 3397 auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); }; 3398 auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); }; 3399 3400 auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) { 3401 if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) { 3402 assert(V.getSimpleValueType() == MVT::i32 && 3403 V.getOperand(0).getSimpleValueType() == MVT::i64 && 3404 "Expected i64 -> i32 truncation"); 3405 V = V.getOperand(0); 3406 } 3407 return V; 3408 }; 3409 3410 // a) x & ((1 << nbits) + (-1)) 3411 auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, 3412 &NBits](SDValue Mask) -> bool { 3413 // Match `add`. Must only have one use! 3414 if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask)) 3415 return false; 3416 // We should be adding all-ones constant (i.e. subtracting one.) 3417 if (!isAllOnesConstant(Mask->getOperand(1))) 3418 return false; 3419 // Match `1 << nbits`. Might be truncated. Must only have one use! 3420 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0)); 3421 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0)) 3422 return false; 3423 if (!isOneConstant(M0->getOperand(0))) 3424 return false; 3425 NBits = M0->getOperand(1); 3426 return true; 3427 }; 3428 3429 auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) { 3430 V = peekThroughOneUseTruncation(V); 3431 return CurDAG->MaskedValueIsAllOnes( 3432 V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(), 3433 NVT.getSizeInBits())); 3434 }; 3435 3436 // b) x & ~(-1 << nbits) 3437 auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation, 3438 &NBits](SDValue Mask) -> bool { 3439 // Match `~()`. Must only have one use! 3440 if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask)) 3441 return false; 3442 // The -1 only has to be all-ones for the final Node's NVT. 3443 if (!isAllOnes(Mask->getOperand(1))) 3444 return false; 3445 // Match `-1 << nbits`. Might be truncated. Must only have one use! 3446 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0)); 3447 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0)) 3448 return false; 3449 // The -1 only has to be all-ones for the final Node's NVT. 3450 if (!isAllOnes(M0->getOperand(0))) 3451 return false; 3452 NBits = M0->getOperand(1); 3453 return true; 3454 }; 3455 3456 // Match potentially-truncated (bitwidth - y) 3457 auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt, 3458 unsigned Bitwidth) { 3459 // Skip over a truncate of the shift amount. 3460 if (ShiftAmt.getOpcode() == ISD::TRUNCATE) { 3461 ShiftAmt = ShiftAmt.getOperand(0); 3462 // The trunc should have been the only user of the real shift amount. 3463 if (!checkOneUse(ShiftAmt)) 3464 return false; 3465 } 3466 // Match the shift amount as: (bitwidth - y). It should go away, too. 3467 if (ShiftAmt.getOpcode() != ISD::SUB) 3468 return false; 3469 auto *V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0)); 3470 if (!V0 || V0->getZExtValue() != Bitwidth) 3471 return false; 3472 NBits = ShiftAmt.getOperand(1); 3473 return true; 3474 }; 3475 3476 // c) x & (-1 >> (32 - y)) 3477 auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, 3478 matchShiftAmt](SDValue Mask) -> bool { 3479 // The mask itself may be truncated. 
3480     Mask = peekThroughOneUseTruncation(Mask);
3481     unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3482     // Match `l>>`. Must only have one use!
3483     if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3484       return false;
3485     // We should be shifting a truly all-ones constant.
3486     if (!isAllOnesConstant(Mask.getOperand(0)))
3487       return false;
3488     SDValue M1 = Mask.getOperand(1);
3489     // The shift amount should not be used externally.
3490     if (!checkOneUse(M1))
3491       return false;
3492     return matchShiftAmt(M1, Bitwidth);
3493   };
3494
3495   SDValue X;
3496
3497   // d) x << (32 - y) >> (32 - y)
3498   auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
3499                         &X](SDNode *Node) -> bool {
3500     if (Node->getOpcode() != ISD::SRL)
3501       return false;
3502     SDValue N0 = Node->getOperand(0);
3503     if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
3504       return false;
3505     unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3506     SDValue N1 = Node->getOperand(1);
3507     SDValue N01 = N0->getOperand(1);
3508     // Both of the shifts must be by the exact same value.
3509     // There should not be any uses of the shift amount outside of the pattern.
3510     if (N1 != N01 || !checkTwoUse(N1))
3511       return false;
3512     if (!matchShiftAmt(N1, Bitwidth))
3513       return false;
3514     X = N0->getOperand(0);
3515     return true;
3516   };
3517
3518   auto matchLowBitMask = [matchPatternA, matchPatternB,
3519                           matchPatternC](SDValue Mask) -> bool {
3520     return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3521   };
3522
3523   if (Node->getOpcode() == ISD::AND) {
3524     X = Node->getOperand(0);
3525     SDValue Mask = Node->getOperand(1);
3526
3527     if (matchLowBitMask(Mask)) {
3528       // Great.
3529     } else {
3530       std::swap(X, Mask);
3531       if (!matchLowBitMask(Mask))
3532         return false;
3533     }
3534   } else if (!matchPatternD(Node))
3535     return false;
3536
3537   SDLoc DL(Node);
3538
3539   // Truncate the shift amount.
3540   NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3541   insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3542
3543   // Insert the 8-bit NBits into the lowest 8 bits of a 32-bit register.
3544   // All the other bits are undefined; we do not care about them.
3545   SDValue ImplDef = SDValue(
3546       CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3547   insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3548
3549   SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3550   insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3551   NBits = SDValue(
3552       CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
3553                              NBits, SRIdxVal), 0);
3554   insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3555
3556   if (Subtarget->hasBMI2()) {
3557     // Great, just emit the BZHI.
3558     if (NVT != MVT::i32) {
3559       // But have to place the bit count into the wide-enough register first.
3560       NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3561       insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3562     }
3563
3564     SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3565     ReplaceNode(Node, Extract.getNode());
3566     SelectCode(Extract.getNode());
3567     return true;
3568   }
3569
3570   // Else, if we do *NOT* have BMI2, let's find out if 'X' is *logically*
3571   // shifted (potentially with a one-use trunc in between),
3572   // and the truncation was the only use of the shift,
3573   // and if so look past the one-use truncation.
3574   {
3575     SDValue RealX = peekThroughOneUseTruncation(X);
3576     // FIXME: only if the shift is one-use?
3577     if (RealX != X && RealX.getOpcode() == ISD::SRL)
3578       X = RealX;
3579   }
3580
3581   MVT XVT = X.getSimpleValueType();
3582
3583   // Else, emitting BEXTR requires one more step.
3584   // The 'control' of BEXTR has the pattern of:
3585   // [15...8 bit][ 7...0 bit] location
3586   // [ bit count][     shift] name
3587   // I.e. 0b000000011'00000001 means  (x >> 0b1) & 0b11
3588
3589   // Shift NBits left by 8 bits, thus producing 'control'.
3590   // This makes the low 8 bits zero.
3591   SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3592   insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
3593   SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3594   insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3595
3596   // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3597   // FIXME: only if the shift is one-use?
3598   if (X.getOpcode() == ISD::SRL) {
3599     SDValue ShiftAmt = X.getOperand(1);
3600     X = X.getOperand(0);
3601
3602     assert(ShiftAmt.getValueType() == MVT::i8 &&
3603            "Expected shift amount to be i8");
3604
3605     // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3606     // We could zext to i16 in some form, but we intentionally don't do that.
3607     SDValue OrigShiftAmt = ShiftAmt;
3608     ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3609     insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3610
3611     // And now 'or' these low 8 bits of the shift amount into the 'control'.
3612     Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3613     insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3614   }
3615
3616   // But have to place the 'control' into the wide-enough register first.
3617   if (XVT != MVT::i32) {
3618     Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3619     insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3620   }
3621
3622   // And finally, form the BEXTR itself.
3623   SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3624
3625   // The 'X' was originally truncated. Re-apply that truncation now.
3626   if (XVT != NVT) {
3627     insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3628     Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3629   }
3630
3631   ReplaceNode(Node, Extract.getNode());
3632   SelectCode(Extract.getNode());
3633
3634   return true;
3635 }
3636
3637 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3638 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3639   MVT NVT = Node->getSimpleValueType(0);
3640   SDLoc dl(Node);
3641
3642   SDValue N0 = Node->getOperand(0);
3643   SDValue N1 = Node->getOperand(1);
3644
3645   // If we have TBM we can use an immediate for the control. If we have BMI
3646   // we should only do this if the BEXTR instruction is implemented well.
3647   // Otherwise moving the control into a register makes this more costly.
3648   // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3649   // hoisting the move immediate would make it worthwhile with a less optimal
3650   // BEXTR?
3651   bool PreferBEXTR =
3652       Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3653   if (!PreferBEXTR && !Subtarget->hasBMI2())
3654     return nullptr;
3655
3656   // Must have a shift right.
3657   if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3658     return nullptr;
3659
3660   // Shift can't have additional users.
3661   if (!N0->hasOneUse())
3662     return nullptr;
3663
3664   // Only supported for 32 and 64 bits.
3665   if (NVT != MVT::i32 && NVT != MVT::i64)
3666     return nullptr;
3667
3668   // Shift amount and RHS of and must be constant.
3669   ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3670   ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3671   if (!MaskCst || !ShiftCst)
3672     return nullptr;
3673
3674   // And RHS must be a mask.
3675   uint64_t Mask = MaskCst->getZExtValue();
3676   if (!isMask_64(Mask))
3677     return nullptr;
3678
3679   uint64_t Shift = ShiftCst->getZExtValue();
3680   uint64_t MaskSize = countPopulation(Mask);
3681
3682   // Don't interfere with something that can be handled by extracting AH.
3683   // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3684   if (Shift == 8 && MaskSize == 8)
3685     return nullptr;
3686
3687   // Make sure we are only using bits that were in the original value, not
3688   // shifted in.
3689   if (Shift + MaskSize > NVT.getSizeInBits())
3690     return nullptr;
3691
3692   // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3693   // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3694   // does not fit into 32 bits. Load folding is not a sufficient reason.
3695   if (!PreferBEXTR && MaskSize <= 32)
3696     return nullptr;
3697
3698   SDValue Control;
3699   unsigned ROpc, MOpc;
3700
3701   if (!PreferBEXTR) {
3702     assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3703     // If we can't make use of BEXTR then we can't fuse the shift+mask stages.
3704     // Let's perform the mask first, and apply the shift later. Note that we
3705     // need to widen the mask to account for applying the shift afterwards!
3706     Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3707     ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3708     MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3709     unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3710     Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3711   } else {
3712     // The 'control' of BEXTR has the pattern of:
3713     // [15...8 bit][ 7...0 bit] location
3714     // [ bit count][     shift] name
3715     // I.e. 0b000000011'00000001 means  (x >> 0b1) & 0b11
3716     Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3717     if (Subtarget->hasTBM()) {
3718       ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3719       MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3720     } else {
3721       assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3722       // BMI requires the immediate to be placed in a register.
3723       ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3724       MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3725       unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3726       Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3727     }
3728   }
3729
3730   MachineSDNode *NewNode;
3731   SDValue Input = N0->getOperand(0);
3732   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3733   if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3734     SDValue Ops[] = {
3735         Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3736     SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3737     NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3738     // Update the chain.
3739     ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3740     // Record the mem-refs
3741     CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3742   } else {
3743     NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3744   }
3745
3746   if (!PreferBEXTR) {
3747     // We still need to apply the shift.
3748     SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3749     unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3750     NewNode =
3751         CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3752   }
3753
3754   return NewNode;
3755 }
3756
3757 // Emit a PCMPISTR(I/M) instruction.
3758 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3759                                              bool MayFoldLoad, const SDLoc &dl,
3760                                              MVT VT, SDNode *Node) {
3761   SDValue N0 = Node->getOperand(0);
3762   SDValue N1 = Node->getOperand(1);
3763   SDValue Imm = Node->getOperand(2);
3764   const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3765   Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3766
3767   // Try to fold a load. No need to check alignment.
3768   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3769   if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3770     SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3771                       N1.getOperand(0) };
3772     SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3773     MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3774     // Update the chain.
3775     ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3776     // Record the mem-refs
3777     CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3778     return CNode;
3779   }
3780
3781   SDValue Ops[] = { N0, N1, Imm };
3782   SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3783   MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3784   return CNode;
3785 }
3786
3787 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we
3788 // need to emit a second instruction after this one. This is needed since we
3789 // have two CopyToReg nodes glued before this, and we need to continue that glue through.
3790 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3791                                              bool MayFoldLoad, const SDLoc &dl,
3792                                              MVT VT, SDNode *Node,
3793                                              SDValue &InFlag) {
3794   SDValue N0 = Node->getOperand(0);
3795   SDValue N2 = Node->getOperand(2);
3796   SDValue Imm = Node->getOperand(4);
3797   const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3798   Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3799
3800   // Try to fold a load. No need to check alignment.
3801   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3802   if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3803     SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3804                       N2.getOperand(0), InFlag };
3805     SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3806     MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3807     InFlag = SDValue(CNode, 3);
3808     // Update the chain.
3809 ReplaceUses(N2.getValue(1), SDValue(CNode, 2)); 3810 // Record the mem-refs 3811 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()}); 3812 return CNode; 3813 } 3814 3815 SDValue Ops[] = { N0, N2, Imm, InFlag }; 3816 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue); 3817 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops); 3818 InFlag = SDValue(CNode, 2); 3819 return CNode; 3820 } 3821 3822 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) { 3823 EVT VT = N->getValueType(0); 3824 3825 // Only handle scalar shifts. 3826 if (VT.isVector()) 3827 return false; 3828 3829 // Narrower shifts only mask to 5 bits in hardware. 3830 unsigned Size = VT == MVT::i64 ? 64 : 32; 3831 3832 SDValue OrigShiftAmt = N->getOperand(1); 3833 SDValue ShiftAmt = OrigShiftAmt; 3834 SDLoc DL(N); 3835 3836 // Skip over a truncate of the shift amount. 3837 if (ShiftAmt->getOpcode() == ISD::TRUNCATE) 3838 ShiftAmt = ShiftAmt->getOperand(0); 3839 3840 // This function is called after X86DAGToDAGISel::matchBitExtract(), 3841 // so we are not afraid that we might mess up BZHI/BEXTR pattern. 3842 3843 SDValue NewShiftAmt; 3844 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) { 3845 SDValue Add0 = ShiftAmt->getOperand(0); 3846 SDValue Add1 = ShiftAmt->getOperand(1); 3847 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X 3848 // to avoid the ADD/SUB. 3849 if (isa<ConstantSDNode>(Add1) && 3850 cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) { 3851 NewShiftAmt = Add0; 3852 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to 3853 // generate a NEG instead of a SUB of a constant. 3854 } else if (ShiftAmt->getOpcode() == ISD::SUB && 3855 isa<ConstantSDNode>(Add0) && 3856 cast<ConstantSDNode>(Add0)->getZExtValue() != 0 && 3857 cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) { 3858 // Insert a negate op. 3859 // TODO: This isn't guaranteed to replace the sub if there is a logic cone 3860 // that uses it that's not a shift. 3861 EVT SubVT = ShiftAmt.getValueType(); 3862 SDValue Zero = CurDAG->getConstant(0, DL, SubVT); 3863 SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1); 3864 NewShiftAmt = Neg; 3865 3866 // Insert these operands into a valid topological order so they can 3867 // get selected independently. 3868 insertDAGNode(*CurDAG, OrigShiftAmt, Zero); 3869 insertDAGNode(*CurDAG, OrigShiftAmt, Neg); 3870 } else 3871 return false; 3872 } else 3873 return false; 3874 3875 if (NewShiftAmt.getValueType() != MVT::i8) { 3876 // Need to truncate the shift amount. 3877 NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt); 3878 // Add to a correct topological ordering. 3879 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt); 3880 } 3881 3882 // Insert a new mask to keep the shift amount legal. This should be removed 3883 // by isel patterns. 3884 NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt, 3885 CurDAG->getConstant(Size - 1, DL, MVT::i8)); 3886 // Place in a correct topological ordering. 3887 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt); 3888 3889 SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0), 3890 NewShiftAmt); 3891 if (UpdatedNode != N) { 3892 // If we found an existing node, we should replace ourselves with that node 3893 // and wait for it to be selected after its other users. 
3894     ReplaceNode(N, UpdatedNode);
3895     return true;
3896   }
3897
3898   // If the original shift amount is now dead, delete it so that we don't run
3899   // it through isel.
3900   if (OrigShiftAmt.getNode()->use_empty())
3901     CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
3902
3903   // Now that we've optimized the shift amount, defer to normal isel to get
3904   // load folding and legacy vs BMI2 selection without repeating it here.
3905   SelectCode(N);
3906   return true;
3907 }
3908
3909 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
3910   MVT NVT = N->getSimpleValueType(0);
3911   unsigned Opcode = N->getOpcode();
3912   SDLoc dl(N);
3913
3914   // For operations of the form (x << C1) op C2, check if we can use a smaller
3915   // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
3916   SDValue Shift = N->getOperand(0);
3917   SDValue N1 = N->getOperand(1);
3918
3919   ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
3920   if (!Cst)
3921     return false;
3922
3923   int64_t Val = Cst->getSExtValue();
3924
3925   // If we have an any_extend feeding the AND, look through it to see if there
3926   // is a shift behind it. But only if the AND doesn't use the extended bits.
3927   // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
3928   bool FoundAnyExtend = false;
3929   if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
3930       Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
3931       isUInt<32>(Val)) {
3932     FoundAnyExtend = true;
3933     Shift = Shift.getOperand(0);
3934   }
3935
3936   if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
3937     return false;
3938
3939   // i8 is unshrinkable, i16 should be promoted to i32.
3940   if (NVT != MVT::i32 && NVT != MVT::i64)
3941     return false;
3942
3943   ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
3944   if (!ShlCst)
3945     return false;
3946
3947   uint64_t ShAmt = ShlCst->getZExtValue();
3948
3949   // Make sure that we don't change the operation by removing bits.
3950   // This only matters for OR and XOR, AND is unaffected.
3951   uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
3952   if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
3953     return false;
3954
3955   // Check the minimum bitwidth for the new constant.
3956   // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
3957   auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
3958     if (Opcode == ISD::AND) {
3959       // AND32ri is the same as AND64ri32 with zext imm.
3960       // Try this before sign extended immediates below.
3961       ShiftedVal = (uint64_t)Val >> ShAmt;
3962       if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3963         return true;
3964       // Also swap order when the AND can become MOVZX.
3965       if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
3966         return true;
3967     }
3968     ShiftedVal = Val >> ShAmt;
3969     if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
3970         (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
3971       return true;
3972     if (Opcode != ISD::AND) {
3973       // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
3974       ShiftedVal = (uint64_t)Val >> ShAmt;
3975       if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3976         return true;
3977     }
3978     return false;
3979   };
3980
3981   int64_t ShiftedVal;
3982   if (!CanShrinkImmediate(ShiftedVal))
3983     return false;
3984
3985   // Ok, we can reorder to get a smaller immediate.
3986
3987   // But, it's possible the original immediate allowed an AND to become MOVZX.
3988   // We do this check late in order to delay the MaskedValueIsZero call as
3989   // long as possible.
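  // Sketch of the bail-out below: for (and (shl %x, 8), 0xFF00) the shifted
  // value has its low 8 bits known zero, so the 0xFF00 mask is equivalent to
  // 0xFFFF and the AND can be selected as a MOVZX; reordering it into
  // (shl (and %x, 0xFF), 8) would hide that.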
3990 if (Opcode == ISD::AND) { 3991 // Find the smallest zext this could possibly be. 3992 unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits(); 3993 ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U)); 3994 3995 // Figure out which bits need to be zero to achieve that mask. 3996 APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(), 3997 ZExtWidth); 3998 NeededMask &= ~Cst->getAPIntValue(); 3999 4000 if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask)) 4001 return false; 4002 } 4003 4004 SDValue X = Shift.getOperand(0); 4005 if (FoundAnyExtend) { 4006 SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X); 4007 insertDAGNode(*CurDAG, SDValue(N, 0), NewX); 4008 X = NewX; 4009 } 4010 4011 SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT); 4012 insertDAGNode(*CurDAG, SDValue(N, 0), NewCst); 4013 SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst); 4014 insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp); 4015 SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp, 4016 Shift.getOperand(1)); 4017 ReplaceNode(N, NewSHL.getNode()); 4018 SelectCode(NewSHL.getNode()); 4019 return true; 4020 } 4021 4022 bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA, 4023 SDNode *ParentBC, SDValue A, SDValue B, 4024 SDValue C, uint8_t Imm) { 4025 assert(A.isOperandOf(ParentA)); 4026 assert(B.isOperandOf(ParentBC)); 4027 assert(C.isOperandOf(ParentBC)); 4028 4029 auto tryFoldLoadOrBCast = 4030 [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale, 4031 SDValue &Index, SDValue &Disp, SDValue &Segment) { 4032 if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment)) 4033 return true; 4034 4035 // Not a load, check for broadcast which may be behind a bitcast. 4036 if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) { 4037 P = L.getNode(); 4038 L = L.getOperand(0); 4039 } 4040 4041 if (L.getOpcode() != X86ISD::VBROADCAST_LOAD) 4042 return false; 4043 4044 // Only 32 and 64 bit broadcasts are supported. 4045 auto *MemIntr = cast<MemIntrinsicSDNode>(L); 4046 unsigned Size = MemIntr->getMemoryVT().getSizeInBits(); 4047 if (Size != 32 && Size != 64) 4048 return false; 4049 4050 return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment); 4051 }; 4052 4053 bool FoldedLoad = false; 4054 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 4055 if (tryFoldLoadOrBCast(Root, ParentBC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 4056 FoldedLoad = true; 4057 } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3, 4058 Tmp4)) { 4059 FoldedLoad = true; 4060 std::swap(A, C); 4061 // Swap bits 1/4 and 3/6. 4062 uint8_t OldImm = Imm; 4063 Imm = OldImm & 0xa5; 4064 if (OldImm & 0x02) Imm |= 0x10; 4065 if (OldImm & 0x10) Imm |= 0x02; 4066 if (OldImm & 0x08) Imm |= 0x40; 4067 if (OldImm & 0x40) Imm |= 0x08; 4068 } else if (tryFoldLoadOrBCast(Root, ParentBC, B, Tmp0, Tmp1, Tmp2, Tmp3, 4069 Tmp4)) { 4070 FoldedLoad = true; 4071 std::swap(B, C); 4072 // Swap bits 1/2 and 5/6. 
4073 uint8_t OldImm = Imm; 4074 Imm = OldImm & 0x99; 4075 if (OldImm & 0x02) Imm |= 0x04; 4076 if (OldImm & 0x04) Imm |= 0x02; 4077 if (OldImm & 0x20) Imm |= 0x40; 4078 if (OldImm & 0x40) Imm |= 0x20; 4079 } 4080 4081 SDLoc DL(Root); 4082 4083 SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8); 4084 4085 MVT NVT = Root->getSimpleValueType(0); 4086 4087 MachineSDNode *MNode; 4088 if (FoldedLoad) { 4089 SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other); 4090 4091 unsigned Opc; 4092 if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) { 4093 auto *MemIntr = cast<MemIntrinsicSDNode>(C); 4094 unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits(); 4095 assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!"); 4096 4097 bool UseD = EltSize == 32; 4098 if (NVT.is128BitVector()) 4099 Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi; 4100 else if (NVT.is256BitVector()) 4101 Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi; 4102 else if (NVT.is512BitVector()) 4103 Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi; 4104 else 4105 llvm_unreachable("Unexpected vector size!"); 4106 } else { 4107 bool UseD = NVT.getVectorElementType() == MVT::i32; 4108 if (NVT.is128BitVector()) 4109 Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi; 4110 else if (NVT.is256BitVector()) 4111 Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi; 4112 else if (NVT.is512BitVector()) 4113 Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi; 4114 else 4115 llvm_unreachable("Unexpected vector size!"); 4116 } 4117 4118 SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)}; 4119 MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops); 4120 4121 // Update the chain. 4122 ReplaceUses(C.getValue(1), SDValue(MNode, 1)); 4123 // Record the mem-refs 4124 CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()}); 4125 } else { 4126 bool UseD = NVT.getVectorElementType() == MVT::i32; 4127 unsigned Opc; 4128 if (NVT.is128BitVector()) 4129 Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri; 4130 else if (NVT.is256BitVector()) 4131 Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri; 4132 else if (NVT.is512BitVector()) 4133 Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri; 4134 else 4135 llvm_unreachable("Unexpected vector size!"); 4136 4137 MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm}); 4138 } 4139 4140 ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0)); 4141 CurDAG->RemoveDeadNode(Root); 4142 return true; 4143 } 4144 4145 // Try to match two logic ops to a VPTERNLOG. 4146 // FIXME: Handle inverted inputs? 4147 // FIXME: Handle more complex patterns that use an operand more than once? 4148 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) { 4149 MVT NVT = N->getSimpleValueType(0); 4150 4151 // Make sure we support VPTERNLOG. 4152 if (!NVT.isVector() || !Subtarget->hasAVX512() || 4153 NVT.getVectorElementType() == MVT::i1) 4154 return false; 4155 4156 // We need VLX for 128/256-bit. 4157 if (!(Subtarget->hasVLX() || NVT.is512BitVector())) 4158 return false; 4159 4160 SDValue N0 = N->getOperand(0); 4161 SDValue N1 = N->getOperand(1); 4162 4163 auto getFoldableLogicOp = [](SDValue Op) { 4164 // Peek through single use bitcast. 
4165     if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4166       Op = Op.getOperand(0);
4167
4168     if (!Op.hasOneUse())
4169       return SDValue();
4170
4171     unsigned Opc = Op.getOpcode();
4172     if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4173         Opc == X86ISD::ANDNP)
4174       return Op;
4175
4176     return SDValue();
4177   };
4178
4179   SDValue A, FoldableOp;
4180   if ((FoldableOp = getFoldableLogicOp(N1))) {
4181     A = N0;
4182   } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4183     A = N1;
4184   } else
4185     return false;
4186
4187   SDValue B = FoldableOp.getOperand(0);
4188   SDValue C = FoldableOp.getOperand(1);
4189
4190   // We can build the appropriate control immediate by performing the logic
4191   // operation we're matching using these constants for A, B, and C.
4192   const uint8_t TernlogMagicA = 0xf0;
4193   const uint8_t TernlogMagicB = 0xcc;
4194   const uint8_t TernlogMagicC = 0xaa;
4195
4196   uint8_t Imm;
4197   switch (FoldableOp.getOpcode()) {
4198   default: llvm_unreachable("Unexpected opcode!");
4199   case ISD::AND:      Imm = TernlogMagicB & TernlogMagicC; break;
4200   case ISD::OR:       Imm = TernlogMagicB | TernlogMagicC; break;
4201   case ISD::XOR:      Imm = TernlogMagicB ^ TernlogMagicC; break;
4202   case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4203   }
4204
4205   switch (N->getOpcode()) {
4206   default: llvm_unreachable("Unexpected opcode!");
4207   case X86ISD::ANDNP:
4208     if (A == N0)
4209       Imm &= ~TernlogMagicA;
4210     else
4211       Imm = ~(Imm) & TernlogMagicA;
4212     break;
4213   case ISD::AND: Imm &= TernlogMagicA; break;
4214   case ISD::OR:  Imm |= TernlogMagicA; break;
4215   case ISD::XOR: Imm ^= TernlogMagicA; break;
4216   }
4217
4218   return matchVPTERNLOG(N, N, FoldableOp.getNode(), A, B, C, Imm);
4219 }
4220
4221 /// If the high bits of an 'and' operand are known zero, try setting the
4222 /// high bits of an 'and' constant operand to produce a smaller encoding by
4223 /// creating a small, sign-extended negative immediate rather than a large
4224 /// positive one. This reverses a transform in SimplifyDemandedBits that
4225 /// shrinks mask constants by clearing bits. There is also a possibility that
4226 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4227 /// case, just replace the 'and'. Return 'true' if the node is replaced.
4228 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4229   // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4230   // have immediate operands.
4231   MVT VT = And->getSimpleValueType(0);
4232   if (VT != MVT::i32 && VT != MVT::i64)
4233     return false;
4234
4235   auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4236   if (!And1C)
4237     return false;
4238
4239   // Bail out if the mask constant is already negative. It can't shrink any
4240   // further. If the upper 32 bits of a 64 bit mask are all zeros, we have
4241   // special isel patterns to use a 32-bit and instead of a 64-bit and by
4242   // relying on the implicit zeroing of 32 bit ops. So we should check if the
4243   // lower 32 bits are negative too.
4244   APInt MaskVal = And1C->getAPIntValue();
4245   unsigned MaskLZ = MaskVal.countLeadingZeros();
4246   if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4247     return false;
4248
4249   // Don't extend into the upper 32 bits of a 64 bit mask.
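  // For example (a sketch): an i64 mask of 0x000000007FFFFFF0 is truncated
  // to the i32 mask 0x7FFFFFF0 below; if bit 31 of the other operand is
  // known zero, it is rewritten to 0xFFFFFFF0, which isel can encode as a
  // 32-bit AND with the sign-extended imm8 -16, relying on the implicit
  // zeroing of 32-bit ops for the upper half.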
4250 if (VT == MVT::i64 && MaskLZ >= 32) { 4251 MaskLZ -= 32; 4252 MaskVal = MaskVal.trunc(32); 4253 } 4254 4255 SDValue And0 = And->getOperand(0); 4256 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ); 4257 APInt NegMaskVal = MaskVal | HighZeros; 4258 4259 // If a negative constant would not allow a smaller encoding, there's no need 4260 // to continue. Only change the constant when we know it's a win. 4261 unsigned MinWidth = NegMaskVal.getMinSignedBits(); 4262 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32)) 4263 return false; 4264 4265 // Extend masks if we truncated above. 4266 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) { 4267 NegMaskVal = NegMaskVal.zext(64); 4268 HighZeros = HighZeros.zext(64); 4269 } 4270 4271 // The variable operand must be all zeros in the top bits to allow using the 4272 // new, negative constant as the mask. 4273 if (!CurDAG->MaskedValueIsZero(And0, HighZeros)) 4274 return false; 4275 4276 // Check if the mask is -1. In that case, this is an unnecessary instruction 4277 // that escaped earlier analysis. 4278 if (NegMaskVal.isAllOnesValue()) { 4279 ReplaceNode(And, And0.getNode()); 4280 return true; 4281 } 4282 4283 // A negative mask allows a smaller encoding. Create a new 'and' node. 4284 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT); 4285 insertDAGNode(*CurDAG, SDValue(And, 0), NewMask); 4286 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask); 4287 ReplaceNode(And, NewAnd.getNode()); 4288 SelectCode(NewAnd.getNode()); 4289 return true; 4290 } 4291 4292 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad, 4293 bool FoldedBCast, bool Masked) { 4294 #define VPTESTM_CASE(VT, SUFFIX) \ 4295 case MVT::VT: \ 4296 if (Masked) \ 4297 return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \ 4298 return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX; 4299 4300 4301 #define VPTESTM_BROADCAST_CASES(SUFFIX) \ 4302 default: llvm_unreachable("Unexpected VT!"); \ 4303 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \ 4304 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \ 4305 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \ 4306 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \ 4307 VPTESTM_CASE(v16i32, DZ##SUFFIX) \ 4308 VPTESTM_CASE(v8i64, QZ##SUFFIX) 4309 4310 #define VPTESTM_FULL_CASES(SUFFIX) \ 4311 VPTESTM_BROADCAST_CASES(SUFFIX) \ 4312 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \ 4313 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \ 4314 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \ 4315 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \ 4316 VPTESTM_CASE(v64i8, BZ##SUFFIX) \ 4317 VPTESTM_CASE(v32i16, WZ##SUFFIX) 4318 4319 if (FoldedBCast) { 4320 switch (TestVT.SimpleTy) { 4321 VPTESTM_BROADCAST_CASES(rmb) 4322 } 4323 } 4324 4325 if (FoldedLoad) { 4326 switch (TestVT.SimpleTy) { 4327 VPTESTM_FULL_CASES(rm) 4328 } 4329 } 4330 4331 switch (TestVT.SimpleTy) { 4332 VPTESTM_FULL_CASES(rr) 4333 } 4334 4335 #undef VPTESTM_FULL_CASES 4336 #undef VPTESTM_BROADCAST_CASES 4337 #undef VPTESTM_CASE 4338 } 4339 4340 // Try to create VPTESTM instruction. If InMask is not null, it will be used 4341 // to form a masked operation. 4342 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc, 4343 SDValue InMask) { 4344 assert(Subtarget->hasAVX512() && "Expected AVX512!"); 4345 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 && 4346 "Unexpected VT!"); 4347 4348 // Look for equal and not equal compares. 
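  // As an illustrative sketch: for
  //   %c = setcc (and %a, %b), zeroinitializer, setne
  // the code below selects a VPTESTM of %a against %b (and the seteq form
  // selects VPTESTNM), so the AND result is never materialized.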
4349 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get(); 4350 if (CC != ISD::SETEQ && CC != ISD::SETNE) 4351 return false; 4352 4353 SDValue SetccOp0 = Setcc.getOperand(0); 4354 SDValue SetccOp1 = Setcc.getOperand(1); 4355 4356 // Canonicalize the all zero vector to the RHS. 4357 if (ISD::isBuildVectorAllZeros(SetccOp0.getNode())) 4358 std::swap(SetccOp0, SetccOp1); 4359 4360 // See if we're comparing against zero. 4361 if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode())) 4362 return false; 4363 4364 SDValue N0 = SetccOp0; 4365 4366 MVT CmpVT = N0.getSimpleValueType(); 4367 MVT CmpSVT = CmpVT.getVectorElementType(); 4368 4369 // Start with both operands the same. We'll try to refine this. 4370 SDValue Src0 = N0; 4371 SDValue Src1 = N0; 4372 4373 { 4374 // Look through single use bitcasts. 4375 SDValue N0Temp = N0; 4376 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse()) 4377 N0Temp = N0.getOperand(0); 4378 4379 // Look for single use AND. 4380 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) { 4381 Src0 = N0Temp.getOperand(0); 4382 Src1 = N0Temp.getOperand(1); 4383 } 4384 } 4385 4386 // Without VLX we need to widen the operation. 4387 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector(); 4388 4389 auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L, 4390 SDValue &Base, SDValue &Scale, SDValue &Index, 4391 SDValue &Disp, SDValue &Segment) { 4392 // If we need to widen, we can't fold the load. 4393 if (!Widen) 4394 if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment)) 4395 return true; 4396 4397 // If we didn't fold a load, try to match broadcast. No widening limitation 4398 // for this. But only 32 and 64 bit types are supported. 4399 if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64) 4400 return false; 4401 4402 // Look through single use bitcasts. 4403 if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) { 4404 P = L.getNode(); 4405 L = L.getOperand(0); 4406 } 4407 4408 if (L.getOpcode() != X86ISD::VBROADCAST_LOAD) 4409 return false; 4410 4411 auto *MemIntr = cast<MemIntrinsicSDNode>(L); 4412 if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits()) 4413 return false; 4414 4415 return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment); 4416 }; 4417 4418 // We can only fold loads if the sources are unique. 4419 bool CanFoldLoads = Src0 != Src1; 4420 4421 bool FoldedLoad = false; 4422 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 4423 if (CanFoldLoads) { 4424 FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2, 4425 Tmp3, Tmp4); 4426 if (!FoldedLoad) { 4427 // And is commutative. 4428 FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1, 4429 Tmp2, Tmp3, Tmp4); 4430 if (FoldedLoad) 4431 std::swap(Src0, Src1); 4432 } 4433 } 4434 4435 bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD; 4436 4437 bool IsMasked = InMask.getNode() != nullptr; 4438 4439 SDLoc dl(Root); 4440 4441 MVT ResVT = Setcc.getSimpleValueType(); 4442 MVT MaskVT = ResVT; 4443 if (Widen) { 4444 // Widen the inputs using insert_subreg or copy_to_regclass. 4445 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2; 4446 unsigned SubReg = CmpVT.is128BitVector() ? 
X86::sub_xmm : X86::sub_ymm; 4447 unsigned NumElts = CmpVT.getVectorNumElements() * Scale; 4448 CmpVT = MVT::getVectorVT(CmpSVT, NumElts); 4449 MaskVT = MVT::getVectorVT(MVT::i1, NumElts); 4450 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl, 4451 CmpVT), 0); 4452 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0); 4453 4454 if (!FoldedBCast) 4455 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1); 4456 4457 if (IsMasked) { 4458 // Widen the mask. 4459 unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID(); 4460 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32); 4461 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, 4462 dl, MaskVT, InMask, RC), 0); 4463 } 4464 } 4465 4466 bool IsTestN = CC == ISD::SETEQ; 4467 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast, 4468 IsMasked); 4469 4470 MachineSDNode *CNode; 4471 if (FoldedLoad) { 4472 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other); 4473 4474 if (IsMasked) { 4475 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, 4476 Src1.getOperand(0) }; 4477 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 4478 } else { 4479 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, 4480 Src1.getOperand(0) }; 4481 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 4482 } 4483 4484 // Update the chain. 4485 ReplaceUses(Src1.getValue(1), SDValue(CNode, 1)); 4486 // Record the mem-refs 4487 CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()}); 4488 } else { 4489 if (IsMasked) 4490 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1); 4491 else 4492 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1); 4493 } 4494 4495 // If we widened, we need to shrink the mask VT. 4496 if (Widen) { 4497 unsigned RegClass = TLI->getRegClassFor(ResVT)->getID(); 4498 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32); 4499 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, 4500 dl, ResVT, SDValue(CNode, 0), RC); 4501 } 4502 4503 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0)); 4504 CurDAG->RemoveDeadNode(Root); 4505 return true; 4506 } 4507 4508 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it 4509 // into vpternlog. 4510 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) { 4511 assert(N->getOpcode() == ISD::OR && "Unexpected opcode!"); 4512 4513 MVT NVT = N->getSimpleValueType(0); 4514 4515 // Make sure we support VPTERNLOG. 4516 if (!NVT.isVector() || !Subtarget->hasAVX512()) 4517 return false; 4518 4519 // We need VLX for 128/256-bit. 4520 if (!(Subtarget->hasVLX() || NVT.is512BitVector())) 4521 return false; 4522 4523 SDValue N0 = N->getOperand(0); 4524 SDValue N1 = N->getOperand(1); 4525 4526 // Canonicalize AND to LHS. 4527 if (N1.getOpcode() == ISD::AND) 4528 std::swap(N0, N1); 4529 4530 if (N0.getOpcode() != ISD::AND || 4531 N1.getOpcode() != X86ISD::ANDNP || 4532 !N0.hasOneUse() || !N1.hasOneUse()) 4533 return false; 4534 4535 // ANDN is not commutable, use it to pick down A and C. 4536 SDValue A = N1.getOperand(0); 4537 SDValue C = N1.getOperand(1); 4538 4539 // AND is commutable, if one operand matches A, the other operand is B. 4540 // Otherwise this isn't a match. 
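  // A sketch of the control value used below: evaluating the bitselect
  // (A & B) | (~A & C) with the magic constants A=0xf0, B=0xcc, C=0xaa gives
  //   (0xf0 & 0xcc) | (~0xf0 & 0xaa) == 0xc0 | 0x0a == 0xca,
  // the A ? B : C ternlog immediate.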
4541 SDValue B; 4542 if (N0.getOperand(0) == A) 4543 B = N0.getOperand(1); 4544 else if (N0.getOperand(1) == A) 4545 B = N0.getOperand(0); 4546 else 4547 return false; 4548 4549 SDLoc dl(N); 4550 SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8); 4551 SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm); 4552 ReplaceNode(N, Ternlog.getNode()); 4553 4554 return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(), 4555 A, B, C, 0xCA); 4556 } 4557 4558 void X86DAGToDAGISel::Select(SDNode *Node) { 4559 MVT NVT = Node->getSimpleValueType(0); 4560 unsigned Opcode = Node->getOpcode(); 4561 SDLoc dl(Node); 4562 4563 if (Node->isMachineOpcode()) { 4564 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); 4565 Node->setNodeId(-1); 4566 return; // Already selected. 4567 } 4568 4569 switch (Opcode) { 4570 default: break; 4571 case ISD::INTRINSIC_W_CHAIN: { 4572 unsigned IntNo = Node->getConstantOperandVal(1); 4573 switch (IntNo) { 4574 default: break; 4575 case Intrinsic::x86_encodekey128: 4576 case Intrinsic::x86_encodekey256: { 4577 if (!Subtarget->hasKL()) 4578 break; 4579 4580 unsigned Opcode; 4581 switch (IntNo) { 4582 default: llvm_unreachable("Impossible intrinsic"); 4583 case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break; 4584 case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break; 4585 } 4586 4587 SDValue Chain = Node->getOperand(0); 4588 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3), 4589 SDValue()); 4590 if (Opcode == X86::ENCODEKEY256) 4591 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4), 4592 Chain.getValue(1)); 4593 4594 MachineSDNode *Res = CurDAG->getMachineNode( 4595 Opcode, dl, Node->getVTList(), 4596 {Node->getOperand(2), Chain, Chain.getValue(1)}); 4597 ReplaceNode(Node, Res); 4598 return; 4599 } 4600 case Intrinsic::x86_tileloadd64_internal: { 4601 if (!Subtarget->hasAMXTILE()) 4602 break; 4603 unsigned Opc = X86::PTILELOADDV; 4604 // _tile_loadd_internal(row, col, buf, STRIDE) 4605 SDValue Base = Node->getOperand(4); 4606 SDValue Scale = getI8Imm(1, dl); 4607 SDValue Index = Node->getOperand(5); 4608 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32); 4609 SDValue Segment = CurDAG->getRegister(0, MVT::i16); 4610 SDValue CFG = CurDAG->getRegister(0, MVT::Untyped); 4611 SDValue Chain = Node->getOperand(0); 4612 MachineSDNode *CNode; 4613 SDValue Ops[] = {Node->getOperand(2), 4614 Node->getOperand(3), 4615 Base, 4616 Scale, 4617 Index, 4618 Disp, 4619 Segment, 4620 CFG, 4621 Chain}; 4622 CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops); 4623 ReplaceNode(Node, CNode); 4624 return; 4625 } 4626 case Intrinsic::x86_tdpbssd_internal: { 4627 if (!Subtarget->hasAMXTILE()) 4628 break; 4629 SDValue Chain = Node->getOperand(0); 4630 unsigned Opc = X86::PTDPBSSDV; 4631 SDValue CFG = CurDAG->getRegister(0, MVT::Untyped); 4632 SDValue Ops[] = {Node->getOperand(2), 4633 Node->getOperand(3), 4634 Node->getOperand(4), 4635 Node->getOperand(5), 4636 Node->getOperand(6), 4637 Node->getOperand(7), 4638 CFG, 4639 Chain}; 4640 MachineSDNode *CNode = 4641 CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops); 4642 ReplaceNode(Node, CNode); 4643 return; 4644 } 4645 case Intrinsic::x86_tilezero_internal: { 4646 if (!Subtarget->hasAMXTILE()) 4647 break; 4648 unsigned Opc = X86::PTILEZEROV; 4649 SDValue Chain = Node->getOperand(0); 4650 SDValue CFG = CurDAG->getRegister(0, MVT::Untyped); 4651 SDValue Ops[] = {Node->getOperand(2), 
Node->getOperand(3), CFG, Chain}; 4652 MachineSDNode *CNode = 4653 CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops); 4654 ReplaceNode(Node, CNode); 4655 return; 4656 } 4657 } 4658 break; 4659 } 4660 case ISD::INTRINSIC_VOID: { 4661 unsigned IntNo = Node->getConstantOperandVal(1); 4662 switch (IntNo) { 4663 default: break; 4664 case Intrinsic::x86_sse3_monitor: 4665 case Intrinsic::x86_monitorx: 4666 case Intrinsic::x86_clzero: { 4667 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64; 4668 4669 unsigned Opc = 0; 4670 switch (IntNo) { 4671 default: llvm_unreachable("Unexpected intrinsic!"); 4672 case Intrinsic::x86_sse3_monitor: 4673 if (!Subtarget->hasSSE3()) 4674 break; 4675 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr; 4676 break; 4677 case Intrinsic::x86_monitorx: 4678 if (!Subtarget->hasMWAITX()) 4679 break; 4680 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr; 4681 break; 4682 case Intrinsic::x86_clzero: 4683 if (!Subtarget->hasCLZERO()) 4684 break; 4685 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r; 4686 break; 4687 } 4688 4689 if (Opc) { 4690 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX; 4691 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg, 4692 Node->getOperand(2), SDValue()); 4693 SDValue InFlag = Chain.getValue(1); 4694 4695 if (IntNo == Intrinsic::x86_sse3_monitor || 4696 IntNo == Intrinsic::x86_monitorx) { 4697 // Copy the other two operands to ECX and EDX. 4698 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3), 4699 InFlag); 4700 InFlag = Chain.getValue(1); 4701 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4), 4702 InFlag); 4703 InFlag = Chain.getValue(1); 4704 } 4705 4706 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, 4707 { Chain, InFlag}); 4708 ReplaceNode(Node, CNode); 4709 return; 4710 } 4711 4712 break; 4713 } 4714 case Intrinsic::x86_tilestored64_internal: { 4715 unsigned Opc = X86::PTILESTOREDV; 4716 // _tile_stored_internal(row, col, buf, STRIDE, c) 4717 SDValue Base = Node->getOperand(4); 4718 SDValue Scale = getI8Imm(1, dl); 4719 SDValue Index = Node->getOperand(5); 4720 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32); 4721 SDValue Segment = CurDAG->getRegister(0, MVT::i16); 4722 SDValue CFG = CurDAG->getRegister(0, MVT::Untyped); 4723 SDValue Chain = Node->getOperand(0); 4724 MachineSDNode *CNode; 4725 SDValue Ops[] = {Node->getOperand(2), 4726 Node->getOperand(3), 4727 Base, 4728 Scale, 4729 Index, 4730 Disp, 4731 Segment, 4732 Node->getOperand(6), 4733 CFG, 4734 Chain}; 4735 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops); 4736 ReplaceNode(Node, CNode); 4737 return; 4738 } 4739 case Intrinsic::x86_tileloadd64: 4740 case Intrinsic::x86_tileloaddt164: 4741 case Intrinsic::x86_tilestored64: { 4742 if (!Subtarget->hasAMXTILE()) 4743 break; 4744 unsigned Opc; 4745 switch (IntNo) { 4746 default: llvm_unreachable("Unexpected intrinsic!"); 4747 case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break; 4748 case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break; 4749 case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break; 4750 } 4751 // FIXME: Match displacement and scale. 
4752 unsigned TIndex = Node->getConstantOperandVal(2); 4753 SDValue TReg = getI8Imm(TIndex, dl); 4754 SDValue Base = Node->getOperand(3); 4755 SDValue Scale = getI8Imm(1, dl); 4756 SDValue Index = Node->getOperand(4); 4757 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32); 4758 SDValue Segment = CurDAG->getRegister(0, MVT::i16); 4759 SDValue Chain = Node->getOperand(0); 4760 MachineSDNode *CNode; 4761 if (Opc == X86::PTILESTORED) { 4762 SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain }; 4763 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops); 4764 } else { 4765 SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain }; 4766 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops); 4767 } 4768 ReplaceNode(Node, CNode); 4769 return; 4770 } 4771 } 4772 break; 4773 } 4774 case ISD::BRIND: { 4775 if (Subtarget->isTargetNaCl()) 4776 // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We 4777 // leave the instruction alone. 4778 break; 4779 if (Subtarget->isTarget64BitILP32()) { 4780 // Converts a 32-bit register to a 64-bit, zero-extended version of 4781 // it. This is needed because x86-64 can do many things, but jmp %r32 4782 // ain't one of them. 4783 SDValue Target = Node->getOperand(1); 4784 assert(Target.getValueType() == MVT::i32 && "Unexpected VT!"); 4785 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64); 4786 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other, 4787 Node->getOperand(0), ZextTarget); 4788 ReplaceNode(Node, Brind.getNode()); 4789 SelectCode(ZextTarget.getNode()); 4790 SelectCode(Brind.getNode()); 4791 return; 4792 } 4793 break; 4794 } 4795 case X86ISD::GlobalBaseReg: 4796 ReplaceNode(Node, getGlobalBaseReg()); 4797 return; 4798 4799 case ISD::BITCAST: 4800 // Just drop all 128/256/512-bit bitcasts. 4801 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() || 4802 NVT == MVT::f128) { 4803 ReplaceUses(SDValue(Node, 0), Node->getOperand(0)); 4804 CurDAG->RemoveDeadNode(Node); 4805 return; 4806 } 4807 break; 4808 4809 case ISD::SRL: 4810 if (matchBitExtract(Node)) 4811 return; 4812 LLVM_FALLTHROUGH; 4813 case ISD::SRA: 4814 case ISD::SHL: 4815 if (tryShiftAmountMod(Node)) 4816 return; 4817 break; 4818 4819 case X86ISD::VPTERNLOG: { 4820 uint8_t Imm = cast<ConstantSDNode>(Node->getOperand(3))->getZExtValue(); 4821 if (matchVPTERNLOG(Node, Node, Node, Node->getOperand(0), 4822 Node->getOperand(1), Node->getOperand(2), Imm)) 4823 return; 4824 break; 4825 } 4826 4827 case X86ISD::ANDNP: 4828 if (tryVPTERNLOG(Node)) 4829 return; 4830 break; 4831 4832 case ISD::AND: 4833 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) { 4834 // Try to form a masked VPTESTM. Operands can be in either order.
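// A rough sketch of what tryVPTESTM can catch here (assuming AVX512):
//   (and (setcc (and X, Y), 0, ne), M)
// can become a single write-masked "vptestmd X, Y {M}" instead of a
// compare followed by a separate AND of the k-mask.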
4835 SDValue N0 = Node->getOperand(0); 4836 SDValue N1 = Node->getOperand(1); 4837 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() && 4838 tryVPTESTM(Node, N0, N1)) 4839 return; 4840 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() && 4841 tryVPTESTM(Node, N1, N0)) 4842 return; 4843 } 4844 4845 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) { 4846 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0)); 4847 CurDAG->RemoveDeadNode(Node); 4848 return; 4849 } 4850 if (matchBitExtract(Node)) 4851 return; 4852 if (AndImmShrink && shrinkAndImmediate(Node)) 4853 return; 4854 4855 LLVM_FALLTHROUGH; 4856 case ISD::OR: 4857 case ISD::XOR: 4858 if (tryShrinkShlLogicImm(Node)) 4859 return; 4860 if (Opcode == ISD::OR && tryMatchBitSelect(Node)) 4861 return; 4862 if (tryVPTERNLOG(Node)) 4863 return; 4864 4865 LLVM_FALLTHROUGH; 4866 case ISD::ADD: 4867 case ISD::SUB: { 4868 // Try to avoid folding immediates with multiple uses for optsize. 4869 // This code tries to select to register form directly to avoid going 4870 // through the isel table which might fold the immediate. We can't change 4871 // the add/sub/and/or/xor with immediate patterns in the 4872 // tablegen files to check immediate use count without making the patterns 4873 // unavailable to the fast-isel table. 4874 if (!CurDAG->shouldOptForSize()) 4875 break; 4876 4877 // Only handle i8/i16/i32/i64. 4878 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64) 4879 break; 4880 4881 SDValue N0 = Node->getOperand(0); 4882 SDValue N1 = Node->getOperand(1); 4883 4884 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1); 4885 if (!Cst) 4886 break; 4887 4888 int64_t Val = Cst->getSExtValue(); 4889 4890 // Make sure it's an immediate that is considered foldable. 4891 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND. 4892 if (!isInt<8>(Val) && !isInt<32>(Val)) 4893 break; 4894 4895 // If this can match to INC/DEC, let it go. 4896 if (Opcode == ISD::ADD && (Val == 1 || Val == -1)) 4897 break; 4898 4899 // Check if we should avoid folding this immediate. 4900 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode())) 4901 break; 4902 4903 // We should not fold the immediate. So we need a register form instead.
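// A rough size sketch (typical encodings, not exact for every case): with
// four uses of (add X, 1000), folding gives four 6-byte "addl $1000, %reg"
// instructions, while one 5-byte "movl $1000, %ecx" plus four 2-byte
// "addl %ecx, %reg" uses is smaller overall.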
4904 unsigned ROpc, MOpc; 4905 switch (NVT.SimpleTy) { 4906 default: llvm_unreachable("Unexpected VT!"); 4907 case MVT::i8: 4908 switch (Opcode) { 4909 default: llvm_unreachable("Unexpected opcode!"); 4910 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break; 4911 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break; 4912 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break; 4913 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break; 4914 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break; 4915 } 4916 break; 4917 case MVT::i16: 4918 switch (Opcode) { 4919 default: llvm_unreachable("Unexpected opcode!"); 4920 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break; 4921 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break; 4922 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break; 4923 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break; 4924 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break; 4925 } 4926 break; 4927 case MVT::i32: 4928 switch (Opcode) { 4929 default: llvm_unreachable("Unexpected opcode!"); 4930 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break; 4931 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break; 4932 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break; 4933 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break; 4934 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break; 4935 } 4936 break; 4937 case MVT::i64: 4938 switch (Opcode) { 4939 default: llvm_unreachable("Unexpected opcode!"); 4940 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break; 4941 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break; 4942 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break; 4943 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break; 4944 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break; 4945 } 4946 break; 4947 } 4948 4949 // OK, this is an AND/OR/XOR/ADD/SUB with a constant. 4950 4951 // If this is not a subtract, we can still try to fold a load. 4952 if (Opcode != ISD::SUB) { 4953 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 4954 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 4955 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) }; 4956 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other); 4957 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); 4958 // Update the chain. 4959 ReplaceUses(N0.getValue(1), SDValue(CNode, 2)); 4960 // Record the mem-refs 4961 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()}); 4962 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 4963 CurDAG->RemoveDeadNode(Node); 4964 return; 4965 } 4966 } 4967 4968 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1); 4969 return; 4970 } 4971 4972 case X86ISD::SMUL: 4973 // i16/i32/i64 are handled with isel patterns. 4974 if (NVT != MVT::i8) 4975 break; 4976 LLVM_FALLTHROUGH; 4977 case X86ISD::UMUL: { 4978 SDValue N0 = Node->getOperand(0); 4979 SDValue N1 = Node->getOperand(1); 4980 4981 unsigned LoReg, ROpc, MOpc; 4982 switch (NVT.SimpleTy) { 4983 default: llvm_unreachable("Unsupported VT!"); 4984 case MVT::i8: 4985 LoReg = X86::AL; 4986 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r; 4987 MOpc = Opcode == X86ISD::SMUL ?
X86::IMUL8m : X86::MUL8m; 4988 break; 4989 case MVT::i16: 4990 LoReg = X86::AX; 4991 ROpc = X86::MUL16r; 4992 MOpc = X86::MUL16m; 4993 break; 4994 case MVT::i32: 4995 LoReg = X86::EAX; 4996 ROpc = X86::MUL32r; 4997 MOpc = X86::MUL32m; 4998 break; 4999 case MVT::i64: 5000 LoReg = X86::RAX; 5001 ROpc = X86::MUL64r; 5002 MOpc = X86::MUL64m; 5003 break; 5004 } 5005 5006 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 5007 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 5008 // Multiply is commutative. 5009 if (!FoldedLoad) { 5010 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 5011 if (FoldedLoad) 5012 std::swap(N0, N1); 5013 } 5014 5015 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, 5016 N0, SDValue()).getValue(1); 5017 5018 MachineSDNode *CNode; 5019 if (FoldedLoad) { 5020 // i16/i32/i64 use an instruction that produces a low and high result even 5021 // though only the low result is used. 5022 SDVTList VTs; 5023 if (NVT == MVT::i8) 5024 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other); 5025 else 5026 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other); 5027 5028 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 5029 InFlag }; 5030 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); 5031 5032 // Update the chain. 5033 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3)); 5034 // Record the mem-refs 5035 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()}); 5036 } else { 5037 // i16/i32/i64 use an instruction that produces a low and high result even 5038 // though only the low result is used. 5039 SDVTList VTs; 5040 if (NVT == MVT::i8) 5041 VTs = CurDAG->getVTList(NVT, MVT::i32); 5042 else 5043 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32); 5044 5045 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag}); 5046 } 5047 5048 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 5049 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2)); 5050 CurDAG->RemoveDeadNode(Node); 5051 return; 5052 } 5053 5054 case ISD::SMUL_LOHI: 5055 case ISD::UMUL_LOHI: { 5056 SDValue N0 = Node->getOperand(0); 5057 SDValue N1 = Node->getOperand(1); 5058 5059 unsigned Opc, MOpc; 5060 unsigned LoReg, HiReg; 5061 bool IsSigned = Opcode == ISD::SMUL_LOHI; 5062 bool UseMULX = !IsSigned && Subtarget->hasBMI2(); 5063 bool UseMULXHi = UseMULX && SDValue(Node, 0).use_empty(); 5064 switch (NVT.SimpleTy) { 5065 default: llvm_unreachable("Unsupported VT!"); 5066 case MVT::i32: 5067 Opc = UseMULXHi ? X86::MULX32Hrr : 5068 UseMULX ? X86::MULX32rr : 5069 IsSigned ? X86::IMUL32r : X86::MUL32r; 5070 MOpc = UseMULXHi ? X86::MULX32Hrm : 5071 UseMULX ? X86::MULX32rm : 5072 IsSigned ? X86::IMUL32m : X86::MUL32m; 5073 LoReg = UseMULX ? X86::EDX : X86::EAX; 5074 HiReg = X86::EDX; 5075 break; 5076 case MVT::i64: 5077 Opc = UseMULXHi ? X86::MULX64Hrr : 5078 UseMULX ? X86::MULX64rr : 5079 IsSigned ? X86::IMUL64r : X86::MUL64r; 5080 MOpc = UseMULXHi ? X86::MULX64Hrm : 5081 UseMULX ? X86::MULX64rm : 5082 IsSigned ? X86::IMUL64m : X86::MUL64m; 5083 LoReg = UseMULX ? X86::RDX : X86::RAX; 5084 HiReg = X86::RDX; 5085 break; 5086 } 5087 5088 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 5089 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 5090 // Multiply is commutative.
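// e.g. for (mul (load P), Y) the load sits in N0, so swap the operands to
// let it fold as the memory operand of the *m form below.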
5091 if (!foldedLoad) { 5092 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 5093 if (foldedLoad) 5094 std::swap(N0, N1); 5095 } 5096 5097 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, 5098 N0, SDValue()).getValue(1); 5099 SDValue ResHi, ResLo; 5100 if (foldedLoad) { 5101 SDValue Chain; 5102 MachineSDNode *CNode = nullptr; 5103 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 5104 InFlag }; 5105 if (UseMULXHi) { 5106 SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other); 5107 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); 5108 ResHi = SDValue(CNode, 0); 5109 Chain = SDValue(CNode, 1); 5110 } else if (UseMULX) { 5111 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other); 5112 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); 5113 ResHi = SDValue(CNode, 0); 5114 ResLo = SDValue(CNode, 1); 5115 Chain = SDValue(CNode, 2); 5116 } else { 5117 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); 5118 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops); 5119 Chain = SDValue(CNode, 0); 5120 InFlag = SDValue(CNode, 1); 5121 } 5122 5123 // Update the chain. 5124 ReplaceUses(N1.getValue(1), Chain); 5125 // Record the mem-refs 5126 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()}); 5127 } else { 5128 SDValue Ops[] = { N1, InFlag }; 5129 if (UseMULXHi) { 5130 SDVTList VTs = CurDAG->getVTList(NVT); 5131 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 5132 ResHi = SDValue(CNode, 0); 5133 } else if (UseMULX) { 5134 SDVTList VTs = CurDAG->getVTList(NVT, NVT); 5135 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 5136 ResHi = SDValue(CNode, 0); 5137 ResLo = SDValue(CNode, 1); 5138 } else { 5139 SDVTList VTs = CurDAG->getVTList(MVT::Glue); 5140 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 5141 InFlag = SDValue(CNode, 0); 5142 } 5143 } 5144 5145 // Copy the low half of the result, if it is needed. 5146 if (!SDValue(Node, 0).use_empty()) { 5147 if (!ResLo) { 5148 assert(LoReg && "Register for low half is not defined!"); 5149 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, 5150 NVT, InFlag); 5151 InFlag = ResLo.getValue(2); 5152 } 5153 ReplaceUses(SDValue(Node, 0), ResLo); 5154 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); 5155 dbgs() << '\n'); 5156 } 5157 // Copy the high half of the result, if it is needed. 
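// Note: in the non-MULX forms the hardware fixes the outputs (e.g. the i64
// product lands in RDX:RAX), so unused halves simply never get a
// CopyFromReg below, and the MULX*Hrr forms cover the high-half-only case.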
5158 if (!SDValue(Node, 1).use_empty()) { 5159 if (!ResHi) { 5160 assert(HiReg && "Register for high half is not defined!"); 5161 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, 5162 NVT, InFlag); 5163 InFlag = ResHi.getValue(2); 5164 } 5165 ReplaceUses(SDValue(Node, 1), ResHi); 5166 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); 5167 dbgs() << '\n'); 5168 } 5169 5170 CurDAG->RemoveDeadNode(Node); 5171 return; 5172 } 5173 5174 case ISD::SDIVREM: 5175 case ISD::UDIVREM: { 5176 SDValue N0 = Node->getOperand(0); 5177 SDValue N1 = Node->getOperand(1); 5178 5179 unsigned ROpc, MOpc; 5180 bool isSigned = Opcode == ISD::SDIVREM; 5181 if (!isSigned) { 5182 switch (NVT.SimpleTy) { 5183 default: llvm_unreachable("Unsupported VT!"); 5184 case MVT::i8: ROpc = X86::DIV8r; MOpc = X86::DIV8m; break; 5185 case MVT::i16: ROpc = X86::DIV16r; MOpc = X86::DIV16m; break; 5186 case MVT::i32: ROpc = X86::DIV32r; MOpc = X86::DIV32m; break; 5187 case MVT::i64: ROpc = X86::DIV64r; MOpc = X86::DIV64m; break; 5188 } 5189 } else { 5190 switch (NVT.SimpleTy) { 5191 default: llvm_unreachable("Unsupported VT!"); 5192 case MVT::i8: ROpc = X86::IDIV8r; MOpc = X86::IDIV8m; break; 5193 case MVT::i16: ROpc = X86::IDIV16r; MOpc = X86::IDIV16m; break; 5194 case MVT::i32: ROpc = X86::IDIV32r; MOpc = X86::IDIV32m; break; 5195 case MVT::i64: ROpc = X86::IDIV64r; MOpc = X86::IDIV64m; break; 5196 } 5197 } 5198 5199 unsigned LoReg, HiReg, ClrReg; 5200 unsigned SExtOpcode; 5201 switch (NVT.SimpleTy) { 5202 default: llvm_unreachable("Unsupported VT!"); 5203 case MVT::i8: 5204 LoReg = X86::AL; ClrReg = HiReg = X86::AH; 5205 SExtOpcode = 0; // Not used. 5206 break; 5207 case MVT::i16: 5208 LoReg = X86::AX; HiReg = X86::DX; 5209 ClrReg = X86::DX; 5210 SExtOpcode = X86::CWD; 5211 break; 5212 case MVT::i32: 5213 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; 5214 SExtOpcode = X86::CDQ; 5215 break; 5216 case MVT::i64: 5217 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; 5218 SExtOpcode = X86::CQO; 5219 break; 5220 } 5221 5222 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 5223 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 5224 bool signBitIsZero = CurDAG->SignBitIsZero(N0); 5225 5226 SDValue InFlag; 5227 if (NVT == MVT::i8) { 5228 // Special case for div8, just use a move with zero extension to AX to 5229 // clear the upper 8 bits (AH). 5230 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain; 5231 MachineSDNode *Move; 5232 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 5233 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) }; 5234 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8 5235 : X86::MOVZX16rm8; 5236 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops); 5237 Chain = SDValue(Move, 1); 5238 ReplaceUses(N0.getValue(1), Chain); 5239 // Record the mem-refs 5240 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()}); 5241 } else { 5242 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8 5243 : X86::MOVZX16rr8; 5244 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0); 5245 Chain = CurDAG->getEntryNode(); 5246 } 5247 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0), 5248 SDValue()); 5249 InFlag = Chain.getValue(1); 5250 } else { 5251 InFlag = 5252 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, 5253 LoReg, N0, SDValue()).getValue(1); 5254 if (isSigned && !signBitIsZero) { 5255 // Sign extend the low part into the high part. 
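// e.g. for i32 SDIVREM this emits CDQ, sign-extending EAX into EDX so that
// EDX:EAX holds the full-width dividend IDIV expects; CWD and CQO do the
// same for i16 and i64.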
5256 InFlag = 5257 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0); 5258 } else { 5259 // Zero out the high part, effectively zero extending the input. 5260 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32); 5261 SDValue ClrNode = 5262 SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, None), 0); 5263 switch (NVT.SimpleTy) { 5264 case MVT::i16: 5265 ClrNode = 5266 SDValue(CurDAG->getMachineNode( 5267 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode, 5268 CurDAG->getTargetConstant(X86::sub_16bit, dl, 5269 MVT::i32)), 5270 0); 5271 break; 5272 case MVT::i32: 5273 break; 5274 case MVT::i64: 5275 ClrNode = 5276 SDValue(CurDAG->getMachineNode( 5277 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64, 5278 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode, 5279 CurDAG->getTargetConstant(X86::sub_32bit, dl, 5280 MVT::i32)), 5281 0); 5282 break; 5283 default: 5284 llvm_unreachable("Unexpected division source"); 5285 } 5286 5287 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, 5288 ClrNode, InFlag).getValue(1); 5289 } 5290 } 5291 5292 if (foldedLoad) { 5293 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 5294 InFlag }; 5295 MachineSDNode *CNode = 5296 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops); 5297 InFlag = SDValue(CNode, 1); 5298 // Update the chain. 5299 ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); 5300 // Record the mem-refs 5301 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()}); 5302 } else { 5303 InFlag = 5304 SDValue(CurDAG->getMachineNode(ROpc, dl, MVT::Glue, N1, InFlag), 0); 5305 } 5306 5307 // Prevent use of AH in a REX instruction by explicitly copying it to 5308 // an ABCD_L register. 5309 // 5310 // The current assumption of the register allocator is that isel 5311 // won't generate explicit references to the GR8_ABCD_H registers. If 5312 // the allocator and/or the backend get enhanced to be more robust in 5313 // that regard, this can be, and should be, removed. 5314 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) { 5315 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8); 5316 unsigned AHExtOpcode = 5317 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX; 5318 5319 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32, 5320 MVT::Glue, AHCopy, InFlag); 5321 SDValue Result(RNode, 0); 5322 InFlag = SDValue(RNode, 1); 5323 5324 Result = 5325 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result); 5326 5327 ReplaceUses(SDValue(Node, 1), Result); 5328 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); 5329 dbgs() << '\n'); 5330 } 5331 // Copy the division (low) result, if it is needed. 5332 if (!SDValue(Node, 0).use_empty()) { 5333 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 5334 LoReg, NVT, InFlag); 5335 InFlag = Result.getValue(2); 5336 ReplaceUses(SDValue(Node, 0), Result); 5337 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); 5338 dbgs() << '\n'); 5339 } 5340 // Copy the remainder (high) result, if it is needed. 
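// Note: for i8 the remainder lives in AH and was already extracted by the
// NOREX copy above (which replaced this result's uses); this path reads it
// from DX/EDX/RDX for the wider types.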
5341 if (!SDValue(Node, 1).use_empty()) { 5342 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 5343 HiReg, NVT, InFlag); 5344 InFlag = Result.getValue(2); 5345 ReplaceUses(SDValue(Node, 1), Result); 5346 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); 5347 dbgs() << '\n'); 5348 } 5349 CurDAG->RemoveDeadNode(Node); 5350 return; 5351 } 5352 5353 case X86ISD::FCMP: 5354 case X86ISD::STRICT_FCMP: 5355 case X86ISD::STRICT_FCMPS: { 5356 bool IsStrictCmp = Node->getOpcode() == X86ISD::STRICT_FCMP || 5357 Node->getOpcode() == X86ISD::STRICT_FCMPS; 5358 SDValue N0 = Node->getOperand(IsStrictCmp ? 1 : 0); 5359 SDValue N1 = Node->getOperand(IsStrictCmp ? 2 : 1); 5360 5361 // Save the original VT of the compare. 5362 MVT CmpVT = N0.getSimpleValueType(); 5363 5364 // Floating point needs special handling if we don't have FCOMI. 5365 if (Subtarget->hasCMov()) 5366 break; 5367 5368 bool IsSignaling = Node->getOpcode() == X86ISD::STRICT_FCMPS; 5369 5370 unsigned Opc; 5371 switch (CmpVT.SimpleTy) { 5372 default: llvm_unreachable("Unexpected type!"); 5373 case MVT::f32: 5374 Opc = IsSignaling ? X86::COM_Fpr32 : X86::UCOM_Fpr32; 5375 break; 5376 case MVT::f64: 5377 Opc = IsSignaling ? X86::COM_Fpr64 : X86::UCOM_Fpr64; 5378 break; 5379 case MVT::f80: 5380 Opc = IsSignaling ? X86::COM_Fpr80 : X86::UCOM_Fpr80; 5381 break; 5382 } 5383 5384 SDValue Cmp; 5385 SDValue Chain = 5386 IsStrictCmp ? Node->getOperand(0) : CurDAG->getEntryNode(); 5387 if (IsStrictCmp) { 5388 SDVTList VTs = CurDAG->getVTList(MVT::i16, MVT::Other); 5389 Cmp = SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {N0, N1, Chain}), 0); 5390 Chain = Cmp.getValue(1); 5391 } else { 5392 Cmp = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i16, N0, N1), 0); 5393 } 5394 5395 // Move FPSW to AX. 5396 SDValue FPSW = CurDAG->getCopyToReg(Chain, dl, X86::FPSW, Cmp, SDValue()); 5397 Chain = FPSW; 5398 SDValue FNSTSW = 5399 SDValue(CurDAG->getMachineNode(X86::FNSTSW16r, dl, MVT::i16, FPSW, 5400 FPSW.getValue(1)), 5401 0); 5402 5403 // Extract upper 8-bits of AX. 5404 SDValue Extract = 5405 CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl, MVT::i8, FNSTSW); 5406 5407 // Move AH into flags. 5408 // Some 64-bit targets lack SAHF support, but they do support FCOMI. 5409 assert(Subtarget->hasLAHFSAHF() && 5410 "Target doesn't support SAHF or FCOMI?"); 5411 SDValue AH = CurDAG->getCopyToReg(Chain, dl, X86::AH, Extract, SDValue()); 5412 Chain = AH; 5413 SDValue SAHF = SDValue( 5414 CurDAG->getMachineNode(X86::SAHF, dl, MVT::i32, AH.getValue(1)), 0); 5415 5416 if (IsStrictCmp) 5417 ReplaceUses(SDValue(Node, 1), Chain); 5418 5419 ReplaceUses(SDValue(Node, 0), SAHF); 5420 CurDAG->RemoveDeadNode(Node); 5421 return; 5422 } 5423 5424 case X86ISD::CMP: { 5425 SDValue N0 = Node->getOperand(0); 5426 SDValue N1 = Node->getOperand(1); 5427 5428 // Optimizations for TEST compares. 5429 if (!isNullConstant(N1)) 5430 break; 5431 5432 // Save the original VT of the compare. 5433 MVT CmpVT = N0.getSimpleValueType(); 5434 5435 // If we are comparing (and (shr X, C), Mask) with 0, emit a BEXTR followed 5436 // by a test instruction. The test should be removed later by 5437 // analyzeCompare if we are using only the zero flag. 5438 // TODO: Should we check the users and use the BEXTR flags directly? 5439 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 5440 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) { 5441 unsigned TestOpc = CmpVT == MVT::i64 ?
X86::TEST64rr 5442 : X86::TEST32rr; 5443 SDValue BEXTR = SDValue(NewNode, 0); 5444 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR); 5445 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0)); 5446 CurDAG->RemoveDeadNode(Node); 5447 return; 5448 } 5449 } 5450 5451 // We can peek through truncates, but we need to be careful below. 5452 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse()) 5453 N0 = N0.getOperand(0); 5454 5455 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to 5456 // use a smaller encoding. 5457 // Look past the truncate if CMP is the only use of it. 5458 if (N0.getOpcode() == ISD::AND && 5459 N0.getNode()->hasOneUse() && 5460 N0.getValueType() != MVT::i8) { 5461 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 5462 if (!C) break; 5463 uint64_t Mask = C->getZExtValue(); 5464 5465 // Check if we can replace AND+IMM64 with a shift. This is possible for 5466 // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the zero 5467 // flag. 5468 if (CmpVT == MVT::i64 && !isInt<32>(Mask) && 5469 onlyUsesZeroFlag(SDValue(Node, 0))) { 5470 if (isMask_64(~Mask)) { 5471 unsigned TrailingZeros = countTrailingZeros(Mask); 5472 SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64); 5473 SDValue Shift = 5474 SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32, 5475 N0.getOperand(0), Imm), 0); 5476 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl, 5477 MVT::i32, Shift, Shift); 5478 ReplaceNode(Node, Test); 5479 return; 5480 } 5481 if (isMask_64(Mask)) { 5482 unsigned LeadingZeros = countLeadingZeros(Mask); 5483 SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64); 5484 SDValue Shift = 5485 SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32, 5486 N0.getOperand(0), Imm), 0); 5487 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl, 5488 MVT::i32, Shift, Shift); 5489 ReplaceNode(Node, Test); 5490 return; 5491 } 5492 } 5493 5494 MVT VT; 5495 int SubRegOp; 5496 unsigned ROpc, MOpc; 5497 5498 // For each of these checks we need to be careful if the sign flag is 5499 // being used. It is only safe to use the sign flag under two conditions: 5500 // either the sign bit in the shrunken mask is zero or the final test 5501 // size is equal to the original compare size. 5502 5503 if (isUInt<8>(Mask) && 5504 (!(Mask & 0x80) || CmpVT == MVT::i8 || 5505 hasNoSignFlagUses(SDValue(Node, 0)))) { 5506 // For example, convert "testl %eax, $8" to "testb %al, $8" 5507 VT = MVT::i8; 5508 SubRegOp = X86::sub_8bit; 5509 ROpc = X86::TEST8ri; 5510 MOpc = X86::TEST8mi; 5511 } else if (OptForMinSize && isUInt<16>(Mask) && 5512 (!(Mask & 0x8000) || CmpVT == MVT::i16 || 5513 hasNoSignFlagUses(SDValue(Node, 0)))) { 5514 // For example, "testl %eax, $32776" to "testw %ax, $32776". 5515 // NOTE: We only want to form TESTW instructions if optimizing for 5516 // min size. Otherwise we only save one byte and possibly get a length 5517 // changing prefix penalty in the decoders. 5518 VT = MVT::i16; 5519 SubRegOp = X86::sub_16bit; 5520 ROpc = X86::TEST16ri; 5521 MOpc = X86::TEST16mi; 5522 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 && 5523 ((!(Mask & 0x80000000) && 5524 // Without minsize, 16-bit Cmps can get here, so we need to 5525 // be sure we calculate the correct sign flag if needed.
(CmpVT != MVT::i16 || !(Mask & 0x8000))) || 5527 CmpVT == MVT::i32 || 5528 hasNoSignFlagUses(SDValue(Node, 0)))) { 5529 // For example, "testq %rax, $268468232" to "testl %eax, $268468232". 5530 // NOTE: We only want to run this transform if N0 is 32 or 64 bits. 5531 // Otherwise, we find ourselves in a position where we have to do 5532 // promotion. If previous passes did not promote the and, we assume 5533 // they had a good reason not to and do not promote here. 5534 VT = MVT::i32; 5535 SubRegOp = X86::sub_32bit; 5536 ROpc = X86::TEST32ri; 5537 MOpc = X86::TEST32mi; 5538 } else { 5539 // No eligible transformation was found. 5540 break; 5541 } 5542 5543 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT); 5544 SDValue Reg = N0.getOperand(0); 5545 5546 // Emit a testl or testw. 5547 MachineSDNode *NewNode; 5548 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 5549 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 5550 if (auto *LoadN = dyn_cast<LoadSDNode>(N0.getOperand(0).getNode())) { 5551 if (!LoadN->isSimple()) { 5552 unsigned NumVolBits = LoadN->getValueType(0).getSizeInBits(); 5553 if (MOpc == X86::TEST8mi && NumVolBits != 8) 5554 break; 5555 else if (MOpc == X86::TEST16mi && NumVolBits != 16) 5556 break; 5557 else if (MOpc == X86::TEST32mi && NumVolBits != 32) 5558 break; 5559 } 5560 } 5561 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm, 5562 Reg.getOperand(0) }; 5563 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops); 5564 // Update the chain. 5565 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1)); 5566 // Record the mem-refs 5567 CurDAG->setNodeMemRefs(NewNode, 5568 {cast<LoadSDNode>(Reg)->getMemOperand()}); 5569 } else { 5570 // Extract the subregister if necessary. 5571 if (N0.getValueType() != VT) 5572 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg); 5573 5574 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm); 5575 } 5576 // Replace CMP with TEST. 5577 ReplaceNode(Node, NewNode); 5578 return; 5579 } 5580 break; 5581 } 5582 case X86ISD::PCMPISTR: { 5583 if (!Subtarget->hasSSE42()) 5584 break; 5585 5586 bool NeedIndex = !SDValue(Node, 0).use_empty(); 5587 bool NeedMask = !SDValue(Node, 1).use_empty(); 5588 // We can't fold a load if we are going to make two instructions. 5589 bool MayFoldLoad = !NeedIndex || !NeedMask; 5590 5591 MachineSDNode *CNode; 5592 if (NeedMask) { 5593 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr; 5594 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm; 5595 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node); 5596 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0)); 5597 } 5598 if (NeedIndex || !NeedMask) { 5599 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr; 5600 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm; 5601 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node); 5602 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 5603 } 5604 5605 // Connect the flag usage to the last instruction created. 5606 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1)); 5607 CurDAG->RemoveDeadNode(Node); 5608 return; 5609 } 5610 case X86ISD::PCMPESTR: { 5611 if (!Subtarget->hasSSE42()) 5612 break; 5613 5614 // Copy the two implicit register inputs.
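// PCMPESTRI/PCMPESTRM read the two explicit string lengths from EAX and
// EDX; operands 1 and 3 of the node carry those lengths, so pin them to
// the physical registers before emitting the instruction.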
5615 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX, 5616 Node->getOperand(1), 5617 SDValue()).getValue(1); 5618 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX, 5619 Node->getOperand(3), InFlag).getValue(1); 5620 5621 bool NeedIndex = !SDValue(Node, 0).use_empty(); 5622 bool NeedMask = !SDValue(Node, 1).use_empty(); 5623 // We can't fold a load if we are going to make two instructions. 5624 bool MayFoldLoad = !NeedIndex || !NeedMask; 5625 5626 MachineSDNode *CNode; 5627 if (NeedMask) { 5628 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr; 5629 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm; 5630 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node, 5631 InFlag); 5632 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0)); 5633 } 5634 if (NeedIndex || !NeedMask) { 5635 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr; 5636 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm; 5637 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag); 5638 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 5639 } 5640 // Connect the flag usage to the last instruction created. 5641 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1)); 5642 CurDAG->RemoveDeadNode(Node); 5643 return; 5644 } 5645 5646 case ISD::SETCC: { 5647 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue())) 5648 return; 5649 5650 break; 5651 } 5652 5653 case ISD::STORE: 5654 if (foldLoadStoreIntoMemOperand(Node)) 5655 return; 5656 break; 5657 5658 case X86ISD::SETCC_CARRY: { 5659 // We have to do this manually because tblgen will put the eflags copy in 5660 // the wrong place if we use an extract_subreg in the pattern. 5661 MVT VT = Node->getSimpleValueType(0); 5662 5663 // Copy flags to the EFLAGS register and glue it to next node. 5664 SDValue EFLAGS = 5665 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS, 5666 Node->getOperand(1), SDValue()); 5667 5668 // Create a 64-bit instruction if the result is 64-bits otherwise use the 5669 // 32-bit version. 5670 unsigned Opc = VT == MVT::i64 ? X86::SETB_C64r : X86::SETB_C32r; 5671 MVT SetVT = VT == MVT::i64 ? MVT::i64 : MVT::i32; 5672 SDValue Result = SDValue( 5673 CurDAG->getMachineNode(Opc, dl, SetVT, EFLAGS, EFLAGS.getValue(1)), 0); 5674 5675 // For less than 32-bits we need to extract from the 32-bit node. 5676 if (VT == MVT::i8 || VT == MVT::i16) { 5677 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit; 5678 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result); 5679 } 5680 5681 ReplaceUses(SDValue(Node, 0), Result); 5682 CurDAG->RemoveDeadNode(Node); 5683 return; 5684 } 5685 case X86ISD::SBB: { 5686 if (isNullConstant(Node->getOperand(0)) && 5687 isNullConstant(Node->getOperand(1))) { 5688 MVT VT = Node->getSimpleValueType(0); 5689 5690 // Create zero. 5691 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32); 5692 SDValue Zero = 5693 SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, None), 0); 5694 if (VT == MVT::i64) { 5695 Zero = SDValue( 5696 CurDAG->getMachineNode( 5697 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64, 5698 CurDAG->getTargetConstant(0, dl, MVT::i64), Zero, 5699 CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)), 5700 0); 5701 } 5702 5703 // Copy flags to the EFLAGS register and glue it to next node. 
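// With both inputs zero, SBB computes 0 - 0 - CF, i.e. all-ones when the
// carry flag is set and zero otherwise, so (X86ISD::SBB 0, 0) is the
// classic carry-to-mask idiom.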
SDValue EFLAGS = 5705 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS, 5706 Node->getOperand(2), SDValue()); 5707 5708 // Create a 64-bit instruction if the result is 64-bits otherwise use the 5709 // 32-bit version. 5710 unsigned Opc = VT == MVT::i64 ? X86::SBB64rr : X86::SBB32rr; 5711 MVT SBBVT = VT == MVT::i64 ? MVT::i64 : MVT::i32; 5712 VTs = CurDAG->getVTList(SBBVT, MVT::i32); 5713 SDValue Result = 5714 SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {Zero, Zero, EFLAGS, 5715 EFLAGS.getValue(1)}), 5716 0); 5717 5718 // Replace the flag use. 5719 ReplaceUses(SDValue(Node, 1), Result.getValue(1)); 5720 5721 // Replace the result use. 5722 if (!SDValue(Node, 0).use_empty()) { 5723 // For less than 32-bits we need to extract from the 32-bit node. 5724 if (VT == MVT::i8 || VT == MVT::i16) { 5725 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit; 5726 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result); 5727 } 5728 ReplaceUses(SDValue(Node, 0), Result); 5729 } 5730 5731 CurDAG->RemoveDeadNode(Node); 5732 return; 5733 } 5734 break; 5735 } 5736 case X86ISD::MGATHER: { 5737 auto *Mgt = cast<X86MaskedGatherSDNode>(Node); 5738 SDValue IndexOp = Mgt->getIndex(); 5739 SDValue Mask = Mgt->getMask(); 5740 MVT IndexVT = IndexOp.getSimpleValueType(); 5741 MVT ValueVT = Node->getSimpleValueType(0); 5742 MVT MaskVT = Mask.getSimpleValueType(); 5743 5744 // This is just to prevent crashes if the nodes are malformed somehow. We 5745 // otherwise only do loose type checking in here, based on what 5746 // a type constraint would say, just like table-based isel. 5747 if (!ValueVT.isVector() || !MaskVT.isVector()) 5748 break; 5749 5750 unsigned NumElts = ValueVT.getVectorNumElements(); 5751 MVT ValueSVT = ValueVT.getVectorElementType(); 5752 5753 bool IsFP = ValueSVT.isFloatingPoint(); 5754 unsigned EltSize = ValueSVT.getSizeInBits(); 5755 5756 unsigned Opc = 0; 5757 bool AVX512Gather = MaskVT.getVectorElementType() == MVT::i1; 5758 if (AVX512Gather) { 5759 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32) 5760 Opc = IsFP ? X86::VGATHERDPSZ128rm : X86::VPGATHERDDZ128rm; 5761 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32) 5762 Opc = IsFP ? X86::VGATHERDPSZ256rm : X86::VPGATHERDDZ256rm; 5763 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32) 5764 Opc = IsFP ? X86::VGATHERDPSZrm : X86::VPGATHERDDZrm; 5765 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64) 5766 Opc = IsFP ? X86::VGATHERDPDZ128rm : X86::VPGATHERDQZ128rm; 5767 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64) 5768 Opc = IsFP ? X86::VGATHERDPDZ256rm : X86::VPGATHERDQZ256rm; 5769 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64) 5770 Opc = IsFP ? X86::VGATHERDPDZrm : X86::VPGATHERDQZrm; 5771 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32) 5772 Opc = IsFP ? X86::VGATHERQPSZ128rm : X86::VPGATHERQDZ128rm; 5773 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32) 5774 Opc = IsFP ? X86::VGATHERQPSZ256rm : X86::VPGATHERQDZ256rm; 5775 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32) 5776 Opc = IsFP ? X86::VGATHERQPSZrm : X86::VPGATHERQDZrm; 5777 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64) 5778 Opc = IsFP ? X86::VGATHERQPDZ128rm : X86::VPGATHERQQZ128rm; 5779 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64) 5780 Opc = IsFP ?
X86::VGATHERQPDZ256rm : X86::VPGATHERQQZ256rm; 5781 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64) 5782 Opc = IsFP ? X86::VGATHERQPDZrm : X86::VPGATHERQQZrm; 5783 } else { 5784 assert(EVT(MaskVT) == EVT(ValueVT).changeVectorElementTypeToInteger() && 5785 "Unexpected mask VT!"); 5786 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32) 5787 Opc = IsFP ? X86::VGATHERDPSrm : X86::VPGATHERDDrm; 5788 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32) 5789 Opc = IsFP ? X86::VGATHERDPSYrm : X86::VPGATHERDDYrm; 5790 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64) 5791 Opc = IsFP ? X86::VGATHERDPDrm : X86::VPGATHERDQrm; 5792 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64) 5793 Opc = IsFP ? X86::VGATHERDPDYrm : X86::VPGATHERDQYrm; 5794 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32) 5795 Opc = IsFP ? X86::VGATHERQPSrm : X86::VPGATHERQDrm; 5796 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32) 5797 Opc = IsFP ? X86::VGATHERQPSYrm : X86::VPGATHERQDYrm; 5798 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64) 5799 Opc = IsFP ? X86::VGATHERQPDrm : X86::VPGATHERQQrm; 5800 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64) 5801 Opc = IsFP ? X86::VGATHERQPDYrm : X86::VPGATHERQQYrm; 5802 } 5803 5804 if (!Opc) 5805 break; 5806 5807 SDValue Base, Scale, Index, Disp, Segment; 5808 if (!selectVectorAddr(Mgt, Mgt->getBasePtr(), IndexOp, Mgt->getScale(), 5809 Base, Scale, Index, Disp, Segment)) 5810 break; 5811 5812 SDValue PassThru = Mgt->getPassThru(); 5813 SDValue Chain = Mgt->getChain(); 5814 // Gather instructions have a mask output not in the ISD node. 5815 SDVTList VTs = CurDAG->getVTList(ValueVT, MaskVT, MVT::Other); 5816 5817 MachineSDNode *NewNode; 5818 if (AVX512Gather) { 5819 SDValue Ops[] = {PassThru, Mask, Base, Scale, 5820 Index, Disp, Segment, Chain}; 5821 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops); 5822 } else { 5823 SDValue Ops[] = {PassThru, Base, Scale, Index, 5824 Disp, Segment, Mask, Chain}; 5825 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops); 5826 } 5827 CurDAG->setNodeMemRefs(NewNode, {Mgt->getMemOperand()}); 5828 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0)); 5829 ReplaceUses(SDValue(Node, 1), SDValue(NewNode, 2)); 5830 CurDAG->RemoveDeadNode(Node); 5831 return; 5832 } 5833 case X86ISD::MSCATTER: { 5834 auto *Sc = cast<X86MaskedScatterSDNode>(Node); 5835 SDValue Value = Sc->getValue(); 5836 SDValue IndexOp = Sc->getIndex(); 5837 MVT IndexVT = IndexOp.getSimpleValueType(); 5838 MVT ValueVT = Value.getSimpleValueType(); 5839 5840 // This is just to prevent crashes if the nodes are malformed somehow. We 5841 // otherwise only do loose type checking in here, based on what 5842 // a type constraint would say, just like table-based isel. 5843 if (!ValueVT.isVector()) 5844 break; 5845 5846 unsigned NumElts = ValueVT.getVectorNumElements(); 5847 MVT ValueSVT = ValueVT.getVectorElementType(); 5848 5849 bool IsFP = ValueSVT.isFloatingPoint(); 5850 unsigned EltSize = ValueSVT.getSizeInBits(); 5851 5852 unsigned Opc; 5853 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32) 5854 Opc = IsFP ? X86::VSCATTERDPSZ128mr : X86::VPSCATTERDDZ128mr; 5855 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32) 5856 Opc = IsFP ? X86::VSCATTERDPSZ256mr : X86::VPSCATTERDDZ256mr; 5857 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32) 5858 Opc = IsFP ?
X86::VSCATTERDPSZmr : X86::VPSCATTERDDZmr; 5859 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64) 5860 Opc = IsFP ? X86::VSCATTERDPDZ128mr : X86::VPSCATTERDQZ128mr; 5861 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64) 5862 Opc = IsFP ? X86::VSCATTERDPDZ256mr : X86::VPSCATTERDQZ256mr; 5863 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64) 5864 Opc = IsFP ? X86::VSCATTERDPDZmr : X86::VPSCATTERDQZmr; 5865 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32) 5866 Opc = IsFP ? X86::VSCATTERQPSZ128mr : X86::VPSCATTERQDZ128mr; 5867 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32) 5868 Opc = IsFP ? X86::VSCATTERQPSZ256mr : X86::VPSCATTERQDZ256mr; 5869 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32) 5870 Opc = IsFP ? X86::VSCATTERQPSZmr : X86::VPSCATTERQDZmr; 5871 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64) 5872 Opc = IsFP ? X86::VSCATTERQPDZ128mr : X86::VPSCATTERQQZ128mr; 5873 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64) 5874 Opc = IsFP ? X86::VSCATTERQPDZ256mr : X86::VPSCATTERQQZ256mr; 5875 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64) 5876 Opc = IsFP ? X86::VSCATTERQPDZmr : X86::VPSCATTERQQZmr; 5877 else 5878 break; 5879 5880 SDValue Base, Scale, Index, Disp, Segment; 5881 if (!selectVectorAddr(Sc, Sc->getBasePtr(), IndexOp, Sc->getScale(), 5882 Base, Scale, Index, Disp, Segment)) 5883 break; 5884 5885 SDValue Mask = Sc->getMask(); 5886 SDValue Chain = Sc->getChain(); 5887 // Scatter instructions have a mask output not in the ISD node. 5888 SDVTList VTs = CurDAG->getVTList(Mask.getValueType(), MVT::Other); 5889 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Value, Chain}; 5890 5891 MachineSDNode *NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops); 5892 CurDAG->setNodeMemRefs(NewNode, {Sc->getMemOperand()}); 5893 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 1)); 5894 CurDAG->RemoveDeadNode(Node); 5895 return; 5896 } 5897 case ISD::PREALLOCATED_SETUP: { 5898 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>(); 5899 auto CallId = MFI->getPreallocatedIdForCallSite( 5900 cast<SrcValueSDNode>(Node->getOperand(1))->getValue()); 5901 SDValue Chain = Node->getOperand(0); 5902 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32); 5903 MachineSDNode *New = CurDAG->getMachineNode( 5904 TargetOpcode::PREALLOCATED_SETUP, dl, MVT::Other, CallIdValue, Chain); 5905 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Chain 5906 CurDAG->RemoveDeadNode(Node); 5907 return; 5908 } 5909 case ISD::PREALLOCATED_ARG: { 5910 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>(); 5911 auto CallId = MFI->getPreallocatedIdForCallSite( 5912 cast<SrcValueSDNode>(Node->getOperand(1))->getValue()); 5913 SDValue Chain = Node->getOperand(0); 5914 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32); 5915 SDValue ArgIndex = Node->getOperand(2); 5916 SDValue Ops[3]; 5917 Ops[0] = CallIdValue; 5918 Ops[1] = ArgIndex; 5919 Ops[2] = Chain; 5920 MachineSDNode *New = CurDAG->getMachineNode( 5921 TargetOpcode::PREALLOCATED_ARG, dl, 5922 CurDAG->getVTList(TLI->getPointerTy(CurDAG->getDataLayout()), 5923 MVT::Other), 5924 Ops); 5925 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Arg pointer 5926 ReplaceUses(SDValue(Node, 1), SDValue(New, 1)); // Chain 5927 CurDAG->RemoveDeadNode(Node); 5928 return; 5929 } 5930 case X86ISD::AESENCWIDE128KL: 5931 case X86ISD::AESDECWIDE128KL: 5932 case 
X86ISD::AESENCWIDE256KL: 5933 case X86ISD::AESDECWIDE256KL: { 5934 if (!Subtarget->hasWIDEKL()) 5935 break; 5936 5937 unsigned Opcode; 5938 switch (Node->getOpcode()) { 5939 default: 5940 llvm_unreachable("Unexpected opcode!"); 5941 case X86ISD::AESENCWIDE128KL: 5942 Opcode = X86::AESENCWIDE128KL; 5943 break; 5944 case X86ISD::AESDECWIDE128KL: 5945 Opcode = X86::AESDECWIDE128KL; 5946 break; 5947 case X86ISD::AESENCWIDE256KL: 5948 Opcode = X86::AESENCWIDE256KL; 5949 break; 5950 case X86ISD::AESDECWIDE256KL: 5951 Opcode = X86::AESDECWIDE256KL; 5952 break; 5953 } 5954 5955 SDValue Chain = Node->getOperand(0); 5956 SDValue Addr = Node->getOperand(1); 5957 5958 SDValue Base, Scale, Index, Disp, Segment; 5959 if (!selectAddr(Node, Addr, Base, Scale, Index, Disp, Segment)) 5960 break; 5961 5962 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(2), 5963 SDValue()); 5964 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(3), 5965 Chain.getValue(1)); 5966 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM2, Node->getOperand(4), 5967 Chain.getValue(1)); 5968 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM3, Node->getOperand(5), 5969 Chain.getValue(1)); 5970 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM4, Node->getOperand(6), 5971 Chain.getValue(1)); 5972 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM5, Node->getOperand(7), 5973 Chain.getValue(1)); 5974 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM6, Node->getOperand(8), 5975 Chain.getValue(1)); 5976 Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM7, Node->getOperand(9), 5977 Chain.getValue(1)); 5978 5979 MachineSDNode *Res = CurDAG->getMachineNode( 5980 Opcode, dl, Node->getVTList(), 5981 {Base, Scale, Index, Disp, Segment, Chain, Chain.getValue(1)}); 5982 CurDAG->setNodeMemRefs(Res, cast<MemSDNode>(Node)->getMemOperand()); 5983 ReplaceNode(Node, Res); 5984 return; 5985 } 5986 } 5987 5988 SelectCode(Node); 5989 } 5990 5991 bool X86DAGToDAGISel:: 5992 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID, 5993 std::vector<SDValue> &OutOps) { 5994 SDValue Op0, Op1, Op2, Op3, Op4; 5995 switch (ConstraintID) { 5996 default: 5997 llvm_unreachable("Unexpected asm memory constraint"); 5998 case InlineAsm::Constraint_o: // offsetable ?? 5999 case InlineAsm::Constraint_v: // not offsetable ?? 6000 case InlineAsm::Constraint_m: // memory 6001 case InlineAsm::Constraint_X: 6002 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4)) 6003 return true; 6004 break; 6005 } 6006 6007 OutOps.push_back(Op0); 6008 OutOps.push_back(Op1); 6009 OutOps.push_back(Op2); 6010 OutOps.push_back(Op3); 6011 OutOps.push_back(Op4); 6012 return false; 6013 } 6014 6015 /// This pass converts a legalized DAG into an X86-specific DAG, 6016 /// ready for instruction scheduling. 6017 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, 6018 CodeGenOpt::Level OptLevel) { 6019 return new X86DAGToDAGISel(TM, OptLevel); 6020 } 6021
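// Usage sketch (mirroring X86TargetMachine.cpp at the time of writing; see
// that file for the authoritative wiring): the target's addInstSelector()
// hook registers this pass with
//   addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
// which schedules it in the codegen pipeline ahead of register allocation.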