//===-- PPCISelDAGToDAG.cpp - PPC --pattern matching inst selector --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for PowerPC,
// converting from a legalized dag to a PPC dag.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "ppc-codegen"

STATISTIC(NumSextSetcc,
          "Number of (sext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(NumZextSetcc,
          "Number of (zext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(SignExtensionsAdded,
          "Number of sign extensions for compare inputs added.");
STATISTIC(ZeroExtensionsAdded,
          "Number of zero extensions for compare inputs added.");
STATISTIC(NumLogicOpsOnComparison,
          "Number of logical ops on i1 values calculated in GPR.");
STATISTIC(OmittedForNonExtendUses,
          "Number of compares not eliminated as they have non-extending uses.");
STATISTIC(NumP9Setb,
          "Number of compares lowered to setb.");

// FIXME: Remove this once the bug has been fixed!
cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
                          cl::desc("expose the ANDI glue bug on PPC"), cl::Hidden);

static cl::opt<bool>
    UseBitPermRewriter("ppc-use-bit-perm-rewriter", cl::init(true),
                       cl::desc("use aggressive ppc isel for bit permutations"),
                       cl::Hidden);
static cl::opt<bool> BPermRewriterNoMasking(
    "ppc-bit-perm-rewriter-stress-rotates",
    cl::desc("stress rotate selection in aggressive ppc isel for "
             "bit permutations"),
    cl::Hidden);

static cl::opt<bool> EnableBranchHint(
    "ppc-use-branch-hint", cl::init(true),
    cl::desc("Enable static hinting of branches on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableTLSOpt(
    "ppc-tls-opt", cl::init(true),
    cl::desc("Enable tls optimization peephole"),
    cl::Hidden);

enum ICmpInGPRType { ICGPR_All, ICGPR_None, ICGPR_I32, ICGPR_I64,
                     ICGPR_NonExtIn, ICGPR_Zext, ICGPR_Sext, ICGPR_ZextI32,
                     ICGPR_SextI32, ICGPR_ZextI64, ICGPR_SextI64 };

static cl::opt<ICmpInGPRType> CmpInGPR(
    "ppc-gpr-icmps", cl::Hidden, cl::init(ICGPR_All),
    cl::desc("Specify the types of comparisons to emit GPR-only code for."),
    cl::values(clEnumValN(ICGPR_None, "none", "Do not modify integer comparisons."),
               clEnumValN(ICGPR_All, "all", "All possible int comparisons in GPRs."),
               clEnumValN(ICGPR_I32, "i32", "Only i32 comparisons in GPRs."),
               clEnumValN(ICGPR_I64, "i64", "Only i64 comparisons in GPRs."),
               clEnumValN(ICGPR_NonExtIn, "nonextin",
                          "Only comparisons where inputs don't need [sz]ext."),
               clEnumValN(ICGPR_Zext, "zext", "Only comparisons with zext result."),
               clEnumValN(ICGPR_ZextI32, "zexti32",
                          "Only i32 comparisons with zext result."),
               clEnumValN(ICGPR_ZextI64, "zexti64",
                          "Only i64 comparisons with zext result."),
               clEnumValN(ICGPR_Sext, "sext", "Only comparisons with sext result."),
               clEnumValN(ICGPR_SextI32, "sexti32",
                          "Only i32 comparisons with sext result."),
               clEnumValN(ICGPR_SextI64, "sexti64",
                          "Only i64 comparisons with sext result.")));
namespace {

  //===--------------------------------------------------------------------===//
  /// PPCDAGToDAGISel - PPC specific code to select PPC machine
  /// instructions for SelectionDAG operations.
  ///
  class PPCDAGToDAGISel : public SelectionDAGISel {
    const PPCTargetMachine &TM;
    const PPCSubtarget *Subtarget = nullptr;
    const PPCTargetLowering *PPCLowering = nullptr;
    unsigned GlobalBaseReg = 0;

  public:
    explicit PPCDAGToDAGISel(PPCTargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), TM(tm) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Make sure we re-emit a set of the global base reg if necessary
      GlobalBaseReg = 0;
      Subtarget = &MF.getSubtarget<PPCSubtarget>();
      PPCLowering = Subtarget->getTargetLowering();
      if (Subtarget->hasROPProtect()) {
        // Create a place on the stack for the ROP Protection Hash.
        // The ROP Protection Hash will always be 8 bytes and aligned to 8
        // bytes.
        MachineFrameInfo &MFI = MF.getFrameInfo();
        PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
        const int Result = MFI.CreateStackObject(8, Align(8), false);
        FI->setROPProtectionHashSaveIndex(Result);
      }
      SelectionDAGISel::runOnMachineFunction(MF);

      return true;
    }

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm, const SDLoc &dl) {
      return CurDAG->getTargetConstant(Imm, dl, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
      return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
    }

    /// getI64Imm - Return a target constant with the specified value, of type
    /// i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &dl) {
      return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
    }

    /// getSmallIPtrImm - Return a target constant of pointer type.
    inline SDValue getSmallIPtrImm(unsigned Imm, const SDLoc &dl) {
      return CurDAG->getTargetConstant(
          Imm, dl, PPCLowering->getPointerTy(CurDAG->getDataLayout()));
    }

    /// isRotateAndMask - Returns true if Mask and Shift can be folded into a
    /// rotate and mask opcode and mask operation.
    static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
                                unsigned &SH, unsigned &MB, unsigned &ME);

    /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
    /// base register. Return the virtual register that holds this value.
    SDNode *getGlobalBaseReg();

    void selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);

    // Select - Convert the specified operand from a target-independent to a
    // target-specific node if it hasn't already been changed.
    void Select(SDNode *N) override;

    bool tryBitfieldInsert(SDNode *N);
    bool tryBitPermutation(SDNode *N);
    bool tryIntCompareInGPR(SDNode *N);

    // tryTLSXFormLoad - Convert an ISD::LOAD fed by a PPCISD::ADD_TLS into
    // an X-Form load instruction with the offset being a relocation coming from
    // the PPCISD::ADD_TLS.
    bool tryTLSXFormLoad(LoadSDNode *N);
    // tryTLSXFormStore - Convert an ISD::STORE fed by a PPCISD::ADD_TLS into
    // an X-Form store instruction with the offset being a relocation coming from
    // the PPCISD::ADD_TLS.
    bool tryTLSXFormStore(StoreSDNode *N);
    /// SelectCC - Select a comparison of the specified values with the
    /// specified condition code, returning the CR# of the expression.
    SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                     const SDLoc &dl, SDValue Chain = SDValue());

    /// SelectAddrImmOffs - Return true if the operand is valid for a preinc
    /// immediate field. Note that the operand at this point is already the
    /// result of a prior SelectAddressRegImm call.
    bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
      if (N.getOpcode() == ISD::TargetConstant ||
          N.getOpcode() == ISD::TargetGlobalAddress) {
        Out = N;
        return true;
      }

      return false;
    }

    /// SelectDSForm - Returns true if address N can be represented by the
    /// addressing mode of DSForm instructions (a base register, plus a signed
    /// 16-bit displacement that is a multiple of 4).
    bool SelectDSForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                Align(4)) == PPC::AM_DSForm;
    }

    /// SelectDQForm - Returns true if address N can be represented by the
    /// addressing mode of DQForm instructions (a base register, plus a signed
    /// 16-bit displacement that is a multiple of 16).
    bool SelectDQForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                Align(16)) == PPC::AM_DQForm;
    }

    /// SelectDForm - Returns true if address N can be represented by
    /// the addressing mode of DForm instructions (a base register, plus a
    /// signed 16-bit immediate).
    bool SelectDForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                None) == PPC::AM_DForm;
    }

    /// SelectPCRelForm - Returns true if address N can be represented by
    /// PC-Relative addressing mode.
    bool SelectPCRelForm(SDNode *Parent, SDValue N, SDValue &Disp,
                         SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                None) == PPC::AM_PCRel;
    }

    /// SelectPDForm - Returns true if address N can be represented by Prefixed
    /// DForm addressing mode (a base register, plus a signed 34-bit immediate).
    bool SelectPDForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                None) == PPC::AM_PrefixDForm;
    }

    /// SelectXForm - Returns true if address N can be represented by the
    /// addressing mode of XForm instructions (an indexed [r+r] operation).
    bool SelectXForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                                None) == PPC::AM_XForm;
    }

    /// SelectForceXForm - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation (an XForm instruction).
    bool SelectForceXForm(SDNode *Parent, SDValue N, SDValue &Disp,
                          SDValue &Base) {
      return PPCLowering->SelectForceXFormMode(N, Disp, Base, *CurDAG) ==
             PPC::AM_XForm;
    }

    /// SelectAddrIdx - Given the specified address, check to see if it can be
    /// represented as an indexed [r+r] operation.
    /// This is for xform instructions whose associated displacement form is D.
    /// The last parameter \p 0 means the associated D form has no alignment
    /// requirement on its 16-bit signed displacement.
    /// Returns false if it can be represented by [r+imm], which is preferred.
    bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None);
    }

    /// SelectAddrIdxX4 - Given the specified address, check to see if it can be
    /// represented as an indexed [r+r] operation.
    /// This is for xform instructions whose associated displacement form is DS.
    /// The last parameter \p 4 means the associated DS form 16-bit signed
    /// displacement must be a multiple of 4.
    /// Returns false if it can be represented by [r+imm], which is preferred.
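    /// For example, ld and std use the DS encoding, so their immediate
    /// displacement must be a multiple of 4; otherwise the indexed forms
    /// (ldx/stdx) have to be used instead.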
    bool SelectAddrIdxX4(SDValue N, SDValue &Base, SDValue &Index) {
      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
                                              Align(4));
    }

    /// SelectAddrIdxX16 - Given the specified address, check to see if it can
    /// be represented as an indexed [r+r] operation.
    /// This is for xform instructions whose associated displacement form is DQ.
    /// The last parameter \p 16 means the associated DQ form 16-bit signed
    /// displacement must be a multiple of 16.
    /// Returns false if it can be represented by [r+imm], which is preferred.
    bool SelectAddrIdxX16(SDValue N, SDValue &Base, SDValue &Index) {
      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
                                              Align(16));
    }

    /// SelectAddrIdxOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
      return PPCLowering->SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
    }

    /// SelectAddrImm - Returns true if the address N can be represented by
    /// a base register plus a signed 16-bit displacement [r+imm].
    /// The last parameter \p 0 means the D form has no alignment requirement
    /// on its 16-bit signed displacement.
    bool SelectAddrImm(SDValue N, SDValue &Disp,
                       SDValue &Base) {
      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None);
    }

    /// SelectAddrImmX4 - Returns true if the address N can be represented by
    /// a base register plus a signed 16-bit displacement that is a multiple of
    /// 4 (last parameter). Suitable for use by STD and friends.
    bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, Align(4));
    }

    /// SelectAddrImmX16 - Returns true if the address N can be represented by
    /// a base register plus a signed 16-bit displacement that is a multiple of
    /// 16 (last parameter). Suitable for use by STXV and friends.
    bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG,
                                              Align(16));
    }

    /// SelectAddrImmX34 - Returns true if the address N can be represented by
    /// a base register plus a signed 34-bit displacement. Suitable for use by
    /// PSTXVP and friends.
    bool SelectAddrImmX34(SDValue N, SDValue &Disp, SDValue &Base) {
      return PPCLowering->SelectAddressRegImm34(N, Disp, Base, *CurDAG);
    }

    // Select an address into a single register.
    bool SelectAddr(SDValue N, SDValue &Base) {
      Base = N;
      return true;
    }

    bool SelectAddrPCRel(SDValue N, SDValue &Base) {
      return PPCLowering->SelectAddressPCRel(N, Base);
    }

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions. It is always correct to compute the value into
    /// a register. The case of adding a (possibly relocatable) constant to a
    /// register can be improved, but it is wrong to substitute Reg+Reg for
    /// Reg in an asm, because the load or store opcode would have to change.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override {
      switch(ConstraintID) {
      default:
        errs() << "ConstraintID: " << ConstraintID << "\n";
        llvm_unreachable("Unexpected asm memory constraint");
      case InlineAsm::Constraint_es:
      case InlineAsm::Constraint_m:
      case InlineAsm::Constraint_o:
      case InlineAsm::Constraint_Q:
      case InlineAsm::Constraint_Z:
      case InlineAsm::Constraint_Zy:
        // We need to make sure that this one operand does not end up in r0
        // (because we might end up lowering this as 0(%op)).
        const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
        const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
        SDLoc dl(Op);
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
        SDValue NewOp =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         dl, Op.getValueType(),
                                         Op, RC), 0);

        OutOps.push_back(NewOp);
        return false;
      }
      return true;
    }

    StringRef getPassName() const override {
      return "PowerPC DAG->DAG Pattern Instruction Selection";
    }

    // Include the pieces autogenerated from the target description.
#include "PPCGenDAGISel.inc"

  private:
    bool trySETCC(SDNode *N);
    bool tryFoldSWTestBRCC(SDNode *N);
    bool tryAsSingleRLDICL(SDNode *N);
    bool tryAsSingleRLDICR(SDNode *N);
    bool tryAsSingleRLWINM(SDNode *N);
    bool tryAsSingleRLWINM8(SDNode *N);
    bool tryAsSingleRLWIMI(SDNode *N);
    bool tryAsPairOfRLDICL(SDNode *N);
    bool tryAsSingleRLDIMI(SDNode *N);

    void PeepholePPC64();
    void PeepholePPC64ZExt();
    void PeepholeCROps();

    SDValue combineToCMPB(SDNode *N);
    void foldBoolExts(SDValue &Res, SDNode *&N);

    bool AllUsersSelectZero(SDNode *N);
    void SwapAllSelectUsers(SDNode *N);

    bool isOffsetMultipleOf(SDNode *N, unsigned Val) const;
    void transferMemOperands(SDNode *N, SDNode *Result);
  };

} // end anonymous namespace

/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
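/// On 32-bit ELF targets this is the physical register R30; otherwise a fresh
/// virtual register is created and initialized via MovePCtoLR/MFLR (or their
/// 64-bit counterparts).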
///
SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
  if (!GlobalBaseReg) {
    const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
    // Insert the set of GlobalBaseReg into the first MBB of the function
    MachineBasicBlock &FirstMBB = MF->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    const Module *M = MF->getFunction().getParent();
    DebugLoc dl;

    if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
      if (Subtarget->isTargetELF()) {
        GlobalBaseReg = PPC::R30;
        if (!Subtarget->isSecurePlt() &&
            M->getPICLevel() == PICLevel::SmallPIC) {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        } else {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          Register TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
          BuildMI(FirstMBB, MBBI, dl,
                  TII.get(PPC::UpdateGBR), GlobalBaseReg)
              .addReg(TempReg, RegState::Define).addReg(GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        }
      } else {
        GlobalBaseReg =
          RegInfo->createVirtualRegister(&PPC::GPRC_and_GPRC_NOR0RegClass);
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
      }
    } else {
      // We must ensure that this sequence is dominated by the prologue.
      // FIXME: This is a bit of a big hammer since we don't get the benefits
      // of shrink-wrapping whenever we emit this instruction. Considering
      // this is used in any function where we emit a jump table, this may be
      // a significant limitation. We should consider inserting this in the
      // block where it is used and then commoning this sequence up if it
      // appears in multiple places.
      // Note: on ISA 3.0 cores, we can use lnia (addpcis) instead of
      // MovePCtoLR8.
      MF->getInfo<PPCFunctionInfo>()->setShrinkWrapDisabled(true);
      GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
    }
  }
  return CurDAG->getRegister(GlobalBaseReg,
                             PPCLowering->getPointerTy(CurDAG->getDataLayout()))
      .getNode();
}

// Check if an SDValue has the toc-data attribute.
static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
  GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val);
  if (!GA)
    return false;

  const GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(GA->getGlobal());
  if (!GV)
    return false;

  if (!GV->hasAttribute("toc-data"))
    return false;

  // TODO: These asserts should be updated as more support for the toc data
  // transformation is added (64 bit, struct support, etc.).
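  // Illustrative example: a simple scalar global (e.g. an 'int') carrying the
  // "toc-data" attribute satisfies all of the checks below and can be accessed
  // directly relative to the TOC base register.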

  assert(PointerSize == 4 && "Only 32 Bit Codegen is currently supported by "
                             "the toc data transformation.");

  assert(PointerSize >= GV->getAlign().valueOrOne().value() &&
         "GlobalVariables with an alignment requirement stricter than 4 bytes "
         "are not supported by the toc data transformation.");

  Type *GVType = GV->getValueType();

  assert(GVType->isSized() && "A GlobalVariable's size must be known to be "
                              "supported by the toc data transformation.");

  if (GVType->isVectorTy())
    report_fatal_error("A GlobalVariable of Vector type is not currently "
                       "supported by the toc data transformation.");

  if (GVType->isArrayTy())
    report_fatal_error("A GlobalVariable of Array type is not currently "
                       "supported by the toc data transformation.");

  if (GVType->isStructTy())
    report_fatal_error("A GlobalVariable of Struct type is not currently "
                       "supported by the toc data transformation.");

  assert(GVType->getPrimitiveSizeInBits() <= PointerSize * 8 &&
         "A GlobalVariable with size larger than 32 bits is not currently "
         "supported by the toc data transformation.");

  if (GV->hasLocalLinkage() || GV->hasPrivateLinkage())
    report_fatal_error("A GlobalVariable with private or local linkage is not "
                       "currently supported by the toc data transformation.");

  assert(!GV->hasCommonLinkage() &&
         "Tentative definitions cannot have the mapping class XMC_TD.");

  return true;
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

/// isInt64Immediate - This method tests to see if the node is a 64-bit constant
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit constant
// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

/// isInt64Immediate - This method tests to see if the value is a 64-bit
/// constant operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDValue N, uint64_t &Imm) {
  return isInt64Immediate(N.getNode(), Imm);
}

static unsigned getBranchHint(unsigned PCC,
                              const FunctionLoweringInfo &FuncInfo,
                              const SDValue &DestMBB) {
  assert(isa<BasicBlockSDNode>(DestMBB));

  if (!FuncInfo.BPI) return PPC::BR_NO_HINT;

  const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
  const Instruction *BBTerm = BB->getTerminator();

  if (BBTerm->getNumSuccessors() != 2) return PPC::BR_NO_HINT;

  const BasicBlock *TBB = BBTerm->getSuccessor(0);
  const BasicBlock *FBB = BBTerm->getSuccessor(1);

  auto TProb = FuncInfo.BPI->getEdgeProbability(BB, TBB);
  auto FProb = FuncInfo.BPI->getEdgeProbability(BB, FBB);

  // We only want to handle cases which are easy to predict statically, e.g.
  // a C++ throw statement, which is very likely not taken, or a call to a
  // function that never returns, e.g. stdlib exit(). So we set Threshold to
  // filter out unwanted cases.
  //
  // Below is the LLVM branch weight table; we only want to handle cases 1 and 2.
  //
  // Case                  Taken:Nontaken   Example
  // 1. Unreachable        1048575:1        C++ throw, stdlib exit(),
  // 2. Invoke-terminating 1:1048575
  // 3. Coldblock          4:64             __builtin_expect
  // 4. Loop Branch        124:4            For loop
  // 5. PH/ZH/FPH          20:12
  const uint32_t Threshold = 10000;

  if (std::max(TProb, FProb) / Threshold < std::min(TProb, FProb))
    return PPC::BR_NO_HINT;

  LLVM_DEBUG(dbgs() << "Use branch hint for '" << FuncInfo.Fn->getName()
                    << "::" << BB->getName() << "'\n"
                    << " -> " << TBB->getName() << ": " << TProb << "\n"
                    << " -> " << FBB->getName() << ": " << FProb << "\n");

  const BasicBlockSDNode *BBDN = cast<BasicBlockSDNode>(DestMBB);

  // If Dest BasicBlock is False-BasicBlock (FBB), swap branch probabilities,
  // because we want 'TProb' to stand for the branch probability to the Dest
  // BasicBlock.
  if (BBDN->getBasicBlock()->getBasicBlock() != TBB)
    std::swap(TProb, FProb);

  return (TProb > FProb) ? PPC::BR_TAKEN_HINT : PPC::BR_NONTAKEN_HINT;
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc
         && isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

void PPCDAGToDAGISel::selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
  SDLoc dl(SN);
  int FI = cast<FrameIndexSDNode>(N)->getIndex();
  SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
  unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
  if (SN->hasOneUse())
    CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
                         getSmallIPtrImm(Offset, dl));
  else
    ReplaceNode(SN, CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
                                           getSmallIPtrImm(Offset, dl)));
}

bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
                                      bool isShiftMask, unsigned &SH,
                                      unsigned &MB, unsigned &ME) {
  // Don't even go down this path for i64, since different logic will be
  // necessary for rldicl/rldicr/rldimi.
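  // Illustrative example: selecting (x << 3) & 0xF8 yields SH = 3 and the
  // mask run [MB, ME] = [24, 28] (IBM bit numbering), i.e. a single rlwinm.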
  if (N->getValueType(0) != MVT::i32)
    return false;

  unsigned Shift = 32;
  unsigned Indeterminate = ~0;  // bit mask marking indeterminate results
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() != 2 ||
      !isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
    return false;

  if (Opcode == ISD::SHL) {
    // apply shift left to mask if it comes first
    if (isShiftMask) Mask = Mask << Shift;
    // determine which bits are made indeterminate by shift
    Indeterminate = ~(0xFFFFFFFFu << Shift);
  } else if (Opcode == ISD::SRL) {
    // apply shift right to mask if it comes first
    if (isShiftMask) Mask = Mask >> Shift;
    // determine which bits are made indeterminate by shift
    Indeterminate = ~(0xFFFFFFFFu >> Shift);
    // adjust for the left rotate
    Shift = 32 - Shift;
  } else if (Opcode == ISD::ROTL) {
    Indeterminate = 0;
  } else {
    return false;
  }

  // if the mask doesn't intersect any Indeterminate bits
  if (Mask && !(Mask & Indeterminate)) {
    SH = Shift & 31;
    // make sure the mask is still a mask (wrap arounds may not be)
    return isRunOfOnes(Mask, MB, ME);
  }
  return false;
}

bool PPCDAGToDAGISel::tryTLSXFormStore(StoreSDNode *ST) {
  SDValue Base = ST->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = ST->getOffset();
  if (!Offset.isUndef())
    return false;
  if (Base.getOperand(1).getOpcode() == PPCISD::TLS_LOCAL_EXEC_MAT_ADDR)
    return false;

  SDLoc dl(ST);
  EVT MemVT = ST->getMemoryVT();
  EVT RegVT = ST->getValue().getValueType();

  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::STBXTLS_32 : PPC::STBXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::STHXTLS_32 : PPC::STHXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::STWXTLS_32 : PPC::STWXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::STDXTLS;
    break;
  }
  }
  SDValue Chain = ST->getChain();
  SDVTList VTs = ST->getVTList();
  SDValue Ops[] = {ST->getValue(), Base.getOperand(0), Base.getOperand(1),
                   Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(ST, MN);
  ReplaceNode(ST, MN);
  return true;
}

bool PPCDAGToDAGISel::tryTLSXFormLoad(LoadSDNode *LD) {
  SDValue Base = LD->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = LD->getOffset();
  if (!Offset.isUndef())
    return false;
  if (Base.getOperand(1).getOpcode() == PPCISD::TLS_LOCAL_EXEC_MAT_ADDR)
    return false;

  SDLoc dl(LD);
  EVT MemVT = LD->getMemoryVT();
  EVT RegVT = LD->getValueType(0);
  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::LBZXTLS_32 : PPC::LBZXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::LHZXTLS_32 : PPC::LHZXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::LWZXTLS_32 : PPC::LWZXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::LDXTLS;
    break;
  }
  }
  SDValue Chain = LD->getChain();
  SDVTList VTs = LD->getVTList();
  SDValue Ops[] = {Base.getOperand(0), Base.getOperand(1), Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(LD, MN);
  ReplaceNode(LD, MN);
  return true;
}

/// Turn an or of two masked values into the rotate left word immediate then
/// mask insert (rlwimi) instruction.
bool PPCDAGToDAGISel::tryBitfieldInsert(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc dl(N);

  KnownBits LKnown = CurDAG->computeKnownBits(Op0);
  KnownBits RKnown = CurDAG->computeKnownBits(Op1);

  unsigned TargetMask = LKnown.Zero.getZExtValue();
  unsigned InsertMask = RKnown.Zero.getZExtValue();

  if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
    unsigned Op0Opc = Op0.getOpcode();
    unsigned Op1Opc = Op1.getOpcode();
    unsigned Value, SH = 0;
    TargetMask = ~TargetMask;
    InsertMask = ~InsertMask;

    // If the LHS has a foldable shift and the RHS does not, then swap it to the
    // RHS so that we can fold the shift into the insert.
    if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
      if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
          Op0.getOperand(0).getOpcode() == ISD::SRL) {
        if (Op1.getOperand(0).getOpcode() != ISD::SHL &&
            Op1.getOperand(0).getOpcode() != ISD::SRL) {
          std::swap(Op0, Op1);
          std::swap(Op0Opc, Op1Opc);
          std::swap(TargetMask, InsertMask);
        }
      }
    } else if (Op0Opc == ISD::SHL || Op0Opc == ISD::SRL) {
      if (Op1Opc == ISD::AND && Op1.getOperand(0).getOpcode() != ISD::SHL &&
          Op1.getOperand(0).getOpcode() != ISD::SRL) {
        std::swap(Op0, Op1);
        std::swap(Op0Opc, Op1Opc);
        std::swap(TargetMask, InsertMask);
      }
    }

    unsigned MB, ME;
    if (isRunOfOnes(InsertMask, MB, ME)) {
      if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
          isInt32Immediate(Op1.getOperand(1), Value)) {
        Op1 = Op1.getOperand(0);
        SH = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
      }
      if (Op1Opc == ISD::AND) {
        // The AND mask might not be a constant, and we need to make sure that
        // if we're going to fold the masking with the insert, all bits not
        // known to be zero in the mask are known to be one.
        KnownBits MKnown = CurDAG->computeKnownBits(Op1.getOperand(1));
        bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();

        unsigned SHOpc = Op1.getOperand(0).getOpcode();
        if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
            isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
          // Note that Value must be in range here (less than 32) because
          // otherwise there would not be any bits set in InsertMask.
          Op1 = Op1.getOperand(0).getOperand(0);
          SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
        }
      }

      SH &= 31;
      SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
                        getI32Imm(ME, dl) };
      ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
      return true;
    }
  }
  return false;
}

static unsigned allUsesTruncate(SelectionDAG *CurDAG, SDNode *N) {
  unsigned MaxTruncation = 0;
  // Cannot use range-based for loop here as we need the actual use (i.e. we
  // need the operand number corresponding to the use). A range-based for
  // will unbox the use and provide an SDNode*.
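  // Illustrative example: if a 64-bit constant is only used by i32 truncating
  // stores, MaxTruncation is 32, so only the low 32 bits of the constant need
  // to be materialized exactly.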
  for (SDNode::use_iterator Use = N->use_begin(), UseEnd = N->use_end();
       Use != UseEnd; ++Use) {
    unsigned Opc =
      Use->isMachineOpcode() ? Use->getMachineOpcode() : Use->getOpcode();
    switch (Opc) {
    default: return 0;
    case ISD::TRUNCATE:
      if (Use->isMachineOpcode())
        return 0;
      MaxTruncation =
        std::max(MaxTruncation, (unsigned)Use->getValueType(0).getSizeInBits());
      continue;
    case ISD::STORE: {
      if (Use->isMachineOpcode())
        return 0;
      StoreSDNode *STN = cast<StoreSDNode>(*Use);
      unsigned MemVTSize = STN->getMemoryVT().getSizeInBits();
      if (MemVTSize == 64 || Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, MemVTSize);
      continue;
    }
    case PPC::STW8:
    case PPC::STWX8:
    case PPC::STWU8:
    case PPC::STWUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 32u);
      continue;
    case PPC::STH8:
    case PPC::STHX8:
    case PPC::STHU8:
    case PPC::STHUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 16u);
      continue;
    case PPC::STB8:
    case PPC::STBX8:
    case PPC::STBU8:
    case PPC::STBUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 8u);
      continue;
    }
  }
  return MaxTruncation;
}

// For any 32 < Num < 64, check if the Imm contains at least Num consecutive
// zeros and return the number of bits to the left of these consecutive zeros.
static int findContiguousZerosAtLeast(uint64_t Imm, unsigned Num) {
  unsigned HiTZ = countTrailingZeros<uint32_t>(Hi_32(Imm));
  unsigned LoLZ = countLeadingZeros<uint32_t>(Lo_32(Imm));
  if ((HiTZ + LoLZ) >= Num)
    return (32 + HiTZ);
  return 0;
}

// Direct materialization of 64-bit constants by enumerated patterns.
static SDNode *selectI64ImmDirect(SelectionDAG *CurDAG, const SDLoc &dl,
                                  uint64_t Imm, unsigned &InstCnt) {
  unsigned TZ = countTrailingZeros<uint64_t>(Imm);
  unsigned LZ = countLeadingZeros<uint64_t>(Imm);
  unsigned TO = countTrailingOnes<uint64_t>(Imm);
  unsigned LO = countLeadingOnes<uint64_t>(Imm);
  unsigned Hi32 = Hi_32(Imm);
  unsigned Lo32 = Lo_32(Imm);
  SDNode *Result = nullptr;
  unsigned Shift = 0;

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  // Following patterns use 1 instruction to materialize the Imm.
  InstCnt = 1;
  // 1-1) Patterns : {zeros}{15-bit value}
  //                 {ones}{15-bit value}
  if (isInt<16>(Imm)) {
    SDValue SDImm = CurDAG->getTargetConstant(Imm, dl, MVT::i64);
    return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  }
  // 1-2) Patterns : {zeros}{15-bit value}{16 zeros}
  //                 {ones}{15-bit value}{16 zeros}
  if (TZ > 15 && (LZ > 32 || LO > 32))
    return CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                  getI32Imm((Imm >> 16) & 0xffff));

  // Following patterns use 2 instructions to materialize the Imm.
  InstCnt = 2;
  assert(LZ < 64 && "Unexpected leading zeros here.");
  // Count of ones following the leading zeros.
  unsigned FO = countLeadingOnes<uint64_t>(Imm << LZ);
  // 2-1) Patterns : {zeros}{31-bit value}
  //                 {ones}{31-bit value}
  if (isInt<32>(Imm)) {
    uint64_t ImmHi16 = (Imm >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    return CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Imm & 0xffff));
  }
  // 2-2) Patterns : {zeros}{ones}{15-bit value}{zeros}
  //                 {zeros}{15-bit value}{zeros}
  //                 {zeros}{ones}{15-bit value}
  //                 {ones}{15-bit value}{zeros}
  // We can take advantage of LI's sign-extension semantics to generate leading
  // ones, and then use RLDIC to mask off the ones in both sides after rotation.
  if ((LZ + FO + TZ) > 48) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> TZ) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // 2-3) Pattern : {zeros}{15-bit value}{ones}
  // Shift right the Imm by (48 - LZ) bits to construct a negative 16-bit value,
  // therefore we can take advantage of LI's sign-extension semantics, and then
  // mask them off after rotation.
  //
  // +--LZ--||-15-bit-||--TO--+     +-------------|--16-bit--+
  // |00000001bbbbbbbbb1111111| ->  |00000000000001bbbbbbbbb1|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //          Imm                   (Imm >> (48 - LZ) & 0xffff)
  // +----sext-----|--16-bit--+     +clear-|-----------------+
  // |11111111111111bbbbbbbbb1| ->  |00000001bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  // LI8: sext many leading zeros   RLDICL: rotate left (48 - LZ), clear left LZ
  if ((LZ + TO) > 48) {
    // Since the immediates with (LZ > 32) have been handled by previous
    // patterns, here we have (LZ <= 32) to make sure we will not shift right
    // the Imm by a negative value.
    assert(LZ <= 32 && "Unexpected shift value.");
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> (48 - LZ) & 0xffff)));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(48 - LZ), getI32Imm(LZ));
  }
  // 2-4) Patterns : {zeros}{ones}{15-bit value}{ones}
  //                 {ones}{15-bit value}{ones}
  // We can take advantage of LI's sign-extension semantics to generate leading
  // ones, and then use RLDICL to mask off the ones in left sides (if required)
  // after rotation.
  //
  // +-LZ-FO||-15-bit-||--TO--+     +-------------|--16-bit--+
  // |00011110bbbbbbbbb1111111| ->  |000000000011110bbbbbbbbb|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //          Imm                        (Imm >> TO) & 0xffff
  // +----sext-----|--16-bit--+     +LZ|---------------------+
  // |111111111111110bbbbbbbbb| ->  |00011110bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  // LI8: sext many leading zeros   RLDICL: rotate left TO, clear left LZ
  if ((LZ + FO + TO) > 48) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> TO) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // 2-5) Pattern : {32 zeros}{****}{0}{15-bit value}
  // If Hi32 is zero and the Lo16 (in Lo32) can be represented as a positive
  // 16-bit value, we can use LI for Lo16 without generating leading ones, and
  // then add the Hi16 (in Lo32).
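  // Illustrative example: Imm = 0x80001234 reaches this pattern and is
  // materialized as LI8 0x1234 followed by ORIS8 0x8000.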
  if (LZ == 32 && ((Lo32 & 0x8000) == 0)) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm(Lo32 & 0xffff));
    return CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Lo32 >> 16));
  }
  // 2-6) Patterns : {******}{49 zeros}{******}
  //                 {******}{49 ones}{******}
  // If the Imm contains 49 consecutive zeros/ones, it means that a total of 15
  // bits remain on both sides. Rotate right the Imm to construct an int<16>
  // value, use LI for the int<16> value and then use RLDICL without a mask to
  // rotate it back.
  //
  // 1) findContiguousZerosAtLeast(Imm, 49)
  // +------|--zeros-|------+     +---ones--||---15 bit--+
  // |bbbbbb0000000000aaaaaa| ->  |0000000000aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                     0     63                     0
  //
  // 2) findContiguousZerosAtLeast(~Imm, 49)
  // +------|--ones--|------+     +---ones--||---15 bit--+
  // |bbbbbb1111111111aaaaaa| ->  |1111111111aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                     0     63                     0
  if ((Shift = findContiguousZerosAtLeast(Imm, 49)) ||
      (Shift = findContiguousZerosAtLeast(~Imm, 49))) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm(RotImm & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Shift), getI32Imm(0));
  }

  // Following patterns use 3 instructions to materialize the Imm.
  InstCnt = 3;
  // 3-1) Patterns : {zeros}{ones}{31-bit value}{zeros}
  //                 {zeros}{31-bit value}{zeros}
  //                 {zeros}{ones}{31-bit value}
  //                 {ones}{31-bit value}{zeros}
  // We can take advantage of LIS's sign-extension semantics to generate leading
  // ones, add the remaining bits with ORI, and then use RLDIC to mask off the
  // ones in both sides after rotation.
  if ((LZ + FO + TZ) > 32) {
    uint64_t ImmHi16 = (Imm >> (TZ + 16)) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> TZ) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // 3-2) Pattern : {zeros}{31-bit value}{ones}
  // Shift right the Imm by (32 - LZ) bits to construct a negative 32-bit value,
  // therefore we can take advantage of LIS's sign-extension semantics, add
  // the remaining bits with ORI, and then mask them off after rotation.
  // This is similar to Pattern 2-3, please refer to the diagram there.
  if ((LZ + TO) > 32) {
    // Since the immediates with (LZ > 32) have been handled by previous
    // patterns, here we have (LZ <= 32) to make sure we will not shift right
    // the Imm by a negative value.
    assert(LZ <= 32 && "Unexpected shift value.");
    Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                    getI32Imm((Imm >> (48 - LZ)) & 0xffff));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> (32 - LZ)) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(32 - LZ), getI32Imm(LZ));
  }
  // 3-3) Patterns : {zeros}{ones}{31-bit value}{ones}
  //                 {ones}{31-bit value}{ones}
  // We can take advantage of LIS's sign-extension semantics to generate leading
  // ones, add the remaining bits with ORI, and then use RLDICL to mask off the
  // ones in left sides (if required) after rotation.
  // This is similar to Pattern 2-4, please refer to the diagram there.
  if ((LZ + FO + TO) > 32) {
    Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                    getI32Imm((Imm >> (TO + 16)) & 0xffff));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> TO) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // 3-4) Patterns : High word == Low word
  if (Hi32 == Lo32) {
    // Handle the first 32 bits.
    uint64_t ImmHi16 = (Lo32 >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(Lo32 & 0xffff));
    // Use rldimi to insert the Low word into High word.
    SDValue Ops[] = {SDValue(Result, 0), SDValue(Result, 0), getI32Imm(32),
                     getI32Imm(0)};
    return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
  }
  // 3-5) Patterns : {******}{33 zeros}{******}
  //                 {******}{33 ones}{******}
  // If the Imm contains 33 consecutive zeros/ones, it means that a total of 31
  // bits remain on both sides. Rotate right the Imm to construct an int<32>
  // value, use LIS + ORI for the int<32> value and then use RLDICL without a
  // mask to rotate it back.
  // This is similar to Pattern 2-6, please refer to the diagram there.
  if ((Shift = findContiguousZerosAtLeast(Imm, 33)) ||
      (Shift = findContiguousZerosAtLeast(~Imm, 33))) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    uint64_t ImmHi16 = (RotImm >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(RotImm & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Shift), getI32Imm(0));
  }

  InstCnt = 0;
  return nullptr;
}

// Try to select instructions to generate a 64-bit immediate using prefixed as
// well as non-prefixed instructions. The function will return the SDNode
// to materialize that constant or it will return nullptr if it does not
// find one. The variable InstCnt is set to the number of instructions that
// were selected.
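// For example, any immediate that fits in a signed 34-bit field, such as
// 0x1FFFFFFFF, is materialized below with a single PLI8.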
static SDNode *selectI64ImmDirectPrefix(SelectionDAG *CurDAG, const SDLoc &dl,
                                        uint64_t Imm, unsigned &InstCnt) {
  unsigned TZ = countTrailingZeros<uint64_t>(Imm);
  unsigned LZ = countLeadingZeros<uint64_t>(Imm);
  unsigned TO = countTrailingOnes<uint64_t>(Imm);
  unsigned FO = countLeadingOnes<uint64_t>(LZ == 64 ? 0 : (Imm << LZ));
  unsigned Hi32 = Hi_32(Imm);
  unsigned Lo32 = Lo_32(Imm);

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  auto getI64Imm = [CurDAG, dl](uint64_t Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
  };

  // Following patterns use 1 instruction to materialize Imm.
  InstCnt = 1;

  // The pli instruction can materialize up to 34 bits directly.
  // If a constant fits within 34-bits, emit the pli instruction here directly.
  if (isInt<34>(Imm))
    return CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                  CurDAG->getTargetConstant(Imm, dl, MVT::i64));

  // Require at least two instructions.
  InstCnt = 2;
  SDNode *Result = nullptr;
  // Patterns : {zeros}{ones}{33-bit value}{zeros}
  //            {zeros}{33-bit value}{zeros}
  //            {zeros}{ones}{33-bit value}
  //            {ones}{33-bit value}{zeros}
  // We can take advantage of PLI's sign-extension semantics to generate leading
  // ones, and then use RLDIC to mask off the ones on both sides after rotation.
  if ((LZ + FO + TZ) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> TZ) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // Pattern : {zeros}{33-bit value}{ones}
  // Shift right the Imm by (30 - LZ) bits to construct a negative 34-bit value,
  // therefore we can take advantage of PLI's sign-extension semantics, and then
  // mask them off after rotation.
  //
  // +--LZ--||-33-bit-||--TO--+     +-------------|--34-bit--+
  // |00000001bbbbbbbbb1111111| ->  |00000000000001bbbbbbbbb1|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //
  // +----sext-----|--34-bit--+     +clear-|-----------------+
  // |11111111111111bbbbbbbbb1| ->  |00000001bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  if ((LZ + TO) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> (30 - LZ)) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(30 - LZ), getI32Imm(LZ));
  }
  // Patterns : {zeros}{ones}{33-bit value}{ones}
  //            {ones}{33-bit value}{ones}
  // Similar to LI we can take advantage of PLI's sign-extension semantics to
  // generate leading ones, and then use RLDICL to mask off the ones in left
  // sides (if required) after rotation.
  if ((LZ + FO + TO) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> TO) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // Patterns : {******}{31 zeros}{******}
  //          : {******}{31 ones}{******}
  // If Imm contains 31 consecutive zeros/ones then the remaining bit count
  // is 33. Rotate right the Imm to construct an int<33> value, use PLI for
  // the int<33> value and then use RLDICL without a mask to rotate it back.
  //
  // +------|--ones--|------+     +---ones--||---33 bit--+
  // |bbbbbb1111111111aaaaaa| ->  |1111111111aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                     0     63                     0
  for (unsigned Shift = 0; Shift < 63; ++Shift) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    if (isInt<34>(RotImm)) {
      Result =
          CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(RotImm));
      return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Shift),
                                    getI32Imm(0));
    }
  }

  // Patterns : High word == Low word
  // This is basically a splat of a 32-bit immediate.
  if (Hi32 == Lo32) {
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Hi32));
    SDValue Ops[] = {SDValue(Result, 0), SDValue(Result, 0), getI32Imm(32),
                     getI32Imm(0)};
    return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
  }

  InstCnt = 3;
  // Catch-all
  // This pattern can form any 64-bit immediate in 3 instructions.
  SDNode *ResultHi =
      CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Hi32));
  SDNode *ResultLo =
      CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Lo32));
  SDValue Ops[] = {SDValue(ResultLo, 0), SDValue(ResultHi, 0), getI32Imm(32),
                   getI32Imm(0)};
  return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
}

static SDNode *selectI64Imm(SelectionDAG *CurDAG, const SDLoc &dl, uint64_t Imm,
                            unsigned *InstCnt = nullptr) {
  unsigned InstCntDirect = 0;
  // No more than 3 instructions are used if we can select the i64 immediate
  // directly.
  SDNode *Result = selectI64ImmDirect(CurDAG, dl, Imm, InstCntDirect);

  const PPCSubtarget &Subtarget =
      CurDAG->getMachineFunction().getSubtarget<PPCSubtarget>();

  // If we have prefixed instructions and there is a chance we can
  // materialize the constant with fewer prefixed instructions than
  // non-prefixed, try that.
  if (Subtarget.hasPrefixInstrs() && InstCntDirect != 1) {
    unsigned InstCntDirectP = 0;
    SDNode *ResultP = selectI64ImmDirectPrefix(CurDAG, dl, Imm, InstCntDirectP);
    // Use the prefix case in either of two cases:
    // 1) We have no result from the non-prefix case to use.
    // 2) The non-prefix case uses more instructions than the prefix case.
    // If the prefix and non-prefix cases use the same number of instructions
    // we will prefer the non-prefix case.
    if (ResultP && (!Result || InstCntDirectP < InstCntDirect)) {
      if (InstCnt)
        *InstCnt = InstCntDirectP;
      return ResultP;
    }
  }

  if (Result) {
    if (InstCnt)
      *InstCnt = InstCntDirect;
    return Result;
  }
  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };
  // Handle the upper 32 bit value.
  Result =
      selectI64ImmDirect(CurDAG, dl, Imm & 0xffffffff00000000, InstCntDirect);
  // Add in the last bits as required.
  if (uint32_t Hi16 = (Lo_32(Imm) >> 16) & 0xffff) {
    Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Hi16));
    ++InstCntDirect;
  }
  if (uint32_t Lo16 = Lo_32(Imm) & 0xffff) {
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(Lo16));
    ++InstCntDirect;
  }
  if (InstCnt)
    *InstCnt = InstCntDirect;
  return Result;
}

// Select a 64-bit constant.
static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
  SDLoc dl(N);

  // Get 64 bit value.
  int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
  if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
    uint64_t SextImm = SignExtend64(Imm, MinSize);
    SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
    if (isInt<16>(SextImm))
      return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  }
  return selectI64Imm(CurDAG, dl, Imm);
}

namespace {

class BitPermutationSelector {
  struct ValueBit {
    SDValue V;

    // The bit number in the value, using a convention where bit 0 is the
    // lowest-order bit.
    unsigned Idx;

    // ConstZero means a bit we need to mask off.
    // Variable is a bit that comes from an input variable.
    // VariableKnownToBeZero is also a bit that comes from an input variable,
    // but it is known to already be zero, so we do not need to mask it.
    enum Kind {
      ConstZero,
      Variable,
      VariableKnownToBeZero
    } K;

    ValueBit(SDValue V, unsigned I, Kind K = Variable)
        : V(V), Idx(I), K(K) {}
    ValueBit(Kind K = Variable)
        : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}

    bool isZero() const {
      return K == ConstZero || K == VariableKnownToBeZero;
    }

    bool hasValue() const {
      return K == Variable || K == VariableKnownToBeZero;
    }

    SDValue getValue() const {
      assert(hasValue() && "Cannot get the value of a constant bit");
      return V;
    }

    unsigned getValueBitIndex() const {
      assert(hasValue() && "Cannot get the value bit index of a constant bit");
      return Idx;
    }
  };

  // A bit group has the same underlying value and the same rotate factor.
  struct BitGroup {
    SDValue V;
    unsigned RLAmt;
    unsigned StartIdx, EndIdx;

    // This rotation amount assumes that the lower 32 bits of the quantity are
    // replicated in the high 32 bits by the rotation operator (which is done
    // by rlwinm and friends in 64-bit mode).
    bool Repl32;
    // Did converting to Repl32 == true change the rotation factor? If it did,
    // it decreased it by 32.
    bool Repl32CR;
    // Was this group coalesced after setting Repl32 to true?
1406 bool Repl32Coalesced; 1407 1408 BitGroup(SDValue V, unsigned R, unsigned S, unsigned E) 1409 : V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false), 1410 Repl32Coalesced(false) { 1411 LLVM_DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R 1412 << " [" << S << ", " << E << "]\n"); 1413 } 1414 }; 1415 1416 // Information on each (Value, RLAmt) pair (like the number of groups 1417 // associated with each) used to choose the lowering method. 1418 struct ValueRotInfo { 1419 SDValue V; 1420 unsigned RLAmt = std::numeric_limits<unsigned>::max(); 1421 unsigned NumGroups = 0; 1422 unsigned FirstGroupStartIdx = std::numeric_limits<unsigned>::max(); 1423 bool Repl32 = false; 1424 1425 ValueRotInfo() = default; 1426 1427 // For sorting (in reverse order) by NumGroups, and then by 1428 // FirstGroupStartIdx. 1429 bool operator < (const ValueRotInfo &Other) const { 1430 // We need to sort so that the non-Repl32 come first because, when we're 1431 // doing masking, the Repl32 bit groups might be subsumed into the 64-bit 1432 // masking operation. 1433 if (Repl32 < Other.Repl32) 1434 return true; 1435 else if (Repl32 > Other.Repl32) 1436 return false; 1437 else if (NumGroups > Other.NumGroups) 1438 return true; 1439 else if (NumGroups < Other.NumGroups) 1440 return false; 1441 else if (RLAmt == 0 && Other.RLAmt != 0) 1442 return true; 1443 else if (RLAmt != 0 && Other.RLAmt == 0) 1444 return false; 1445 else if (FirstGroupStartIdx < Other.FirstGroupStartIdx) 1446 return true; 1447 return false; 1448 } 1449 }; 1450 1451 using ValueBitsMemoizedValue = std::pair<bool, SmallVector<ValueBit, 64>>; 1452 using ValueBitsMemoizer = 1453 DenseMap<SDValue, std::unique_ptr<ValueBitsMemoizedValue>>; 1454 ValueBitsMemoizer Memoizer; 1455 1456 // Return a pair of bool and a SmallVector pointer to a memoization entry. 1457 // The bool is true if something interesting was deduced, otherwise if we're 1458 // providing only a generic representation of V (or something else likewise 1459 // uninteresting for instruction selection) through the SmallVector. 1460 std::pair<bool, SmallVector<ValueBit, 64> *> getValueBits(SDValue V, 1461 unsigned NumBits) { 1462 auto &ValueEntry = Memoizer[V]; 1463 if (ValueEntry) 1464 return std::make_pair(ValueEntry->first, &ValueEntry->second); 1465 ValueEntry.reset(new ValueBitsMemoizedValue()); 1466 bool &Interesting = ValueEntry->first; 1467 SmallVector<ValueBit, 64> &Bits = ValueEntry->second; 1468 Bits.resize(NumBits); 1469 1470 switch (V.getOpcode()) { 1471 default: break; 1472 case ISD::ROTL: 1473 if (isa<ConstantSDNode>(V.getOperand(1))) { 1474 unsigned RotAmt = V.getConstantOperandVal(1); 1475 1476 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1477 1478 for (unsigned i = 0; i < NumBits; ++i) 1479 Bits[i] = LHSBits[i < RotAmt ? 
i + (NumBits - RotAmt) : i - RotAmt]; 1480 1481 return std::make_pair(Interesting = true, &Bits); 1482 } 1483 break; 1484 case ISD::SHL: 1485 case PPCISD::SHL: 1486 if (isa<ConstantSDNode>(V.getOperand(1))) { 1487 unsigned ShiftAmt = V.getConstantOperandVal(1); 1488 1489 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1490 1491 for (unsigned i = ShiftAmt; i < NumBits; ++i) 1492 Bits[i] = LHSBits[i - ShiftAmt]; 1493 1494 for (unsigned i = 0; i < ShiftAmt; ++i) 1495 Bits[i] = ValueBit(ValueBit::ConstZero); 1496 1497 return std::make_pair(Interesting = true, &Bits); 1498 } 1499 break; 1500 case ISD::SRL: 1501 case PPCISD::SRL: 1502 if (isa<ConstantSDNode>(V.getOperand(1))) { 1503 unsigned ShiftAmt = V.getConstantOperandVal(1); 1504 1505 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1506 1507 for (unsigned i = 0; i < NumBits - ShiftAmt; ++i) 1508 Bits[i] = LHSBits[i + ShiftAmt]; 1509 1510 for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i) 1511 Bits[i] = ValueBit(ValueBit::ConstZero); 1512 1513 return std::make_pair(Interesting = true, &Bits); 1514 } 1515 break; 1516 case ISD::AND: 1517 if (isa<ConstantSDNode>(V.getOperand(1))) { 1518 uint64_t Mask = V.getConstantOperandVal(1); 1519 1520 const SmallVector<ValueBit, 64> *LHSBits; 1521 // Mark this as interesting, only if the LHS was also interesting. This 1522 // prevents the overall procedure from matching a single immediate 'and' 1523 // (which is non-optimal because such an and might be folded with other 1524 // things if we don't select it here). 1525 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), NumBits); 1526 1527 for (unsigned i = 0; i < NumBits; ++i) 1528 if (((Mask >> i) & 1) == 1) 1529 Bits[i] = (*LHSBits)[i]; 1530 else { 1531 // AND instruction masks this bit. If the input is already zero, 1532 // we have nothing to do here. Otherwise, make the bit ConstZero. 1533 if ((*LHSBits)[i].isZero()) 1534 Bits[i] = (*LHSBits)[i]; 1535 else 1536 Bits[i] = ValueBit(ValueBit::ConstZero); 1537 } 1538 1539 return std::make_pair(Interesting, &Bits); 1540 } 1541 break; 1542 case ISD::OR: { 1543 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1544 const auto &RHSBits = *getValueBits(V.getOperand(1), NumBits).second; 1545 1546 bool AllDisjoint = true; 1547 SDValue LastVal = SDValue(); 1548 unsigned LastIdx = 0; 1549 for (unsigned i = 0; i < NumBits; ++i) { 1550 if (LHSBits[i].isZero() && RHSBits[i].isZero()) { 1551 // If both inputs are known to be zero and one is ConstZero and 1552 // another is VariableKnownToBeZero, we can select whichever 1553 // we like. To minimize the number of bit groups, we select 1554 // VariableKnownToBeZero if this bit is the next bit of the same 1555 // input variable from the previous bit. Otherwise, we select 1556 // ConstZero. 1557 if (LHSBits[i].hasValue() && LHSBits[i].getValue() == LastVal && 1558 LHSBits[i].getValueBitIndex() == LastIdx + 1) 1559 Bits[i] = LHSBits[i]; 1560 else if (RHSBits[i].hasValue() && RHSBits[i].getValue() == LastVal && 1561 RHSBits[i].getValueBitIndex() == LastIdx + 1) 1562 Bits[i] = RHSBits[i]; 1563 else 1564 Bits[i] = ValueBit(ValueBit::ConstZero); 1565 } 1566 else if (LHSBits[i].isZero()) 1567 Bits[i] = RHSBits[i]; 1568 else if (RHSBits[i].isZero()) 1569 Bits[i] = LHSBits[i]; 1570 else { 1571 AllDisjoint = false; 1572 break; 1573 } 1574 // We remember the value and bit index of this bit. 
1575 if (Bits[i].hasValue()) { 1576 LastVal = Bits[i].getValue(); 1577 LastIdx = Bits[i].getValueBitIndex(); 1578 } 1579 else { 1580 if (LastVal) LastVal = SDValue(); 1581 LastIdx = 0; 1582 } 1583 } 1584 1585 if (!AllDisjoint) 1586 break; 1587 1588 return std::make_pair(Interesting = true, &Bits); 1589 } 1590 case ISD::ZERO_EXTEND: { 1591 // We support only the case with zero extension from i32 to i64 so far. 1592 if (V.getValueType() != MVT::i64 || 1593 V.getOperand(0).getValueType() != MVT::i32) 1594 break; 1595 1596 const SmallVector<ValueBit, 64> *LHSBits; 1597 const unsigned NumOperandBits = 32; 1598 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), 1599 NumOperandBits); 1600 1601 for (unsigned i = 0; i < NumOperandBits; ++i) 1602 Bits[i] = (*LHSBits)[i]; 1603 1604 for (unsigned i = NumOperandBits; i < NumBits; ++i) 1605 Bits[i] = ValueBit(ValueBit::ConstZero); 1606 1607 return std::make_pair(Interesting, &Bits); 1608 } 1609 case ISD::TRUNCATE: { 1610 EVT FromType = V.getOperand(0).getValueType(); 1611 EVT ToType = V.getValueType(); 1612 // We support only the case with truncate from i64 to i32. 1613 if (FromType != MVT::i64 || ToType != MVT::i32) 1614 break; 1615 const unsigned NumAllBits = FromType.getSizeInBits(); 1616 SmallVector<ValueBit, 64> *InBits; 1617 std::tie(Interesting, InBits) = getValueBits(V.getOperand(0), 1618 NumAllBits); 1619 const unsigned NumValidBits = ToType.getSizeInBits(); 1620 1621 // A 32-bit instruction cannot touch upper 32-bit part of 64-bit value. 1622 // So, we cannot include this truncate. 1623 bool UseUpper32bit = false; 1624 for (unsigned i = 0; i < NumValidBits; ++i) 1625 if ((*InBits)[i].hasValue() && (*InBits)[i].getValueBitIndex() >= 32) { 1626 UseUpper32bit = true; 1627 break; 1628 } 1629 if (UseUpper32bit) 1630 break; 1631 1632 for (unsigned i = 0; i < NumValidBits; ++i) 1633 Bits[i] = (*InBits)[i]; 1634 1635 return std::make_pair(Interesting, &Bits); 1636 } 1637 case ISD::AssertZext: { 1638 // For AssertZext, we look through the operand and 1639 // mark the bits known to be zero. 1640 const SmallVector<ValueBit, 64> *LHSBits; 1641 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), 1642 NumBits); 1643 1644 EVT FromType = cast<VTSDNode>(V.getOperand(1))->getVT(); 1645 const unsigned NumValidBits = FromType.getSizeInBits(); 1646 for (unsigned i = 0; i < NumValidBits; ++i) 1647 Bits[i] = (*LHSBits)[i]; 1648 1649 // These bits are known to be zero but the AssertZext may be from a value 1650 // that already has some constant zero bits (i.e. from a masking and). 1651 for (unsigned i = NumValidBits; i < NumBits; ++i) 1652 Bits[i] = (*LHSBits)[i].hasValue() 1653 ? ValueBit((*LHSBits)[i].getValue(), 1654 (*LHSBits)[i].getValueBitIndex(), 1655 ValueBit::VariableKnownToBeZero) 1656 : ValueBit(ValueBit::ConstZero); 1657 1658 return std::make_pair(Interesting, &Bits); 1659 } 1660 case ISD::LOAD: 1661 LoadSDNode *LD = cast<LoadSDNode>(V); 1662 if (ISD::isZEXTLoad(V.getNode()) && V.getResNo() == 0) { 1663 EVT VT = LD->getMemoryVT(); 1664 const unsigned NumValidBits = VT.getSizeInBits(); 1665 1666 for (unsigned i = 0; i < NumValidBits; ++i) 1667 Bits[i] = ValueBit(V, i); 1668 1669 // These bits are known to be zero. 1670 for (unsigned i = NumValidBits; i < NumBits; ++i) 1671 Bits[i] = ValueBit(V, i, ValueBit::VariableKnownToBeZero); 1672 1673 // Zero-extending load itself cannot be optimized. So, it is not 1674 // interesting by itself though it gives useful information. 
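        // (For example, an i8 zero-extending load feeding a permutation
        // contributes bits 0-7 as Variable and the remaining bits as
        // VariableKnownToBeZero, which a later AND or disjoint OR can exploit
        // without an extra mask.)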
1675         return std::make_pair(Interesting = false, &Bits);
1676       }
1677       break;
1678     }
1679
1680     for (unsigned i = 0; i < NumBits; ++i)
1681       Bits[i] = ValueBit(V, i);
1682
1683     return std::make_pair(Interesting = false, &Bits);
1684   }
1685
1686   // For each value (except the constant ones), compute the left-rotate amount
1687   // to get it from its original to final position.
1688   void computeRotationAmounts() {
1689     NeedMask = false;
1690     RLAmt.resize(Bits.size());
1691     for (unsigned i = 0; i < Bits.size(); ++i)
1692       if (Bits[i].hasValue()) {
1693         unsigned VBI = Bits[i].getValueBitIndex();
1694         if (i >= VBI)
1695           RLAmt[i] = i - VBI;
1696         else
1697           RLAmt[i] = Bits.size() - (VBI - i);
1698       } else if (Bits[i].isZero()) {
1699         NeedMask = true;
1700         RLAmt[i] = UINT32_MAX;
1701       } else {
1702         llvm_unreachable("Unknown value bit type");
1703       }
1704   }
1705
1706   // Collect groups of consecutive bits with the same underlying value and
1707   // rotation factor. If we're doing late masking, we ignore zeros, otherwise
1708   // they break up groups.
1709   void collectBitGroups(bool LateMask) {
1710     BitGroups.clear();
1711
1712     unsigned LastRLAmt = RLAmt[0];
1713     SDValue LastValue = Bits[0].hasValue() ? Bits[0].getValue() : SDValue();
1714     unsigned LastGroupStartIdx = 0;
1715     bool IsGroupOfZeros = !Bits[LastGroupStartIdx].hasValue();
1716     for (unsigned i = 1; i < Bits.size(); ++i) {
1717       unsigned ThisRLAmt = RLAmt[i];
1718       SDValue ThisValue = Bits[i].hasValue() ? Bits[i].getValue() : SDValue();
1719       if (LateMask && !ThisValue) {
1720         ThisValue = LastValue;
1721         ThisRLAmt = LastRLAmt;
1722         // If we're doing late masking, then the first bit group always starts
1723         // at zero (even if the first bits were zero).
1724         if (BitGroups.empty())
1725           LastGroupStartIdx = 0;
1726       }
1727
1728       // If this bit is known to be zero and the current group is a bit group
1729       // of zeros, we do not need to terminate the current bit group even if the
1730       // Value or RLAmt does not match here. Instead, we terminate this group
1731       // when the first non-zero bit appears later.
1732       if (IsGroupOfZeros && Bits[i].isZero())
1733         continue;
1734
1735       // If this bit has the same underlying value and the same rotate factor as
1736       // the last one, then they're part of the same group.
1737       if (ThisRLAmt == LastRLAmt && ThisValue == LastValue)
1738         // We cannot continue the current group if this bit is not known to
1739         // be zero in a bit group of zeros.
1740         if (!(IsGroupOfZeros && ThisValue && !Bits[i].isZero()))
1741           continue;
1742
1743       if (LastValue.getNode())
1744         BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
1745                                      i-1));
1746       LastRLAmt = ThisRLAmt;
1747       LastValue = ThisValue;
1748       LastGroupStartIdx = i;
1749       IsGroupOfZeros = !Bits[LastGroupStartIdx].hasValue();
1750     }
1751     if (LastValue.getNode())
1752       BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
1753                                    Bits.size()-1));
1754
1755     if (BitGroups.empty())
1756       return;
1757
1758     // We might be able to combine the first and last groups.
1759     if (BitGroups.size() > 1) {
1760       // If the first and last groups are the same, then remove the first group
1761       // in favor of the last group, making the ending index of the last group
1762       // equal to the ending index of the to-be-removed first group.
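      // (Illustrative example: if a rotated value occupies bits [0, 7] and
      // [24, 31] with the same RLAmt while some other value fills [8, 23],
      // the group starting at bit 0 and the group ending at bit 31 form one
      // contiguous run modulo the word size, so they are merged here into a
      // single wrap-around group.)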
1763       if (BitGroups[0].StartIdx == 0 &&
1764           BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
1765           BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
1766           BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
1767         LLVM_DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
1768         BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
1769         BitGroups.erase(BitGroups.begin());
1770       }
1771     }
1772   }
1773
1774   // Take all (SDValue, RLAmt) pairs and sort them by the number of groups
1775   // associated with each. If the number of groups is the same, we prefer a group
1776   // which does not require a rotate, i.e. RLAmt is 0, to avoid the first rotate
1777   // instruction. If there is a degeneracy, pick the one that occurs
1778   // first (in the final value).
1779   void collectValueRotInfo() {
1780     ValueRots.clear();
1781
1782     for (auto &BG : BitGroups) {
1783       unsigned RLAmtKey = BG.RLAmt + (BG.Repl32 ? 64 : 0);
1784       ValueRotInfo &VRI = ValueRots[std::make_pair(BG.V, RLAmtKey)];
1785       VRI.V = BG.V;
1786       VRI.RLAmt = BG.RLAmt;
1787       VRI.Repl32 = BG.Repl32;
1788       VRI.NumGroups += 1;
1789       VRI.FirstGroupStartIdx = std::min(VRI.FirstGroupStartIdx, BG.StartIdx);
1790     }
1791
1792     // Now that we've collected the various ValueRotInfo instances, we need to
1793     // sort them.
1794     ValueRotsVec.clear();
1795     for (auto &I : ValueRots) {
1796       ValueRotsVec.push_back(I.second);
1797     }
1798     llvm::sort(ValueRotsVec);
1799   }
1800
1801   // In 64-bit mode, rlwinm and friends have a rotation operator that
1802   // replicates the low-order 32 bits into the high-order 32 bits. The mask
1803   // indices of these instructions can only be in the lower 32 bits, so they
1804   // can only represent some 64-bit bit groups. However, when they can be used,
1805   // the 32-bit replication can be used to represent, as a single bit group,
1806   // otherwise separate bit groups. We'll convert to replicated-32-bit bit
1807   // groups when possible; the affected bit groups are updated in place.
1808   //
1809   void assignRepl32BitGroups() {
1810     // If we have bits like this:
1811     //
1812     // Indices:    15 14 13 12 11 10 9 8  7  6  5  4  3  2  1  0
1813     // V bits: ...  7  6  5  4  3  2 1 0 31 30 29 28 27 26 25 24
1814     // Groups:    |      RLAmt = 8     |       RLAmt = 40      |
1815     //
1816     // But, making use of a 32-bit operation that replicates the low-order 32
1817     // bits into the high-order 32 bits, this can be one bit group with an RLAmt
1818     // of 8.
1819
1820     auto IsAllLow32 = [this](BitGroup & BG) {
1821       if (BG.StartIdx <= BG.EndIdx) {
1822         for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) {
1823           if (!Bits[i].hasValue())
1824             continue;
1825           if (Bits[i].getValueBitIndex() >= 32)
1826             return false;
1827         }
1828       } else {
1829         for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) {
1830           if (!Bits[i].hasValue())
1831             continue;
1832           if (Bits[i].getValueBitIndex() >= 32)
1833             return false;
1834         }
1835         for (unsigned i = 0; i <= BG.EndIdx; ++i) {
1836           if (!Bits[i].hasValue())
1837             continue;
1838           if (Bits[i].getValueBitIndex() >= 32)
1839             return false;
1840         }
1841       }
1842
1843       return true;
1844     };
1845
1846     for (auto &BG : BitGroups) {
1847       // If this bit group has RLAmt of 0 and will not be merged with
1848       // another bit group, we don't benefit from Repl32. We don't mark
1849       // such a group to give more freedom for later instruction selection.
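      // (For instance, another group of the same value with RLAmt of 32
      // becomes RLAmt 0 once converted to Repl32 form, after which the two
      // groups may coalesce; that is the case the PotentiallyMerged check
      // below looks for.)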
1850 if (BG.RLAmt == 0) { 1851 auto PotentiallyMerged = [this](BitGroup & BG) { 1852 for (auto &BG2 : BitGroups) 1853 if (&BG != &BG2 && BG.V == BG2.V && 1854 (BG2.RLAmt == 0 || BG2.RLAmt == 32)) 1855 return true; 1856 return false; 1857 }; 1858 if (!PotentiallyMerged(BG)) 1859 continue; 1860 } 1861 if (BG.StartIdx < 32 && BG.EndIdx < 32) { 1862 if (IsAllLow32(BG)) { 1863 if (BG.RLAmt >= 32) { 1864 BG.RLAmt -= 32; 1865 BG.Repl32CR = true; 1866 } 1867 1868 BG.Repl32 = true; 1869 1870 LLVM_DEBUG(dbgs() << "\t32-bit replicated bit group for " 1871 << BG.V.getNode() << " RLAmt = " << BG.RLAmt << " [" 1872 << BG.StartIdx << ", " << BG.EndIdx << "]\n"); 1873 } 1874 } 1875 } 1876 1877 // Now walk through the bit groups, consolidating where possible. 1878 for (auto I = BitGroups.begin(); I != BitGroups.end();) { 1879 // We might want to remove this bit group by merging it with the previous 1880 // group (which might be the ending group). 1881 auto IP = (I == BitGroups.begin()) ? 1882 std::prev(BitGroups.end()) : std::prev(I); 1883 if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt && 1884 I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) { 1885 1886 LLVM_DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for " 1887 << I->V.getNode() << " RLAmt = " << I->RLAmt << " [" 1888 << I->StartIdx << ", " << I->EndIdx 1889 << "] with group with range [" << IP->StartIdx << ", " 1890 << IP->EndIdx << "]\n"); 1891 1892 IP->EndIdx = I->EndIdx; 1893 IP->Repl32CR = IP->Repl32CR || I->Repl32CR; 1894 IP->Repl32Coalesced = true; 1895 I = BitGroups.erase(I); 1896 continue; 1897 } else { 1898 // There is a special case worth handling: If there is a single group 1899 // covering the entire upper 32 bits, and it can be merged with both 1900 // the next and previous groups (which might be the same group), then 1901 // do so. If it is the same group (so there will be only one group in 1902 // total), then we need to reverse the order of the range so that it 1903 // covers the entire 64 bits. 1904 if (I->StartIdx == 32 && I->EndIdx == 63) { 1905 assert(std::next(I) == BitGroups.end() && 1906 "bit group ends at index 63 but there is another?"); 1907 auto IN = BitGroups.begin(); 1908 1909 if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V && 1910 (I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt && 1911 IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP && 1912 IsAllLow32(*I)) { 1913 1914 LLVM_DEBUG(dbgs() << "\tcombining bit group for " << I->V.getNode() 1915 << " RLAmt = " << I->RLAmt << " [" << I->StartIdx 1916 << ", " << I->EndIdx 1917 << "] with 32-bit replicated groups with ranges [" 1918 << IP->StartIdx << ", " << IP->EndIdx << "] and [" 1919 << IN->StartIdx << ", " << IN->EndIdx << "]\n"); 1920 1921 if (IP == IN) { 1922 // There is only one other group; change it to cover the whole 1923 // range (backward, so that it can still be Repl32 but cover the 1924 // whole 64-bit range). 1925 IP->StartIdx = 31; 1926 IP->EndIdx = 30; 1927 IP->Repl32CR = IP->Repl32CR || I->RLAmt >= 32; 1928 IP->Repl32Coalesced = true; 1929 I = BitGroups.erase(I); 1930 } else { 1931 // There are two separate groups, one before this group and one 1932 // after us (at the beginning). We're going to remove this group, 1933 // but also the group at the very beginning. 
1934 IP->EndIdx = IN->EndIdx; 1935 IP->Repl32CR = IP->Repl32CR || IN->Repl32CR || I->RLAmt >= 32; 1936 IP->Repl32Coalesced = true; 1937 I = BitGroups.erase(I); 1938 BitGroups.erase(BitGroups.begin()); 1939 } 1940 1941 // This must be the last group in the vector (and we might have 1942 // just invalidated the iterator above), so break here. 1943 break; 1944 } 1945 } 1946 } 1947 1948 ++I; 1949 } 1950 } 1951 1952 SDValue getI32Imm(unsigned Imm, const SDLoc &dl) { 1953 return CurDAG->getTargetConstant(Imm, dl, MVT::i32); 1954 } 1955 1956 uint64_t getZerosMask() { 1957 uint64_t Mask = 0; 1958 for (unsigned i = 0; i < Bits.size(); ++i) { 1959 if (Bits[i].hasValue()) 1960 continue; 1961 Mask |= (UINT64_C(1) << i); 1962 } 1963 1964 return ~Mask; 1965 } 1966 1967 // This method extends an input value to 64 bit if input is 32-bit integer. 1968 // While selecting instructions in BitPermutationSelector in 64-bit mode, 1969 // an input value can be a 32-bit integer if a ZERO_EXTEND node is included. 1970 // In such case, we extend it to 64 bit to be consistent with other values. 1971 SDValue ExtendToInt64(SDValue V, const SDLoc &dl) { 1972 if (V.getValueSizeInBits() == 64) 1973 return V; 1974 1975 assert(V.getValueSizeInBits() == 32); 1976 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 1977 SDValue ImDef = SDValue(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, 1978 MVT::i64), 0); 1979 SDValue ExtVal = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, 1980 MVT::i64, ImDef, V, 1981 SubRegIdx), 0); 1982 return ExtVal; 1983 } 1984 1985 SDValue TruncateToInt32(SDValue V, const SDLoc &dl) { 1986 if (V.getValueSizeInBits() == 32) 1987 return V; 1988 1989 assert(V.getValueSizeInBits() == 64); 1990 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 1991 SDValue SubVal = SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl, 1992 MVT::i32, V, SubRegIdx), 0); 1993 return SubVal; 1994 } 1995 1996 // Depending on the number of groups for a particular value, it might be 1997 // better to rotate, mask explicitly (using andi/andis), and then or the 1998 // result. Select this part of the result first. 1999 void SelectAndParts32(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) { 2000 if (BPermRewriterNoMasking) 2001 return; 2002 2003 for (ValueRotInfo &VRI : ValueRotsVec) { 2004 unsigned Mask = 0; 2005 for (unsigned i = 0; i < Bits.size(); ++i) { 2006 if (!Bits[i].hasValue() || Bits[i].getValue() != VRI.V) 2007 continue; 2008 if (RLAmt[i] != VRI.RLAmt) 2009 continue; 2010 Mask |= (1u << i); 2011 } 2012 2013 // Compute the masks for andi/andis that would be necessary. 2014 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; 2015 assert((ANDIMask != 0 || ANDISMask != 0) && 2016 "No set bits in mask for value bit groups"); 2017 bool NeedsRotate = VRI.RLAmt != 0; 2018 2019 // We're trying to minimize the number of instructions. If we have one 2020 // group, using one of andi/andis can break even. If we have three 2021 // groups, we can use both andi and andis and break even (to use both 2022 // andi and andis we also need to or the results together). We need four 2023 // groups if we also need to rotate. To use andi/andis we need to do more 2024 // than break even because rotate-and-mask instructions tend to be easier 2025 // to schedule. 2026 2027 // FIXME: We've biased here against using andi/andis, which is right for 2028 // POWER cores, but not optimal everywhere. 
For example, on the A2, 2029 // andi/andis have single-cycle latency whereas the rotate-and-mask 2030 // instructions take two cycles, and it would be better to bias toward 2031 // andi/andis in break-even cases. 2032 2033 unsigned NumAndInsts = (unsigned) NeedsRotate + 2034 (unsigned) (ANDIMask != 0) + 2035 (unsigned) (ANDISMask != 0) + 2036 (unsigned) (ANDIMask != 0 && ANDISMask != 0) + 2037 (unsigned) (bool) Res; 2038 2039 LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() 2040 << " RL: " << VRI.RLAmt << ":" 2041 << "\n\t\t\tisel using masking: " << NumAndInsts 2042 << " using rotates: " << VRI.NumGroups << "\n"); 2043 2044 if (NumAndInsts >= VRI.NumGroups) 2045 continue; 2046 2047 LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n"); 2048 2049 if (InstCnt) *InstCnt += NumAndInsts; 2050 2051 SDValue VRot; 2052 if (VRI.RLAmt) { 2053 SDValue Ops[] = 2054 { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl), 2055 getI32Imm(0, dl), getI32Imm(31, dl) }; 2056 VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 2057 Ops), 0); 2058 } else { 2059 VRot = TruncateToInt32(VRI.V, dl); 2060 } 2061 2062 SDValue ANDIVal, ANDISVal; 2063 if (ANDIMask != 0) 2064 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, 2065 VRot, getI32Imm(ANDIMask, dl)), 2066 0); 2067 if (ANDISMask != 0) 2068 ANDISVal = 2069 SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, VRot, 2070 getI32Imm(ANDISMask, dl)), 2071 0); 2072 2073 SDValue TotalVal; 2074 if (!ANDIVal) 2075 TotalVal = ANDISVal; 2076 else if (!ANDISVal) 2077 TotalVal = ANDIVal; 2078 else 2079 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2080 ANDIVal, ANDISVal), 0); 2081 2082 if (!Res) 2083 Res = TotalVal; 2084 else 2085 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2086 Res, TotalVal), 0); 2087 2088 // Now, remove all groups with this underlying value and rotation 2089 // factor. 2090 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2091 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; 2092 }); 2093 } 2094 } 2095 2096 // Instruction selection for the 32-bit case. 2097 SDNode *Select32(SDNode *N, bool LateMask, unsigned *InstCnt) { 2098 SDLoc dl(N); 2099 SDValue Res; 2100 2101 if (InstCnt) *InstCnt = 0; 2102 2103 // Take care of cases that should use andi/andis first. 2104 SelectAndParts32(dl, Res, InstCnt); 2105 2106 // If we've not yet selected a 'starting' instruction, and we have no zeros 2107 // to fill in, select the (Value, RLAmt) with the highest priority (largest 2108 // number of groups), and start with this rotated value. 2109 if ((!NeedMask || LateMask) && !Res) { 2110 ValueRotInfo &VRI = ValueRotsVec[0]; 2111 if (VRI.RLAmt) { 2112 if (InstCnt) *InstCnt += 1; 2113 SDValue Ops[] = 2114 { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl), 2115 getI32Imm(0, dl), getI32Imm(31, dl) }; 2116 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 2117 0); 2118 } else { 2119 Res = TruncateToInt32(VRI.V, dl); 2120 } 2121 2122 // Now, remove all groups with this underlying value and rotation factor. 2123 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2124 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; 2125 }); 2126 } 2127 2128 if (InstCnt) *InstCnt += BitGroups.size(); 2129 2130 // Insert the other groups (one at a time). 
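    // (For reference, an i32 byte swap typically comes out of this as one
    // RLWINM for the first (Value, RLAmt) pair plus an RLWIMI per remaining
    // group, the usual three-instruction bswap sequence.)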
2131 for (auto &BG : BitGroups) { 2132 if (!Res) { 2133 SDValue Ops[] = 2134 { TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl), 2135 getI32Imm(Bits.size() - BG.EndIdx - 1, dl), 2136 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; 2137 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 2138 } else { 2139 SDValue Ops[] = 2140 { Res, TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl), 2141 getI32Imm(Bits.size() - BG.EndIdx - 1, dl), 2142 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; 2143 Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0); 2144 } 2145 } 2146 2147 if (LateMask) { 2148 unsigned Mask = (unsigned) getZerosMask(); 2149 2150 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; 2151 assert((ANDIMask != 0 || ANDISMask != 0) && 2152 "No set bits in zeros mask?"); 2153 2154 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + 2155 (unsigned) (ANDISMask != 0) + 2156 (unsigned) (ANDIMask != 0 && ANDISMask != 0); 2157 2158 SDValue ANDIVal, ANDISVal; 2159 if (ANDIMask != 0) 2160 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, 2161 Res, getI32Imm(ANDIMask, dl)), 2162 0); 2163 if (ANDISMask != 0) 2164 ANDISVal = 2165 SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, Res, 2166 getI32Imm(ANDISMask, dl)), 2167 0); 2168 2169 if (!ANDIVal) 2170 Res = ANDISVal; 2171 else if (!ANDISVal) 2172 Res = ANDIVal; 2173 else 2174 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2175 ANDIVal, ANDISVal), 0); 2176 } 2177 2178 return Res.getNode(); 2179 } 2180 2181 unsigned SelectRotMask64Count(unsigned RLAmt, bool Repl32, 2182 unsigned MaskStart, unsigned MaskEnd, 2183 bool IsIns) { 2184 // In the notation used by the instructions, 'start' and 'end' are reversed 2185 // because bits are counted from high to low order. 2186 unsigned InstMaskStart = 64 - MaskEnd - 1, 2187 InstMaskEnd = 64 - MaskStart - 1; 2188 2189 if (Repl32) 2190 return 1; 2191 2192 if ((!IsIns && (InstMaskEnd == 63 || InstMaskStart == 0)) || 2193 InstMaskEnd == 63 - RLAmt) 2194 return 1; 2195 2196 return 2; 2197 } 2198 2199 // For 64-bit values, not all combinations of rotates and masks are 2200 // available. Produce one if it is available. 2201 SDValue SelectRotMask64(SDValue V, const SDLoc &dl, unsigned RLAmt, 2202 bool Repl32, unsigned MaskStart, unsigned MaskEnd, 2203 unsigned *InstCnt = nullptr) { 2204 // In the notation used by the instructions, 'start' and 'end' are reversed 2205 // because bits are counted from high to low order. 2206 unsigned InstMaskStart = 64 - MaskEnd - 1, 2207 InstMaskEnd = 64 - MaskStart - 1; 2208 2209 if (InstCnt) *InstCnt += 1; 2210 2211 if (Repl32) { 2212 // This rotation amount assumes that the lower 32 bits of the quantity 2213 // are replicated in the high 32 bits by the rotation operator (which is 2214 // done by rlwinm and friends). 
2215 assert(InstMaskStart >= 32 && "Mask cannot start out of range"); 2216 assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); 2217 SDValue Ops[] = 2218 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2219 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; 2220 return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64, 2221 Ops), 0); 2222 } 2223 2224 if (InstMaskEnd == 63) { 2225 SDValue Ops[] = 2226 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2227 getI32Imm(InstMaskStart, dl) }; 2228 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0); 2229 } 2230 2231 if (InstMaskStart == 0) { 2232 SDValue Ops[] = 2233 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2234 getI32Imm(InstMaskEnd, dl) }; 2235 return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0); 2236 } 2237 2238 if (InstMaskEnd == 63 - RLAmt) { 2239 SDValue Ops[] = 2240 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2241 getI32Imm(InstMaskStart, dl) }; 2242 return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0); 2243 } 2244 2245 // We cannot do this with a single instruction, so we'll use two. The 2246 // problem is that we're not free to choose both a rotation amount and mask 2247 // start and end independently. We can choose an arbitrary mask start and 2248 // end, but then the rotation amount is fixed. Rotation, however, can be 2249 // inverted, and so by applying an "inverse" rotation first, we can get the 2250 // desired result. 2251 if (InstCnt) *InstCnt += 1; 2252 2253 // The rotation mask for the second instruction must be MaskStart. 2254 unsigned RLAmt2 = MaskStart; 2255 // The first instruction must rotate V so that the overall rotation amount 2256 // is RLAmt. 2257 unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64; 2258 if (RLAmt1) 2259 V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63); 2260 return SelectRotMask64(V, dl, RLAmt2, false, MaskStart, MaskEnd); 2261 } 2262 2263 // For 64-bit values, not all combinations of rotates and masks are 2264 // available. Produce a rotate-mask-and-insert if one is available. 2265 SDValue SelectRotMaskIns64(SDValue Base, SDValue V, const SDLoc &dl, 2266 unsigned RLAmt, bool Repl32, unsigned MaskStart, 2267 unsigned MaskEnd, unsigned *InstCnt = nullptr) { 2268 // In the notation used by the instructions, 'start' and 'end' are reversed 2269 // because bits are counted from high to low order. 2270 unsigned InstMaskStart = 64 - MaskEnd - 1, 2271 InstMaskEnd = 64 - MaskStart - 1; 2272 2273 if (InstCnt) *InstCnt += 1; 2274 2275 if (Repl32) { 2276 // This rotation amount assumes that the lower 32 bits of the quantity 2277 // are replicated in the high 32 bits by the rotation operator (which is 2278 // done by rlwinm and friends). 2279 assert(InstMaskStart >= 32 && "Mask cannot start out of range"); 2280 assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); 2281 SDValue Ops[] = 2282 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2283 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; 2284 return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64, 2285 Ops), 0); 2286 } 2287 2288 if (InstMaskEnd == 63 - RLAmt) { 2289 SDValue Ops[] = 2290 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2291 getI32Imm(InstMaskStart, dl) }; 2292 return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0); 2293 } 2294 2295 // We cannot do this with a single instruction, so we'll use two. 
The 2296 // problem is that we're not free to choose both a rotation amount and mask 2297 // start and end independently. We can choose an arbitrary mask start and 2298 // end, but then the rotation amount is fixed. Rotation, however, can be 2299 // inverted, and so by applying an "inverse" rotation first, we can get the 2300 // desired result. 2301 if (InstCnt) *InstCnt += 1; 2302 2303 // The rotation mask for the second instruction must be MaskStart. 2304 unsigned RLAmt2 = MaskStart; 2305 // The first instruction must rotate V so that the overall rotation amount 2306 // is RLAmt. 2307 unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64; 2308 if (RLAmt1) 2309 V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63); 2310 return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd); 2311 } 2312 2313 void SelectAndParts64(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) { 2314 if (BPermRewriterNoMasking) 2315 return; 2316 2317 // The idea here is the same as in the 32-bit version, but with additional 2318 // complications from the fact that Repl32 might be true. Because we 2319 // aggressively convert bit groups to Repl32 form (which, for small 2320 // rotation factors, involves no other change), and then coalesce, it might 2321 // be the case that a single 64-bit masking operation could handle both 2322 // some Repl32 groups and some non-Repl32 groups. If converting to Repl32 2323 // form allowed coalescing, then we must use a 32-bit rotaton in order to 2324 // completely capture the new combined bit group. 2325 2326 for (ValueRotInfo &VRI : ValueRotsVec) { 2327 uint64_t Mask = 0; 2328 2329 // We need to add to the mask all bits from the associated bit groups. 2330 // If Repl32 is false, we need to add bits from bit groups that have 2331 // Repl32 true, but are trivially convertable to Repl32 false. Such a 2332 // group is trivially convertable if it overlaps only with the lower 32 2333 // bits, and the group has not been coalesced. 2334 auto MatchingBG = [VRI](const BitGroup &BG) { 2335 if (VRI.V != BG.V) 2336 return false; 2337 2338 unsigned EffRLAmt = BG.RLAmt; 2339 if (!VRI.Repl32 && BG.Repl32) { 2340 if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx && 2341 !BG.Repl32Coalesced) { 2342 if (BG.Repl32CR) 2343 EffRLAmt += 32; 2344 } else { 2345 return false; 2346 } 2347 } else if (VRI.Repl32 != BG.Repl32) { 2348 return false; 2349 } 2350 2351 return VRI.RLAmt == EffRLAmt; 2352 }; 2353 2354 for (auto &BG : BitGroups) { 2355 if (!MatchingBG(BG)) 2356 continue; 2357 2358 if (BG.StartIdx <= BG.EndIdx) { 2359 for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) 2360 Mask |= (UINT64_C(1) << i); 2361 } else { 2362 for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) 2363 Mask |= (UINT64_C(1) << i); 2364 for (unsigned i = 0; i <= BG.EndIdx; ++i) 2365 Mask |= (UINT64_C(1) << i); 2366 } 2367 } 2368 2369 // We can use the 32-bit andi/andis technique if the mask does not 2370 // require any higher-order bits. This can save an instruction compared 2371 // to always using the general 64-bit technique. 2372 bool Use32BitInsts = isUInt<32>(Mask); 2373 // Compute the masks for andi/andis that would be necessary. 
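      // (Worked example, for illustration only: Mask = 0x00000000FFFF00FF fits
      // in 32 bits, so Use32BitInsts is true; ANDIMask = 0x00FF and
      // ANDISMask = 0xFFFF, which would be counted below as an andi., an
      // andis. and an OR to combine them.)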
2374       unsigned ANDIMask = (Mask & UINT16_MAX),
2375                ANDISMask = (Mask >> 16) & UINT16_MAX;
2376
2377       bool NeedsRotate = VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask));
2378
2379       unsigned NumAndInsts = (unsigned) NeedsRotate +
2380                              (unsigned) (bool) Res;
2381       unsigned NumOfSelectInsts = 0;
2382       selectI64Imm(CurDAG, dl, Mask, &NumOfSelectInsts);
2383       assert(NumOfSelectInsts > 0 && "Failed to select an i64 constant.");
2384       if (Use32BitInsts)
2385         NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) +
2386                        (unsigned) (ANDIMask != 0 && ANDISMask != 0);
2387       else
2388         NumAndInsts += NumOfSelectInsts + /* and */ 1;
2389
2390       unsigned NumRLInsts = 0;
2391       bool FirstBG = true;
2392       bool MoreBG = false;
2393       for (auto &BG : BitGroups) {
2394         if (!MatchingBG(BG)) {
2395           MoreBG = true;
2396           continue;
2397         }
2398         NumRLInsts +=
2399           SelectRotMask64Count(BG.RLAmt, BG.Repl32, BG.StartIdx, BG.EndIdx,
2400                                !FirstBG);
2401         FirstBG = false;
2402       }
2403
2404       LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode()
2405                         << " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":")
2406                         << "\n\t\t\tisel using masking: " << NumAndInsts
2407                         << " using rotates: " << NumRLInsts << "\n");
2408
2409       // When we'd use andi/andis, we bias toward using the rotates (andi only
2410       // has a record form, and is cracked on POWER cores). However, when using
2411       // general 64-bit constant formation, bias toward the constant form,
2412       // because that exposes more opportunities for CSE.
2413       if (NumAndInsts > NumRLInsts)
2414         continue;
2415       // When merging multiple bit groups, an OR instruction is used.
2416       // But when a rotate is used, rldimi can insert the rotated value into
2417       // any register, so the OR instruction can be avoided.
2418       if ((Use32BitInsts || MoreBG) && NumAndInsts == NumRLInsts)
2419         continue;
2420
2421       LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n");
2422
2423       if (InstCnt) *InstCnt += NumAndInsts;
2424
2425       SDValue VRot;
2426       // We actually need to generate a rotation if we have a non-zero rotation
2427       // factor or, in the Repl32 case, if we care about any of the
2428       // higher-order replicated bits. In the latter case, we generate a mask
2429       // backward so that it actually includes the entire 64 bits.
2430       if (VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask)))
2431         VRot = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
2432                                VRI.Repl32 ? 31 : 0, VRI.Repl32 ?
30 : 63); 2433 else 2434 VRot = VRI.V; 2435 2436 SDValue TotalVal; 2437 if (Use32BitInsts) { 2438 assert((ANDIMask != 0 || ANDISMask != 0) && 2439 "No set bits in mask when using 32-bit ands for 64-bit value"); 2440 2441 SDValue ANDIVal, ANDISVal; 2442 if (ANDIMask != 0) 2443 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, 2444 ExtendToInt64(VRot, dl), 2445 getI32Imm(ANDIMask, dl)), 2446 0); 2447 if (ANDISMask != 0) 2448 ANDISVal = 2449 SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, 2450 ExtendToInt64(VRot, dl), 2451 getI32Imm(ANDISMask, dl)), 2452 0); 2453 2454 if (!ANDIVal) 2455 TotalVal = ANDISVal; 2456 else if (!ANDISVal) 2457 TotalVal = ANDIVal; 2458 else 2459 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2460 ExtendToInt64(ANDIVal, dl), ANDISVal), 0); 2461 } else { 2462 TotalVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0); 2463 TotalVal = 2464 SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, 2465 ExtendToInt64(VRot, dl), TotalVal), 2466 0); 2467 } 2468 2469 if (!Res) 2470 Res = TotalVal; 2471 else 2472 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2473 ExtendToInt64(Res, dl), TotalVal), 2474 0); 2475 2476 // Now, remove all groups with this underlying value and rotation 2477 // factor. 2478 eraseMatchingBitGroups(MatchingBG); 2479 } 2480 } 2481 2482 // Instruction selection for the 64-bit case. 2483 SDNode *Select64(SDNode *N, bool LateMask, unsigned *InstCnt) { 2484 SDLoc dl(N); 2485 SDValue Res; 2486 2487 if (InstCnt) *InstCnt = 0; 2488 2489 // Take care of cases that should use andi/andis first. 2490 SelectAndParts64(dl, Res, InstCnt); 2491 2492 // If we've not yet selected a 'starting' instruction, and we have no zeros 2493 // to fill in, select the (Value, RLAmt) with the highest priority (largest 2494 // number of groups), and start with this rotated value. 2495 if ((!NeedMask || LateMask) && !Res) { 2496 // If we have both Repl32 groups and non-Repl32 groups, the non-Repl32 2497 // groups will come first, and so the VRI representing the largest number 2498 // of groups might not be first (it might be the first Repl32 groups). 2499 unsigned MaxGroupsIdx = 0; 2500 if (!ValueRotsVec[0].Repl32) { 2501 for (unsigned i = 0, ie = ValueRotsVec.size(); i < ie; ++i) 2502 if (ValueRotsVec[i].Repl32) { 2503 if (ValueRotsVec[i].NumGroups > ValueRotsVec[0].NumGroups) 2504 MaxGroupsIdx = i; 2505 break; 2506 } 2507 } 2508 2509 ValueRotInfo &VRI = ValueRotsVec[MaxGroupsIdx]; 2510 bool NeedsRotate = false; 2511 if (VRI.RLAmt) { 2512 NeedsRotate = true; 2513 } else if (VRI.Repl32) { 2514 for (auto &BG : BitGroups) { 2515 if (BG.V != VRI.V || BG.RLAmt != VRI.RLAmt || 2516 BG.Repl32 != VRI.Repl32) 2517 continue; 2518 2519 // We don't need a rotate if the bit group is confined to the lower 2520 // 32 bits. 2521 if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx < BG.EndIdx) 2522 continue; 2523 2524 NeedsRotate = true; 2525 break; 2526 } 2527 } 2528 2529 if (NeedsRotate) 2530 Res = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32, 2531 VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63, 2532 InstCnt); 2533 else 2534 Res = VRI.V; 2535 2536 // Now, remove all groups with this underlying value and rotation factor. 
2537 if (Res) 2538 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2539 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt && 2540 BG.Repl32 == VRI.Repl32; 2541 }); 2542 } 2543 2544 // Because 64-bit rotates are more flexible than inserts, we might have a 2545 // preference regarding which one we do first (to save one instruction). 2546 if (!Res) 2547 for (auto I = BitGroups.begin(), IE = BitGroups.end(); I != IE; ++I) { 2548 if (SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, 2549 false) < 2550 SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, 2551 true)) { 2552 if (I != BitGroups.begin()) { 2553 BitGroup BG = *I; 2554 BitGroups.erase(I); 2555 BitGroups.insert(BitGroups.begin(), BG); 2556 } 2557 2558 break; 2559 } 2560 } 2561 2562 // Insert the other groups (one at a time). 2563 for (auto &BG : BitGroups) { 2564 if (!Res) 2565 Res = SelectRotMask64(BG.V, dl, BG.RLAmt, BG.Repl32, BG.StartIdx, 2566 BG.EndIdx, InstCnt); 2567 else 2568 Res = SelectRotMaskIns64(Res, BG.V, dl, BG.RLAmt, BG.Repl32, 2569 BG.StartIdx, BG.EndIdx, InstCnt); 2570 } 2571 2572 if (LateMask) { 2573 uint64_t Mask = getZerosMask(); 2574 2575 // We can use the 32-bit andi/andis technique if the mask does not 2576 // require any higher-order bits. This can save an instruction compared 2577 // to always using the general 64-bit technique. 2578 bool Use32BitInsts = isUInt<32>(Mask); 2579 // Compute the masks for andi/andis that would be necessary. 2580 unsigned ANDIMask = (Mask & UINT16_MAX), 2581 ANDISMask = (Mask >> 16) & UINT16_MAX; 2582 2583 if (Use32BitInsts) { 2584 assert((ANDIMask != 0 || ANDISMask != 0) && 2585 "No set bits in mask when using 32-bit ands for 64-bit value"); 2586 2587 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + 2588 (unsigned) (ANDISMask != 0) + 2589 (unsigned) (ANDIMask != 0 && ANDISMask != 0); 2590 2591 SDValue ANDIVal, ANDISVal; 2592 if (ANDIMask != 0) 2593 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, 2594 ExtendToInt64(Res, dl), 2595 getI32Imm(ANDIMask, dl)), 2596 0); 2597 if (ANDISMask != 0) 2598 ANDISVal = 2599 SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, 2600 ExtendToInt64(Res, dl), 2601 getI32Imm(ANDISMask, dl)), 2602 0); 2603 2604 if (!ANDIVal) 2605 Res = ANDISVal; 2606 else if (!ANDISVal) 2607 Res = ANDIVal; 2608 else 2609 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2610 ExtendToInt64(ANDIVal, dl), ANDISVal), 0); 2611 } else { 2612 unsigned NumOfSelectInsts = 0; 2613 SDValue MaskVal = 2614 SDValue(selectI64Imm(CurDAG, dl, Mask, &NumOfSelectInsts), 0); 2615 Res = SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, 2616 ExtendToInt64(Res, dl), MaskVal), 2617 0); 2618 if (InstCnt) 2619 *InstCnt += NumOfSelectInsts + /* and */ 1; 2620 } 2621 } 2622 2623 return Res.getNode(); 2624 } 2625 2626 SDNode *Select(SDNode *N, bool LateMask, unsigned *InstCnt = nullptr) { 2627 // Fill in BitGroups. 2628 collectBitGroups(LateMask); 2629 if (BitGroups.empty()) 2630 return nullptr; 2631 2632 // For 64-bit values, figure out when we can use 32-bit instructions. 2633 if (Bits.size() == 64) 2634 assignRepl32BitGroups(); 2635 2636 // Fill in ValueRotsVec. 
2637 collectValueRotInfo(); 2638 2639 if (Bits.size() == 32) { 2640 return Select32(N, LateMask, InstCnt); 2641 } else { 2642 assert(Bits.size() == 64 && "Not 64 bits here?"); 2643 return Select64(N, LateMask, InstCnt); 2644 } 2645 2646 return nullptr; 2647 } 2648 2649 void eraseMatchingBitGroups(function_ref<bool(const BitGroup &)> F) { 2650 erase_if(BitGroups, F); 2651 } 2652 2653 SmallVector<ValueBit, 64> Bits; 2654 2655 bool NeedMask = false; 2656 SmallVector<unsigned, 64> RLAmt; 2657 2658 SmallVector<BitGroup, 16> BitGroups; 2659 2660 DenseMap<std::pair<SDValue, unsigned>, ValueRotInfo> ValueRots; 2661 SmallVector<ValueRotInfo, 16> ValueRotsVec; 2662 2663 SelectionDAG *CurDAG = nullptr; 2664 2665 public: 2666 BitPermutationSelector(SelectionDAG *DAG) 2667 : CurDAG(DAG) {} 2668 2669 // Here we try to match complex bit permutations into a set of 2670 // rotate-and-shift/shift/and/or instructions, using a set of heuristics 2671 // known to produce optimal code for common cases (like i32 byte swapping). 2672 SDNode *Select(SDNode *N) { 2673 Memoizer.clear(); 2674 auto Result = 2675 getValueBits(SDValue(N, 0), N->getValueType(0).getSizeInBits()); 2676 if (!Result.first) 2677 return nullptr; 2678 Bits = std::move(*Result.second); 2679 2680 LLVM_DEBUG(dbgs() << "Considering bit-permutation-based instruction" 2681 " selection for: "); 2682 LLVM_DEBUG(N->dump(CurDAG)); 2683 2684 // Fill it RLAmt and set NeedMask. 2685 computeRotationAmounts(); 2686 2687 if (!NeedMask) 2688 return Select(N, false); 2689 2690 // We currently have two techniques for handling results with zeros: early 2691 // masking (the default) and late masking. Late masking is sometimes more 2692 // efficient, but because the structure of the bit groups is different, it 2693 // is hard to tell without generating both and comparing the results. With 2694 // late masking, we ignore zeros in the resulting value when inserting each 2695 // set of bit groups, and then mask in the zeros at the end. With early 2696 // masking, we only insert the non-zero parts of the result at every step. 2697 2698 unsigned InstCnt = 0, InstCntLateMask = 0; 2699 LLVM_DEBUG(dbgs() << "\tEarly masking:\n"); 2700 SDNode *RN = Select(N, false, &InstCnt); 2701 LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n"); 2702 2703 LLVM_DEBUG(dbgs() << "\tLate masking:\n"); 2704 SDNode *RNLM = Select(N, true, &InstCntLateMask); 2705 LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask 2706 << " instructions\n"); 2707 2708 if (InstCnt <= InstCntLateMask) { 2709 LLVM_DEBUG(dbgs() << "\tUsing early-masking for isel\n"); 2710 return RN; 2711 } 2712 2713 LLVM_DEBUG(dbgs() << "\tUsing late-masking for isel\n"); 2714 return RNLM; 2715 } 2716 }; 2717 2718 class IntegerCompareEliminator { 2719 SelectionDAG *CurDAG; 2720 PPCDAGToDAGISel *S; 2721 // Conversion type for interpreting results of a 32-bit instruction as 2722 // a 64-bit value or vice versa. 2723 enum ExtOrTruncConversion { Ext, Trunc }; 2724 2725 // Modifiers to guide how an ISD::SETCC node's result is to be computed 2726 // in a GPR. 2727 // ZExtOrig - use the original condition code, zero-extend value 2728 // ZExtInvert - invert the condition code, zero-extend value 2729 // SExtOrig - use the original condition code, sign-extend value 2730 // SExtInvert - invert the condition code, sign-extend value 2731 enum SetccInGPROpts { ZExtOrig, ZExtInvert, SExtOrig, SExtInvert }; 2732 2733 // Comparisons against zero to emit GPR code sequences for. 
Each of these 2734 // sequences may need to be emitted for two or more equivalent patterns. 2735 // For example (a >= 0) == (a > -1). The direction of the comparison (</>) 2736 // matters as well as the extension type: sext (-1/0), zext (1/0). 2737 // GEZExt - (zext (LHS >= 0)) 2738 // GESExt - (sext (LHS >= 0)) 2739 // LEZExt - (zext (LHS <= 0)) 2740 // LESExt - (sext (LHS <= 0)) 2741 enum ZeroCompare { GEZExt, GESExt, LEZExt, LESExt }; 2742 2743 SDNode *tryEXTEND(SDNode *N); 2744 SDNode *tryLogicOpOfCompares(SDNode *N); 2745 SDValue computeLogicOpInGPR(SDValue LogicOp); 2746 SDValue signExtendInputIfNeeded(SDValue Input); 2747 SDValue zeroExtendInputIfNeeded(SDValue Input); 2748 SDValue addExtOrTrunc(SDValue NatWidthRes, ExtOrTruncConversion Conv); 2749 SDValue getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl, 2750 ZeroCompare CmpTy); 2751 SDValue get32BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2752 int64_t RHSValue, SDLoc dl); 2753 SDValue get32BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2754 int64_t RHSValue, SDLoc dl); 2755 SDValue get64BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2756 int64_t RHSValue, SDLoc dl); 2757 SDValue get64BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2758 int64_t RHSValue, SDLoc dl); 2759 SDValue getSETCCInGPR(SDValue Compare, SetccInGPROpts ConvOpts); 2760 2761 public: 2762 IntegerCompareEliminator(SelectionDAG *DAG, 2763 PPCDAGToDAGISel *Sel) : CurDAG(DAG), S(Sel) { 2764 assert(CurDAG->getTargetLoweringInfo() 2765 .getPointerTy(CurDAG->getDataLayout()).getSizeInBits() == 64 && 2766 "Only expecting to use this on 64 bit targets."); 2767 } 2768 SDNode *Select(SDNode *N) { 2769 if (CmpInGPR == ICGPR_None) 2770 return nullptr; 2771 switch (N->getOpcode()) { 2772 default: break; 2773 case ISD::ZERO_EXTEND: 2774 if (CmpInGPR == ICGPR_Sext || CmpInGPR == ICGPR_SextI32 || 2775 CmpInGPR == ICGPR_SextI64) 2776 return nullptr; 2777 LLVM_FALLTHROUGH; 2778 case ISD::SIGN_EXTEND: 2779 if (CmpInGPR == ICGPR_Zext || CmpInGPR == ICGPR_ZextI32 || 2780 CmpInGPR == ICGPR_ZextI64) 2781 return nullptr; 2782 return tryEXTEND(N); 2783 case ISD::AND: 2784 case ISD::OR: 2785 case ISD::XOR: 2786 return tryLogicOpOfCompares(N); 2787 } 2788 return nullptr; 2789 } 2790 }; 2791 2792 static bool isLogicOp(unsigned Opc) { 2793 return Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR; 2794 } 2795 // The obvious case for wanting to keep the value in a GPR. Namely, the 2796 // result of the comparison is actually needed in a GPR. 2797 SDNode *IntegerCompareEliminator::tryEXTEND(SDNode *N) { 2798 assert((N->getOpcode() == ISD::ZERO_EXTEND || 2799 N->getOpcode() == ISD::SIGN_EXTEND) && 2800 "Expecting a zero/sign extend node!"); 2801 SDValue WideRes; 2802 // If we are zero-extending the result of a logical operation on i1 2803 // values, we can keep the values in GPRs. 2804 if (isLogicOp(N->getOperand(0).getOpcode()) && 2805 N->getOperand(0).getValueType() == MVT::i1 && 2806 N->getOpcode() == ISD::ZERO_EXTEND) 2807 WideRes = computeLogicOpInGPR(N->getOperand(0)); 2808 else if (N->getOperand(0).getOpcode() != ISD::SETCC) 2809 return nullptr; 2810 else 2811 WideRes = 2812 getSETCCInGPR(N->getOperand(0), 2813 N->getOpcode() == ISD::SIGN_EXTEND ? 
2814 SetccInGPROpts::SExtOrig : SetccInGPROpts::ZExtOrig); 2815 2816 if (!WideRes) 2817 return nullptr; 2818 2819 SDLoc dl(N); 2820 bool Input32Bit = WideRes.getValueType() == MVT::i32; 2821 bool Output32Bit = N->getValueType(0) == MVT::i32; 2822 2823 NumSextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 1 : 0; 2824 NumZextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 0 : 1; 2825 2826 SDValue ConvOp = WideRes; 2827 if (Input32Bit != Output32Bit) 2828 ConvOp = addExtOrTrunc(WideRes, Input32Bit ? ExtOrTruncConversion::Ext : 2829 ExtOrTruncConversion::Trunc); 2830 return ConvOp.getNode(); 2831 } 2832 2833 // Attempt to perform logical operations on the results of comparisons while 2834 // keeping the values in GPRs. Without doing so, these would end up being 2835 // lowered to CR-logical operations which suffer from significant latency and 2836 // low ILP. 2837 SDNode *IntegerCompareEliminator::tryLogicOpOfCompares(SDNode *N) { 2838 if (N->getValueType(0) != MVT::i1) 2839 return nullptr; 2840 assert(isLogicOp(N->getOpcode()) && 2841 "Expected a logic operation on setcc results."); 2842 SDValue LoweredLogical = computeLogicOpInGPR(SDValue(N, 0)); 2843 if (!LoweredLogical) 2844 return nullptr; 2845 2846 SDLoc dl(N); 2847 bool IsBitwiseNegate = LoweredLogical.getMachineOpcode() == PPC::XORI8; 2848 unsigned SubRegToExtract = IsBitwiseNegate ? PPC::sub_eq : PPC::sub_gt; 2849 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); 2850 SDValue LHS = LoweredLogical.getOperand(0); 2851 SDValue RHS = LoweredLogical.getOperand(1); 2852 SDValue WideOp; 2853 SDValue OpToConvToRecForm; 2854 2855 // Look through any 32-bit to 64-bit implicit extend nodes to find the 2856 // opcode that is input to the XORI. 2857 if (IsBitwiseNegate && 2858 LoweredLogical.getOperand(0).getMachineOpcode() == PPC::INSERT_SUBREG) 2859 OpToConvToRecForm = LoweredLogical.getOperand(0).getOperand(1); 2860 else if (IsBitwiseNegate) 2861 // If the input to the XORI isn't an extension, that's what we're after. 2862 OpToConvToRecForm = LoweredLogical.getOperand(0); 2863 else 2864 // If this is not an XORI, it is a reg-reg logical op and we can convert 2865 // it to record-form. 2866 OpToConvToRecForm = LoweredLogical; 2867 2868 // Get the record-form version of the node we're looking to use to get the 2869 // CR result from. 2870 uint16_t NonRecOpc = OpToConvToRecForm.getMachineOpcode(); 2871 int NewOpc = PPCInstrInfo::getRecordFormOpcode(NonRecOpc); 2872 2873 // Convert the right node to record-form. This is either the logical we're 2874 // looking at or it is the input node to the negation (if we're looking at 2875 // a bitwise negation). 2876 if (NewOpc != -1 && IsBitwiseNegate) { 2877 // The input to the XORI has a record-form. Use it. 2878 assert(LoweredLogical.getConstantOperandVal(1) == 1 && 2879 "Expected a PPC::XORI8 only for bitwise negation."); 2880 // Emit the record-form instruction. 2881 std::vector<SDValue> Ops; 2882 for (int i = 0, e = OpToConvToRecForm.getNumOperands(); i < e; i++) 2883 Ops.push_back(OpToConvToRecForm.getOperand(i)); 2884 2885 WideOp = 2886 SDValue(CurDAG->getMachineNode(NewOpc, dl, 2887 OpToConvToRecForm.getValueType(), 2888 MVT::Glue, Ops), 0); 2889 } else { 2890 assert((NewOpc != -1 || !IsBitwiseNegate) && 2891 "No record form available for AND8/OR8/XOR8?"); 2892 WideOp = 2893 SDValue(CurDAG->getMachineNode(NewOpc == -1 ? 
PPC::ANDI8_rec : NewOpc, 2894 dl, MVT::i64, MVT::Glue, LHS, RHS), 2895 0); 2896 } 2897 2898 // Select this node to a single bit from CR0 set by the record-form node 2899 // just created. For bitwise negation, use the EQ bit which is the equivalent 2900 // of negating the result (i.e. it is a bit set when the result of the 2901 // operation is zero). 2902 SDValue SRIdxVal = 2903 CurDAG->getTargetConstant(SubRegToExtract, dl, MVT::i32); 2904 SDValue CRBit = 2905 SDValue(CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, 2906 MVT::i1, CR0Reg, SRIdxVal, 2907 WideOp.getValue(1)), 0); 2908 return CRBit.getNode(); 2909 } 2910 2911 // Lower a logical operation on i1 values into a GPR sequence if possible. 2912 // The result can be kept in a GPR if requested. 2913 // Three types of inputs can be handled: 2914 // - SETCC 2915 // - TRUNCATE 2916 // - Logical operation (AND/OR/XOR) 2917 // There is also a special case that is handled (namely a complement operation 2918 // achieved with xor %a, -1). 2919 SDValue IntegerCompareEliminator::computeLogicOpInGPR(SDValue LogicOp) { 2920 assert(isLogicOp(LogicOp.getOpcode()) && 2921 "Can only handle logic operations here."); 2922 assert(LogicOp.getValueType() == MVT::i1 && 2923 "Can only handle logic operations on i1 values here."); 2924 SDLoc dl(LogicOp); 2925 SDValue LHS, RHS; 2926 2927 // Special case: xor %a, -1 2928 bool IsBitwiseNegation = isBitwiseNot(LogicOp); 2929 2930 // Produces a GPR sequence for each operand of the binary logic operation. 2931 // For SETCC, it produces the respective comparison, for TRUNCATE it truncates 2932 // the value in a GPR and for logic operations, it will recursively produce 2933 // a GPR sequence for the operation. 2934 auto getLogicOperand = [&] (SDValue Operand) -> SDValue { 2935 unsigned OperandOpcode = Operand.getOpcode(); 2936 if (OperandOpcode == ISD::SETCC) 2937 return getSETCCInGPR(Operand, SetccInGPROpts::ZExtOrig); 2938 else if (OperandOpcode == ISD::TRUNCATE) { 2939 SDValue InputOp = Operand.getOperand(0); 2940 EVT InVT = InputOp.getValueType(); 2941 return SDValue(CurDAG->getMachineNode(InVT == MVT::i32 ? PPC::RLDICL_32 : 2942 PPC::RLDICL, dl, InVT, InputOp, 2943 S->getI64Imm(0, dl), 2944 S->getI64Imm(63, dl)), 0); 2945 } else if (isLogicOp(OperandOpcode)) 2946 return computeLogicOpInGPR(Operand); 2947 return SDValue(); 2948 }; 2949 LHS = getLogicOperand(LogicOp.getOperand(0)); 2950 RHS = getLogicOperand(LogicOp.getOperand(1)); 2951 2952 // If a GPR sequence can't be produced for the LHS we can't proceed. 2953 // Not producing a GPR sequence for the RHS is only a problem if this isn't 2954 // a bitwise negation operation. 2955 if (!LHS || (!RHS && !IsBitwiseNegation)) 2956 return SDValue(); 2957 2958 NumLogicOpsOnComparison++; 2959 2960 // We will use the inputs as 64-bit values. 
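  // (Any i32 operand is simply reinterpreted as the low word of a 64-bit
  // register; addExtOrTrunc below emits a subregister insert rather than a
  // real extension instruction.)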
2961 if (LHS.getValueType() == MVT::i32) 2962 LHS = addExtOrTrunc(LHS, ExtOrTruncConversion::Ext); 2963 if (!IsBitwiseNegation && RHS.getValueType() == MVT::i32) 2964 RHS = addExtOrTrunc(RHS, ExtOrTruncConversion::Ext); 2965 2966 unsigned NewOpc; 2967 switch (LogicOp.getOpcode()) { 2968 default: llvm_unreachable("Unknown logic operation."); 2969 case ISD::AND: NewOpc = PPC::AND8; break; 2970 case ISD::OR: NewOpc = PPC::OR8; break; 2971 case ISD::XOR: NewOpc = PPC::XOR8; break; 2972 } 2973 2974 if (IsBitwiseNegation) { 2975 RHS = S->getI64Imm(1, dl); 2976 NewOpc = PPC::XORI8; 2977 } 2978 2979 return SDValue(CurDAG->getMachineNode(NewOpc, dl, MVT::i64, LHS, RHS), 0); 2980 2981 } 2982 2983 /// If the value isn't guaranteed to be sign-extended to 64-bits, extend it. 2984 /// Otherwise just reinterpret it as a 64-bit value. 2985 /// Useful when emitting comparison code for 32-bit values without using 2986 /// the compare instruction (which only considers the lower 32-bits). 2987 SDValue IntegerCompareEliminator::signExtendInputIfNeeded(SDValue Input) { 2988 assert(Input.getValueType() == MVT::i32 && 2989 "Can only sign-extend 32-bit values here."); 2990 unsigned Opc = Input.getOpcode(); 2991 2992 // The value was sign extended and then truncated to 32-bits. No need to 2993 // sign extend it again. 2994 if (Opc == ISD::TRUNCATE && 2995 (Input.getOperand(0).getOpcode() == ISD::AssertSext || 2996 Input.getOperand(0).getOpcode() == ISD::SIGN_EXTEND)) 2997 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 2998 2999 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input); 3000 // The input is a sign-extending load. All ppc sign-extending loads 3001 // sign-extend to the full 64-bits. 3002 if (InputLoad && InputLoad->getExtensionType() == ISD::SEXTLOAD) 3003 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3004 3005 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input); 3006 // We don't sign-extend constants. 3007 if (InputConst) 3008 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3009 3010 SDLoc dl(Input); 3011 SignExtensionsAdded++; 3012 return SDValue(CurDAG->getMachineNode(PPC::EXTSW_32_64, dl, 3013 MVT::i64, Input), 0); 3014 } 3015 3016 /// If the value isn't guaranteed to be zero-extended to 64-bits, extend it. 3017 /// Otherwise just reinterpret it as a 64-bit value. 3018 /// Useful when emitting comparison code for 32-bit values without using 3019 /// the compare instruction (which only considers the lower 32-bits). 3020 SDValue IntegerCompareEliminator::zeroExtendInputIfNeeded(SDValue Input) { 3021 assert(Input.getValueType() == MVT::i32 && 3022 "Can only zero-extend 32-bit values here."); 3023 unsigned Opc = Input.getOpcode(); 3024 3025 // The only condition under which we can omit the actual extend instruction: 3026 // - The value is a positive constant 3027 // - The value comes from a load that isn't a sign-extending load 3028 // An ISD::TRUNCATE needs to be zero-extended unless it is fed by a zext. 
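// (Illustration of the cases above, not an exhaustive list of DAG shapes: an
// i32 value coming from a zero-extending load, or from a truncate of a value
// already zero-extended to i64, is known to have zeros in bits 32-63, so
// simply reinterpreting the register as i64 is sufficient; an ordinary i32
// arithmetic result carries no such guarantee and falls through to the
// explicit clearing of the high 32 bits below.)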
3029 bool IsTruncateOfZExt = Opc == ISD::TRUNCATE && 3030 (Input.getOperand(0).getOpcode() == ISD::AssertZext || 3031 Input.getOperand(0).getOpcode() == ISD::ZERO_EXTEND); 3032 if (IsTruncateOfZExt) 3033 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3034 3035 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input); 3036 if (InputConst && InputConst->getSExtValue() >= 0) 3037 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3038 3039 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input); 3040 // The input is a load that doesn't sign-extend (it will be zero-extended). 3041 if (InputLoad && InputLoad->getExtensionType() != ISD::SEXTLOAD) 3042 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3043 3044 // None of the above, need to zero-extend. 3045 SDLoc dl(Input); 3046 ZeroExtensionsAdded++; 3047 return SDValue(CurDAG->getMachineNode(PPC::RLDICL_32_64, dl, MVT::i64, Input, 3048 S->getI64Imm(0, dl), 3049 S->getI64Imm(32, dl)), 0); 3050 } 3051 3052 // Handle a 32-bit value in a 64-bit register and vice-versa. These are of 3053 // course not actual zero/sign extensions that will generate machine code, 3054 // they're just a way to reinterpret a 32 bit value in a register as a 3055 // 64 bit value and vice-versa. 3056 SDValue IntegerCompareEliminator::addExtOrTrunc(SDValue NatWidthRes, 3057 ExtOrTruncConversion Conv) { 3058 SDLoc dl(NatWidthRes); 3059 3060 // For reinterpreting 32-bit values as 64 bit values, we generate 3061 // INSERT_SUBREG IMPLICIT_DEF:i64, <input>, TargetConstant:i32<1> 3062 if (Conv == ExtOrTruncConversion::Ext) { 3063 SDValue ImDef(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, MVT::i64), 0); 3064 SDValue SubRegIdx = 3065 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 3066 return SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, MVT::i64, 3067 ImDef, NatWidthRes, SubRegIdx), 0); 3068 } 3069 3070 assert(Conv == ExtOrTruncConversion::Trunc && 3071 "Unknown convertion between 32 and 64 bit values."); 3072 // For reinterpreting 64-bit values as 32-bit values, we just need to 3073 // EXTRACT_SUBREG (i.e. extract the low word). 3074 SDValue SubRegIdx = 3075 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 3076 return SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl, MVT::i32, 3077 NatWidthRes, SubRegIdx), 0); 3078 } 3079 3080 // Produce a GPR sequence for compound comparisons (<=, >=) against zero. 3081 // Handle both zero-extensions and sign-extensions. 3082 SDValue 3083 IntegerCompareEliminator::getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl, 3084 ZeroCompare CmpTy) { 3085 EVT InVT = LHS.getValueType(); 3086 bool Is32Bit = InVT == MVT::i32; 3087 SDValue ToExtend; 3088 3089 // Produce the value that needs to be either zero or sign extended. 3090 switch (CmpTy) { 3091 case ZeroCompare::GEZExt: 3092 case ZeroCompare::GESExt: 3093 ToExtend = SDValue(CurDAG->getMachineNode(Is32Bit ? PPC::NOR : PPC::NOR8, 3094 dl, InVT, LHS, LHS), 0); 3095 break; 3096 case ZeroCompare::LEZExt: 3097 case ZeroCompare::LESExt: { 3098 if (Is32Bit) { 3099 // Upper 32 bits cannot be undefined for this sequence. 
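// (Sketch of the arithmetic for this 32-bit LE-with-zero case: once %a is
// sign-extended, the neg/shift pair below computes the sign bit of -%a, which
// is 1 exactly when %a > 0. The LE handling at the end of this function then
// either flips that bit (zero-extended form) or adds -1 to it (sign-extended
// form) to produce the %a <= 0 result.)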
3100 LHS = signExtendInputIfNeeded(LHS); 3101 SDValue Neg = 3102 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0); 3103 ToExtend = 3104 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3105 Neg, S->getI64Imm(1, dl), 3106 S->getI64Imm(63, dl)), 0); 3107 } else { 3108 SDValue Addi = 3109 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS, 3110 S->getI64Imm(~0ULL, dl)), 0); 3111 ToExtend = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 3112 Addi, LHS), 0); 3113 } 3114 break; 3115 } 3116 } 3117 3118 // For 64-bit sequences, the extensions are the same for the GE/LE cases. 3119 if (!Is32Bit && 3120 (CmpTy == ZeroCompare::GEZExt || CmpTy == ZeroCompare::LEZExt)) 3121 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3122 ToExtend, S->getI64Imm(1, dl), 3123 S->getI64Imm(63, dl)), 0); 3124 if (!Is32Bit && 3125 (CmpTy == ZeroCompare::GESExt || CmpTy == ZeroCompare::LESExt)) 3126 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, ToExtend, 3127 S->getI64Imm(63, dl)), 0); 3128 3129 assert(Is32Bit && "Should have handled the 32-bit sequences above."); 3130 // For 32-bit sequences, the extensions differ between GE/LE cases. 3131 switch (CmpTy) { 3132 case ZeroCompare::GEZExt: { 3133 SDValue ShiftOps[] = { ToExtend, S->getI32Imm(1, dl), S->getI32Imm(31, dl), 3134 S->getI32Imm(31, dl) }; 3135 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3136 ShiftOps), 0); 3137 } 3138 case ZeroCompare::GESExt: 3139 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, ToExtend, 3140 S->getI32Imm(31, dl)), 0); 3141 case ZeroCompare::LEZExt: 3142 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, ToExtend, 3143 S->getI32Imm(1, dl)), 0); 3144 case ZeroCompare::LESExt: 3145 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, ToExtend, 3146 S->getI32Imm(-1, dl)), 0); 3147 } 3148 3149 // The above case covers all the enumerators so it can't have a default clause 3150 // to avoid compiler warnings. 3151 llvm_unreachable("Unknown zero-comparison type."); 3152 } 3153 3154 /// Produces a zero-extended result of comparing two 32-bit values according to 3155 /// the passed condition code. 3156 SDValue 3157 IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS, 3158 ISD::CondCode CC, 3159 int64_t RHSValue, SDLoc dl) { 3160 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 || 3161 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Sext) 3162 return SDValue(); 3163 bool IsRHSZero = RHSValue == 0; 3164 bool IsRHSOne = RHSValue == 1; 3165 bool IsRHSNegOne = RHSValue == -1LL; 3166 switch (CC) { 3167 default: return SDValue(); 3168 case ISD::SETEQ: { 3169 // (zext (setcc %a, %b, seteq)) -> (lshr (cntlzw (xor %a, %b)), 5) 3170 // (zext (setcc %a, 0, seteq)) -> (lshr (cntlzw %a), 5) 3171 SDValue Xor = IsRHSZero ? LHS : 3172 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3173 SDValue Clz = 3174 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3175 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), 3176 S->getI32Imm(31, dl) }; 3177 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3178 ShiftOps), 0); 3179 } 3180 case ISD::SETNE: { 3181 // (zext (setcc %a, %b, setne)) -> (xor (lshr (cntlzw (xor %a, %b)), 5), 1) 3182 // (zext (setcc %a, 0, setne)) -> (xor (lshr (cntlzw %a), 5), 1) 3183 SDValue Xor = IsRHSZero ? 
LHS : 3184 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3185 SDValue Clz = 3186 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3187 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), 3188 S->getI32Imm(31, dl) }; 3189 SDValue Shift = 3190 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0); 3191 return SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift, 3192 S->getI32Imm(1, dl)), 0); 3193 } 3194 case ISD::SETGE: { 3195 // (zext (setcc %a, %b, setge)) -> (xor (lshr (sub %a, %b), 63), 1) 3196 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 31) 3197 if(IsRHSZero) 3198 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3199 3200 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a) 3201 // by swapping inputs and falling through. 3202 std::swap(LHS, RHS); 3203 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3204 IsRHSZero = RHSConst && RHSConst->isZero(); 3205 LLVM_FALLTHROUGH; 3206 } 3207 case ISD::SETLE: { 3208 if (CmpInGPR == ICGPR_NonExtIn) 3209 return SDValue(); 3210 // (zext (setcc %a, %b, setle)) -> (xor (lshr (sub %b, %a), 63), 1) 3211 // (zext (setcc %a, 0, setle)) -> (xor (lshr (- %a), 63), 1) 3212 if(IsRHSZero) { 3213 if (CmpInGPR == ICGPR_NonExtIn) 3214 return SDValue(); 3215 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3216 } 3217 3218 // The upper 32-bits of the register can't be undefined for this sequence. 3219 LHS = signExtendInputIfNeeded(LHS); 3220 RHS = signExtendInputIfNeeded(RHS); 3221 SDValue Sub = 3222 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0); 3223 SDValue Shift = 3224 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Sub, 3225 S->getI64Imm(1, dl), S->getI64Imm(63, dl)), 3226 0); 3227 return 3228 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, 3229 MVT::i64, Shift, S->getI32Imm(1, dl)), 0); 3230 } 3231 case ISD::SETGT: { 3232 // (zext (setcc %a, %b, setgt)) -> (lshr (sub %b, %a), 63) 3233 // (zext (setcc %a, -1, setgt)) -> (lshr (~ %a), 31) 3234 // (zext (setcc %a, 0, setgt)) -> (lshr (- %a), 63) 3235 // Handle SETLT -1 (which is equivalent to SETGE 0). 3236 if (IsRHSNegOne) 3237 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3238 3239 if (IsRHSZero) { 3240 if (CmpInGPR == ICGPR_NonExtIn) 3241 return SDValue(); 3242 // The upper 32-bits of the register can't be undefined for this sequence. 3243 LHS = signExtendInputIfNeeded(LHS); 3244 RHS = signExtendInputIfNeeded(RHS); 3245 SDValue Neg = 3246 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0); 3247 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3248 Neg, S->getI32Imm(1, dl), S->getI32Imm(63, dl)), 0); 3249 } 3250 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as 3251 // (%b < %a) by swapping inputs and falling through. 3252 std::swap(LHS, RHS); 3253 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3254 IsRHSZero = RHSConst && RHSConst->isZero(); 3255 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3256 LLVM_FALLTHROUGH; 3257 } 3258 case ISD::SETLT: { 3259 // (zext (setcc %a, %b, setlt)) -> (lshr (sub %a, %b), 63) 3260 // (zext (setcc %a, 1, setlt)) -> (xor (lshr (- %a), 63), 1) 3261 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 31) 3262 // Handle SETLT 1 (which is equivalent to SETLE 0). 
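// (A quick sanity check of that equivalence: for %a = 0 both %a < 1 and
// %a <= 0 hold, for %a = 1 neither holds, and for any negative %a both hold.)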
3263 if (IsRHSOne) { 3264 if (CmpInGPR == ICGPR_NonExtIn) 3265 return SDValue(); 3266 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3267 } 3268 3269 if (IsRHSZero) { 3270 SDValue ShiftOps[] = { LHS, S->getI32Imm(1, dl), S->getI32Imm(31, dl), 3271 S->getI32Imm(31, dl) }; 3272 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3273 ShiftOps), 0); 3274 } 3275 3276 if (CmpInGPR == ICGPR_NonExtIn) 3277 return SDValue(); 3278 // The upper 32-bits of the register can't be undefined for this sequence. 3279 LHS = signExtendInputIfNeeded(LHS); 3280 RHS = signExtendInputIfNeeded(RHS); 3281 SDValue SUBFNode = 3282 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3283 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3284 SUBFNode, S->getI64Imm(1, dl), 3285 S->getI64Imm(63, dl)), 0); 3286 } 3287 case ISD::SETUGE: 3288 // (zext (setcc %a, %b, setuge)) -> (xor (lshr (sub %b, %a), 63), 1) 3289 // (zext (setcc %a, %b, setule)) -> (xor (lshr (sub %a, %b), 63), 1) 3290 std::swap(LHS, RHS); 3291 LLVM_FALLTHROUGH; 3292 case ISD::SETULE: { 3293 if (CmpInGPR == ICGPR_NonExtIn) 3294 return SDValue(); 3295 // The upper 32-bits of the register can't be undefined for this sequence. 3296 LHS = zeroExtendInputIfNeeded(LHS); 3297 RHS = zeroExtendInputIfNeeded(RHS); 3298 SDValue Subtract = 3299 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0); 3300 SDValue SrdiNode = 3301 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3302 Subtract, S->getI64Imm(1, dl), 3303 S->getI64Imm(63, dl)), 0); 3304 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, SrdiNode, 3305 S->getI32Imm(1, dl)), 0); 3306 } 3307 case ISD::SETUGT: 3308 // (zext (setcc %a, %b, setugt)) -> (lshr (sub %b, %a), 63) 3309 // (zext (setcc %a, %b, setult)) -> (lshr (sub %a, %b), 63) 3310 std::swap(LHS, RHS); 3311 LLVM_FALLTHROUGH; 3312 case ISD::SETULT: { 3313 if (CmpInGPR == ICGPR_NonExtIn) 3314 return SDValue(); 3315 // The upper 32-bits of the register can't be undefined for this sequence. 3316 LHS = zeroExtendInputIfNeeded(LHS); 3317 RHS = zeroExtendInputIfNeeded(RHS); 3318 SDValue Subtract = 3319 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3320 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3321 Subtract, S->getI64Imm(1, dl), 3322 S->getI64Imm(63, dl)), 0); 3323 } 3324 } 3325 } 3326 3327 /// Produces a sign-extended result of comparing two 32-bit values according to 3328 /// the passed condition code. 3329 SDValue 3330 IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS, 3331 ISD::CondCode CC, 3332 int64_t RHSValue, SDLoc dl) { 3333 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 || 3334 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Zext) 3335 return SDValue(); 3336 bool IsRHSZero = RHSValue == 0; 3337 bool IsRHSOne = RHSValue == 1; 3338 bool IsRHSNegOne = RHSValue == -1LL; 3339 3340 switch (CC) { 3341 default: return SDValue(); 3342 case ISD::SETEQ: { 3343 // (sext (setcc %a, %b, seteq)) -> 3344 // (ashr (shl (ctlz (xor %a, %b)), 58), 63) 3345 // (sext (setcc %a, 0, seteq)) -> 3346 // (ashr (shl (ctlz %a), 58), 63) 3347 SDValue CountInput = IsRHSZero ? 
LHS : 3348 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3349 SDValue Cntlzw = 3350 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, CountInput), 0); 3351 SDValue SHLOps[] = { Cntlzw, S->getI32Imm(27, dl), 3352 S->getI32Imm(5, dl), S->getI32Imm(31, dl) }; 3353 SDValue Slwi = 3354 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, SHLOps), 0); 3355 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Slwi), 0); 3356 } 3357 case ISD::SETNE: { 3358 // Bitwise xor the operands, count leading zeros, shift right by 5 bits and 3359 // flip the bit, finally take 2's complement. 3360 // (sext (setcc %a, %b, setne)) -> 3361 // (neg (xor (lshr (ctlz (xor %a, %b)), 5), 1)) 3362 // Same as above, but the first xor is not needed. 3363 // (sext (setcc %a, 0, setne)) -> 3364 // (neg (xor (lshr (ctlz %a), 5), 1)) 3365 SDValue Xor = IsRHSZero ? LHS : 3366 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3367 SDValue Clz = 3368 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3369 SDValue ShiftOps[] = 3370 { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), S->getI32Imm(31, dl) }; 3371 SDValue Shift = 3372 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0); 3373 SDValue Xori = 3374 SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift, 3375 S->getI32Imm(1, dl)), 0); 3376 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Xori), 0); 3377 } 3378 case ISD::SETGE: { 3379 // (sext (setcc %a, %b, setge)) -> (add (lshr (sub %a, %b), 63), -1) 3380 // (sext (setcc %a, 0, setge)) -> (ashr (~ %a), 31) 3381 if (IsRHSZero) 3382 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3383 3384 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a) 3385 // by swapping inputs and falling through. 3386 std::swap(LHS, RHS); 3387 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3388 IsRHSZero = RHSConst && RHSConst->isZero(); 3389 LLVM_FALLTHROUGH; 3390 } 3391 case ISD::SETLE: { 3392 if (CmpInGPR == ICGPR_NonExtIn) 3393 return SDValue(); 3394 // (sext (setcc %a, %b, setge)) -> (add (lshr (sub %b, %a), 63), -1) 3395 // (sext (setcc %a, 0, setle)) -> (add (lshr (- %a), 63), -1) 3396 if (IsRHSZero) 3397 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt); 3398 3399 // The upper 32-bits of the register can't be undefined for this sequence. 3400 LHS = signExtendInputIfNeeded(LHS); 3401 RHS = signExtendInputIfNeeded(RHS); 3402 SDValue SUBFNode = 3403 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, MVT::Glue, 3404 LHS, RHS), 0); 3405 SDValue Srdi = 3406 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3407 SUBFNode, S->getI64Imm(1, dl), 3408 S->getI64Imm(63, dl)), 0); 3409 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Srdi, 3410 S->getI32Imm(-1, dl)), 0); 3411 } 3412 case ISD::SETGT: { 3413 // (sext (setcc %a, %b, setgt)) -> (ashr (sub %b, %a), 63) 3414 // (sext (setcc %a, -1, setgt)) -> (ashr (~ %a), 31) 3415 // (sext (setcc %a, 0, setgt)) -> (ashr (- %a), 63) 3416 if (IsRHSNegOne) 3417 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3418 if (IsRHSZero) { 3419 if (CmpInGPR == ICGPR_NonExtIn) 3420 return SDValue(); 3421 // The upper 32-bits of the register can't be undefined for this sequence. 
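// (Sketch of why this works: once %a is sign-extended, -%a is negative exactly
// when %a > 0, so the arithmetic shift right by 63 below produces -1 for
// %a > 0 and 0 otherwise, which is precisely the sign-extended result of
// %a > 0.)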
3422 LHS = signExtendInputIfNeeded(LHS);
3423 RHS = signExtendInputIfNeeded(RHS);
3424 SDValue Neg =
3425 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0);
3426 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Neg,
3427 S->getI64Imm(63, dl)), 0);
3428 }
3429 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as
3430 // (%b < %a) by swapping inputs and falling through.
3431 std::swap(LHS, RHS);
3432 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3433 IsRHSZero = RHSConst && RHSConst->isZero();
3434 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
3435 LLVM_FALLTHROUGH;
3436 }
3437 case ISD::SETLT: {
3438 // (sext (setcc %a, %b, setlt)) -> (ashr (sub %a, %b), 63)
3439 // (sext (setcc %a, 1, setlt)) -> (add (lshr (- %a), 63), -1)
3440 // (sext (setcc %a, 0, setlt)) -> (ashr %a, 31)
3441 if (IsRHSOne) {
3442 if (CmpInGPR == ICGPR_NonExtIn)
3443 return SDValue();
3444 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
3445 }
3446 if (IsRHSZero)
3447 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, LHS,
3448 S->getI32Imm(31, dl)), 0);
3449
3450 if (CmpInGPR == ICGPR_NonExtIn)
3451 return SDValue();
3452 // The upper 32-bits of the register can't be undefined for this sequence.
3453 LHS = signExtendInputIfNeeded(LHS);
3454 RHS = signExtendInputIfNeeded(RHS);
3455 SDValue SUBFNode =
3456 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
3457 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
3458 SUBFNode, S->getI64Imm(63, dl)), 0);
3459 }
3460 case ISD::SETUGE:
3461 // (sext (setcc %a, %b, setuge)) -> (add (lshr (sub %a, %b), 63), -1)
3462 // (sext (setcc %a, %b, setule)) -> (add (lshr (sub %b, %a), 63), -1)
3463 std::swap(LHS, RHS);
3464 LLVM_FALLTHROUGH;
3465 case ISD::SETULE: {
3466 if (CmpInGPR == ICGPR_NonExtIn)
3467 return SDValue();
3468 // The upper 32-bits of the register can't be undefined for this sequence.
3469 LHS = zeroExtendInputIfNeeded(LHS);
3470 RHS = zeroExtendInputIfNeeded(RHS);
3471 SDValue Subtract =
3472 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0);
3473 SDValue Shift =
3474 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Subtract,
3475 S->getI32Imm(1, dl), S->getI32Imm(63,dl)),
3476 0);
3477 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Shift,
3478 S->getI32Imm(-1, dl)), 0);
3479 }
3480 case ISD::SETUGT:
3481 // (sext (setcc %a, %b, setugt)) -> (ashr (sub %b, %a), 63)
3482 // (sext (setcc %a, %b, setult)) -> (ashr (sub %a, %b), 63)
3483 std::swap(LHS, RHS);
3484 LLVM_FALLTHROUGH;
3485 case ISD::SETULT: {
3486 if (CmpInGPR == ICGPR_NonExtIn)
3487 return SDValue();
3488 // The upper 32-bits of the register can't be undefined for this sequence.
3489 LHS = zeroExtendInputIfNeeded(LHS);
3490 RHS = zeroExtendInputIfNeeded(RHS);
3491 SDValue Subtract =
3492 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0);
3493 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
3494 Subtract, S->getI64Imm(63, dl)), 0);
3495 }
3496 }
3497 }
3498
3499 /// Produces a zero-extended result of comparing two 64-bit values according to
3500 /// the passed condition code.
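/// As a rough sketch of the style of sequence produced (illustration only, the
/// exact nodes are built per condition code below): for the equality case,
/// xor-ing the operands gives 0 exactly when they are equal, cntlzd of that is
/// then 64 (and at most 63 for any nonzero value), and shifting the count right
/// by 6 bits leaves the zero-extended i1 result, 1 or 0.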
3501 SDValue 3502 IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS, 3503 ISD::CondCode CC, 3504 int64_t RHSValue, SDLoc dl) { 3505 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 || 3506 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Sext) 3507 return SDValue(); 3508 bool IsRHSZero = RHSValue == 0; 3509 bool IsRHSOne = RHSValue == 1; 3510 bool IsRHSNegOne = RHSValue == -1LL; 3511 switch (CC) { 3512 default: return SDValue(); 3513 case ISD::SETEQ: { 3514 // (zext (setcc %a, %b, seteq)) -> (lshr (ctlz (xor %a, %b)), 6) 3515 // (zext (setcc %a, 0, seteq)) -> (lshr (ctlz %a), 6) 3516 SDValue Xor = IsRHSZero ? LHS : 3517 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3518 SDValue Clz = 3519 SDValue(CurDAG->getMachineNode(PPC::CNTLZD, dl, MVT::i64, Xor), 0); 3520 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Clz, 3521 S->getI64Imm(58, dl), 3522 S->getI64Imm(63, dl)), 0); 3523 } 3524 case ISD::SETNE: { 3525 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1) 3526 // (zext (setcc %a, %b, setne)) -> (sube addc.reg, addc.reg, addc.CA) 3527 // {addcz.reg, addcz.CA} = (addcarry %a, -1) 3528 // (zext (setcc %a, 0, setne)) -> (sube addcz.reg, addcz.reg, addcz.CA) 3529 SDValue Xor = IsRHSZero ? LHS : 3530 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3531 SDValue AC = 3532 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue, 3533 Xor, S->getI32Imm(~0U, dl)), 0); 3534 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, AC, 3535 Xor, AC.getValue(1)), 0); 3536 } 3537 case ISD::SETGE: { 3538 // {subc.reg, subc.CA} = (subcarry %a, %b) 3539 // (zext (setcc %a, %b, setge)) -> 3540 // (adde (lshr %b, 63), (ashr %a, 63), subc.CA) 3541 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 63) 3542 if (IsRHSZero) 3543 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3544 std::swap(LHS, RHS); 3545 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3546 IsRHSZero = RHSConst && RHSConst->isZero(); 3547 LLVM_FALLTHROUGH; 3548 } 3549 case ISD::SETLE: { 3550 // {subc.reg, subc.CA} = (subcarry %b, %a) 3551 // (zext (setcc %a, %b, setge)) -> 3552 // (adde (lshr %a, 63), (ashr %b, 63), subc.CA) 3553 // (zext (setcc %a, 0, setge)) -> (lshr (or %a, (add %a, -1)), 63) 3554 if (IsRHSZero) 3555 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3556 SDValue ShiftL = 3557 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS, 3558 S->getI64Imm(1, dl), 3559 S->getI64Imm(63, dl)), 0); 3560 SDValue ShiftR = 3561 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS, 3562 S->getI64Imm(63, dl)), 0); 3563 SDValue SubtractCarry = 3564 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3565 LHS, RHS), 1); 3566 return SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue, 3567 ShiftR, ShiftL, SubtractCarry), 0); 3568 } 3569 case ISD::SETGT: { 3570 // {subc.reg, subc.CA} = (subcarry %b, %a) 3571 // (zext (setcc %a, %b, setgt)) -> 3572 // (xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1) 3573 // (zext (setcc %a, 0, setgt)) -> (lshr (nor (add %a, -1), %a), 63) 3574 if (IsRHSNegOne) 3575 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3576 if (IsRHSZero) { 3577 SDValue Addi = 3578 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS, 3579 S->getI64Imm(~0ULL, dl)), 0); 3580 SDValue Nor = 3581 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Addi, LHS), 0); 3582 return 
SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Nor, 3583 S->getI64Imm(1, dl), 3584 S->getI64Imm(63, dl)), 0); 3585 } 3586 std::swap(LHS, RHS); 3587 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3588 IsRHSZero = RHSConst && RHSConst->isZero(); 3589 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3590 LLVM_FALLTHROUGH; 3591 } 3592 case ISD::SETLT: { 3593 // {subc.reg, subc.CA} = (subcarry %a, %b) 3594 // (zext (setcc %a, %b, setlt)) -> 3595 // (xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1) 3596 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 63) 3597 if (IsRHSOne) 3598 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3599 if (IsRHSZero) 3600 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS, 3601 S->getI64Imm(1, dl), 3602 S->getI64Imm(63, dl)), 0); 3603 SDValue SRADINode = 3604 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, 3605 LHS, S->getI64Imm(63, dl)), 0); 3606 SDValue SRDINode = 3607 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3608 RHS, S->getI64Imm(1, dl), 3609 S->getI64Imm(63, dl)), 0); 3610 SDValue SUBFC8Carry = 3611 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3612 RHS, LHS), 1); 3613 SDValue ADDE8Node = 3614 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue, 3615 SRDINode, SRADINode, SUBFC8Carry), 0); 3616 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, 3617 ADDE8Node, S->getI64Imm(1, dl)), 0); 3618 } 3619 case ISD::SETUGE: 3620 // {subc.reg, subc.CA} = (subcarry %a, %b) 3621 // (zext (setcc %a, %b, setuge)) -> (add (sube %b, %b, subc.CA), 1) 3622 std::swap(LHS, RHS); 3623 LLVM_FALLTHROUGH; 3624 case ISD::SETULE: { 3625 // {subc.reg, subc.CA} = (subcarry %b, %a) 3626 // (zext (setcc %a, %b, setule)) -> (add (sube %a, %a, subc.CA), 1) 3627 SDValue SUBFC8Carry = 3628 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3629 LHS, RHS), 1); 3630 SDValue SUBFE8Node = 3631 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue, 3632 LHS, LHS, SUBFC8Carry), 0); 3633 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, 3634 SUBFE8Node, S->getI64Imm(1, dl)), 0); 3635 } 3636 case ISD::SETUGT: 3637 // {subc.reg, subc.CA} = (subcarry %b, %a) 3638 // (zext (setcc %a, %b, setugt)) -> -(sube %b, %b, subc.CA) 3639 std::swap(LHS, RHS); 3640 LLVM_FALLTHROUGH; 3641 case ISD::SETULT: { 3642 // {subc.reg, subc.CA} = (subcarry %a, %b) 3643 // (zext (setcc %a, %b, setult)) -> -(sube %a, %a, subc.CA) 3644 SDValue SubtractCarry = 3645 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3646 RHS, LHS), 1); 3647 SDValue ExtSub = 3648 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, 3649 LHS, LHS, SubtractCarry), 0); 3650 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, 3651 ExtSub), 0); 3652 } 3653 } 3654 } 3655 3656 /// Produces a sign-extended result of comparing two 64-bit values according to 3657 /// the passed condition code. 
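/// As a rough sketch of the carry-based approach used here (illustration only,
/// not the exact encodings below): for the equality case, addic of
/// (xor %a, %b) with -1 produces a carry exactly when the xor is nonzero, and
/// subfe of that register with itself then yields CA - 1, i.e. -1 (all ones)
/// when %a == %b and 0 otherwise.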
3658 SDValue
3659 IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS,
3660 ISD::CondCode CC,
3661 int64_t RHSValue, SDLoc dl) {
3662 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 ||
3663 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Zext)
3664 return SDValue();
3665 bool IsRHSZero = RHSValue == 0;
3666 bool IsRHSOne = RHSValue == 1;
3667 bool IsRHSNegOne = RHSValue == -1LL;
3668 switch (CC) {
3669 default: return SDValue();
3670 case ISD::SETEQ: {
3671 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1)
3672 // (sext (setcc %a, %b, seteq)) -> (sube addc.reg, addc.reg, addc.CA)
3673 // {addcz.reg, addcz.CA} = (addcarry %a, -1)
3674 // (sext (setcc %a, 0, seteq)) -> (sube addcz.reg, addcz.reg, addcz.CA)
3675 SDValue AddInput = IsRHSZero ? LHS :
3676 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
3677 SDValue Addic =
3678 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue,
3679 AddInput, S->getI32Imm(~0U, dl)), 0);
3680 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, Addic,
3681 Addic, Addic.getValue(1)), 0);
3682 }
3683 case ISD::SETNE: {
3684 // {subfc.reg, subfc.CA} = (subcarry 0, (xor %a, %b))
3685 // (sext (setcc %a, %b, setne)) -> (sube subfc.reg, subfc.reg, subfc.CA)
3686 // {subfcz.reg, subfcz.CA} = (subcarry 0, %a)
3687 // (sext (setcc %a, 0, setne)) -> (sube subfcz.reg, subfcz.reg, subfcz.CA)
3688 SDValue Xor = IsRHSZero ? LHS :
3689 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0);
3690 SDValue SC =
3691 SDValue(CurDAG->getMachineNode(PPC::SUBFIC8, dl, MVT::i64, MVT::Glue,
3692 Xor, S->getI32Imm(0, dl)), 0);
3693 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, SC,
3694 SC, SC.getValue(1)), 0);
3695 }
3696 case ISD::SETGE: {
3697 // {subc.reg, subc.CA} = (subcarry %a, %b)
3698 // (sext (setcc %a, %b, setge)) ->
3699 // (- (adde (lshr %b, 63), (ashr %a, 63), subc.CA))
3700 // (sext (setcc %a, 0, setge)) -> (~ (ashr %a, 63))
3701 if (IsRHSZero)
3702 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
3703 std::swap(LHS, RHS);
3704 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3705 IsRHSZero = RHSConst && RHSConst->isZero();
3706 LLVM_FALLTHROUGH;
3707 }
3708 case ISD::SETLE: {
3709 // {subc.reg, subc.CA} = (subcarry %b, %a)
3710 // (sext (setcc %a, %b, setle)) ->
3711 // (- (adde (lshr %a, 63), (ashr %b, 63), subc.CA))
3712 // (sext (setcc %a, 0, setle)) -> (ashr (or %a, (add %a, -1)), 63)
3713 if (IsRHSZero)
3714 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
3715 SDValue ShiftR =
3716 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS,
3717 S->getI64Imm(63, dl)), 0);
3718 SDValue ShiftL =
3719 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS,
3720 S->getI64Imm(1, dl),
3721 S->getI64Imm(63, dl)), 0);
3722 SDValue SubtractCarry =
3723 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3724 LHS, RHS), 1);
3725 SDValue Adde =
3726 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue,
3727 ShiftR, ShiftL, SubtractCarry), 0);
3728 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, Adde), 0);
3729 }
3730 case ISD::SETGT: {
3731 // {subc.reg, subc.CA} = (subcarry %b, %a)
3732 // (sext (setcc %a, %b, setgt)) ->
3733 // -(xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1)
3734 // (sext (setcc %a, 0, setgt)) -> (ashr (nor (add %a, -1), %a), 63)
3735 if (IsRHSNegOne)
3736 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt);
3737 if
(IsRHSZero) {
3738 SDValue Add =
3739 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS,
3740 S->getI64Imm(-1, dl)), 0);
3741 SDValue Nor =
3742 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Add, LHS), 0);
3743 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Nor,
3744 S->getI64Imm(63, dl)), 0);
3745 }
3746 std::swap(LHS, RHS);
3747 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS);
3748 IsRHSZero = RHSConst && RHSConst->isZero();
3749 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1;
3750 LLVM_FALLTHROUGH;
3751 }
3752 case ISD::SETLT: {
3753 // {subc.reg, subc.CA} = (subcarry %a, %b)
3754 // (sext (setcc %a, %b, setlt)) ->
3755 // -(xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1)
3756 // (sext (setcc %a, 0, setlt)) -> (ashr %a, 63)
3757 if (IsRHSOne)
3758 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt);
3759 if (IsRHSZero) {
3760 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, LHS,
3761 S->getI64Imm(63, dl)), 0);
3762 }
3763 SDValue SRADINode =
3764 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64,
3765 LHS, S->getI64Imm(63, dl)), 0);
3766 SDValue SRDINode =
3767 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
3768 RHS, S->getI64Imm(1, dl),
3769 S->getI64Imm(63, dl)), 0);
3770 SDValue SUBFC8Carry =
3771 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3772 RHS, LHS), 1);
3773 SDValue ADDE8Node =
3774 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64,
3775 SRDINode, SRADINode, SUBFC8Carry), 0);
3776 SDValue XORI8Node =
3777 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64,
3778 ADDE8Node, S->getI64Imm(1, dl)), 0);
3779 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64,
3780 XORI8Node), 0);
3781 }
3782 case ISD::SETUGE:
3783 // {subc.reg, subc.CA} = (subcarry %a, %b)
3784 // (sext (setcc %a, %b, setuge)) -> ~(sube %b, %b, subc.CA)
3785 std::swap(LHS, RHS);
3786 LLVM_FALLTHROUGH;
3787 case ISD::SETULE: {
3788 // {subc.reg, subc.CA} = (subcarry %b, %a)
3789 // (sext (setcc %a, %b, setule)) -> ~(sube %a, %a, subc.CA)
3790 SDValue SubtractCarry =
3791 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3792 LHS, RHS), 1);
3793 SDValue ExtSub =
3794 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue, LHS,
3795 LHS, SubtractCarry), 0);
3796 return SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64,
3797 ExtSub, ExtSub), 0);
3798 }
3799 case ISD::SETUGT:
3800 // {subc.reg, subc.CA} = (subcarry %b, %a)
3801 // (sext (setcc %a, %b, setugt)) -> (sube %b, %b, subc.CA)
3802 std::swap(LHS, RHS);
3803 LLVM_FALLTHROUGH;
3804 case ISD::SETULT: {
3805 // {subc.reg, subc.CA} = (subcarry %a, %b)
3806 // (sext (setcc %a, %b, setult)) -> (sube %a, %a, subc.CA)
3807 SDValue SubCarry =
3808 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue,
3809 RHS, LHS), 1);
3810 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64,
3811 LHS, LHS, SubCarry), 0);
3812 }
3813 }
3814 }
3815
3816 /// Do all uses of this SDValue need the result in a GPR?
3817 /// This is meant to be used on values that have type i1 since
3818 /// it is somewhat meaningless to ask if values of other types
3819 /// should be kept in GPRs.
3820 static bool allUsesExtend(SDValue Compare, SelectionDAG *CurDAG) {
3821 assert(Compare.getOpcode() == ISD::SETCC &&
3822 "An ISD::SETCC node required here.");
3823
3824 // For values that have a single use, the caller should obviously already have
3825 // checked if that use is an extending use.
We check the other uses here. 3826 if (Compare.hasOneUse()) 3827 return true; 3828 // We want the value in a GPR if it is being extended, used for a select, or 3829 // used in logical operations. 3830 for (auto CompareUse : Compare.getNode()->uses()) 3831 if (CompareUse->getOpcode() != ISD::SIGN_EXTEND && 3832 CompareUse->getOpcode() != ISD::ZERO_EXTEND && 3833 CompareUse->getOpcode() != ISD::SELECT && 3834 !isLogicOp(CompareUse->getOpcode())) { 3835 OmittedForNonExtendUses++; 3836 return false; 3837 } 3838 return true; 3839 } 3840 3841 /// Returns an equivalent of a SETCC node but with the result the same width as 3842 /// the inputs. This can also be used for SELECT_CC if either the true or false 3843 /// values is a power of two while the other is zero. 3844 SDValue IntegerCompareEliminator::getSETCCInGPR(SDValue Compare, 3845 SetccInGPROpts ConvOpts) { 3846 assert((Compare.getOpcode() == ISD::SETCC || 3847 Compare.getOpcode() == ISD::SELECT_CC) && 3848 "An ISD::SETCC node required here."); 3849 3850 // Don't convert this comparison to a GPR sequence because there are uses 3851 // of the i1 result (i.e. uses that require the result in the CR). 3852 if ((Compare.getOpcode() == ISD::SETCC) && !allUsesExtend(Compare, CurDAG)) 3853 return SDValue(); 3854 3855 SDValue LHS = Compare.getOperand(0); 3856 SDValue RHS = Compare.getOperand(1); 3857 3858 // The condition code is operand 2 for SETCC and operand 4 for SELECT_CC. 3859 int CCOpNum = Compare.getOpcode() == ISD::SELECT_CC ? 4 : 2; 3860 ISD::CondCode CC = 3861 cast<CondCodeSDNode>(Compare.getOperand(CCOpNum))->get(); 3862 EVT InputVT = LHS.getValueType(); 3863 if (InputVT != MVT::i32 && InputVT != MVT::i64) 3864 return SDValue(); 3865 3866 if (ConvOpts == SetccInGPROpts::ZExtInvert || 3867 ConvOpts == SetccInGPROpts::SExtInvert) 3868 CC = ISD::getSetCCInverse(CC, InputVT); 3869 3870 bool Inputs32Bit = InputVT == MVT::i32; 3871 3872 SDLoc dl(Compare); 3873 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3874 int64_t RHSValue = RHSConst ? RHSConst->getSExtValue() : INT64_MAX; 3875 bool IsSext = ConvOpts == SetccInGPROpts::SExtOrig || 3876 ConvOpts == SetccInGPROpts::SExtInvert; 3877 3878 if (IsSext && Inputs32Bit) 3879 return get32BitSExtCompare(LHS, RHS, CC, RHSValue, dl); 3880 else if (Inputs32Bit) 3881 return get32BitZExtCompare(LHS, RHS, CC, RHSValue, dl); 3882 else if (IsSext) 3883 return get64BitSExtCompare(LHS, RHS, CC, RHSValue, dl); 3884 return get64BitZExtCompare(LHS, RHS, CC, RHSValue, dl); 3885 } 3886 3887 } // end anonymous namespace 3888 3889 bool PPCDAGToDAGISel::tryIntCompareInGPR(SDNode *N) { 3890 if (N->getValueType(0) != MVT::i32 && 3891 N->getValueType(0) != MVT::i64) 3892 return false; 3893 3894 // This optimization will emit code that assumes 64-bit registers 3895 // so we don't want to run it in 32-bit mode. Also don't run it 3896 // on functions that are not to be optimized. 3897 if (TM.getOptLevel() == CodeGenOpt::None || !TM.isPPC64()) 3898 return false; 3899 3900 // For POWER10, it is more profitable to use the set boolean extension 3901 // instructions rather than the integer compare elimination codegen. 3902 // Users can override this via the command line option, `--ppc-gpr-icmps`. 
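// (Note on the override: passing any explicit -ppc-gpr-icmps value makes
// getNumOccurrences() nonzero, so the ISA 3.1 early exit below is skipped and
// the GPR-based sequences are considered as usual.)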
3903 if (!(CmpInGPR.getNumOccurrences() > 0) && Subtarget->isISA3_1()) 3904 return false; 3905 3906 switch (N->getOpcode()) { 3907 default: break; 3908 case ISD::ZERO_EXTEND: 3909 case ISD::SIGN_EXTEND: 3910 case ISD::AND: 3911 case ISD::OR: 3912 case ISD::XOR: { 3913 IntegerCompareEliminator ICmpElim(CurDAG, this); 3914 if (SDNode *New = ICmpElim.Select(N)) { 3915 ReplaceNode(N, New); 3916 return true; 3917 } 3918 } 3919 } 3920 return false; 3921 } 3922 3923 bool PPCDAGToDAGISel::tryBitPermutation(SDNode *N) { 3924 if (N->getValueType(0) != MVT::i32 && 3925 N->getValueType(0) != MVT::i64) 3926 return false; 3927 3928 if (!UseBitPermRewriter) 3929 return false; 3930 3931 switch (N->getOpcode()) { 3932 default: break; 3933 case ISD::ROTL: 3934 case ISD::SHL: 3935 case ISD::SRL: 3936 case ISD::AND: 3937 case ISD::OR: { 3938 BitPermutationSelector BPS(CurDAG); 3939 if (SDNode *New = BPS.Select(N)) { 3940 ReplaceNode(N, New); 3941 return true; 3942 } 3943 return false; 3944 } 3945 } 3946 3947 return false; 3948 } 3949 3950 /// SelectCC - Select a comparison of the specified values with the specified 3951 /// condition code, returning the CR# of the expression. 3952 SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, 3953 const SDLoc &dl, SDValue Chain) { 3954 // Always select the LHS. 3955 unsigned Opc; 3956 3957 if (LHS.getValueType() == MVT::i32) { 3958 unsigned Imm; 3959 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 3960 if (isInt32Immediate(RHS, Imm)) { 3961 // SETEQ/SETNE comparison with 16-bit immediate, fold it. 3962 if (isUInt<16>(Imm)) 3963 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS, 3964 getI32Imm(Imm & 0xFFFF, dl)), 3965 0); 3966 // If this is a 16-bit signed immediate, fold it. 3967 if (isInt<16>((int)Imm)) 3968 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS, 3969 getI32Imm(Imm & 0xFFFF, dl)), 3970 0); 3971 3972 // For non-equality comparisons, the default code would materialize the 3973 // constant, then compare against it, like this: 3974 // lis r2, 4660 3975 // ori r2, r2, 22136 3976 // cmpw cr0, r3, r2 3977 // Since we are just comparing for equality, we can emit this instead: 3978 // xoris r0,r3,0x1234 3979 // cmplwi cr0,r0,0x5678 3980 // beq cr0,L6 3981 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS, dl, MVT::i32, LHS, 3982 getI32Imm(Imm >> 16, dl)), 0); 3983 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, Xor, 3984 getI32Imm(Imm & 0xFFFF, dl)), 0); 3985 } 3986 Opc = PPC::CMPLW; 3987 } else if (ISD::isUnsignedIntSetCC(CC)) { 3988 if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm)) 3989 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS, 3990 getI32Imm(Imm & 0xFFFF, dl)), 0); 3991 Opc = PPC::CMPLW; 3992 } else { 3993 int16_t SImm; 3994 if (isIntS16Immediate(RHS, SImm)) 3995 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS, 3996 getI32Imm((int)SImm & 0xFFFF, 3997 dl)), 3998 0); 3999 Opc = PPC::CMPW; 4000 } 4001 } else if (LHS.getValueType() == MVT::i64) { 4002 uint64_t Imm; 4003 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 4004 if (isInt64Immediate(RHS.getNode(), Imm)) { 4005 // SETEQ/SETNE comparison with 16-bit immediate, fold it. 4006 if (isUInt<16>(Imm)) 4007 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS, 4008 getI32Imm(Imm & 0xFFFF, dl)), 4009 0); 4010 // If this is a 16-bit signed immediate, fold it. 
4011 if (isInt<16>(Imm)) 4012 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS, 4013 getI32Imm(Imm & 0xFFFF, dl)), 4014 0); 4015 4016 // For non-equality comparisons, the default code would materialize the 4017 // constant, then compare against it, like this: 4018 // lis r2, 4660 4019 // ori r2, r2, 22136 4020 // cmpd cr0, r3, r2 4021 // Since we are just comparing for equality, we can emit this instead: 4022 // xoris r0,r3,0x1234 4023 // cmpldi cr0,r0,0x5678 4024 // beq cr0,L6 4025 if (isUInt<32>(Imm)) { 4026 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS, 4027 getI64Imm(Imm >> 16, dl)), 0); 4028 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor, 4029 getI64Imm(Imm & 0xFFFF, dl)), 4030 0); 4031 } 4032 } 4033 Opc = PPC::CMPLD; 4034 } else if (ISD::isUnsignedIntSetCC(CC)) { 4035 if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm)) 4036 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS, 4037 getI64Imm(Imm & 0xFFFF, dl)), 0); 4038 Opc = PPC::CMPLD; 4039 } else { 4040 int16_t SImm; 4041 if (isIntS16Immediate(RHS, SImm)) 4042 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS, 4043 getI64Imm(SImm & 0xFFFF, dl)), 4044 0); 4045 Opc = PPC::CMPD; 4046 } 4047 } else if (LHS.getValueType() == MVT::f32) { 4048 if (Subtarget->hasSPE()) { 4049 switch (CC) { 4050 default: 4051 case ISD::SETEQ: 4052 case ISD::SETNE: 4053 Opc = PPC::EFSCMPEQ; 4054 break; 4055 case ISD::SETLT: 4056 case ISD::SETGE: 4057 case ISD::SETOLT: 4058 case ISD::SETOGE: 4059 case ISD::SETULT: 4060 case ISD::SETUGE: 4061 Opc = PPC::EFSCMPLT; 4062 break; 4063 case ISD::SETGT: 4064 case ISD::SETLE: 4065 case ISD::SETOGT: 4066 case ISD::SETOLE: 4067 case ISD::SETUGT: 4068 case ISD::SETULE: 4069 Opc = PPC::EFSCMPGT; 4070 break; 4071 } 4072 } else 4073 Opc = PPC::FCMPUS; 4074 } else if (LHS.getValueType() == MVT::f64) { 4075 if (Subtarget->hasSPE()) { 4076 switch (CC) { 4077 default: 4078 case ISD::SETEQ: 4079 case ISD::SETNE: 4080 Opc = PPC::EFDCMPEQ; 4081 break; 4082 case ISD::SETLT: 4083 case ISD::SETGE: 4084 case ISD::SETOLT: 4085 case ISD::SETOGE: 4086 case ISD::SETULT: 4087 case ISD::SETUGE: 4088 Opc = PPC::EFDCMPLT; 4089 break; 4090 case ISD::SETGT: 4091 case ISD::SETLE: 4092 case ISD::SETOGT: 4093 case ISD::SETOLE: 4094 case ISD::SETUGT: 4095 case ISD::SETULE: 4096 Opc = PPC::EFDCMPGT; 4097 break; 4098 } 4099 } else 4100 Opc = Subtarget->hasVSX() ? PPC::XSCMPUDP : PPC::FCMPUD; 4101 } else { 4102 assert(LHS.getValueType() == MVT::f128 && "Unknown vt!"); 4103 assert(Subtarget->hasP9Vector() && "XSCMPUQP requires Power9 Vector"); 4104 Opc = PPC::XSCMPUQP; 4105 } 4106 if (Chain) 4107 return SDValue( 4108 CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::Other, LHS, RHS, Chain), 4109 0); 4110 else 4111 return SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i32, LHS, RHS), 0); 4112 } 4113 4114 static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC, const EVT &VT, 4115 const PPCSubtarget *Subtarget) { 4116 // For SPE instructions, the result is in GT bit of the CR 4117 bool UseSPE = Subtarget->hasSPE() && VT.isFloatingPoint(); 4118 4119 switch (CC) { 4120 case ISD::SETUEQ: 4121 case ISD::SETONE: 4122 case ISD::SETOLE: 4123 case ISD::SETOGE: 4124 llvm_unreachable("Should be lowered by legalize!"); 4125 default: llvm_unreachable("Unknown condition!"); 4126 case ISD::SETOEQ: 4127 case ISD::SETEQ: 4128 return UseSPE ? PPC::PRED_GT : PPC::PRED_EQ; 4129 case ISD::SETUNE: 4130 case ISD::SETNE: 4131 return UseSPE ? 
PPC::PRED_LE : PPC::PRED_NE; 4132 case ISD::SETOLT: 4133 case ISD::SETLT: 4134 return UseSPE ? PPC::PRED_GT : PPC::PRED_LT; 4135 case ISD::SETULE: 4136 case ISD::SETLE: 4137 return PPC::PRED_LE; 4138 case ISD::SETOGT: 4139 case ISD::SETGT: 4140 return PPC::PRED_GT; 4141 case ISD::SETUGE: 4142 case ISD::SETGE: 4143 return UseSPE ? PPC::PRED_LE : PPC::PRED_GE; 4144 case ISD::SETO: return PPC::PRED_NU; 4145 case ISD::SETUO: return PPC::PRED_UN; 4146 // These two are invalid for floating point. Assume we have int. 4147 case ISD::SETULT: return PPC::PRED_LT; 4148 case ISD::SETUGT: return PPC::PRED_GT; 4149 } 4150 } 4151 4152 /// getCRIdxForSetCC - Return the index of the condition register field 4153 /// associated with the SetCC condition, and whether or not the field is 4154 /// treated as inverted. That is, lt = 0; ge = 0 inverted. 4155 static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) { 4156 Invert = false; 4157 switch (CC) { 4158 default: llvm_unreachable("Unknown condition!"); 4159 case ISD::SETOLT: 4160 case ISD::SETLT: return 0; // Bit #0 = SETOLT 4161 case ISD::SETOGT: 4162 case ISD::SETGT: return 1; // Bit #1 = SETOGT 4163 case ISD::SETOEQ: 4164 case ISD::SETEQ: return 2; // Bit #2 = SETOEQ 4165 case ISD::SETUO: return 3; // Bit #3 = SETUO 4166 case ISD::SETUGE: 4167 case ISD::SETGE: Invert = true; return 0; // !Bit #0 = SETUGE 4168 case ISD::SETULE: 4169 case ISD::SETLE: Invert = true; return 1; // !Bit #1 = SETULE 4170 case ISD::SETUNE: 4171 case ISD::SETNE: Invert = true; return 2; // !Bit #2 = SETUNE 4172 case ISD::SETO: Invert = true; return 3; // !Bit #3 = SETO 4173 case ISD::SETUEQ: 4174 case ISD::SETOGE: 4175 case ISD::SETOLE: 4176 case ISD::SETONE: 4177 llvm_unreachable("Invalid branch code: should be expanded by legalize"); 4178 // These are invalid for floating point. Assume integer. 4179 case ISD::SETULT: return 0; 4180 case ISD::SETUGT: return 1; 4181 } 4182 } 4183 4184 // getVCmpInst: return the vector compare instruction for the specified 4185 // vector type and condition code. Since this is for altivec specific code, 4186 // only support the altivec types (v16i8, v8i16, v4i32, v2i64, v1i128, 4187 // and v4f32). 4188 static unsigned int getVCmpInst(MVT VecVT, ISD::CondCode CC, 4189 bool HasVSX, bool &Swap, bool &Negate) { 4190 Swap = false; 4191 Negate = false; 4192 4193 if (VecVT.isFloatingPoint()) { 4194 /* Handle some cases by swapping input operands. */ 4195 switch (CC) { 4196 case ISD::SETLE: CC = ISD::SETGE; Swap = true; break; 4197 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break; 4198 case ISD::SETOLE: CC = ISD::SETOGE; Swap = true; break; 4199 case ISD::SETOLT: CC = ISD::SETOGT; Swap = true; break; 4200 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break; 4201 case ISD::SETUGT: CC = ISD::SETULT; Swap = true; break; 4202 default: break; 4203 } 4204 /* Handle some cases by negating the result. */ 4205 switch (CC) { 4206 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break; 4207 case ISD::SETUNE: CC = ISD::SETOEQ; Negate = true; break; 4208 case ISD::SETULE: CC = ISD::SETOGT; Negate = true; break; 4209 case ISD::SETULT: CC = ISD::SETOGE; Negate = true; break; 4210 default: break; 4211 } 4212 /* We have instructions implementing the remaining cases. */ 4213 switch (CC) { 4214 case ISD::SETEQ: 4215 case ISD::SETOEQ: 4216 if (VecVT == MVT::v4f32) 4217 return HasVSX ? 
PPC::XVCMPEQSP : PPC::VCMPEQFP; 4218 else if (VecVT == MVT::v2f64) 4219 return PPC::XVCMPEQDP; 4220 break; 4221 case ISD::SETGT: 4222 case ISD::SETOGT: 4223 if (VecVT == MVT::v4f32) 4224 return HasVSX ? PPC::XVCMPGTSP : PPC::VCMPGTFP; 4225 else if (VecVT == MVT::v2f64) 4226 return PPC::XVCMPGTDP; 4227 break; 4228 case ISD::SETGE: 4229 case ISD::SETOGE: 4230 if (VecVT == MVT::v4f32) 4231 return HasVSX ? PPC::XVCMPGESP : PPC::VCMPGEFP; 4232 else if (VecVT == MVT::v2f64) 4233 return PPC::XVCMPGEDP; 4234 break; 4235 default: 4236 break; 4237 } 4238 llvm_unreachable("Invalid floating-point vector compare condition"); 4239 } else { 4240 /* Handle some cases by swapping input operands. */ 4241 switch (CC) { 4242 case ISD::SETGE: CC = ISD::SETLE; Swap = true; break; 4243 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break; 4244 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break; 4245 case ISD::SETULT: CC = ISD::SETUGT; Swap = true; break; 4246 default: break; 4247 } 4248 /* Handle some cases by negating the result. */ 4249 switch (CC) { 4250 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break; 4251 case ISD::SETUNE: CC = ISD::SETUEQ; Negate = true; break; 4252 case ISD::SETLE: CC = ISD::SETGT; Negate = true; break; 4253 case ISD::SETULE: CC = ISD::SETUGT; Negate = true; break; 4254 default: break; 4255 } 4256 /* We have instructions implementing the remaining cases. */ 4257 switch (CC) { 4258 case ISD::SETEQ: 4259 case ISD::SETUEQ: 4260 if (VecVT == MVT::v16i8) 4261 return PPC::VCMPEQUB; 4262 else if (VecVT == MVT::v8i16) 4263 return PPC::VCMPEQUH; 4264 else if (VecVT == MVT::v4i32) 4265 return PPC::VCMPEQUW; 4266 else if (VecVT == MVT::v2i64) 4267 return PPC::VCMPEQUD; 4268 else if (VecVT == MVT::v1i128) 4269 return PPC::VCMPEQUQ; 4270 break; 4271 case ISD::SETGT: 4272 if (VecVT == MVT::v16i8) 4273 return PPC::VCMPGTSB; 4274 else if (VecVT == MVT::v8i16) 4275 return PPC::VCMPGTSH; 4276 else if (VecVT == MVT::v4i32) 4277 return PPC::VCMPGTSW; 4278 else if (VecVT == MVT::v2i64) 4279 return PPC::VCMPGTSD; 4280 else if (VecVT == MVT::v1i128) 4281 return PPC::VCMPGTSQ; 4282 break; 4283 case ISD::SETUGT: 4284 if (VecVT == MVT::v16i8) 4285 return PPC::VCMPGTUB; 4286 else if (VecVT == MVT::v8i16) 4287 return PPC::VCMPGTUH; 4288 else if (VecVT == MVT::v4i32) 4289 return PPC::VCMPGTUW; 4290 else if (VecVT == MVT::v2i64) 4291 return PPC::VCMPGTUD; 4292 else if (VecVT == MVT::v1i128) 4293 return PPC::VCMPGTUQ; 4294 break; 4295 default: 4296 break; 4297 } 4298 llvm_unreachable("Invalid integer vector compare condition"); 4299 } 4300 } 4301 4302 bool PPCDAGToDAGISel::trySETCC(SDNode *N) { 4303 SDLoc dl(N); 4304 unsigned Imm; 4305 bool IsStrict = N->isStrictFPOpcode(); 4306 ISD::CondCode CC = 4307 cast<CondCodeSDNode>(N->getOperand(IsStrict ? 3 : 2))->get(); 4308 EVT PtrVT = 4309 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout()); 4310 bool isPPC64 = (PtrVT == MVT::i64); 4311 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); 4312 4313 SDValue LHS = N->getOperand(IsStrict ? 1 : 0); 4314 SDValue RHS = N->getOperand(IsStrict ? 2 : 1); 4315 4316 if (!IsStrict && !Subtarget->useCRBits() && isInt32Immediate(RHS, Imm)) { 4317 // We can codegen setcc op, imm very efficiently compared to a brcond. 4318 // Check for those cases here. 
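// (One example of the kind of trick used below, as a sketch of the arithmetic:
// for `seteq op, 0`, cntlzw yields 32 exactly when op is zero and at most 31
// otherwise, so shifting the count right by 5 bits leaves exactly the i32
// value of the setcc.)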
4319 // setcc op, 0 4320 if (Imm == 0) { 4321 SDValue Op = LHS; 4322 switch (CC) { 4323 default: break; 4324 case ISD::SETEQ: { 4325 Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0); 4326 SDValue Ops[] = { Op, getI32Imm(27, dl), getI32Imm(5, dl), 4327 getI32Imm(31, dl) }; 4328 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4329 return true; 4330 } 4331 case ISD::SETNE: { 4332 if (isPPC64) break; 4333 SDValue AD = 4334 SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4335 Op, getI32Imm(~0U, dl)), 0); 4336 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, AD.getValue(1)); 4337 return true; 4338 } 4339 case ISD::SETLT: { 4340 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl), 4341 getI32Imm(31, dl) }; 4342 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4343 return true; 4344 } 4345 case ISD::SETGT: { 4346 SDValue T = 4347 SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Op), 0); 4348 T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0); 4349 SDValue Ops[] = { T, getI32Imm(1, dl), getI32Imm(31, dl), 4350 getI32Imm(31, dl) }; 4351 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4352 return true; 4353 } 4354 } 4355 } else if (Imm == ~0U) { // setcc op, -1 4356 SDValue Op = LHS; 4357 switch (CC) { 4358 default: break; 4359 case ISD::SETEQ: 4360 if (isPPC64) break; 4361 Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4362 Op, getI32Imm(1, dl)), 0); 4363 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, 4364 SDValue(CurDAG->getMachineNode(PPC::LI, dl, 4365 MVT::i32, 4366 getI32Imm(0, dl)), 4367 0), Op.getValue(1)); 4368 return true; 4369 case ISD::SETNE: { 4370 if (isPPC64) break; 4371 Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0); 4372 SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4373 Op, getI32Imm(~0U, dl)); 4374 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0), Op, 4375 SDValue(AD, 1)); 4376 return true; 4377 } 4378 case ISD::SETLT: { 4379 SDValue AD = SDValue(CurDAG->getMachineNode(PPC::ADDI, dl, MVT::i32, Op, 4380 getI32Imm(1, dl)), 0); 4381 SDValue AN = SDValue(CurDAG->getMachineNode(PPC::AND, dl, MVT::i32, AD, 4382 Op), 0); 4383 SDValue Ops[] = { AN, getI32Imm(1, dl), getI32Imm(31, dl), 4384 getI32Imm(31, dl) }; 4385 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4386 return true; 4387 } 4388 case ISD::SETGT: { 4389 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl), 4390 getI32Imm(31, dl) }; 4391 Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 4392 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1, dl)); 4393 return true; 4394 } 4395 } 4396 } 4397 } 4398 4399 // Altivec Vector compare instructions do not set any CR register by default and 4400 // vector compare operations return the same type as the operands. 4401 if (!IsStrict && LHS.getValueType().isVector()) { 4402 if (Subtarget->hasSPE()) 4403 return false; 4404 4405 EVT VecVT = LHS.getValueType(); 4406 bool Swap, Negate; 4407 unsigned int VCmpInst = 4408 getVCmpInst(VecVT.getSimpleVT(), CC, Subtarget->hasVSX(), Swap, Negate); 4409 if (Swap) 4410 std::swap(LHS, RHS); 4411 4412 EVT ResVT = VecVT.changeVectorElementTypeToInteger(); 4413 if (Negate) { 4414 SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, ResVT, LHS, RHS), 0); 4415 CurDAG->SelectNodeTo(N, Subtarget->hasVSX() ? 
PPC::XXLNOR : PPC::VNOR, 4416 ResVT, VCmp, VCmp); 4417 return true; 4418 } 4419 4420 CurDAG->SelectNodeTo(N, VCmpInst, ResVT, LHS, RHS); 4421 return true; 4422 } 4423 4424 if (Subtarget->useCRBits()) 4425 return false; 4426 4427 bool Inv; 4428 unsigned Idx = getCRIdxForSetCC(CC, Inv); 4429 SDValue CCReg = SelectCC(LHS, RHS, CC, dl, Chain); 4430 if (IsStrict) 4431 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), CCReg.getValue(1)); 4432 SDValue IntCR; 4433 4434 // SPE e*cmp* instructions only set the 'gt' bit, so hard-code that 4435 // The correct compare instruction is already set by SelectCC() 4436 if (Subtarget->hasSPE() && LHS.getValueType().isFloatingPoint()) { 4437 Idx = 1; 4438 } 4439 4440 // Force the ccreg into CR7. 4441 SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32); 4442 4443 SDValue InFlag(nullptr, 0); // Null incoming flag value. 4444 CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg, 4445 InFlag).getValue(1); 4446 4447 IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg, 4448 CCReg), 0); 4449 4450 SDValue Ops[] = { IntCR, getI32Imm((32 - (3 - Idx)) & 31, dl), 4451 getI32Imm(31, dl), getI32Imm(31, dl) }; 4452 if (!Inv) { 4453 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4454 return true; 4455 } 4456 4457 // Get the specified bit. 4458 SDValue Tmp = 4459 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 4460 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl)); 4461 return true; 4462 } 4463 4464 /// Does this node represent a load/store node whose address can be represented 4465 /// with a register plus an immediate that's a multiple of \p Val: 4466 bool PPCDAGToDAGISel::isOffsetMultipleOf(SDNode *N, unsigned Val) const { 4467 LoadSDNode *LDN = dyn_cast<LoadSDNode>(N); 4468 StoreSDNode *STN = dyn_cast<StoreSDNode>(N); 4469 SDValue AddrOp; 4470 if (LDN) 4471 AddrOp = LDN->getOperand(1); 4472 else if (STN) 4473 AddrOp = STN->getOperand(2); 4474 4475 // If the address points a frame object or a frame object with an offset, 4476 // we need to check the object alignment. 4477 short Imm = 0; 4478 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>( 4479 AddrOp.getOpcode() == ISD::ADD ? AddrOp.getOperand(0) : 4480 AddrOp)) { 4481 // If op0 is a frame index that is under aligned, we can't do it either, 4482 // because it is translated to r31 or r1 + slot + offset. We won't know the 4483 // slot number until the stack frame is finalized. 4484 const MachineFrameInfo &MFI = CurDAG->getMachineFunction().getFrameInfo(); 4485 unsigned SlotAlign = MFI.getObjectAlign(FI->getIndex()).value(); 4486 if ((SlotAlign % Val) != 0) 4487 return false; 4488 4489 // If we have an offset, we need further check on the offset. 4490 if (AddrOp.getOpcode() != ISD::ADD) 4491 return true; 4492 } 4493 4494 if (AddrOp.getOpcode() == ISD::ADD) 4495 return isIntS16Immediate(AddrOp.getOperand(1), Imm) && !(Imm % Val); 4496 4497 // If the address comes from the outside, the offset will be zero. 4498 return AddrOp.getOpcode() == ISD::CopyFromReg; 4499 } 4500 4501 void PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) { 4502 // Transfer memoperands. 
4503 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); 4504 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp}); 4505 } 4506 4507 static bool mayUseP9Setb(SDNode *N, const ISD::CondCode &CC, SelectionDAG *DAG, 4508 bool &NeedSwapOps, bool &IsUnCmp) { 4509 4510 assert(N->getOpcode() == ISD::SELECT_CC && "Expecting a SELECT_CC here."); 4511 4512 SDValue LHS = N->getOperand(0); 4513 SDValue RHS = N->getOperand(1); 4514 SDValue TrueRes = N->getOperand(2); 4515 SDValue FalseRes = N->getOperand(3); 4516 ConstantSDNode *TrueConst = dyn_cast<ConstantSDNode>(TrueRes); 4517 if (!TrueConst || (N->getSimpleValueType(0) != MVT::i64 && 4518 N->getSimpleValueType(0) != MVT::i32)) 4519 return false; 4520 4521 // We are looking for any of: 4522 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, cc2)), cc1) 4523 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, cc2)), cc1) 4524 // (select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, 1, -1, cc2), seteq) 4525 // (select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, -1, 1, cc2), seteq) 4526 int64_t TrueResVal = TrueConst->getSExtValue(); 4527 if ((TrueResVal < -1 || TrueResVal > 1) || 4528 (TrueResVal == -1 && FalseRes.getOpcode() != ISD::ZERO_EXTEND) || 4529 (TrueResVal == 1 && FalseRes.getOpcode() != ISD::SIGN_EXTEND) || 4530 (TrueResVal == 0 && 4531 (FalseRes.getOpcode() != ISD::SELECT_CC || CC != ISD::SETEQ))) 4532 return false; 4533 4534 SDValue SetOrSelCC = FalseRes.getOpcode() == ISD::SELECT_CC 4535 ? FalseRes 4536 : FalseRes.getOperand(0); 4537 bool InnerIsSel = SetOrSelCC.getOpcode() == ISD::SELECT_CC; 4538 if (SetOrSelCC.getOpcode() != ISD::SETCC && 4539 SetOrSelCC.getOpcode() != ISD::SELECT_CC) 4540 return false; 4541 4542 // Without this setb optimization, the outer SELECT_CC will be manually 4543 // selected to SELECT_CC_I4/SELECT_CC_I8 Pseudo, then expand-isel-pseudos pass 4544 // transforms pseudo instruction to isel instruction. When there are more than 4545 // one use for result like zext/sext, with current optimization we only see 4546 // isel is replaced by setb but can't see any significant gain. Since 4547 // setb has longer latency than original isel, we should avoid this. Another 4548 // point is that setb requires comparison always kept, it can break the 4549 // opportunity to get the comparison away if we have in future. 4550 if (!SetOrSelCC.hasOneUse() || (!InnerIsSel && !FalseRes.hasOneUse())) 4551 return false; 4552 4553 SDValue InnerLHS = SetOrSelCC.getOperand(0); 4554 SDValue InnerRHS = SetOrSelCC.getOperand(1); 4555 ISD::CondCode InnerCC = 4556 cast<CondCodeSDNode>(SetOrSelCC.getOperand(InnerIsSel ? 4 : 2))->get(); 4557 // If the inner comparison is a select_cc, make sure the true/false values are 4558 // 1/-1 and canonicalize it if needed. 4559 if (InnerIsSel) { 4560 ConstantSDNode *SelCCTrueConst = 4561 dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(2)); 4562 ConstantSDNode *SelCCFalseConst = 4563 dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(3)); 4564 if (!SelCCTrueConst || !SelCCFalseConst) 4565 return false; 4566 int64_t SelCCTVal = SelCCTrueConst->getSExtValue(); 4567 int64_t SelCCFVal = SelCCFalseConst->getSExtValue(); 4568 // The values must be -1/1 (requiring a swap) or 1/-1. 
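    // For example, once the equal case is taken by the outer select_cc,
    // (select_cc a, b, -1, 1, setlt) yields the same value as
    // (select_cc b, a, 1, -1, setlt), so swapping the inner operands is enough
    // to canonicalize the -1/1 form back to the 1/-1 form handled below.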
4569 if (SelCCTVal == -1 && SelCCFVal == 1) { 4570 std::swap(InnerLHS, InnerRHS); 4571 } else if (SelCCTVal != 1 || SelCCFVal != -1) 4572 return false; 4573 } 4574 4575 // Canonicalize unsigned case 4576 if (InnerCC == ISD::SETULT || InnerCC == ISD::SETUGT) { 4577 IsUnCmp = true; 4578 InnerCC = (InnerCC == ISD::SETULT) ? ISD::SETLT : ISD::SETGT; 4579 } 4580 4581 bool InnerSwapped = false; 4582 if (LHS == InnerRHS && RHS == InnerLHS) 4583 InnerSwapped = true; 4584 else if (LHS != InnerLHS || RHS != InnerRHS) 4585 return false; 4586 4587 switch (CC) { 4588 // (select_cc lhs, rhs, 0, \ 4589 // (select_cc [lr]hs, [lr]hs, 1, -1, setlt/setgt), seteq) 4590 case ISD::SETEQ: 4591 if (!InnerIsSel) 4592 return false; 4593 if (InnerCC != ISD::SETLT && InnerCC != ISD::SETGT) 4594 return false; 4595 NeedSwapOps = (InnerCC == ISD::SETGT) ? InnerSwapped : !InnerSwapped; 4596 break; 4597 4598 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?lt) 4599 // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setu?lt) 4600 // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setu?lt) 4601 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?lt) 4602 // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setu?lt) 4603 // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setlt)), setu?lt) 4604 case ISD::SETULT: 4605 if (!IsUnCmp && InnerCC != ISD::SETNE) 4606 return false; 4607 IsUnCmp = true; 4608 LLVM_FALLTHROUGH; 4609 case ISD::SETLT: 4610 if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETGT && !InnerSwapped) || 4611 (InnerCC == ISD::SETLT && InnerSwapped)) 4612 NeedSwapOps = (TrueResVal == 1); 4613 else 4614 return false; 4615 break; 4616 4617 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?gt) 4618 // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setlt)), setu?gt) 4619 // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setgt)), setu?gt) 4620 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?gt) 4621 // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setu?gt) 4622 // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setgt)), setu?gt) 4623 case ISD::SETUGT: 4624 if (!IsUnCmp && InnerCC != ISD::SETNE) 4625 return false; 4626 IsUnCmp = true; 4627 LLVM_FALLTHROUGH; 4628 case ISD::SETGT: 4629 if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETLT && !InnerSwapped) || 4630 (InnerCC == ISD::SETGT && InnerSwapped)) 4631 NeedSwapOps = (TrueResVal == -1); 4632 else 4633 return false; 4634 break; 4635 4636 default: 4637 return false; 4638 } 4639 4640 LLVM_DEBUG(dbgs() << "Found a node that can be lowered to a SETB: "); 4641 LLVM_DEBUG(N->dump()); 4642 4643 return true; 4644 } 4645 4646 // Return true if it's a software square-root/divide operand. 4647 static bool isSWTestOp(SDValue N) { 4648 if (N.getOpcode() == PPCISD::FTSQRT) 4649 return true; 4650 if (N.getNumOperands() < 1 || !isa<ConstantSDNode>(N.getOperand(0))) 4651 return false; 4652 switch (N.getConstantOperandVal(0)) { 4653 case Intrinsic::ppc_vsx_xvtdivdp: 4654 case Intrinsic::ppc_vsx_xvtdivsp: 4655 case Intrinsic::ppc_vsx_xvtsqrtdp: 4656 case Intrinsic::ppc_vsx_xvtsqrtsp: 4657 return true; 4658 } 4659 return false; 4660 } 4661 4662 bool PPCDAGToDAGISel::tryFoldSWTestBRCC(SDNode *N) { 4663 assert(N->getOpcode() == ISD::BR_CC && "ISD::BR_CC is expected."); 4664 // We are looking for following patterns, where `truncate to i1` actually has 4665 // the same semantic with `and 1`. 
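  // Each mask value below picks out a single bit of the 4-bit CR-style field
  // computed by the software test op, so the branch can be predicated directly
  // on that CR bit via the listed PRED_* code instead of first moving the
  // field into a GPR: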
4666 // (br_cc seteq, (truncateToi1 SWTestOp), 0) -> (BCC PRED_NU, SWTestOp) 4667 // (br_cc seteq, (and SWTestOp, 2), 0) -> (BCC PRED_NE, SWTestOp) 4668 // (br_cc seteq, (and SWTestOp, 4), 0) -> (BCC PRED_LE, SWTestOp) 4669 // (br_cc seteq, (and SWTestOp, 8), 0) -> (BCC PRED_GE, SWTestOp) 4670 // (br_cc setne, (truncateToi1 SWTestOp), 0) -> (BCC PRED_UN, SWTestOp) 4671 // (br_cc setne, (and SWTestOp, 2), 0) -> (BCC PRED_EQ, SWTestOp) 4672 // (br_cc setne, (and SWTestOp, 4), 0) -> (BCC PRED_GT, SWTestOp) 4673 // (br_cc setne, (and SWTestOp, 8), 0) -> (BCC PRED_LT, SWTestOp) 4674 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 4675 if (CC != ISD::SETEQ && CC != ISD::SETNE) 4676 return false; 4677 4678 SDValue CmpRHS = N->getOperand(3); 4679 if (!isa<ConstantSDNode>(CmpRHS) || 4680 cast<ConstantSDNode>(CmpRHS)->getSExtValue() != 0) 4681 return false; 4682 4683 SDValue CmpLHS = N->getOperand(2); 4684 if (CmpLHS.getNumOperands() < 1 || !isSWTestOp(CmpLHS.getOperand(0))) 4685 return false; 4686 4687 unsigned PCC = 0; 4688 bool IsCCNE = CC == ISD::SETNE; 4689 if (CmpLHS.getOpcode() == ISD::AND && 4690 isa<ConstantSDNode>(CmpLHS.getOperand(1))) 4691 switch (CmpLHS.getConstantOperandVal(1)) { 4692 case 1: 4693 PCC = IsCCNE ? PPC::PRED_UN : PPC::PRED_NU; 4694 break; 4695 case 2: 4696 PCC = IsCCNE ? PPC::PRED_EQ : PPC::PRED_NE; 4697 break; 4698 case 4: 4699 PCC = IsCCNE ? PPC::PRED_GT : PPC::PRED_LE; 4700 break; 4701 case 8: 4702 PCC = IsCCNE ? PPC::PRED_LT : PPC::PRED_GE; 4703 break; 4704 default: 4705 return false; 4706 } 4707 else if (CmpLHS.getOpcode() == ISD::TRUNCATE && 4708 CmpLHS.getValueType() == MVT::i1) 4709 PCC = IsCCNE ? PPC::PRED_UN : PPC::PRED_NU; 4710 4711 if (PCC) { 4712 SDLoc dl(N); 4713 SDValue Ops[] = {getI32Imm(PCC, dl), CmpLHS.getOperand(0), N->getOperand(4), 4714 N->getOperand(0)}; 4715 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 4716 return true; 4717 } 4718 return false; 4719 } 4720 4721 bool PPCDAGToDAGISel::tryAsSingleRLWINM(SDNode *N) { 4722 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4723 unsigned Imm; 4724 if (!isInt32Immediate(N->getOperand(1), Imm)) 4725 return false; 4726 4727 SDLoc dl(N); 4728 SDValue Val = N->getOperand(0); 4729 unsigned SH, MB, ME; 4730 // If this is an and of a value rotated between 0 and 31 bits and then and'd 4731 // with a mask, emit rlwinm 4732 if (isRotateAndMask(Val.getNode(), Imm, false, SH, MB, ME)) { 4733 Val = Val.getOperand(0); 4734 SDValue Ops[] = {Val, getI32Imm(SH, dl), getI32Imm(MB, dl), 4735 getI32Imm(ME, dl)}; 4736 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4737 return true; 4738 } 4739 4740 // If this is just a masked value where the input is not handled, and 4741 // is not a rotate-left (handled by a pattern in the .td file), emit rlwinm 4742 if (isRunOfOnes(Imm, MB, ME) && Val.getOpcode() != ISD::ROTL) { 4743 SDValue Ops[] = {Val, getI32Imm(0, dl), getI32Imm(MB, dl), 4744 getI32Imm(ME, dl)}; 4745 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4746 return true; 4747 } 4748 4749 // AND X, 0 -> 0, not "rlwinm 32". 
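  // (A rlwinm mask always keeps at least one bit, since the MB/ME pair cannot
  // encode an empty mask, so the all-zero result is produced by simply
  // forwarding the zero constant operand instead.)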
4750 if (Imm == 0) { 4751 ReplaceUses(SDValue(N, 0), N->getOperand(1)); 4752 return true; 4753 } 4754 4755 return false; 4756 } 4757 4758 bool PPCDAGToDAGISel::tryAsSingleRLWINM8(SDNode *N) { 4759 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4760 uint64_t Imm64; 4761 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64)) 4762 return false; 4763 4764 unsigned MB, ME; 4765 if (isRunOfOnes64(Imm64, MB, ME) && MB >= 32 && MB <= ME) { 4766 // MB ME 4767 // +----------------------+ 4768 // |xxxxxxxxxxx00011111000| 4769 // +----------------------+ 4770 // 0 32 64 4771 // We can only do it if the MB is larger than 32 and MB <= ME 4772 // as RLWINM will replace the contents of [0 - 32) with [32 - 64) even 4773 // we didn't rotate it. 4774 SDLoc dl(N); 4775 SDValue Ops[] = {N->getOperand(0), getI64Imm(0, dl), getI64Imm(MB - 32, dl), 4776 getI64Imm(ME - 32, dl)}; 4777 CurDAG->SelectNodeTo(N, PPC::RLWINM8, MVT::i64, Ops); 4778 return true; 4779 } 4780 4781 return false; 4782 } 4783 4784 bool PPCDAGToDAGISel::tryAsPairOfRLDICL(SDNode *N) { 4785 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4786 uint64_t Imm64; 4787 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64)) 4788 return false; 4789 4790 // Do nothing if it is 16-bit imm as the pattern in the .td file handle 4791 // it well with "andi.". 4792 if (isUInt<16>(Imm64)) 4793 return false; 4794 4795 SDLoc Loc(N); 4796 SDValue Val = N->getOperand(0); 4797 4798 // Optimized with two rldicl's as follows: 4799 // Add missing bits on left to the mask and check that the mask is a 4800 // wrapped run of ones, i.e. 4801 // Change pattern |0001111100000011111111| 4802 // to |1111111100000011111111|. 4803 unsigned NumOfLeadingZeros = countLeadingZeros(Imm64); 4804 if (NumOfLeadingZeros != 0) 4805 Imm64 |= maskLeadingOnes<uint64_t>(NumOfLeadingZeros); 4806 4807 unsigned MB, ME; 4808 if (!isRunOfOnes64(Imm64, MB, ME)) 4809 return false; 4810 4811 // ME MB MB-ME+63 4812 // +----------------------+ +----------------------+ 4813 // |1111111100000011111111| -> |0000001111111111111111| 4814 // +----------------------+ +----------------------+ 4815 // 0 63 0 63 4816 // There are ME + 1 ones on the left and (MB - ME + 63) & 63 zeros in between. 4817 unsigned OnesOnLeft = ME + 1; 4818 unsigned ZerosInBetween = (MB - ME + 63) & 63; 4819 // Rotate left by OnesOnLeft (so leading ones are now trailing ones) and clear 4820 // on the left the bits that are already zeros in the mask. 4821 Val = SDValue(CurDAG->getMachineNode(PPC::RLDICL, Loc, MVT::i64, Val, 4822 getI64Imm(OnesOnLeft, Loc), 4823 getI64Imm(ZerosInBetween, Loc)), 4824 0); 4825 // MB-ME+63 ME MB 4826 // +----------------------+ +----------------------+ 4827 // |0000001111111111111111| -> |0001111100000011111111| 4828 // +----------------------+ +----------------------+ 4829 // 0 63 0 63 4830 // Rotate back by 64 - OnesOnLeft to undo previous rotate. Then clear on the 4831 // left the number of ones we previously added. 4832 SDValue Ops[] = {Val, getI64Imm(64 - OnesOnLeft, Loc), 4833 getI64Imm(NumOfLeadingZeros, Loc)}; 4834 CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops); 4835 return true; 4836 } 4837 4838 bool PPCDAGToDAGISel::tryAsSingleRLWIMI(SDNode *N) { 4839 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4840 unsigned Imm; 4841 if (!isInt32Immediate(N->getOperand(1), Imm)) 4842 return false; 4843 4844 SDValue Val = N->getOperand(0); 4845 unsigned Imm2; 4846 // ISD::OR doesn't get all the bitfield insertion fun. 
4847 // (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) might be a 4848 // bitfield insert. 4849 if (Val.getOpcode() != ISD::OR || !isInt32Immediate(Val.getOperand(1), Imm2)) 4850 return false; 4851 4852 // The idea here is to check whether this is equivalent to: 4853 // (c1 & m) | (x & ~m) 4854 // where m is a run-of-ones mask. The logic here is that, for each bit in 4855 // c1 and c2: 4856 // - if both are 1, then the output will be 1. 4857 // - if both are 0, then the output will be 0. 4858 // - if the bit in c1 is 0, and the bit in c2 is 1, then the output will 4859 // come from x. 4860 // - if the bit in c1 is 1, and the bit in c2 is 0, then the output will 4861 // be 0. 4862 // If that last condition is never the case, then we can form m from the 4863 // bits that are the same between c1 and c2. 4864 unsigned MB, ME; 4865 if (isRunOfOnes(~(Imm ^ Imm2), MB, ME) && !(~Imm & Imm2)) { 4866 SDLoc dl(N); 4867 SDValue Ops[] = {Val.getOperand(0), Val.getOperand(1), getI32Imm(0, dl), 4868 getI32Imm(MB, dl), getI32Imm(ME, dl)}; 4869 ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops)); 4870 return true; 4871 } 4872 4873 return false; 4874 } 4875 4876 bool PPCDAGToDAGISel::tryAsSingleRLDICL(SDNode *N) { 4877 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4878 uint64_t Imm64; 4879 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || !isMask_64(Imm64)) 4880 return false; 4881 4882 // If this is a 64-bit zero-extension mask, emit rldicl. 4883 unsigned MB = 64 - countTrailingOnes(Imm64); 4884 unsigned SH = 0; 4885 unsigned Imm; 4886 SDValue Val = N->getOperand(0); 4887 SDLoc dl(N); 4888 4889 if (Val.getOpcode() == ISD::ANY_EXTEND) { 4890 auto Op0 = Val.getOperand(0); 4891 if (Op0.getOpcode() == ISD::SRL && 4892 isInt32Immediate(Op0.getOperand(1).getNode(), Imm) && Imm <= MB) { 4893 4894 auto ResultType = Val.getNode()->getValueType(0); 4895 auto ImDef = CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, ResultType); 4896 SDValue IDVal(ImDef, 0); 4897 4898 Val = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, ResultType, 4899 IDVal, Op0.getOperand(0), 4900 getI32Imm(1, dl)), 4901 0); 4902 SH = 64 - Imm; 4903 } 4904 } 4905 4906 // If the operand is a logical right shift, we can fold it into this 4907 // instruction: rldicl(rldicl(x, 64-n, n), 0, mb) -> rldicl(x, 64-n, mb) 4908 // for n <= mb. The right shift is really a left rotate followed by a 4909 // mask, and this mask is a more-restrictive sub-mask of the mask implied 4910 // by the shift. 4911 if (Val.getOpcode() == ISD::SRL && 4912 isInt32Immediate(Val.getOperand(1).getNode(), Imm) && Imm <= MB) { 4913 assert(Imm < 64 && "Illegal shift amount"); 4914 Val = Val.getOperand(0); 4915 SH = 64 - Imm; 4916 } 4917 4918 SDValue Ops[] = {Val, getI32Imm(SH, dl), getI32Imm(MB, dl)}; 4919 CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops); 4920 return true; 4921 } 4922 4923 bool PPCDAGToDAGISel::tryAsSingleRLDICR(SDNode *N) { 4924 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4925 uint64_t Imm64; 4926 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || 4927 !isMask_64(~Imm64)) 4928 return false; 4929 4930 // If this is a negated 64-bit zero-extension mask, 4931 // i.e. the immediate is a sequence of ones from most significant side 4932 // and all zero for reminder, we should use rldicr. 
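  // For example (illustrative constant), Imm64 == 0xFFFFFFFF00000000 gives
  // countTrailingOnes(~Imm64) == 32, so MB below becomes 31 and we emit
  // rldicr rA, rS, 0, 31, which keeps the 32 most-significant bits and clears
  // the rest, i.e. exactly the requested mask.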
4933 unsigned MB = 63 - countTrailingOnes(~Imm64); 4934 unsigned SH = 0; 4935 SDLoc dl(N); 4936 SDValue Ops[] = {N->getOperand(0), getI32Imm(SH, dl), getI32Imm(MB, dl)}; 4937 CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, Ops); 4938 return true; 4939 } 4940 4941 bool PPCDAGToDAGISel::tryAsSingleRLDIMI(SDNode *N) { 4942 assert(N->getOpcode() == ISD::OR && "ISD::OR SDNode expected"); 4943 uint64_t Imm64; 4944 unsigned MB, ME; 4945 SDValue N0 = N->getOperand(0); 4946 4947 // We won't get fewer instructions if the imm is 32-bit integer. 4948 // rldimi requires the imm to have consecutive ones with both sides zero. 4949 // Also, make sure the first Op has only one use, otherwise this may increase 4950 // register pressure since rldimi is destructive. 4951 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || 4952 isUInt<32>(Imm64) || !isRunOfOnes64(Imm64, MB, ME) || !N0.hasOneUse()) 4953 return false; 4954 4955 unsigned SH = 63 - ME; 4956 SDLoc Dl(N); 4957 // Use select64Imm for making LI instr instead of directly putting Imm64 4958 SDValue Ops[] = { 4959 N->getOperand(0), 4960 SDValue(selectI64Imm(CurDAG, getI64Imm(-1, Dl).getNode()), 0), 4961 getI32Imm(SH, Dl), getI32Imm(MB, Dl)}; 4962 CurDAG->SelectNodeTo(N, PPC::RLDIMI, MVT::i64, Ops); 4963 return true; 4964 } 4965 4966 // Select - Convert the specified operand from a target-independent to a 4967 // target-specific node if it hasn't already been changed. 4968 void PPCDAGToDAGISel::Select(SDNode *N) { 4969 SDLoc dl(N); 4970 if (N->isMachineOpcode()) { 4971 N->setNodeId(-1); 4972 return; // Already selected. 4973 } 4974 4975 // In case any misguided DAG-level optimizations form an ADD with a 4976 // TargetConstant operand, crash here instead of miscompiling (by selecting 4977 // an r+r add instead of some kind of r+i add). 4978 if (N->getOpcode() == ISD::ADD && 4979 N->getOperand(1).getOpcode() == ISD::TargetConstant) 4980 llvm_unreachable("Invalid ADD with TargetConstant operand"); 4981 4982 // Try matching complex bit permutations before doing anything else. 4983 if (tryBitPermutation(N)) 4984 return; 4985 4986 // Try to emit integer compares as GPR-only sequences (i.e. no use of CR). 4987 if (tryIntCompareInGPR(N)) 4988 return; 4989 4990 switch (N->getOpcode()) { 4991 default: break; 4992 4993 case ISD::Constant: 4994 if (N->getValueType(0) == MVT::i64) { 4995 ReplaceNode(N, selectI64Imm(CurDAG, N)); 4996 return; 4997 } 4998 break; 4999 5000 case ISD::INTRINSIC_VOID: { 5001 auto IntrinsicID = N->getConstantOperandVal(1); 5002 if (IntrinsicID == Intrinsic::ppc_tdw || IntrinsicID == Intrinsic::ppc_tw) { 5003 unsigned Opcode = IntrinsicID == Intrinsic::ppc_tdw ? PPC::TDI : PPC::TWI; 5004 SDValue Ops[] = {N->getOperand(4), N->getOperand(2), N->getOperand(3)}; 5005 int16_t SImmOperand2; 5006 int16_t SImmOperand3; 5007 int16_t SImmOperand4; 5008 bool isOperand2IntS16Immediate = 5009 isIntS16Immediate(N->getOperand(2), SImmOperand2); 5010 bool isOperand3IntS16Immediate = 5011 isIntS16Immediate(N->getOperand(3), SImmOperand3); 5012 // We will emit PPC::TD or PPC::TW if the 2nd and 3rd operands are reg + 5013 // reg or imm + imm. The imm + imm form will be optimized to either an 5014 // unconditional trap or a nop in a later pass. 5015 if (isOperand2IntS16Immediate == isOperand3IntS16Immediate) 5016 Opcode = IntrinsicID == Intrinsic::ppc_tdw ? PPC::TD : PPC::TW; 5017 else if (isOperand3IntS16Immediate) 5018 // The 2nd and 3rd operands are reg + imm. 
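        // (For instance, if operand 3 is the constant 100, this selects
        // directly to twi/tdi TO, rA, 100 and no register is needed to hold
        // the constant.)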
5019 Ops[2] = getI32Imm(int(SImmOperand3) & 0xFFFF, dl); 5020 else { 5021 // The 2nd and 3rd operands are imm + reg. 5022 bool isOperand4IntS16Immediate = 5023 isIntS16Immediate(N->getOperand(4), SImmOperand4); 5024 (void)isOperand4IntS16Immediate; 5025 assert(isOperand4IntS16Immediate && 5026 "The 4th operand is not an Immediate"); 5027 // We need to flip the condition immediate TO. 5028 int16_t TO = int(SImmOperand4) & 0x1F; 5029 // We swap the first and second bit of TO if they are not same. 5030 if ((TO & 0x1) != ((TO & 0x2) >> 1)) 5031 TO = (TO & 0x1) ? TO + 1 : TO - 1; 5032 // We swap the fourth and fifth bit of TO if they are not same. 5033 if ((TO & 0x8) != ((TO & 0x10) >> 1)) 5034 TO = (TO & 0x8) ? TO + 8 : TO - 8; 5035 Ops[0] = getI32Imm(TO, dl); 5036 Ops[1] = N->getOperand(3); 5037 Ops[2] = getI32Imm(int(SImmOperand2) & 0xFFFF, dl); 5038 } 5039 CurDAG->SelectNodeTo(N, Opcode, MVT::Other, Ops); 5040 return; 5041 } 5042 break; 5043 } 5044 5045 case ISD::INTRINSIC_WO_CHAIN: { 5046 // We emit the PPC::FSELS instruction here because of type conflicts with 5047 // the comparison operand. The FSELS instruction is defined to use an 8-byte 5048 // comparison like the FSELD version. The fsels intrinsic takes a 4-byte 5049 // value for the comparison. When selecting through a .td file, a type 5050 // error is raised. Must check this first so we never break on the 5051 // !Subtarget->isISA3_1() check. 5052 if (N->getConstantOperandVal(0) == Intrinsic::ppc_fsels) { 5053 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3)}; 5054 CurDAG->SelectNodeTo(N, PPC::FSELS, MVT::f32, Ops); 5055 return; 5056 } 5057 5058 if (!Subtarget->isISA3_1()) 5059 break; 5060 unsigned Opcode = 0; 5061 switch (N->getConstantOperandVal(0)) { 5062 default: 5063 break; 5064 case Intrinsic::ppc_altivec_vstribr_p: 5065 Opcode = PPC::VSTRIBR_rec; 5066 break; 5067 case Intrinsic::ppc_altivec_vstribl_p: 5068 Opcode = PPC::VSTRIBL_rec; 5069 break; 5070 case Intrinsic::ppc_altivec_vstrihr_p: 5071 Opcode = PPC::VSTRIHR_rec; 5072 break; 5073 case Intrinsic::ppc_altivec_vstrihl_p: 5074 Opcode = PPC::VSTRIHL_rec; 5075 break; 5076 } 5077 if (!Opcode) 5078 break; 5079 5080 // Generate the appropriate vector string isolate intrinsic to match. 5081 EVT VTs[] = {MVT::v16i8, MVT::Glue}; 5082 SDValue VecStrOp = 5083 SDValue(CurDAG->getMachineNode(Opcode, dl, VTs, N->getOperand(2)), 0); 5084 // Vector string isolate instructions update the EQ bit of CR6. 5085 // Generate a SETBC instruction to extract the bit and place it in a GPR. 
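    // The selected sequence is roughly (register names illustrative):
    //   vstribr. v2, v3
    //   setbc    r3, 4*cr6+eq
    // i.e. the record form sets CR6 and setbc turns its EQ bit into a 0/1
    // value in a GPR.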
5086 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_eq, dl, MVT::i32); 5087 SDValue CR6Reg = CurDAG->getRegister(PPC::CR6, MVT::i32); 5088 SDValue CRBit = SDValue( 5089 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i1, 5090 CR6Reg, SubRegIdx, VecStrOp.getValue(1)), 5091 0); 5092 CurDAG->SelectNodeTo(N, PPC::SETBC, MVT::i32, CRBit); 5093 return; 5094 } 5095 5096 case ISD::SETCC: 5097 case ISD::STRICT_FSETCC: 5098 case ISD::STRICT_FSETCCS: 5099 if (trySETCC(N)) 5100 return; 5101 break; 5102 // These nodes will be transformed into GETtlsADDR32 node, which 5103 // later becomes BL_TLS __tls_get_addr(sym at tlsgd)@PLT 5104 case PPCISD::ADDI_TLSLD_L_ADDR: 5105 case PPCISD::ADDI_TLSGD_L_ADDR: { 5106 const Module *Mod = MF->getFunction().getParent(); 5107 if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) != MVT::i32 || 5108 !Subtarget->isSecurePlt() || !Subtarget->isTargetELF() || 5109 Mod->getPICLevel() == PICLevel::SmallPIC) 5110 break; 5111 // Attach global base pointer on GETtlsADDR32 node in order to 5112 // generate secure plt code for TLS symbols. 5113 getGlobalBaseReg(); 5114 } break; 5115 case PPCISD::CALL: { 5116 if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) != MVT::i32 || 5117 !TM.isPositionIndependent() || !Subtarget->isSecurePlt() || 5118 !Subtarget->isTargetELF()) 5119 break; 5120 5121 SDValue Op = N->getOperand(1); 5122 5123 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5124 if (GA->getTargetFlags() == PPCII::MO_PLT) 5125 getGlobalBaseReg(); 5126 } 5127 else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) { 5128 if (ES->getTargetFlags() == PPCII::MO_PLT) 5129 getGlobalBaseReg(); 5130 } 5131 } 5132 break; 5133 5134 case PPCISD::GlobalBaseReg: 5135 ReplaceNode(N, getGlobalBaseReg()); 5136 return; 5137 5138 case ISD::FrameIndex: 5139 selectFrameIndex(N, N); 5140 return; 5141 5142 case PPCISD::MFOCRF: { 5143 SDValue InFlag = N->getOperand(1); 5144 ReplaceNode(N, CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, 5145 N->getOperand(0), InFlag)); 5146 return; 5147 } 5148 5149 case PPCISD::READ_TIME_BASE: 5150 ReplaceNode(N, CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32, 5151 MVT::Other, N->getOperand(0))); 5152 return; 5153 5154 case PPCISD::SRA_ADDZE: { 5155 SDValue N0 = N->getOperand(0); 5156 SDValue ShiftAmt = 5157 CurDAG->getTargetConstant(*cast<ConstantSDNode>(N->getOperand(1))-> 5158 getConstantIntValue(), dl, 5159 N->getValueType(0)); 5160 if (N->getValueType(0) == MVT::i64) { 5161 SDNode *Op = 5162 CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue, 5163 N0, ShiftAmt); 5164 CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64, SDValue(Op, 0), 5165 SDValue(Op, 1)); 5166 return; 5167 } else { 5168 assert(N->getValueType(0) == MVT::i32 && 5169 "Expecting i64 or i32 in PPCISD::SRA_ADDZE"); 5170 SDNode *Op = 5171 CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue, 5172 N0, ShiftAmt); 5173 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, SDValue(Op, 0), 5174 SDValue(Op, 1)); 5175 return; 5176 } 5177 } 5178 5179 case ISD::STORE: { 5180 // Change TLS initial-exec D-form stores to X-form stores. 5181 StoreSDNode *ST = cast<StoreSDNode>(N); 5182 if (EnableTLSOpt && Subtarget->isELFv2ABI() && 5183 ST->getAddressingMode() != ISD::PRE_INC) 5184 if (tryTLSXFormStore(ST)) 5185 return; 5186 break; 5187 } 5188 case ISD::LOAD: { 5189 // Handle preincrement loads. 
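    // For example, an unextended i32 pre-increment load selects to
    // lwzu rD, d(rA), which loads from rA + d and also writes the updated
    // address back into rA, so the selected node produces both the loaded
    // value and the new pointer.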
5190 LoadSDNode *LD = cast<LoadSDNode>(N); 5191 EVT LoadedVT = LD->getMemoryVT(); 5192 5193 // Normal loads are handled by code generated from the .td file. 5194 if (LD->getAddressingMode() != ISD::PRE_INC) { 5195 // Change TLS initial-exec D-form loads to X-form loads. 5196 if (EnableTLSOpt && Subtarget->isELFv2ABI()) 5197 if (tryTLSXFormLoad(LD)) 5198 return; 5199 break; 5200 } 5201 5202 SDValue Offset = LD->getOffset(); 5203 if (Offset.getOpcode() == ISD::TargetConstant || 5204 Offset.getOpcode() == ISD::TargetGlobalAddress) { 5205 5206 unsigned Opcode; 5207 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD; 5208 if (LD->getValueType(0) != MVT::i64) { 5209 // Handle PPC32 integer and normal FP loads. 5210 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5211 switch (LoadedVT.getSimpleVT().SimpleTy) { 5212 default: llvm_unreachable("Invalid PPC load type!"); 5213 case MVT::f64: Opcode = PPC::LFDU; break; 5214 case MVT::f32: Opcode = PPC::LFSU; break; 5215 case MVT::i32: Opcode = PPC::LWZU; break; 5216 case MVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break; 5217 case MVT::i1: 5218 case MVT::i8: Opcode = PPC::LBZU; break; 5219 } 5220 } else { 5221 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); 5222 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5223 switch (LoadedVT.getSimpleVT().SimpleTy) { 5224 default: llvm_unreachable("Invalid PPC load type!"); 5225 case MVT::i64: Opcode = PPC::LDU; break; 5226 case MVT::i32: Opcode = PPC::LWZU8; break; 5227 case MVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break; 5228 case MVT::i1: 5229 case MVT::i8: Opcode = PPC::LBZU8; break; 5230 } 5231 } 5232 5233 SDValue Chain = LD->getChain(); 5234 SDValue Base = LD->getBasePtr(); 5235 SDValue Ops[] = { Offset, Base, Chain }; 5236 SDNode *MN = CurDAG->getMachineNode( 5237 Opcode, dl, LD->getValueType(0), 5238 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops); 5239 transferMemOperands(N, MN); 5240 ReplaceNode(N, MN); 5241 return; 5242 } else { 5243 unsigned Opcode; 5244 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD; 5245 if (LD->getValueType(0) != MVT::i64) { 5246 // Handle PPC32 integer and normal FP loads. 5247 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5248 switch (LoadedVT.getSimpleVT().SimpleTy) { 5249 default: llvm_unreachable("Invalid PPC load type!"); 5250 case MVT::f64: Opcode = PPC::LFDUX; break; 5251 case MVT::f32: Opcode = PPC::LFSUX; break; 5252 case MVT::i32: Opcode = PPC::LWZUX; break; 5253 case MVT::i16: Opcode = isSExt ? PPC::LHAUX : PPC::LHZUX; break; 5254 case MVT::i1: 5255 case MVT::i8: Opcode = PPC::LBZUX; break; 5256 } 5257 } else { 5258 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); 5259 assert((!isSExt || LoadedVT == MVT::i16 || LoadedVT == MVT::i32) && 5260 "Invalid sext update load"); 5261 switch (LoadedVT.getSimpleVT().SimpleTy) { 5262 default: llvm_unreachable("Invalid PPC load type!"); 5263 case MVT::i64: Opcode = PPC::LDUX; break; 5264 case MVT::i32: Opcode = isSExt ? PPC::LWAUX : PPC::LWZUX8; break; 5265 case MVT::i16: Opcode = isSExt ? 
PPC::LHAUX8 : PPC::LHZUX8; break; 5266 case MVT::i1: 5267 case MVT::i8: Opcode = PPC::LBZUX8; break; 5268 } 5269 } 5270 5271 SDValue Chain = LD->getChain(); 5272 SDValue Base = LD->getBasePtr(); 5273 SDValue Ops[] = { Base, Offset, Chain }; 5274 SDNode *MN = CurDAG->getMachineNode( 5275 Opcode, dl, LD->getValueType(0), 5276 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops); 5277 transferMemOperands(N, MN); 5278 ReplaceNode(N, MN); 5279 return; 5280 } 5281 } 5282 5283 case ISD::AND: 5284 // If this is an 'and' with a mask, try to emit rlwinm/rldicl/rldicr 5285 if (tryAsSingleRLWINM(N) || tryAsSingleRLWIMI(N) || tryAsSingleRLDICL(N) || 5286 tryAsSingleRLDICR(N) || tryAsSingleRLWINM8(N) || tryAsPairOfRLDICL(N)) 5287 return; 5288 5289 // Other cases are autogenerated. 5290 break; 5291 case ISD::OR: { 5292 if (N->getValueType(0) == MVT::i32) 5293 if (tryBitfieldInsert(N)) 5294 return; 5295 5296 int16_t Imm; 5297 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex && 5298 isIntS16Immediate(N->getOperand(1), Imm)) { 5299 KnownBits LHSKnown = CurDAG->computeKnownBits(N->getOperand(0)); 5300 5301 // If this is equivalent to an add, then we can fold it with the 5302 // FrameIndex calculation. 5303 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) { 5304 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm); 5305 return; 5306 } 5307 } 5308 5309 // If this is 'or' against an imm with consecutive ones and both sides zero, 5310 // try to emit rldimi 5311 if (tryAsSingleRLDIMI(N)) 5312 return; 5313 5314 // OR with a 32-bit immediate can be handled by ori + oris 5315 // without creating an immediate in a GPR. 5316 uint64_t Imm64 = 0; 5317 bool IsPPC64 = Subtarget->isPPC64(); 5318 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) && 5319 (Imm64 & ~0xFFFFFFFFuLL) == 0) { 5320 // If ImmHi (ImmHi) is zero, only one ori (oris) is generated later. 5321 uint64_t ImmHi = Imm64 >> 16; 5322 uint64_t ImmLo = Imm64 & 0xFFFF; 5323 if (ImmHi != 0 && ImmLo != 0) { 5324 SDNode *Lo = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, 5325 N->getOperand(0), 5326 getI16Imm(ImmLo, dl)); 5327 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)}; 5328 CurDAG->SelectNodeTo(N, PPC::ORIS8, MVT::i64, Ops1); 5329 return; 5330 } 5331 } 5332 5333 // Other cases are autogenerated. 5334 break; 5335 } 5336 case ISD::XOR: { 5337 // XOR with a 32-bit immediate can be handled by xori + xoris 5338 // without creating an immediate in a GPR. 5339 uint64_t Imm64 = 0; 5340 bool IsPPC64 = Subtarget->isPPC64(); 5341 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) && 5342 (Imm64 & ~0xFFFFFFFFuLL) == 0) { 5343 // If ImmHi (ImmHi) is zero, only one xori (xoris) is generated later. 
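      // For example (illustrative constant), Imm64 == 0x12345678 becomes
      //   xori  rT, rS, 0x5678
      //   xoris rT, rT, 0x1234
      // which together XOR in the full 32-bit value without a separate li/lis.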
5344 uint64_t ImmHi = Imm64 >> 16; 5345 uint64_t ImmLo = Imm64 & 0xFFFF; 5346 if (ImmHi != 0 && ImmLo != 0) { 5347 SDNode *Lo = CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, 5348 N->getOperand(0), 5349 getI16Imm(ImmLo, dl)); 5350 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)}; 5351 CurDAG->SelectNodeTo(N, PPC::XORIS8, MVT::i64, Ops1); 5352 return; 5353 } 5354 } 5355 5356 break; 5357 } 5358 case ISD::ADD: { 5359 int16_t Imm; 5360 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex && 5361 isIntS16Immediate(N->getOperand(1), Imm)) { 5362 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm); 5363 return; 5364 } 5365 5366 break; 5367 } 5368 case ISD::SHL: { 5369 unsigned Imm, SH, MB, ME; 5370 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) && 5371 isRotateAndMask(N, Imm, true, SH, MB, ME)) { 5372 SDValue Ops[] = { N->getOperand(0).getOperand(0), 5373 getI32Imm(SH, dl), getI32Imm(MB, dl), 5374 getI32Imm(ME, dl) }; 5375 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 5376 return; 5377 } 5378 5379 // Other cases are autogenerated. 5380 break; 5381 } 5382 case ISD::SRL: { 5383 unsigned Imm, SH, MB, ME; 5384 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) && 5385 isRotateAndMask(N, Imm, true, SH, MB, ME)) { 5386 SDValue Ops[] = { N->getOperand(0).getOperand(0), 5387 getI32Imm(SH, dl), getI32Imm(MB, dl), 5388 getI32Imm(ME, dl) }; 5389 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 5390 return; 5391 } 5392 5393 // Other cases are autogenerated. 5394 break; 5395 } 5396 case ISD::MUL: { 5397 SDValue Op1 = N->getOperand(1); 5398 if (Op1.getOpcode() != ISD::Constant || Op1.getValueType() != MVT::i64) 5399 break; 5400 5401 // If the multiplier fits int16, we can handle it with mulli. 5402 int64_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue(); 5403 unsigned Shift = countTrailingZeros<uint64_t>(Imm); 5404 if (isInt<16>(Imm) || !Shift) 5405 break; 5406 5407 // If the shifted value fits int16, we can do this transformation: 5408 // (mul X, c1 << c2) -> (rldicr (mulli X, c1) c2). We do this in ISEL due to 5409 // DAGCombiner prefers (shl (mul X, c1), c2) -> (mul X, c1 << c2). 5410 uint64_t ImmSh = Imm >> Shift; 5411 if (isInt<16>(ImmSh)) { 5412 uint64_t SextImm = SignExtend64(ImmSh & 0xFFFF, 16); 5413 SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64); 5414 SDNode *MulNode = CurDAG->getMachineNode(PPC::MULLI8, dl, MVT::i64, 5415 N->getOperand(0), SDImm); 5416 CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, SDValue(MulNode, 0), 5417 getI32Imm(Shift, dl), getI32Imm(63 - Shift, dl)); 5418 return; 5419 } 5420 break; 5421 } 5422 // FIXME: Remove this once the ANDI glue bug is fixed: 5423 case PPCISD::ANDI_rec_1_EQ_BIT: 5424 case PPCISD::ANDI_rec_1_GT_BIT: { 5425 if (!ANDIGlueBug) 5426 break; 5427 5428 EVT InVT = N->getOperand(0).getValueType(); 5429 assert((InVT == MVT::i64 || InVT == MVT::i32) && 5430 "Invalid input type for ANDI_rec_1_EQ_BIT"); 5431 5432 unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDI8_rec : PPC::ANDI_rec; 5433 SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue, 5434 N->getOperand(0), 5435 CurDAG->getTargetConstant(1, dl, InVT)), 5436 0); 5437 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); 5438 SDValue SRIdxVal = CurDAG->getTargetConstant( 5439 N->getOpcode() == PPCISD::ANDI_rec_1_EQ_BIT ? 
PPC::sub_eq : PPC::sub_gt, 5440 dl, MVT::i32); 5441 5442 CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1, CR0Reg, 5443 SRIdxVal, SDValue(AndI.getNode(), 1) /* glue */); 5444 return; 5445 } 5446 case ISD::SELECT_CC: { 5447 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 5448 EVT PtrVT = 5449 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout()); 5450 bool isPPC64 = (PtrVT == MVT::i64); 5451 5452 // If this is a select of i1 operands, we'll pattern match it. 5453 if (Subtarget->useCRBits() && N->getOperand(0).getValueType() == MVT::i1) 5454 break; 5455 5456 if (Subtarget->isISA3_0() && Subtarget->isPPC64()) { 5457 bool NeedSwapOps = false; 5458 bool IsUnCmp = false; 5459 if (mayUseP9Setb(N, CC, CurDAG, NeedSwapOps, IsUnCmp)) { 5460 SDValue LHS = N->getOperand(0); 5461 SDValue RHS = N->getOperand(1); 5462 if (NeedSwapOps) 5463 std::swap(LHS, RHS); 5464 5465 // Make use of SelectCC to generate the comparison to set CR bits, for 5466 // equality comparisons having one literal operand, SelectCC probably 5467 // doesn't need to materialize the whole literal and just use xoris to 5468 // check it first, it leads the following comparison result can't 5469 // exactly represent GT/LT relationship. So to avoid this we specify 5470 // SETGT/SETUGT here instead of SETEQ. 5471 SDValue GenCC = 5472 SelectCC(LHS, RHS, IsUnCmp ? ISD::SETUGT : ISD::SETGT, dl); 5473 CurDAG->SelectNodeTo( 5474 N, N->getSimpleValueType(0) == MVT::i64 ? PPC::SETB8 : PPC::SETB, 5475 N->getValueType(0), GenCC); 5476 NumP9Setb++; 5477 return; 5478 } 5479 } 5480 5481 // Handle the setcc cases here. select_cc lhs, 0, 1, 0, cc 5482 if (!isPPC64) 5483 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1))) 5484 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N->getOperand(2))) 5485 if (ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N->getOperand(3))) 5486 if (N1C->isZero() && N3C->isZero() && N2C->getZExtValue() == 1ULL && 5487 CC == ISD::SETNE && 5488 // FIXME: Implement this optzn for PPC64. 5489 N->getValueType(0) == MVT::i32) { 5490 SDNode *Tmp = 5491 CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 5492 N->getOperand(0), getI32Imm(~0U, dl)); 5493 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(Tmp, 0), 5494 N->getOperand(0), SDValue(Tmp, 1)); 5495 return; 5496 } 5497 5498 SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl); 5499 5500 if (N->getValueType(0) == MVT::i1) { 5501 // An i1 select is: (c & t) | (!c & f). 5502 bool Inv; 5503 unsigned Idx = getCRIdxForSetCC(CC, Inv); 5504 5505 unsigned SRI; 5506 switch (Idx) { 5507 default: llvm_unreachable("Invalid CC index"); 5508 case 0: SRI = PPC::sub_lt; break; 5509 case 1: SRI = PPC::sub_gt; break; 5510 case 2: SRI = PPC::sub_eq; break; 5511 case 3: SRI = PPC::sub_un; break; 5512 } 5513 5514 SDValue CCBit = CurDAG->getTargetExtractSubreg(SRI, dl, MVT::i1, CCReg); 5515 5516 SDValue NotCCBit(CurDAG->getMachineNode(PPC::CRNOR, dl, MVT::i1, 5517 CCBit, CCBit), 0); 5518 SDValue C = Inv ? NotCCBit : CCBit, 5519 NotC = Inv ? 
CCBit : NotCCBit; 5520 5521 SDValue CAndT(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1, 5522 C, N->getOperand(2)), 0); 5523 SDValue NotCAndF(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1, 5524 NotC, N->getOperand(3)), 0); 5525 5526 CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF); 5527 return; 5528 } 5529 5530 unsigned BROpc = 5531 getPredicateForSetCC(CC, N->getOperand(0).getValueType(), Subtarget); 5532 5533 unsigned SelectCCOp; 5534 if (N->getValueType(0) == MVT::i32) 5535 SelectCCOp = PPC::SELECT_CC_I4; 5536 else if (N->getValueType(0) == MVT::i64) 5537 SelectCCOp = PPC::SELECT_CC_I8; 5538 else if (N->getValueType(0) == MVT::f32) { 5539 if (Subtarget->hasP8Vector()) 5540 SelectCCOp = PPC::SELECT_CC_VSSRC; 5541 else if (Subtarget->hasSPE()) 5542 SelectCCOp = PPC::SELECT_CC_SPE4; 5543 else 5544 SelectCCOp = PPC::SELECT_CC_F4; 5545 } else if (N->getValueType(0) == MVT::f64) { 5546 if (Subtarget->hasVSX()) 5547 SelectCCOp = PPC::SELECT_CC_VSFRC; 5548 else if (Subtarget->hasSPE()) 5549 SelectCCOp = PPC::SELECT_CC_SPE; 5550 else 5551 SelectCCOp = PPC::SELECT_CC_F8; 5552 } else if (N->getValueType(0) == MVT::f128) 5553 SelectCCOp = PPC::SELECT_CC_F16; 5554 else if (Subtarget->hasSPE()) 5555 SelectCCOp = PPC::SELECT_CC_SPE; 5556 else if (N->getValueType(0) == MVT::v2f64 || 5557 N->getValueType(0) == MVT::v2i64) 5558 SelectCCOp = PPC::SELECT_CC_VSRC; 5559 else 5560 SelectCCOp = PPC::SELECT_CC_VRRC; 5561 5562 SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3), 5563 getI32Imm(BROpc, dl) }; 5564 CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops); 5565 return; 5566 } 5567 case ISD::VECTOR_SHUFFLE: 5568 if (Subtarget->hasVSX() && (N->getValueType(0) == MVT::v2f64 || 5569 N->getValueType(0) == MVT::v2i64)) { 5570 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 5571 5572 SDValue Op1 = N->getOperand(SVN->getMaskElt(0) < 2 ? 0 : 1), 5573 Op2 = N->getOperand(SVN->getMaskElt(1) < 2 ? 0 : 1); 5574 unsigned DM[2]; 5575 5576 for (int i = 0; i < 2; ++i) 5577 if (SVN->getMaskElt(i) <= 0 || SVN->getMaskElt(i) == 2) 5578 DM[i] = 0; 5579 else 5580 DM[i] = 1; 5581 5582 if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 && 5583 Op1.getOpcode() == ISD::SCALAR_TO_VECTOR && 5584 isa<LoadSDNode>(Op1.getOperand(0))) { 5585 LoadSDNode *LD = cast<LoadSDNode>(Op1.getOperand(0)); 5586 SDValue Base, Offset; 5587 5588 if (LD->isUnindexed() && LD->hasOneUse() && Op1.hasOneUse() && 5589 (LD->getMemoryVT() == MVT::f64 || 5590 LD->getMemoryVT() == MVT::i64) && 5591 SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) { 5592 SDValue Chain = LD->getChain(); 5593 SDValue Ops[] = { Base, Offset, Chain }; 5594 MachineMemOperand *MemOp = LD->getMemOperand(); 5595 SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX, 5596 N->getValueType(0), Ops); 5597 CurDAG->setNodeMemRefs(cast<MachineSDNode>(NewN), {MemOp}); 5598 return; 5599 } 5600 } 5601 5602 // For little endian, we must swap the input operands and adjust 5603 // the mask elements (reverse and invert them). 
5604 if (Subtarget->isLittleEndian()) { 5605 std::swap(Op1, Op2); 5606 unsigned tmp = DM[0]; 5607 DM[0] = 1 - DM[1]; 5608 DM[1] = 1 - tmp; 5609 } 5610 5611 SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), dl, 5612 MVT::i32); 5613 SDValue Ops[] = { Op1, Op2, DMV }; 5614 CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops); 5615 return; 5616 } 5617 5618 break; 5619 case PPCISD::BDNZ: 5620 case PPCISD::BDZ: { 5621 bool IsPPC64 = Subtarget->isPPC64(); 5622 SDValue Ops[] = { N->getOperand(1), N->getOperand(0) }; 5623 CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ 5624 ? (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ) 5625 : (IsPPC64 ? PPC::BDZ8 : PPC::BDZ), 5626 MVT::Other, Ops); 5627 return; 5628 } 5629 case PPCISD::COND_BRANCH: { 5630 // Op #0 is the Chain. 5631 // Op #1 is the PPC::PRED_* number. 5632 // Op #2 is the CR# 5633 // Op #3 is the Dest MBB 5634 // Op #4 is the Flag. 5635 // Prevent PPC::PRED_* from being selected into LI. 5636 unsigned PCC = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 5637 if (EnableBranchHint) 5638 PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(3)); 5639 5640 SDValue Pred = getI32Imm(PCC, dl); 5641 SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3), 5642 N->getOperand(0), N->getOperand(4) }; 5643 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 5644 return; 5645 } 5646 case ISD::BR_CC: { 5647 if (tryFoldSWTestBRCC(N)) 5648 return; 5649 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 5650 unsigned PCC = 5651 getPredicateForSetCC(CC, N->getOperand(2).getValueType(), Subtarget); 5652 5653 if (N->getOperand(2).getValueType() == MVT::i1) { 5654 unsigned Opc; 5655 bool Swap; 5656 switch (PCC) { 5657 default: llvm_unreachable("Unexpected Boolean-operand predicate"); 5658 case PPC::PRED_LT: Opc = PPC::CRANDC; Swap = true; break; 5659 case PPC::PRED_LE: Opc = PPC::CRORC; Swap = true; break; 5660 case PPC::PRED_EQ: Opc = PPC::CREQV; Swap = false; break; 5661 case PPC::PRED_GE: Opc = PPC::CRORC; Swap = false; break; 5662 case PPC::PRED_GT: Opc = PPC::CRANDC; Swap = false; break; 5663 case PPC::PRED_NE: Opc = PPC::CRXOR; Swap = false; break; 5664 } 5665 5666 // A signed comparison of i1 values produces the opposite result to an 5667 // unsigned one if the condition code includes less-than or greater-than. 5668 // This is because 1 is the most negative signed i1 number and the most 5669 // positive unsigned i1 number. The CR-logical operations used for such 5670 // comparisons are non-commutative so for signed comparisons vs. unsigned 5671 // ones, the input operands just need to be swapped. 5672 if (ISD::isSignedIntSetCC(CC)) 5673 Swap = !Swap; 5674 5675 SDValue BitComp(CurDAG->getMachineNode(Opc, dl, MVT::i1, 5676 N->getOperand(Swap ? 3 : 2), 5677 N->getOperand(Swap ? 2 : 3)), 0); 5678 CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other, BitComp, N->getOperand(4), 5679 N->getOperand(0)); 5680 return; 5681 } 5682 5683 if (EnableBranchHint) 5684 PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(4)); 5685 5686 SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl); 5687 SDValue Ops[] = { getI32Imm(PCC, dl), CondCode, 5688 N->getOperand(4), N->getOperand(0) }; 5689 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 5690 return; 5691 } 5692 case ISD::BRIND: { 5693 // FIXME: Should custom lower this. 5694 SDValue Chain = N->getOperand(0); 5695 SDValue Target = N->getOperand(1); 5696 unsigned Opc = Target.getValueType() == MVT::i32 ? 
PPC::MTCTR : PPC::MTCTR8; 5697 unsigned Reg = Target.getValueType() == MVT::i32 ? PPC::BCTR : PPC::BCTR8; 5698 Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Target, 5699 Chain), 0); 5700 CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain); 5701 return; 5702 } 5703 case PPCISD::TOC_ENTRY: { 5704 const bool isPPC64 = Subtarget->isPPC64(); 5705 const bool isELFABI = Subtarget->isSVR4ABI(); 5706 const bool isAIXABI = Subtarget->isAIXABI(); 5707 5708 // PowerPC only support small, medium and large code model. 5709 const CodeModel::Model CModel = TM.getCodeModel(); 5710 assert(!(CModel == CodeModel::Tiny || CModel == CodeModel::Kernel) && 5711 "PowerPC doesn't support tiny or kernel code models."); 5712 5713 if (isAIXABI && CModel == CodeModel::Medium) 5714 report_fatal_error("Medium code model is not supported on AIX."); 5715 5716 // For 64-bit small code model, we allow SelectCodeCommon to handle this, 5717 // selecting one of LDtoc, LDtocJTI, LDtocCPT, and LDtocBA. 5718 if (isPPC64 && CModel == CodeModel::Small) 5719 break; 5720 5721 // Handle 32-bit small code model. 5722 if (!isPPC64) { 5723 // Transforms the ISD::TOC_ENTRY node to passed in Opcode, either 5724 // PPC::ADDItoc, or PPC::LWZtoc 5725 auto replaceWith = [this, &dl](unsigned OpCode, SDNode *TocEntry) { 5726 SDValue GA = TocEntry->getOperand(0); 5727 SDValue TocBase = TocEntry->getOperand(1); 5728 SDNode *MN = CurDAG->getMachineNode(OpCode, dl, MVT::i32, GA, TocBase); 5729 transferMemOperands(TocEntry, MN); 5730 ReplaceNode(TocEntry, MN); 5731 }; 5732 5733 if (isELFABI) { 5734 assert(TM.isPositionIndependent() && 5735 "32-bit ELF can only have TOC entries in position independent" 5736 " code."); 5737 // 32-bit ELF always uses a small code model toc access. 5738 replaceWith(PPC::LWZtoc, N); 5739 return; 5740 } 5741 5742 if (isAIXABI && CModel == CodeModel::Small) { 5743 if (hasTocDataAttr(N->getOperand(0), 5744 CurDAG->getDataLayout().getPointerSize())) 5745 replaceWith(PPC::ADDItoc, N); 5746 else 5747 replaceWith(PPC::LWZtoc, N); 5748 5749 return; 5750 } 5751 } 5752 5753 assert(CModel != CodeModel::Small && "All small code models handled."); 5754 5755 assert((isPPC64 || (isAIXABI && !isPPC64)) && "We are dealing with 64-bit" 5756 " ELF/AIX or 32-bit AIX in the following."); 5757 5758 // Transforms the ISD::TOC_ENTRY node for 32-bit AIX large code model mode 5759 // or 64-bit medium (ELF-only) or large (ELF and AIX) code model code. We 5760 // generate two instructions as described below. The first source operand 5761 // is a symbol reference. If it must be toc-referenced according to 5762 // Subtarget, we generate: 5763 // [32-bit AIX] 5764 // LWZtocL(@sym, ADDIStocHA(%r2, @sym)) 5765 // [64-bit ELF/AIX] 5766 // LDtocL(@sym, ADDIStocHA8(%x2, @sym)) 5767 // Otherwise we generate: 5768 // ADDItocL(ADDIStocHA8(%x2, @sym), @sym) 5769 SDValue GA = N->getOperand(0); 5770 SDValue TOCbase = N->getOperand(1); 5771 5772 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 5773 SDNode *Tmp = CurDAG->getMachineNode( 5774 isPPC64 ? PPC::ADDIStocHA8 : PPC::ADDIStocHA, dl, VT, TOCbase, GA); 5775 5776 if (PPCLowering->isAccessedAsGotIndirect(GA)) { 5777 // If it is accessed as got-indirect, we need an extra LWZ/LD to load 5778 // the address. 5779 SDNode *MN = CurDAG->getMachineNode( 5780 isPPC64 ? PPC::LDtocL : PPC::LWZtocL, dl, VT, GA, SDValue(Tmp, 0)); 5781 5782 transferMemOperands(N, MN); 5783 ReplaceNode(N, MN); 5784 return; 5785 } 5786 5787 // Build the address relative to the TOC-pointer. 
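    // For the 64-bit case this is roughly:
    //   addis rT, r2, sym@toc@ha
    //   addi  rT, rT, sym@toc@l
    // whereas the got-indirect path above replaces the final addi with an
    // ld/lwz from the TOC.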
5788 ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64, 5789 SDValue(Tmp, 0), GA)); 5790 return; 5791 } 5792 case PPCISD::PPC32_PICGOT: 5793 // Generate a PIC-safe GOT reference. 5794 assert(Subtarget->is32BitELFABI() && 5795 "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4"); 5796 CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT, 5797 PPCLowering->getPointerTy(CurDAG->getDataLayout()), 5798 MVT::i32); 5799 return; 5800 5801 case PPCISD::VADD_SPLAT: { 5802 // This expands into one of three sequences, depending on whether 5803 // the first operand is odd or even, positive or negative. 5804 assert(isa<ConstantSDNode>(N->getOperand(0)) && 5805 isa<ConstantSDNode>(N->getOperand(1)) && 5806 "Invalid operand on VADD_SPLAT!"); 5807 5808 int Elt = N->getConstantOperandVal(0); 5809 int EltSize = N->getConstantOperandVal(1); 5810 unsigned Opc1, Opc2, Opc3; 5811 EVT VT; 5812 5813 if (EltSize == 1) { 5814 Opc1 = PPC::VSPLTISB; 5815 Opc2 = PPC::VADDUBM; 5816 Opc3 = PPC::VSUBUBM; 5817 VT = MVT::v16i8; 5818 } else if (EltSize == 2) { 5819 Opc1 = PPC::VSPLTISH; 5820 Opc2 = PPC::VADDUHM; 5821 Opc3 = PPC::VSUBUHM; 5822 VT = MVT::v8i16; 5823 } else { 5824 assert(EltSize == 4 && "Invalid element size on VADD_SPLAT!"); 5825 Opc1 = PPC::VSPLTISW; 5826 Opc2 = PPC::VADDUWM; 5827 Opc3 = PPC::VSUBUWM; 5828 VT = MVT::v4i32; 5829 } 5830 5831 if ((Elt & 1) == 0) { 5832 // Elt is even, in the range [-32,-18] + [16,30]. 5833 // 5834 // Convert: VADD_SPLAT elt, size 5835 // Into: tmp = VSPLTIS[BHW] elt 5836 // VADDU[BHW]M tmp, tmp 5837 // Where: [BHW] = B for size = 1, H for size = 2, W for size = 4 5838 SDValue EltVal = getI32Imm(Elt >> 1, dl); 5839 SDNode *Tmp = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5840 SDValue TmpVal = SDValue(Tmp, 0); 5841 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal)); 5842 return; 5843 } else if (Elt > 0) { 5844 // Elt is odd and positive, in the range [17,31]. 5845 // 5846 // Convert: VADD_SPLAT elt, size 5847 // Into: tmp1 = VSPLTIS[BHW] elt-16 5848 // tmp2 = VSPLTIS[BHW] -16 5849 // VSUBU[BHW]M tmp1, tmp2 5850 SDValue EltVal = getI32Imm(Elt - 16, dl); 5851 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5852 EltVal = getI32Imm(-16, dl); 5853 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5854 ReplaceNode(N, CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0), 5855 SDValue(Tmp2, 0))); 5856 return; 5857 } else { 5858 // Elt is odd and negative, in the range [-31,-17]. 5859 // 5860 // Convert: VADD_SPLAT elt, size 5861 // Into: tmp1 = VSPLTIS[BHW] elt+16 5862 // tmp2 = VSPLTIS[BHW] -16 5863 // VADDU[BHW]M tmp1, tmp2 5864 SDValue EltVal = getI32Imm(Elt + 16, dl); 5865 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5866 EltVal = getI32Imm(-16, dl); 5867 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5868 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0), 5869 SDValue(Tmp2, 0))); 5870 return; 5871 } 5872 } 5873 case PPCISD::LD_SPLAT: { 5874 // Here we want to handle splat load for type v16i8 and v8i16 when there is 5875 // no direct move, we don't need to use stack for this case. If target has 5876 // direct move, we should be able to get the best selection in the .td file. 5877 if (!Subtarget->hasAltivec() || Subtarget->hasDirectMove()) 5878 break; 5879 5880 EVT Type = N->getValueType(0); 5881 if (Type != MVT::v16i8 && Type != MVT::v8i16) 5882 break; 5883 5884 SDValue ZeroReg = 5885 CurDAG->getRegister(Subtarget->isPPC64() ? PPC::ZERO8 : PPC::ZERO, 5886 Subtarget->isPPC64() ? 
MVT::i64 : MVT::i32); 5887 unsigned LIOpcode = Subtarget->isPPC64() ? PPC::LI8 : PPC::LI; 5888 // v16i8 LD_SPLAT addr 5889 // ======> 5890 // Mask = LVSR/LVSL 0, addr 5891 // LoadLow = LVX 0, addr 5892 // Perm = VPERM LoadLow, LoadLow, Mask 5893 // Splat = VSPLTB 15/0, Perm 5894 // 5895 // v8i16 LD_SPLAT addr 5896 // ======> 5897 // Mask = LVSR/LVSL 0, addr 5898 // LoadLow = LVX 0, addr 5899 // LoadHigh = LVX (LI, 1), addr 5900 // Perm = VPERM LoadLow, LoadHigh, Mask 5901 // Splat = VSPLTH 7/0, Perm 5902 unsigned SplatOp = (Type == MVT::v16i8) ? PPC::VSPLTB : PPC::VSPLTH; 5903 unsigned SplatElemIndex = 5904 Subtarget->isLittleEndian() ? ((Type == MVT::v16i8) ? 15 : 7) : 0; 5905 5906 SDNode *Mask = CurDAG->getMachineNode( 5907 Subtarget->isLittleEndian() ? PPC::LVSR : PPC::LVSL, dl, Type, ZeroReg, 5908 N->getOperand(1)); 5909 5910 SDNode *LoadLow = 5911 CurDAG->getMachineNode(PPC::LVX, dl, MVT::v16i8, MVT::Other, 5912 {ZeroReg, N->getOperand(1), N->getOperand(0)}); 5913 5914 SDNode *LoadHigh = LoadLow; 5915 if (Type == MVT::v8i16) { 5916 LoadHigh = CurDAG->getMachineNode( 5917 PPC::LVX, dl, MVT::v16i8, MVT::Other, 5918 {SDValue(CurDAG->getMachineNode( 5919 LIOpcode, dl, MVT::i32, 5920 CurDAG->getTargetConstant(1, dl, MVT::i8)), 5921 0), 5922 N->getOperand(1), SDValue(LoadLow, 1)}); 5923 } 5924 5925 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(LoadHigh, 1)); 5926 transferMemOperands(N, LoadHigh); 5927 5928 SDNode *Perm = 5929 CurDAG->getMachineNode(PPC::VPERM, dl, Type, SDValue(LoadLow, 0), 5930 SDValue(LoadHigh, 0), SDValue(Mask, 0)); 5931 CurDAG->SelectNodeTo(N, SplatOp, Type, 5932 CurDAG->getTargetConstant(SplatElemIndex, dl, MVT::i8), 5933 SDValue(Perm, 0)); 5934 return; 5935 } 5936 } 5937 5938 SelectCode(N); 5939 } 5940 5941 // If the target supports the cmpb instruction, do the idiom recognition here. 5942 // We don't do this as a DAG combine because we don't want to do it as nodes 5943 // are being combined (because we might miss part of the eventual idiom). We 5944 // don't want to do it during instruction selection because we want to reuse 5945 // the logic for lowering the masking operations already part of the 5946 // instruction selector. 
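// For reference, the cmpb instruction sets each byte of its result to 0xFF
// where the corresponding bytes of the two source registers are equal and to
// 0x00 where they differ; the code below recognizes a tree of per-byte
// select_cc comparisons and rebuilds it as a single PPCISD::CMPB node (plus
// masking where needed).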
5947 SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) { 5948 SDLoc dl(N); 5949 5950 assert(N->getOpcode() == ISD::OR && 5951 "Only OR nodes are supported for CMPB"); 5952 5953 SDValue Res; 5954 if (!Subtarget->hasCMPB()) 5955 return Res; 5956 5957 if (N->getValueType(0) != MVT::i32 && 5958 N->getValueType(0) != MVT::i64) 5959 return Res; 5960 5961 EVT VT = N->getValueType(0); 5962 5963 SDValue RHS, LHS; 5964 bool BytesFound[8] = {false, false, false, false, false, false, false, false}; 5965 uint64_t Mask = 0, Alt = 0; 5966 5967 auto IsByteSelectCC = [this](SDValue O, unsigned &b, 5968 uint64_t &Mask, uint64_t &Alt, 5969 SDValue &LHS, SDValue &RHS) { 5970 if (O.getOpcode() != ISD::SELECT_CC) 5971 return false; 5972 ISD::CondCode CC = cast<CondCodeSDNode>(O.getOperand(4))->get(); 5973 5974 if (!isa<ConstantSDNode>(O.getOperand(2)) || 5975 !isa<ConstantSDNode>(O.getOperand(3))) 5976 return false; 5977 5978 uint64_t PM = O.getConstantOperandVal(2); 5979 uint64_t PAlt = O.getConstantOperandVal(3); 5980 for (b = 0; b < 8; ++b) { 5981 uint64_t Mask = UINT64_C(0xFF) << (8*b); 5982 if (PM && (PM & Mask) == PM && (PAlt & Mask) == PAlt) 5983 break; 5984 } 5985 5986 if (b == 8) 5987 return false; 5988 Mask |= PM; 5989 Alt |= PAlt; 5990 5991 if (!isa<ConstantSDNode>(O.getOperand(1)) || 5992 O.getConstantOperandVal(1) != 0) { 5993 SDValue Op0 = O.getOperand(0), Op1 = O.getOperand(1); 5994 if (Op0.getOpcode() == ISD::TRUNCATE) 5995 Op0 = Op0.getOperand(0); 5996 if (Op1.getOpcode() == ISD::TRUNCATE) 5997 Op1 = Op1.getOperand(0); 5998 5999 if (Op0.getOpcode() == ISD::SRL && Op1.getOpcode() == ISD::SRL && 6000 Op0.getOperand(1) == Op1.getOperand(1) && CC == ISD::SETEQ && 6001 isa<ConstantSDNode>(Op0.getOperand(1))) { 6002 6003 unsigned Bits = Op0.getValueSizeInBits(); 6004 if (b != Bits/8-1) 6005 return false; 6006 if (Op0.getConstantOperandVal(1) != Bits-8) 6007 return false; 6008 6009 LHS = Op0.getOperand(0); 6010 RHS = Op1.getOperand(0); 6011 return true; 6012 } 6013 6014 // When we have small integers (i16 to be specific), the form present 6015 // post-legalization uses SETULT in the SELECT_CC for the 6016 // higher-order byte, depending on the fact that the 6017 // even-higher-order bytes are known to all be zero, for example: 6018 // select_cc (xor $lhs, $rhs), 256, 65280, 0, setult 6019 // (so when the second byte is the same, because all higher-order 6020 // bits from bytes 3 and 4 are known to be zero, the result of the 6021 // xor can be at most 255) 6022 if (Op0.getOpcode() == ISD::XOR && CC == ISD::SETULT && 6023 isa<ConstantSDNode>(O.getOperand(1))) { 6024 6025 uint64_t ULim = O.getConstantOperandVal(1); 6026 if (ULim != (UINT64_C(1) << b*8)) 6027 return false; 6028 6029 // Now we need to make sure that the upper bytes are known to be 6030 // zero. 
        unsigned Bits = Op0.getValueSizeInBits();
        if (!CurDAG->MaskedValueIsZero(
                Op0, APInt::getHighBitsSet(Bits, Bits - (b + 1) * 8)))
          return false;

        LHS = Op0.getOperand(0);
        RHS = Op0.getOperand(1);
        return true;
      }

      return false;
    }

    if (CC != ISD::SETEQ)
      return false;

    SDValue Op = O.getOperand(0);
    if (Op.getOpcode() == ISD::AND) {
      if (!isa<ConstantSDNode>(Op.getOperand(1)))
        return false;
      if (Op.getConstantOperandVal(1) != (UINT64_C(0xFF) << (8*b)))
        return false;

      SDValue XOR = Op.getOperand(0);
      if (XOR.getOpcode() == ISD::TRUNCATE)
        XOR = XOR.getOperand(0);
      if (XOR.getOpcode() != ISD::XOR)
        return false;

      LHS = XOR.getOperand(0);
      RHS = XOR.getOperand(1);
      return true;
    } else if (Op.getOpcode() == ISD::SRL) {
      if (!isa<ConstantSDNode>(Op.getOperand(1)))
        return false;
      unsigned Bits = Op.getValueSizeInBits();
      if (b != Bits/8-1)
        return false;
      if (Op.getConstantOperandVal(1) != Bits-8)
        return false;

      SDValue XOR = Op.getOperand(0);
      if (XOR.getOpcode() == ISD::TRUNCATE)
        XOR = XOR.getOperand(0);
      if (XOR.getOpcode() != ISD::XOR)
        return false;

      LHS = XOR.getOperand(0);
      RHS = XOR.getOperand(1);
      return true;
    }

    return false;
  };

  SmallVector<SDValue, 8> Queue(1, SDValue(N, 0));
  while (!Queue.empty()) {
    SDValue V = Queue.pop_back_val();

    for (const SDValue &O : V.getNode()->ops()) {
      unsigned b = 0;
      uint64_t M = 0, A = 0;
      SDValue OLHS, ORHS;
      if (O.getOpcode() == ISD::OR) {
        Queue.push_back(O);
      } else if (IsByteSelectCC(O, b, M, A, OLHS, ORHS)) {
        if (!LHS) {
          LHS = OLHS;
          RHS = ORHS;
          BytesFound[b] = true;
          Mask |= M;
          Alt |= A;
        } else if ((LHS == ORHS && RHS == OLHS) ||
                   (RHS == ORHS && LHS == OLHS)) {
          BytesFound[b] = true;
          Mask |= M;
          Alt |= A;
        } else {
          return Res;
        }
      } else {
        return Res;
      }
    }
  }

  unsigned LastB = 0, BCnt = 0;
  for (unsigned i = 0; i < 8; ++i)
    if (BytesFound[i]) {
      ++BCnt;
      LastB = i;
    }

  if (!LastB || BCnt < 2)
    return Res;

  // Because we'll be zero-extending the output anyway if we don't have a
  // specific value for each input byte (via the Mask), we can 'anyext' the
  // inputs.
  if (LHS.getValueType() != VT) {
    LHS = CurDAG->getAnyExtOrTrunc(LHS, dl, VT);
    RHS = CurDAG->getAnyExtOrTrunc(RHS, dl, VT);
  }

  Res = CurDAG->getNode(PPCISD::CMPB, dl, VT, LHS, RHS);

  bool NonTrivialMask = ((int64_t) Mask) != INT64_C(-1);
  if (NonTrivialMask && !Alt) {
    // Res = Mask & CMPB
    Res = CurDAG->getNode(ISD::AND, dl, VT, Res,
                          CurDAG->getConstant(Mask, dl, VT));
  } else if (Alt) {
    // Res = (CMPB & Mask) | (~CMPB & Alt)
    // Which, as suggested here:
    //   https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
    // can be written as:
    //   Res = Alt ^ ((Alt ^ Mask) & CMPB)
    // useful because the (Alt ^ Mask) can be pre-computed.
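    // As a worked example (with illustrative values only): for Mask = 0xF0,
    // Alt = 0x0F and a CMPB byte of 0xFF, the desired merge is
    // (0xFF & 0xF0) | (0x00 & 0x0F) = 0xF0, and the rewritten form agrees:
    // 0x0F ^ ((0x0F ^ 0xF0) & 0xFF) = 0x0F ^ 0xFF = 0xF0. For a CMPB byte of
    // 0x00, both forms yield Alt = 0x0F.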
6148 Res = CurDAG->getNode(ISD::AND, dl, VT, Res, 6149 CurDAG->getConstant(Mask ^ Alt, dl, VT)); 6150 Res = CurDAG->getNode(ISD::XOR, dl, VT, Res, 6151 CurDAG->getConstant(Alt, dl, VT)); 6152 } 6153 6154 return Res; 6155 } 6156 6157 // When CR bit registers are enabled, an extension of an i1 variable to a i32 6158 // or i64 value is lowered in terms of a SELECT_I[48] operation, and thus 6159 // involves constant materialization of a 0 or a 1 or both. If the result of 6160 // the extension is then operated upon by some operator that can be constant 6161 // folded with a constant 0 or 1, and that constant can be materialized using 6162 // only one instruction (like a zero or one), then we should fold in those 6163 // operations with the select. 6164 void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) { 6165 if (!Subtarget->useCRBits()) 6166 return; 6167 6168 if (N->getOpcode() != ISD::ZERO_EXTEND && 6169 N->getOpcode() != ISD::SIGN_EXTEND && 6170 N->getOpcode() != ISD::ANY_EXTEND) 6171 return; 6172 6173 if (N->getOperand(0).getValueType() != MVT::i1) 6174 return; 6175 6176 if (!N->hasOneUse()) 6177 return; 6178 6179 SDLoc dl(N); 6180 EVT VT = N->getValueType(0); 6181 SDValue Cond = N->getOperand(0); 6182 SDValue ConstTrue = 6183 CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, dl, VT); 6184 SDValue ConstFalse = CurDAG->getConstant(0, dl, VT); 6185 6186 do { 6187 SDNode *User = *N->use_begin(); 6188 if (User->getNumOperands() != 2) 6189 break; 6190 6191 auto TryFold = [this, N, User, dl](SDValue Val) { 6192 SDValue UserO0 = User->getOperand(0), UserO1 = User->getOperand(1); 6193 SDValue O0 = UserO0.getNode() == N ? Val : UserO0; 6194 SDValue O1 = UserO1.getNode() == N ? Val : UserO1; 6195 6196 return CurDAG->FoldConstantArithmetic(User->getOpcode(), dl, 6197 User->getValueType(0), {O0, O1}); 6198 }; 6199 6200 // FIXME: When the semantics of the interaction between select and undef 6201 // are clearly defined, it may turn out to be unnecessary to break here. 6202 SDValue TrueRes = TryFold(ConstTrue); 6203 if (!TrueRes || TrueRes.isUndef()) 6204 break; 6205 SDValue FalseRes = TryFold(ConstFalse); 6206 if (!FalseRes || FalseRes.isUndef()) 6207 break; 6208 6209 // For us to materialize these using one instruction, we must be able to 6210 // represent them as signed 16-bit integers. 6211 uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(), 6212 False = cast<ConstantSDNode>(FalseRes)->getZExtValue(); 6213 if (!isInt<16>(True) || !isInt<16>(False)) 6214 break; 6215 6216 // We can replace User with a new SELECT node, and try again to see if we 6217 // can fold the select with its user. 
6218 Res = CurDAG->getSelect(dl, User->getValueType(0), Cond, TrueRes, FalseRes); 6219 N = User; 6220 ConstTrue = TrueRes; 6221 ConstFalse = FalseRes; 6222 } while (N->hasOneUse()); 6223 } 6224 6225 void PPCDAGToDAGISel::PreprocessISelDAG() { 6226 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 6227 6228 bool MadeChange = false; 6229 while (Position != CurDAG->allnodes_begin()) { 6230 SDNode *N = &*--Position; 6231 if (N->use_empty()) 6232 continue; 6233 6234 SDValue Res; 6235 switch (N->getOpcode()) { 6236 default: break; 6237 case ISD::OR: 6238 Res = combineToCMPB(N); 6239 break; 6240 } 6241 6242 if (!Res) 6243 foldBoolExts(Res, N); 6244 6245 if (Res) { 6246 LLVM_DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: "); 6247 LLVM_DEBUG(N->dump(CurDAG)); 6248 LLVM_DEBUG(dbgs() << "\nNew: "); 6249 LLVM_DEBUG(Res.getNode()->dump(CurDAG)); 6250 LLVM_DEBUG(dbgs() << "\n"); 6251 6252 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res); 6253 MadeChange = true; 6254 } 6255 } 6256 6257 if (MadeChange) 6258 CurDAG->RemoveDeadNodes(); 6259 } 6260 6261 /// PostprocessISelDAG - Perform some late peephole optimizations 6262 /// on the DAG representation. 6263 void PPCDAGToDAGISel::PostprocessISelDAG() { 6264 // Skip peepholes at -O0. 6265 if (TM.getOptLevel() == CodeGenOpt::None) 6266 return; 6267 6268 PeepholePPC64(); 6269 PeepholeCROps(); 6270 PeepholePPC64ZExt(); 6271 } 6272 6273 // Check if all users of this node will become isel where the second operand 6274 // is the constant zero. If this is so, and if we can negate the condition, 6275 // then we can flip the true and false operands. This will allow the zero to 6276 // be folded with the isel so that we don't need to materialize a register 6277 // containing zero. 6278 bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) { 6279 for (const SDNode *User : N->uses()) { 6280 if (!User->isMachineOpcode()) 6281 return false; 6282 if (User->getMachineOpcode() != PPC::SELECT_I4 && 6283 User->getMachineOpcode() != PPC::SELECT_I8) 6284 return false; 6285 6286 SDNode *Op1 = User->getOperand(1).getNode(); 6287 SDNode *Op2 = User->getOperand(2).getNode(); 6288 // If we have a degenerate select with two equal operands, swapping will 6289 // not do anything, and we may run into an infinite loop. 
6290 if (Op1 == Op2) 6291 return false; 6292 6293 if (!Op2->isMachineOpcode()) 6294 return false; 6295 6296 if (Op2->getMachineOpcode() != PPC::LI && 6297 Op2->getMachineOpcode() != PPC::LI8) 6298 return false; 6299 6300 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op2->getOperand(0)); 6301 if (!C) 6302 return false; 6303 6304 if (!C->isZero()) 6305 return false; 6306 } 6307 6308 return true; 6309 } 6310 6311 void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) { 6312 SmallVector<SDNode *, 4> ToReplace; 6313 for (SDNode *User : N->uses()) { 6314 assert((User->getMachineOpcode() == PPC::SELECT_I4 || 6315 User->getMachineOpcode() == PPC::SELECT_I8) && 6316 "Must have all select users"); 6317 ToReplace.push_back(User); 6318 } 6319 6320 for (SDNode *User : ToReplace) { 6321 SDNode *ResNode = 6322 CurDAG->getMachineNode(User->getMachineOpcode(), SDLoc(User), 6323 User->getValueType(0), User->getOperand(0), 6324 User->getOperand(2), 6325 User->getOperand(1)); 6326 6327 LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: "); 6328 LLVM_DEBUG(User->dump(CurDAG)); 6329 LLVM_DEBUG(dbgs() << "\nNew: "); 6330 LLVM_DEBUG(ResNode->dump(CurDAG)); 6331 LLVM_DEBUG(dbgs() << "\n"); 6332 6333 ReplaceUses(User, ResNode); 6334 } 6335 } 6336 6337 void PPCDAGToDAGISel::PeepholeCROps() { 6338 bool IsModified; 6339 do { 6340 IsModified = false; 6341 for (SDNode &Node : CurDAG->allnodes()) { 6342 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node); 6343 if (!MachineNode || MachineNode->use_empty()) 6344 continue; 6345 SDNode *ResNode = MachineNode; 6346 6347 bool Op1Set = false, Op1Unset = false, 6348 Op1Not = false, 6349 Op2Set = false, Op2Unset = false, 6350 Op2Not = false; 6351 6352 unsigned Opcode = MachineNode->getMachineOpcode(); 6353 switch (Opcode) { 6354 default: break; 6355 case PPC::CRAND: 6356 case PPC::CRNAND: 6357 case PPC::CROR: 6358 case PPC::CRXOR: 6359 case PPC::CRNOR: 6360 case PPC::CREQV: 6361 case PPC::CRANDC: 6362 case PPC::CRORC: { 6363 SDValue Op = MachineNode->getOperand(1); 6364 if (Op.isMachineOpcode()) { 6365 if (Op.getMachineOpcode() == PPC::CRSET) 6366 Op2Set = true; 6367 else if (Op.getMachineOpcode() == PPC::CRUNSET) 6368 Op2Unset = true; 6369 else if (Op.getMachineOpcode() == PPC::CRNOR && 6370 Op.getOperand(0) == Op.getOperand(1)) 6371 Op2Not = true; 6372 } 6373 LLVM_FALLTHROUGH; 6374 } 6375 case PPC::BC: 6376 case PPC::BCn: 6377 case PPC::SELECT_I4: 6378 case PPC::SELECT_I8: 6379 case PPC::SELECT_F4: 6380 case PPC::SELECT_F8: 6381 case PPC::SELECT_SPE: 6382 case PPC::SELECT_SPE4: 6383 case PPC::SELECT_VRRC: 6384 case PPC::SELECT_VSFRC: 6385 case PPC::SELECT_VSSRC: 6386 case PPC::SELECT_VSRC: { 6387 SDValue Op = MachineNode->getOperand(0); 6388 if (Op.isMachineOpcode()) { 6389 if (Op.getMachineOpcode() == PPC::CRSET) 6390 Op1Set = true; 6391 else if (Op.getMachineOpcode() == PPC::CRUNSET) 6392 Op1Unset = true; 6393 else if (Op.getMachineOpcode() == PPC::CRNOR && 6394 Op.getOperand(0) == Op.getOperand(1)) 6395 Op1Not = true; 6396 } 6397 } 6398 break; 6399 } 6400 6401 bool SelectSwap = false; 6402 switch (Opcode) { 6403 default: break; 6404 case PPC::CRAND: 6405 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6406 // x & x = x 6407 ResNode = MachineNode->getOperand(0).getNode(); 6408 else if (Op1Set) 6409 // 1 & y = y 6410 ResNode = MachineNode->getOperand(1).getNode(); 6411 else if (Op2Set) 6412 // x & 1 = x 6413 ResNode = MachineNode->getOperand(0).getNode(); 6414 else if (Op1Unset || Op2Unset) 6415 // x & 0 = 0 & y = 0 6416 ResNode = 
CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6417 MVT::i1); 6418 else if (Op1Not) 6419 // ~x & y = andc(y, x) 6420 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6421 MVT::i1, MachineNode->getOperand(1), 6422 MachineNode->getOperand(0). 6423 getOperand(0)); 6424 else if (Op2Not) 6425 // x & ~y = andc(x, y) 6426 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6427 MVT::i1, MachineNode->getOperand(0), 6428 MachineNode->getOperand(1). 6429 getOperand(0)); 6430 else if (AllUsersSelectZero(MachineNode)) { 6431 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode), 6432 MVT::i1, MachineNode->getOperand(0), 6433 MachineNode->getOperand(1)); 6434 SelectSwap = true; 6435 } 6436 break; 6437 case PPC::CRNAND: 6438 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6439 // nand(x, x) -> nor(x, x) 6440 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6441 MVT::i1, MachineNode->getOperand(0), 6442 MachineNode->getOperand(0)); 6443 else if (Op1Set) 6444 // nand(1, y) -> nor(y, y) 6445 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6446 MVT::i1, MachineNode->getOperand(1), 6447 MachineNode->getOperand(1)); 6448 else if (Op2Set) 6449 // nand(x, 1) -> nor(x, x) 6450 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6451 MVT::i1, MachineNode->getOperand(0), 6452 MachineNode->getOperand(0)); 6453 else if (Op1Unset || Op2Unset) 6454 // nand(x, 0) = nand(0, y) = 1 6455 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6456 MVT::i1); 6457 else if (Op1Not) 6458 // nand(~x, y) = ~(~x & y) = x | ~y = orc(x, y) 6459 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6460 MVT::i1, MachineNode->getOperand(0). 6461 getOperand(0), 6462 MachineNode->getOperand(1)); 6463 else if (Op2Not) 6464 // nand(x, ~y) = ~x | y = orc(y, x) 6465 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6466 MVT::i1, MachineNode->getOperand(1). 6467 getOperand(0), 6468 MachineNode->getOperand(0)); 6469 else if (AllUsersSelectZero(MachineNode)) { 6470 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode), 6471 MVT::i1, MachineNode->getOperand(0), 6472 MachineNode->getOperand(1)); 6473 SelectSwap = true; 6474 } 6475 break; 6476 case PPC::CROR: 6477 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6478 // x | x = x 6479 ResNode = MachineNode->getOperand(0).getNode(); 6480 else if (Op1Set || Op2Set) 6481 // x | 1 = 1 | y = 1 6482 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6483 MVT::i1); 6484 else if (Op1Unset) 6485 // 0 | y = y 6486 ResNode = MachineNode->getOperand(1).getNode(); 6487 else if (Op2Unset) 6488 // x | 0 = x 6489 ResNode = MachineNode->getOperand(0).getNode(); 6490 else if (Op1Not) 6491 // ~x | y = orc(y, x) 6492 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6493 MVT::i1, MachineNode->getOperand(1), 6494 MachineNode->getOperand(0). 6495 getOperand(0)); 6496 else if (Op2Not) 6497 // x | ~y = orc(x, y) 6498 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6499 MVT::i1, MachineNode->getOperand(0), 6500 MachineNode->getOperand(1). 
6501 getOperand(0)); 6502 else if (AllUsersSelectZero(MachineNode)) { 6503 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6504 MVT::i1, MachineNode->getOperand(0), 6505 MachineNode->getOperand(1)); 6506 SelectSwap = true; 6507 } 6508 break; 6509 case PPC::CRXOR: 6510 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6511 // xor(x, x) = 0 6512 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6513 MVT::i1); 6514 else if (Op1Set) 6515 // xor(1, y) -> nor(y, y) 6516 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6517 MVT::i1, MachineNode->getOperand(1), 6518 MachineNode->getOperand(1)); 6519 else if (Op2Set) 6520 // xor(x, 1) -> nor(x, x) 6521 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6522 MVT::i1, MachineNode->getOperand(0), 6523 MachineNode->getOperand(0)); 6524 else if (Op1Unset) 6525 // xor(0, y) = y 6526 ResNode = MachineNode->getOperand(1).getNode(); 6527 else if (Op2Unset) 6528 // xor(x, 0) = x 6529 ResNode = MachineNode->getOperand(0).getNode(); 6530 else if (Op1Not) 6531 // xor(~x, y) = eqv(x, y) 6532 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6533 MVT::i1, MachineNode->getOperand(0). 6534 getOperand(0), 6535 MachineNode->getOperand(1)); 6536 else if (Op2Not) 6537 // xor(x, ~y) = eqv(x, y) 6538 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6539 MVT::i1, MachineNode->getOperand(0), 6540 MachineNode->getOperand(1). 6541 getOperand(0)); 6542 else if (AllUsersSelectZero(MachineNode)) { 6543 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6544 MVT::i1, MachineNode->getOperand(0), 6545 MachineNode->getOperand(1)); 6546 SelectSwap = true; 6547 } 6548 break; 6549 case PPC::CRNOR: 6550 if (Op1Set || Op2Set) 6551 // nor(1, y) -> 0 6552 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6553 MVT::i1); 6554 else if (Op1Unset) 6555 // nor(0, y) = ~y -> nor(y, y) 6556 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6557 MVT::i1, MachineNode->getOperand(1), 6558 MachineNode->getOperand(1)); 6559 else if (Op2Unset) 6560 // nor(x, 0) = ~x 6561 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6562 MVT::i1, MachineNode->getOperand(0), 6563 MachineNode->getOperand(0)); 6564 else if (Op1Not) 6565 // nor(~x, y) = andc(x, y) 6566 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6567 MVT::i1, MachineNode->getOperand(0). 6568 getOperand(0), 6569 MachineNode->getOperand(1)); 6570 else if (Op2Not) 6571 // nor(x, ~y) = andc(y, x) 6572 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6573 MVT::i1, MachineNode->getOperand(1). 
6574 getOperand(0), 6575 MachineNode->getOperand(0)); 6576 else if (AllUsersSelectZero(MachineNode)) { 6577 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode), 6578 MVT::i1, MachineNode->getOperand(0), 6579 MachineNode->getOperand(1)); 6580 SelectSwap = true; 6581 } 6582 break; 6583 case PPC::CREQV: 6584 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6585 // eqv(x, x) = 1 6586 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6587 MVT::i1); 6588 else if (Op1Set) 6589 // eqv(1, y) = y 6590 ResNode = MachineNode->getOperand(1).getNode(); 6591 else if (Op2Set) 6592 // eqv(x, 1) = x 6593 ResNode = MachineNode->getOperand(0).getNode(); 6594 else if (Op1Unset) 6595 // eqv(0, y) = ~y -> nor(y, y) 6596 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6597 MVT::i1, MachineNode->getOperand(1), 6598 MachineNode->getOperand(1)); 6599 else if (Op2Unset) 6600 // eqv(x, 0) = ~x 6601 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6602 MVT::i1, MachineNode->getOperand(0), 6603 MachineNode->getOperand(0)); 6604 else if (Op1Not) 6605 // eqv(~x, y) = xor(x, y) 6606 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6607 MVT::i1, MachineNode->getOperand(0). 6608 getOperand(0), 6609 MachineNode->getOperand(1)); 6610 else if (Op2Not) 6611 // eqv(x, ~y) = xor(x, y) 6612 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6613 MVT::i1, MachineNode->getOperand(0), 6614 MachineNode->getOperand(1). 6615 getOperand(0)); 6616 else if (AllUsersSelectZero(MachineNode)) { 6617 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6618 MVT::i1, MachineNode->getOperand(0), 6619 MachineNode->getOperand(1)); 6620 SelectSwap = true; 6621 } 6622 break; 6623 case PPC::CRANDC: 6624 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6625 // andc(x, x) = 0 6626 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6627 MVT::i1); 6628 else if (Op1Set) 6629 // andc(1, y) = ~y 6630 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6631 MVT::i1, MachineNode->getOperand(1), 6632 MachineNode->getOperand(1)); 6633 else if (Op1Unset || Op2Set) 6634 // andc(0, y) = andc(x, 1) = 0 6635 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6636 MVT::i1); 6637 else if (Op2Unset) 6638 // andc(x, 0) = x 6639 ResNode = MachineNode->getOperand(0).getNode(); 6640 else if (Op1Not) 6641 // andc(~x, y) = ~(x | y) = nor(x, y) 6642 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6643 MVT::i1, MachineNode->getOperand(0). 6644 getOperand(0), 6645 MachineNode->getOperand(1)); 6646 else if (Op2Not) 6647 // andc(x, ~y) = x & y 6648 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode), 6649 MVT::i1, MachineNode->getOperand(0), 6650 MachineNode->getOperand(1). 
6651 getOperand(0)); 6652 else if (AllUsersSelectZero(MachineNode)) { 6653 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6654 MVT::i1, MachineNode->getOperand(1), 6655 MachineNode->getOperand(0)); 6656 SelectSwap = true; 6657 } 6658 break; 6659 case PPC::CRORC: 6660 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6661 // orc(x, x) = 1 6662 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6663 MVT::i1); 6664 else if (Op1Set || Op2Unset) 6665 // orc(1, y) = orc(x, 0) = 1 6666 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6667 MVT::i1); 6668 else if (Op2Set) 6669 // orc(x, 1) = x 6670 ResNode = MachineNode->getOperand(0).getNode(); 6671 else if (Op1Unset) 6672 // orc(0, y) = ~y 6673 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6674 MVT::i1, MachineNode->getOperand(1), 6675 MachineNode->getOperand(1)); 6676 else if (Op1Not) 6677 // orc(~x, y) = ~(x & y) = nand(x, y) 6678 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode), 6679 MVT::i1, MachineNode->getOperand(0). 6680 getOperand(0), 6681 MachineNode->getOperand(1)); 6682 else if (Op2Not) 6683 // orc(x, ~y) = x | y 6684 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode), 6685 MVT::i1, MachineNode->getOperand(0), 6686 MachineNode->getOperand(1). 6687 getOperand(0)); 6688 else if (AllUsersSelectZero(MachineNode)) { 6689 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6690 MVT::i1, MachineNode->getOperand(1), 6691 MachineNode->getOperand(0)); 6692 SelectSwap = true; 6693 } 6694 break; 6695 case PPC::SELECT_I4: 6696 case PPC::SELECT_I8: 6697 case PPC::SELECT_F4: 6698 case PPC::SELECT_F8: 6699 case PPC::SELECT_SPE: 6700 case PPC::SELECT_SPE4: 6701 case PPC::SELECT_VRRC: 6702 case PPC::SELECT_VSFRC: 6703 case PPC::SELECT_VSSRC: 6704 case PPC::SELECT_VSRC: 6705 if (Op1Set) 6706 ResNode = MachineNode->getOperand(1).getNode(); 6707 else if (Op1Unset) 6708 ResNode = MachineNode->getOperand(2).getNode(); 6709 else if (Op1Not) 6710 ResNode = CurDAG->getMachineNode(MachineNode->getMachineOpcode(), 6711 SDLoc(MachineNode), 6712 MachineNode->getValueType(0), 6713 MachineNode->getOperand(0). 6714 getOperand(0), 6715 MachineNode->getOperand(2), 6716 MachineNode->getOperand(1)); 6717 break; 6718 case PPC::BC: 6719 case PPC::BCn: 6720 if (Op1Not) 6721 ResNode = CurDAG->getMachineNode(Opcode == PPC::BC ? PPC::BCn : 6722 PPC::BC, 6723 SDLoc(MachineNode), 6724 MVT::Other, 6725 MachineNode->getOperand(0). 6726 getOperand(0), 6727 MachineNode->getOperand(1), 6728 MachineNode->getOperand(2)); 6729 // FIXME: Handle Op1Set, Op1Unset here too. 6730 break; 6731 } 6732 6733 // If we're inverting this node because it is used only by selects that 6734 // we'd like to swap, then swap the selects before the node replacement. 6735 if (SelectSwap) 6736 SwapAllSelectUsers(MachineNode); 6737 6738 if (ResNode != MachineNode) { 6739 LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: "); 6740 LLVM_DEBUG(MachineNode->dump(CurDAG)); 6741 LLVM_DEBUG(dbgs() << "\nNew: "); 6742 LLVM_DEBUG(ResNode->dump(CurDAG)); 6743 LLVM_DEBUG(dbgs() << "\n"); 6744 6745 ReplaceUses(MachineNode, ResNode); 6746 IsModified = true; 6747 } 6748 } 6749 if (IsModified) 6750 CurDAG->RemoveDeadNodes(); 6751 } while (IsModified); 6752 } 6753 6754 // Gather the set of 32-bit operations that are known to have their 6755 // higher-order 32 bits zero, where ToPromote contains all such operations. 
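// For example (an assumed input shape, for illustration only): if Op32 is
//   (OR (SRW $a, $b), (RLWINM $c, 0, 16, 31))
// then the OR can be looked through and both of its operands are "frontier"
// instructions that clear the high 32 bits, so all three nodes end up in
// ToPromote. An RLWINM whose mask wraps around (MB > ME) would instead cause
// the gather to fail.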
static bool PeepholePPC64ZExtGather(SDValue Op32,
                                    SmallPtrSetImpl<SDNode *> &ToPromote) {
  if (!Op32.isMachineOpcode())
    return false;

  // First, check for the "frontier" instructions (those that will clear the
  // higher-order 32 bits).

  // For RLWINM and RLWNM, we need to make sure that the mask does not wrap
  // around. If it does not, then these instructions will clear the
  // higher-order bits.
  if ((Op32.getMachineOpcode() == PPC::RLWINM ||
       Op32.getMachineOpcode() == PPC::RLWNM) &&
      Op32.getConstantOperandVal(2) <= Op32.getConstantOperandVal(3)) {
    ToPromote.insert(Op32.getNode());
    return true;
  }

  // SLW and SRW always clear the higher-order bits.
  if (Op32.getMachineOpcode() == PPC::SLW ||
      Op32.getMachineOpcode() == PPC::SRW) {
    ToPromote.insert(Op32.getNode());
    return true;
  }

  // For LI and LIS, we need the immediate to be positive (so that it is not
  // sign extended).
  if (Op32.getMachineOpcode() == PPC::LI ||
      Op32.getMachineOpcode() == PPC::LIS) {
    if (!isUInt<15>(Op32.getConstantOperandVal(0)))
      return false;

    ToPromote.insert(Op32.getNode());
    return true;
  }

  // LHBRX and LWBRX always clear the higher-order bits.
  if (Op32.getMachineOpcode() == PPC::LHBRX ||
      Op32.getMachineOpcode() == PPC::LWBRX) {
    ToPromote.insert(Op32.getNode());
    return true;
  }

  // CNT[LT]ZW always produce a 64-bit value in [0,32], and so are zero
  // extended.
  if (Op32.getMachineOpcode() == PPC::CNTLZW ||
      Op32.getMachineOpcode() == PPC::CNTTZW) {
    ToPromote.insert(Op32.getNode());
    return true;
  }

  // Next, check for those instructions we can look through.

  // Assuming the mask does not wrap around, then the higher-order bits are
  // taken directly from the first operand.
  if (Op32.getMachineOpcode() == PPC::RLWIMI &&
      Op32.getConstantOperandVal(3) <= Op32.getConstantOperandVal(4)) {
    SmallPtrSet<SDNode *, 16> ToPromote1;
    if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1))
      return false;

    ToPromote.insert(Op32.getNode());
    ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
    return true;
  }

  // For OR, the higher-order bits are zero if that is true for both operands.
  // For SELECT_I4, the same is true (but the relevant operand numbers are
  // shifted by 1).
  if (Op32.getMachineOpcode() == PPC::OR ||
      Op32.getMachineOpcode() == PPC::SELECT_I4) {
    unsigned B = Op32.getMachineOpcode() == PPC::SELECT_I4 ? 1 : 0;
    SmallPtrSet<SDNode *, 16> ToPromote1;
    if (!PeepholePPC64ZExtGather(Op32.getOperand(B+0), ToPromote1))
      return false;
    if (!PeepholePPC64ZExtGather(Op32.getOperand(B+1), ToPromote1))
      return false;

    ToPromote.insert(Op32.getNode());
    ToPromote.insert(ToPromote1.begin(), ToPromote1.end());
    return true;
  }

  // For ORI and ORIS, we need the higher-order bits of the first operand to be
  // zero, and also for the constant to be positive (so that it is not sign
  // extended).
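  // For instance (illustrative operands only), (ORI (SRW $a, $b), 0x7FFF)
  // qualifies: the SRW already clears the high 32 bits and the immediate
  // passes the isUInt<15> check below, whereas an immediate of 0x8000 would
  // be rejected.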
6841 if (Op32.getMachineOpcode() == PPC::ORI || 6842 Op32.getMachineOpcode() == PPC::ORIS) { 6843 SmallPtrSet<SDNode *, 16> ToPromote1; 6844 if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1)) 6845 return false; 6846 if (!isUInt<15>(Op32.getConstantOperandVal(1))) 6847 return false; 6848 6849 ToPromote.insert(Op32.getNode()); 6850 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6851 return true; 6852 } 6853 6854 // The higher-order bits of AND are zero if that is true for at least one of 6855 // the operands. 6856 if (Op32.getMachineOpcode() == PPC::AND) { 6857 SmallPtrSet<SDNode *, 16> ToPromote1, ToPromote2; 6858 bool Op0OK = 6859 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1); 6860 bool Op1OK = 6861 PeepholePPC64ZExtGather(Op32.getOperand(1), ToPromote2); 6862 if (!Op0OK && !Op1OK) 6863 return false; 6864 6865 ToPromote.insert(Op32.getNode()); 6866 6867 if (Op0OK) 6868 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6869 6870 if (Op1OK) 6871 ToPromote.insert(ToPromote2.begin(), ToPromote2.end()); 6872 6873 return true; 6874 } 6875 6876 // For ANDI and ANDIS, the higher-order bits are zero if either that is true 6877 // of the first operand, or if the second operand is positive (so that it is 6878 // not sign extended). 6879 if (Op32.getMachineOpcode() == PPC::ANDI_rec || 6880 Op32.getMachineOpcode() == PPC::ANDIS_rec) { 6881 SmallPtrSet<SDNode *, 16> ToPromote1; 6882 bool Op0OK = 6883 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1); 6884 bool Op1OK = isUInt<15>(Op32.getConstantOperandVal(1)); 6885 if (!Op0OK && !Op1OK) 6886 return false; 6887 6888 ToPromote.insert(Op32.getNode()); 6889 6890 if (Op0OK) 6891 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6892 6893 return true; 6894 } 6895 6896 return false; 6897 } 6898 6899 void PPCDAGToDAGISel::PeepholePPC64ZExt() { 6900 if (!Subtarget->isPPC64()) 6901 return; 6902 6903 // When we zero-extend from i32 to i64, we use a pattern like this: 6904 // def : Pat<(i64 (zext i32:$in)), 6905 // (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32), 6906 // 0, 32)>; 6907 // There are several 32-bit shift/rotate instructions, however, that will 6908 // clear the higher-order bits of their output, rendering the RLDICL 6909 // unnecessary. When that happens, we remove it here, and redefine the 6910 // relevant 32-bit operation to be a 64-bit operation. 6911 6912 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 6913 6914 bool MadeChange = false; 6915 while (Position != CurDAG->allnodes_begin()) { 6916 SDNode *N = &*--Position; 6917 // Skip dead nodes and any non-machine opcodes. 6918 if (N->use_empty() || !N->isMachineOpcode()) 6919 continue; 6920 6921 if (N->getMachineOpcode() != PPC::RLDICL) 6922 continue; 6923 6924 if (N->getConstantOperandVal(1) != 0 || 6925 N->getConstantOperandVal(2) != 32) 6926 continue; 6927 6928 SDValue ISR = N->getOperand(0); 6929 if (!ISR.isMachineOpcode() || 6930 ISR.getMachineOpcode() != TargetOpcode::INSERT_SUBREG) 6931 continue; 6932 6933 if (!ISR.hasOneUse()) 6934 continue; 6935 6936 if (ISR.getConstantOperandVal(2) != PPC::sub_32) 6937 continue; 6938 6939 SDValue IDef = ISR.getOperand(0); 6940 if (!IDef.isMachineOpcode() || 6941 IDef.getMachineOpcode() != TargetOpcode::IMPLICIT_DEF) 6942 continue; 6943 6944 // We now know that we're looking at a canonical i32 -> i64 zext. See if we 6945 // can get rid of it. 
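    // For example (a sketch of one possible input): given
    //   (RLDICL (INSERT_SUBREG (IMPLICIT_DEF), (SRW $a, $b), sub_32), 0, 32)
    // the SRW already clears the high 32 bits, so below we morph it to SRW8
    // (wrapping its remaining i32 register operands in INSERT_SUBREGs) and
    // then replace the RLDICL with the promoted node, dropping the zero
    // extension entirely.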
6946 6947 SDValue Op32 = ISR->getOperand(1); 6948 if (!Op32.isMachineOpcode()) 6949 continue; 6950 6951 // There are some 32-bit instructions that always clear the high-order 32 6952 // bits, there are also some instructions (like AND) that we can look 6953 // through. 6954 SmallPtrSet<SDNode *, 16> ToPromote; 6955 if (!PeepholePPC64ZExtGather(Op32, ToPromote)) 6956 continue; 6957 6958 // If the ToPromote set contains nodes that have uses outside of the set 6959 // (except for the original INSERT_SUBREG), then abort the transformation. 6960 bool OutsideUse = false; 6961 for (SDNode *PN : ToPromote) { 6962 for (SDNode *UN : PN->uses()) { 6963 if (!ToPromote.count(UN) && UN != ISR.getNode()) { 6964 OutsideUse = true; 6965 break; 6966 } 6967 } 6968 6969 if (OutsideUse) 6970 break; 6971 } 6972 if (OutsideUse) 6973 continue; 6974 6975 MadeChange = true; 6976 6977 // We now know that this zero extension can be removed by promoting to 6978 // nodes in ToPromote to 64-bit operations, where for operations in the 6979 // frontier of the set, we need to insert INSERT_SUBREGs for their 6980 // operands. 6981 for (SDNode *PN : ToPromote) { 6982 unsigned NewOpcode; 6983 switch (PN->getMachineOpcode()) { 6984 default: 6985 llvm_unreachable("Don't know the 64-bit variant of this instruction"); 6986 case PPC::RLWINM: NewOpcode = PPC::RLWINM8; break; 6987 case PPC::RLWNM: NewOpcode = PPC::RLWNM8; break; 6988 case PPC::SLW: NewOpcode = PPC::SLW8; break; 6989 case PPC::SRW: NewOpcode = PPC::SRW8; break; 6990 case PPC::LI: NewOpcode = PPC::LI8; break; 6991 case PPC::LIS: NewOpcode = PPC::LIS8; break; 6992 case PPC::LHBRX: NewOpcode = PPC::LHBRX8; break; 6993 case PPC::LWBRX: NewOpcode = PPC::LWBRX8; break; 6994 case PPC::CNTLZW: NewOpcode = PPC::CNTLZW8; break; 6995 case PPC::CNTTZW: NewOpcode = PPC::CNTTZW8; break; 6996 case PPC::RLWIMI: NewOpcode = PPC::RLWIMI8; break; 6997 case PPC::OR: NewOpcode = PPC::OR8; break; 6998 case PPC::SELECT_I4: NewOpcode = PPC::SELECT_I8; break; 6999 case PPC::ORI: NewOpcode = PPC::ORI8; break; 7000 case PPC::ORIS: NewOpcode = PPC::ORIS8; break; 7001 case PPC::AND: NewOpcode = PPC::AND8; break; 7002 case PPC::ANDI_rec: 7003 NewOpcode = PPC::ANDI8_rec; 7004 break; 7005 case PPC::ANDIS_rec: 7006 NewOpcode = PPC::ANDIS8_rec; 7007 break; 7008 } 7009 7010 // Note: During the replacement process, the nodes will be in an 7011 // inconsistent state (some instructions will have operands with values 7012 // of the wrong type). Once done, however, everything should be right 7013 // again. 7014 7015 SmallVector<SDValue, 4> Ops; 7016 for (const SDValue &V : PN->ops()) { 7017 if (!ToPromote.count(V.getNode()) && V.getValueType() == MVT::i32 && 7018 !isa<ConstantSDNode>(V)) { 7019 SDValue ReplOpOps[] = { ISR.getOperand(0), V, ISR.getOperand(2) }; 7020 SDNode *ReplOp = 7021 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, SDLoc(V), 7022 ISR.getNode()->getVTList(), ReplOpOps); 7023 Ops.push_back(SDValue(ReplOp, 0)); 7024 } else { 7025 Ops.push_back(V); 7026 } 7027 } 7028 7029 // Because all to-be-promoted nodes only have users that are other 7030 // promoted nodes (or the original INSERT_SUBREG), we can safely replace 7031 // the i32 result value type with i64. 
7032 7033 SmallVector<EVT, 2> NewVTs; 7034 SDVTList VTs = PN->getVTList(); 7035 for (unsigned i = 0, ie = VTs.NumVTs; i != ie; ++i) 7036 if (VTs.VTs[i] == MVT::i32) 7037 NewVTs.push_back(MVT::i64); 7038 else 7039 NewVTs.push_back(VTs.VTs[i]); 7040 7041 LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: "); 7042 LLVM_DEBUG(PN->dump(CurDAG)); 7043 7044 CurDAG->SelectNodeTo(PN, NewOpcode, CurDAG->getVTList(NewVTs), Ops); 7045 7046 LLVM_DEBUG(dbgs() << "\nNew: "); 7047 LLVM_DEBUG(PN->dump(CurDAG)); 7048 LLVM_DEBUG(dbgs() << "\n"); 7049 } 7050 7051 // Now we replace the original zero extend and its associated INSERT_SUBREG 7052 // with the value feeding the INSERT_SUBREG (which has now been promoted to 7053 // return an i64). 7054 7055 LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: "); 7056 LLVM_DEBUG(N->dump(CurDAG)); 7057 LLVM_DEBUG(dbgs() << "\nNew: "); 7058 LLVM_DEBUG(Op32.getNode()->dump(CurDAG)); 7059 LLVM_DEBUG(dbgs() << "\n"); 7060 7061 ReplaceUses(N, Op32.getNode()); 7062 } 7063 7064 if (MadeChange) 7065 CurDAG->RemoveDeadNodes(); 7066 } 7067 7068 static bool isVSXSwap(SDValue N) { 7069 if (!N->isMachineOpcode()) 7070 return false; 7071 unsigned Opc = N->getMachineOpcode(); 7072 7073 // Single-operand XXPERMDI or the regular XXPERMDI/XXSLDWI where the immediate 7074 // operand is 2. 7075 if (Opc == PPC::XXPERMDIs) { 7076 return isa<ConstantSDNode>(N->getOperand(1)) && 7077 N->getConstantOperandVal(1) == 2; 7078 } else if (Opc == PPC::XXPERMDI || Opc == PPC::XXSLDWI) { 7079 return N->getOperand(0) == N->getOperand(1) && 7080 isa<ConstantSDNode>(N->getOperand(2)) && 7081 N->getConstantOperandVal(2) == 2; 7082 } 7083 7084 return false; 7085 } 7086 7087 // TODO: Make this complete and replace with a table-gen bit. 7088 static bool isLaneInsensitive(SDValue N) { 7089 if (!N->isMachineOpcode()) 7090 return false; 7091 unsigned Opc = N->getMachineOpcode(); 7092 7093 switch (Opc) { 7094 default: 7095 return false; 7096 case PPC::VAVGSB: 7097 case PPC::VAVGUB: 7098 case PPC::VAVGSH: 7099 case PPC::VAVGUH: 7100 case PPC::VAVGSW: 7101 case PPC::VAVGUW: 7102 case PPC::VMAXFP: 7103 case PPC::VMAXSB: 7104 case PPC::VMAXUB: 7105 case PPC::VMAXSH: 7106 case PPC::VMAXUH: 7107 case PPC::VMAXSW: 7108 case PPC::VMAXUW: 7109 case PPC::VMINFP: 7110 case PPC::VMINSB: 7111 case PPC::VMINUB: 7112 case PPC::VMINSH: 7113 case PPC::VMINUH: 7114 case PPC::VMINSW: 7115 case PPC::VMINUW: 7116 case PPC::VADDFP: 7117 case PPC::VADDUBM: 7118 case PPC::VADDUHM: 7119 case PPC::VADDUWM: 7120 case PPC::VSUBFP: 7121 case PPC::VSUBUBM: 7122 case PPC::VSUBUHM: 7123 case PPC::VSUBUWM: 7124 case PPC::VAND: 7125 case PPC::VANDC: 7126 case PPC::VOR: 7127 case PPC::VORC: 7128 case PPC::VXOR: 7129 case PPC::VNOR: 7130 case PPC::VMULUWM: 7131 return true; 7132 } 7133 } 7134 7135 // Try to simplify (xxswap (vec-op (xxswap) (xxswap))) where vec-op is 7136 // lane-insensitive. 7137 static void reduceVSXSwap(SDNode *N, SelectionDAG *DAG) { 7138 // Our desired xxswap might be source of COPY_TO_REGCLASS. 7139 // TODO: Can we put this a common method for DAG? 7140 auto SkipRCCopy = [](SDValue V) { 7141 while (V->isMachineOpcode() && 7142 V->getMachineOpcode() == TargetOpcode::COPY_TO_REGCLASS) { 7143 // All values in the chain should have single use. 7144 if (V->use_empty() || !V->use_begin()->isOnlyUserOf(V.getNode())) 7145 return SDValue(); 7146 V = V->getOperand(0); 7147 } 7148 return V.hasOneUse() ? 
V : SDValue(); 7149 }; 7150 7151 SDValue VecOp = SkipRCCopy(N->getOperand(0)); 7152 if (!VecOp || !isLaneInsensitive(VecOp)) 7153 return; 7154 7155 SDValue LHS = SkipRCCopy(VecOp.getOperand(0)), 7156 RHS = SkipRCCopy(VecOp.getOperand(1)); 7157 if (!LHS || !RHS || !isVSXSwap(LHS) || !isVSXSwap(RHS)) 7158 return; 7159 7160 // These swaps may still have chain-uses here, count on dead code elimination 7161 // in following passes to remove them. 7162 DAG->ReplaceAllUsesOfValueWith(LHS, LHS.getOperand(0)); 7163 DAG->ReplaceAllUsesOfValueWith(RHS, RHS.getOperand(0)); 7164 DAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), N->getOperand(0)); 7165 } 7166 7167 void PPCDAGToDAGISel::PeepholePPC64() { 7168 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 7169 7170 while (Position != CurDAG->allnodes_begin()) { 7171 SDNode *N = &*--Position; 7172 // Skip dead nodes and any non-machine opcodes. 7173 if (N->use_empty() || !N->isMachineOpcode()) 7174 continue; 7175 7176 if (isVSXSwap(SDValue(N, 0))) 7177 reduceVSXSwap(N, CurDAG); 7178 7179 unsigned FirstOp; 7180 unsigned StorageOpcode = N->getMachineOpcode(); 7181 bool RequiresMod4Offset = false; 7182 7183 switch (StorageOpcode) { 7184 default: continue; 7185 7186 case PPC::LWA: 7187 case PPC::LD: 7188 case PPC::DFLOADf64: 7189 case PPC::DFLOADf32: 7190 RequiresMod4Offset = true; 7191 LLVM_FALLTHROUGH; 7192 case PPC::LBZ: 7193 case PPC::LBZ8: 7194 case PPC::LFD: 7195 case PPC::LFS: 7196 case PPC::LHA: 7197 case PPC::LHA8: 7198 case PPC::LHZ: 7199 case PPC::LHZ8: 7200 case PPC::LWZ: 7201 case PPC::LWZ8: 7202 FirstOp = 0; 7203 break; 7204 7205 case PPC::STD: 7206 case PPC::DFSTOREf64: 7207 case PPC::DFSTOREf32: 7208 RequiresMod4Offset = true; 7209 LLVM_FALLTHROUGH; 7210 case PPC::STB: 7211 case PPC::STB8: 7212 case PPC::STFD: 7213 case PPC::STFS: 7214 case PPC::STH: 7215 case PPC::STH8: 7216 case PPC::STW: 7217 case PPC::STW8: 7218 FirstOp = 1; 7219 break; 7220 } 7221 7222 // If this is a load or store with a zero offset, or within the alignment, 7223 // we may be able to fold an add-immediate into the memory operation. 7224 // The check against alignment is below, as it can't occur until we check 7225 // the arguments to N 7226 if (!isa<ConstantSDNode>(N->getOperand(FirstOp))) 7227 continue; 7228 7229 SDValue Base = N->getOperand(FirstOp + 1); 7230 if (!Base.isMachineOpcode()) 7231 continue; 7232 7233 unsigned Flags = 0; 7234 bool ReplaceFlags = true; 7235 7236 // When the feeding operation is an add-immediate of some sort, 7237 // determine whether we need to add relocation information to the 7238 // target flags on the immediate operand when we fold it into the 7239 // load instruction. 7240 // 7241 // For something like ADDItocL, the relocation information is 7242 // inferred from the opcode; when we process it in the AsmPrinter, 7243 // we add the necessary relocation there. A load, though, can receive 7244 // relocation from various flavors of ADDIxxx, so we need to carry 7245 // the relocation information in the target flags. 7246 switch (Base.getMachineOpcode()) { 7247 default: continue; 7248 7249 case PPC::ADDI8: 7250 case PPC::ADDI: 7251 // In some cases (such as TLS) the relocation information 7252 // is already in place on the operand, so copying the operand 7253 // is sufficient. 7254 ReplaceFlags = false; 7255 // For these cases, the immediate may not be divisible by 4, in 7256 // which case the fold is illegal for DS-form instructions. (The 7257 // other cases provide aligned addresses and are always safe.) 
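      // For example (illustrative operands only): folding the addend of
      // (ADDI8 $x, 8) into an LD with a zero offset to form (LD 8, $x) is
      // fine, but an addend of 6 must be rejected here because LD is a
      // DS-form instruction whose displacement must be a multiple of 4.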
      if (RequiresMod4Offset &&
          (!isa<ConstantSDNode>(Base.getOperand(1)) ||
           Base.getConstantOperandVal(1) % 4 != 0))
        continue;
      break;
    case PPC::ADDIdtprelL:
      Flags = PPCII::MO_DTPREL_LO;
      break;
    case PPC::ADDItlsldL:
      Flags = PPCII::MO_TLSLD_LO;
      break;
    case PPC::ADDItocL:
      Flags = PPCII::MO_TOC_LO;
      break;
    }

    SDValue ImmOpnd = Base.getOperand(1);

    // On PPC64, the TOC base pointer is guaranteed by the ABI only to have
    // 8-byte alignment, and so we can only use offsets less than 8 (otherwise,
    // we might have needed different @ha relocation values for the offset
    // pointers).
    int MaxDisplacement = 7;
    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
      const GlobalValue *GV = GA->getGlobal();
      Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout());
      MaxDisplacement = std::min((int)Alignment.value() - 1, MaxDisplacement);
    }

    bool UpdateHBase = false;
    SDValue HBase = Base.getOperand(0);

    int Offset = N->getConstantOperandVal(FirstOp);
    if (ReplaceFlags) {
      if (Offset < 0 || Offset > MaxDisplacement) {
        // If we have an addi(toc@l)/addis(toc@ha) pair, and the addis has only
        // one use, then we can do this for any offset, we just need to also
        // update the offset (i.e. the symbol addend) on the addis.
        if (Base.getMachineOpcode() != PPC::ADDItocL)
          continue;

        if (!HBase.isMachineOpcode() ||
            HBase.getMachineOpcode() != PPC::ADDIStocHA8)
          continue;

        if (!Base.hasOneUse() || !HBase.hasOneUse())
          continue;

        SDValue HImmOpnd = HBase.getOperand(1);
        if (HImmOpnd != ImmOpnd)
          continue;

        UpdateHBase = true;
      }
    } else {
      // If we're directly folding the addend from an addi instruction, then:
      //  1. In general, the offset on the memory access must be zero.
      //  2. If the addend is a constant, then it can be combined with a
      //     non-zero offset, but only if the result meets the encoding
      //     requirements.
      if (auto *C = dyn_cast<ConstantSDNode>(ImmOpnd)) {
        Offset += C->getSExtValue();

        if (RequiresMod4Offset && (Offset % 4) != 0)
          continue;

        if (!isInt<16>(Offset))
          continue;

        ImmOpnd = CurDAG->getTargetConstant(Offset, SDLoc(ImmOpnd),
                                            ImmOpnd.getValueType());
      } else if (Offset != 0) {
        continue;
      }
    }

    // We found an opportunity. Reverse the operands from the add
    // immediate and substitute them into the load or store. If
    // needed, update the target flags for the immediate operand to
    // reflect the necessary relocation information.
    LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
    LLVM_DEBUG(Base->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\nN: ");
    LLVM_DEBUG(N->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");

    // If the relocation information isn't already present on the
    // immediate operand, add it now.
    if (ReplaceFlags) {
      if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) {
        SDLoc dl(GA);
        const GlobalValue *GV = GA->getGlobal();
        Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout());
        // We can't perform this optimization for data whose alignment
        // is insufficient for the instruction encoding.
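        // For instance (an assumed case), a global with only 2-byte alignment
        // cannot safely feed a DS-form load through an @l relocation, since
        // the low bits of its address are not guaranteed to be zero; such
        // candidates are rejected just below.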
7353 if (Alignment < 4 && (RequiresMod4Offset || (Offset % 4) != 0)) { 7354 LLVM_DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n"); 7355 continue; 7356 } 7357 ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, Offset, Flags); 7358 } else if (ConstantPoolSDNode *CP = 7359 dyn_cast<ConstantPoolSDNode>(ImmOpnd)) { 7360 const Constant *C = CP->getConstVal(); 7361 ImmOpnd = CurDAG->getTargetConstantPool(C, MVT::i64, CP->getAlign(), 7362 Offset, Flags); 7363 } 7364 } 7365 7366 if (FirstOp == 1) // Store 7367 (void)CurDAG->UpdateNodeOperands(N, N->getOperand(0), ImmOpnd, 7368 Base.getOperand(0), N->getOperand(3)); 7369 else // Load 7370 (void)CurDAG->UpdateNodeOperands(N, ImmOpnd, Base.getOperand(0), 7371 N->getOperand(2)); 7372 7373 if (UpdateHBase) 7374 (void)CurDAG->UpdateNodeOperands(HBase.getNode(), HBase.getOperand(0), 7375 ImmOpnd); 7376 7377 // The add-immediate may now be dead, in which case remove it. 7378 if (Base.getNode()->use_empty()) 7379 CurDAG->RemoveDeadNode(Base.getNode()); 7380 } 7381 } 7382 7383 /// createPPCISelDag - This pass converts a legalized DAG into a 7384 /// PowerPC-specific DAG, ready for instruction scheduling. 7385 /// 7386 FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM, 7387 CodeGenOpt::Level OptLevel) { 7388 return new PPCDAGToDAGISel(TM, OptLevel); 7389 } 7390