//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a flag operand. Operand 0 is the chain operand.
  RET_FLAG,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
  BR_CCMASK,

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using the stack clash protector.
  // Allocation is performed in blocks, and each block is probed.
  PROBED_ALLOCA,

  // Count the number of bits set in each byte of operand 0.
  POPCNT,

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
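  // For example, the 64-bit form of UMUL_LOHI multiplies two GR64 operands
  // and returns the full 128-bit product in a single GR128 register pair.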
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation, operand 2 is a mask of condition-code values that may
  // result if the boolean is true.
  // Note that this operation is always optimized away; we will never
  // generate any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Similar to MVC, but for logic operations (AND, OR, XOR).
  NC,
  OC,
  XC,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC.
  CLC,

  // Use MVC to set a block of memory after storing the first byte.
  MEMSET_MVC,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
  IPM,

  // Compiler barrier only; generate a no-op.
  MEMBARRIER,

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style mask.
  // The first operand specifies the starting set bit and the second operand
  // specifies the ending set bit. Both operands count from the MSB of the
  // element.
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift each element of vector operand 0 by the number of bits specified
  // by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
  VSUM,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
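  // The loop itself is emitted later as a compare-and-swap sequence on the
  // containing aligned word; see e.g. emitAtomicLoadBinary below.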
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
    const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  Optional<MVT> RegisterVT) const override {
    // i128 inline assembly operand.
    if (VT == MVT::i128 &&
        RegisterVT.hasValue() && RegisterVT.getValue() == MVT::Untyped)
      return 1;
    return TargetLowering::getNumRegisters(Context, VT);
  }
  bool isCheapToSpeculateCtlz() const override { return true; }
  bool preferZeroCompareBranch() const override { return true; }
  bool hasBitPreservingFPLogic(EVT VT) const override {
    EVT ScVT = VT.getScalarType();
    return ScVT == MVT::f32 || ScVT == MVT::f64 || ScVT == MVT::f128;
  }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
    ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
    return Mask && Mask->getValue().isIntN(16);
  }
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override {
    // Do not shrink 64-bit FP constant-pool entries since LDEB is slower than
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  bool hasInlineStackProbe(MachineFunction &MF) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
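    // On SystemZ the add or subtract sets the condition code anyway, so the
    // overflow result is available even when the math result is also used.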
    return VT == MVT::i32 || VT == MVT::i64;
  }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::Constraint_o;
      case 'Q':
        return InlineAsm::Constraint_Q;
      case 'R':
        return InlineAsm::Constraint_R;
      case 'S':
        return InlineAsm::Constraint_S;
      case 'T':
        return InlineAsm::Constraint_T;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R6D;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R7D;
  }

  /// Override to support customized stack guard loading.
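  /// Returning true makes SelectionDAG emit a LOAD_STACK_GUARD node instead
  /// of loading the guard value through a global variable.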
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;
  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
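  // LowerOperation dispatches on the opcode; each lower* routine returns the
  // replacement DAG for one custom-lowered operation.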
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
                         unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
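  // Each emit* routine expands one pseudo instruction and returns the basic
  // block into which instructions following the pseudo should be inserted.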
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode, unsigned BitSize,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask,
                                          unsigned BitSize) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode,
                                       bool IsMemset = false) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;             // The 128 bits as an integer.
  APInt SplatBits;           // Smallest splat value.
  APInt SplatUndef;          // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;

public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APFloat FPImm);
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};

} // end namespace llvm

#endif