//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a glue operand. Operand 0 is the chain operand.
  RET_GLUE,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N" (so, for example, a mask
  // of 10, 0b1010, means "branch if CC is 0 or 2"); operand 2 is the
  // target block and operand 3 is the flag operand.
  BR_CCMASK,

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand.
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using stack clash protection.
  // Allocation is performed by block, and each block is probed.
  PROBED_ALLOCA,

  // Count the number of bits set in each byte of operand 0.
  POPCNT,
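
  // (POPCNT example, illustrative only: an i32 input of 0x000000ff yields
  // 0x00000008, since only the low byte has any bits set and it has eight
  // of them.)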

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation; operand 2 is a mask of condition-code values that may
  // result if the boolean is true.
  // Note that this operation is always optimized away; we will never
  // generate any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Similar to MVC, but for logic operations (AND, OR, XOR).
  NC,
  OC,
  XC,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC.
  CLC,

  // Use MVC to set a block of memory after storing the first byte.
  MEMSET_MVC,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
  IPM,

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style mask.
  // The first operand specifies the starting set bit and the second operand
  // specifies the ending set bit. Both operands count from the MSB of the
  // element.
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1.
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,
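
  // (SHL_DOUBLE example, assuming a byte shift amount as used by VSLDB:
  // with v16i8 operands A = [a0..a15] and B = [b0..b15] and a shift of 3,
  // the result is [a3..a15, b0, b1, b2].)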

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift each element of vector operand 0 by the number of bits specified
  // by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
  VSUM,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // z/OS XPLINK ADA Entry
  // Wraps a TargetGlobalAddress that should be loaded from a function's
  // Associated Data Area (ADA). The ADA is passed to the function by the
  // caller in R5, the register defined for this purpose by the XPLINK ABI.
  // Operand 0: the GlobalValue/External Symbol
  // Operand 1: the ADA register
  // Operand 2: the offset (0 for the first and 8 for the second element in the
  //            function descriptor)
  ADA_ENTRY,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,
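
  // (A minimal construction sketch, assuming the usual i32 CC result plus
  // chain; the value names are illustrative:
  //   SDValue Ops[] = {Chain, LHS, RHS};
  //   SDValue CC = DAG.getNode(SystemZISD::STRICT_FCMP, DL,
  //                            DAG.getVTList(MVT::i32, MVT::Other), Ops);
  // The signaling form, STRICT_FCMPS, is built the same way.)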

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,
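
  // (A worked reading of the rotate operands: a 16-bit field that occupies
  // the low half of its aligned word sits in bits 16-31, counting from the
  // MSB, so operand 2 is 16, operand 3 is -16 and operand 4 is 16; rotating
  // the containing i32 left by 16 brings the field into the high bits.)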

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT) const override {
    // i128 inline assembly operand.
    if (VT == MVT::i128 && RegisterVT && *RegisterVT == MVT::Untyped)
      return 1;
    return TargetLowering::getNumRegisters(Context, VT);
  }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool preferZeroCompareBranch() const override { return true; }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
    ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
    return Mask && Mask->getValue().isIntN(16);
  }
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override {
    // Do not shrink 64-bit FP constpool entries since LDEB is slower than
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  bool hasInlineStackProbe(const MachineFunction &MF) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;
  bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const override;
  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }
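
  // (For instance, an i64 llvm.sadd.with.overflow becomes a single
  // SystemZISD::SADDO node whose second result is the CC value, and the
  // overflow flag is then derived from CC rather than recomputed, so the
  // intrinsic form is profitable even when the sum itself is also used.
  // This is a summary of the intent, not normative documentation.)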

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::Constraint_o;
      case 'Q':
        return InlineAsm::Constraint_Q;
      case 'R':
        return InlineAsm::Constraint_R;
      case 'S':
        return InlineAsm::Constraint_S;
      case 'T':
        return InlineAsm::Constraint_T;
      }
    } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') {
      switch (ConstraintCode[1]) {
      default:
        break;
      case 'Q':
        return InlineAsm::Constraint_ZQ;
      case 'R':
        return InlineAsm::Constraint_ZR;
      case 'S':
        return InlineAsm::Constraint_ZS;
      case 'T':
        return InlineAsm::Constraint_ZT;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
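
  // (As a summary of the address shapes these codes select, not normative
  // text: Q/ZQ is a base register plus 12-bit unsigned displacement without
  // an index register, R/ZR is the same with an index register, S/ZS is a
  // base register plus 20-bit signed displacement without an index, and
  // T/ZT adds an index register to that.)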

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R6D;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R7D;
  }

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  std::pair<SDValue, SDValue>
  makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName,
                   EVT RetVT, ArrayRef<SDValue> Ops, CallingConv::ID CallConv,
                   bool IsSigned, SDLoc DL, bool DoesNotReturn,
                   bool IsReturnValueUsed) const;
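
  // (A hypothetical call site, with "__helper_fn" standing in for a real
  // runtime routine; a sketch of the intended usage only:
  //   SDValue Args[] = {Op.getOperand(0)};
  //   std::pair<SDValue, SDValue> ResultAndChain =
  //       makeExternalCall(Chain, DAG, "__helper_fn", MVT::i64, Args,
  //                        CallingConv::C, /*IsSigned=*/true, DL,
  //                        /*DoesNotReturn=*/false,
  //                        /*IsReturnValueUsed=*/true);
  // The first element is the return value, the second the output chain.)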

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(const MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
  SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
                         unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;
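
  // (For example, a 64-bit CGR compare followed by a BRC on the "equal"
  // mask can typically be fused into a single CGRJE compare-and-branch;
  // the mnemonics here are illustrative.)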

  // Implement EmitInstrWithCustomInserter for individual operation types.
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode, unsigned BitSize,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask,
                                          unsigned BitSize) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode,
                                       bool IsMemset = false) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;    // The 128 bits as an integer.
  APInt SplatBits;  // Smallest splat value.
  APInt SplatUndef; // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;
public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APInt IntImm);
  SystemZVectorConstantInfo(APFloat FPImm)
      : SystemZVectorConstantInfo(FPImm.bitcastToAPInt()) {
    isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  }
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};

} // end namespace llvm

#endif