//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that SystemZ uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H
#define LLVM_LIB_TARGET_SYSTEMZ_SYSTEMZISELLOWERING_H

#include "SystemZ.h"
#include "SystemZInstrInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include <optional>

namespace llvm {
namespace SystemZISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  // Return with a flag operand. Operand 0 is the chain operand.
  RET_FLAG,

  // Calls a function. Operand 0 is the chain operand and operand 1
  // is the target address. The arguments start at operand 2.
  // There is an optional glue operand at the end.
  CALL,
  SIBCALL,

  // TLS calls. Like regular calls, except operand 1 is the TLS symbol.
  // (The call target is implicitly __tls_get_offset.)
  TLS_GDCALL,
  TLS_LDCALL,

  // Wraps a TargetGlobalAddress that should be loaded using PC-relative
  // accesses (LARL). Operand 0 is the address.
  PCREL_WRAPPER,

  // Used in cases where an offset is applied to a TargetGlobalAddress.
  // Operand 0 is the full TargetGlobalAddress and operand 1 is a
  // PCREL_WRAPPER for an anchor point. This is used so that we can
  // cheaply refer to either the full address or the anchor point
  // as a register base.
  PCREL_OFFSET,

  // Integer comparisons. There are three operands: the two values
  // to compare, and an integer of type SystemZICMP.
  ICMP,

  // Floating-point comparisons. The two operands are the values to compare.
  FCMP,

  // Test under mask. The first operand is ANDed with the second operand
  // and the condition codes are set on the result. The third operand is
  // a boolean that is true if the condition codes need to distinguish
  // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
  // register forms do but the memory forms don't).
  TM,

  // Branches if a condition is true. Operand 0 is the chain operand;
  // operand 1 is the 4-bit condition-code mask, with bit N in
  // big-endian order meaning "branch if CC=N"; operand 2 is the
  // target block and operand 3 is the flag operand.
  BR_CCMASK,

  // Selects between operand 0 and operand 1. Operand 2 is the
  // mask of condition-code values for which operand 0 should be
  // chosen over operand 1; it has the same form as BR_CCMASK.
  // Operand 3 is the flag operand. (A worked example of the mask
  // encoding follows POPCNT below.)
  SELECT_CCMASK,

  // Evaluates to the gap between the stack pointer and the
  // base of the dynamically-allocatable area.
  ADJDYNALLOC,

  // For allocating stack space when using the stack clash protector.
  // Allocation is performed by block, and each block is probed.
  PROBED_ALLOCA,

  // Count the number of bits set in each byte of operand 0.
  POPCNT,
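
  // As a worked example of the CC-mask encoding used by BR_CCMASK and
  // SELECT_CCMASK above (illustrative, following the big-endian bit order
  // described there): bit 3 of the 4-bit mask corresponds to CC=0 and
  // bit 0 to CC=3, so a mask of 8 (0b1000) means "CC is 0" and a mask of
  // 3 (0b0011) means "CC is 2 or 3".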

  // Wrappers around the ISD opcodes of the same name. The output is GR128.
  // Input operands may be GR64 or GR32, depending on the instruction.
  SMUL_LOHI,
  UMUL_LOHI,
  SDIVREM,
  UDIVREM,

  // Add/subtract with overflow/carry. These have the same operands as
  // the corresponding standard operations, except with the carry flag
  // replaced by a condition code value.
  SADDO, SSUBO, UADDO, USUBO, ADDCARRY, SUBCARRY,

  // Set the condition code from a boolean value in operand 0.
  // Operand 1 is a mask of all condition-code values that may result from
  // this operation, and operand 2 is a mask of condition-code values that
  // may result if the boolean is true.
  // Note that this operation is always optimized away; we never generate
  // any code for it.
  GET_CCMASK,

  // Use a series of MVCs to copy bytes from one memory location to another.
  // The operands are:
  // - the target address
  // - the source address
  // - the constant length
  //
  // This isn't a memory opcode because we'd need to attach two
  // MachineMemOperands rather than one.
  MVC,

  // Similar to MVC, but for logic operations (AND, OR, XOR).
  NC,
  OC,
  XC,

  // Use CLC to compare two blocks of memory, with the same comments
  // as for MVC.
  CLC,

  // Use MVC to set a block of memory after storing the first byte.
  MEMSET_MVC,

  // Use an MVST-based sequence to implement stpcpy().
  STPCPY,

  // Use a CLST-based sequence to implement strcmp(). The two input operands
  // are the addresses of the strings to compare.
  STRCMP,

  // Use an SRST-based sequence to search a block of memory. The first
  // operand is the end address, the second is the start, and the third
  // is the character to search for. CC is set to 1 on success and 2
  // on failure.
  SEARCH_STRING,

  // Store the CC value in bits 29 and 28 of an integer.
  IPM,

  // Transaction begin. The first operand is the chain, the second
  // the TDB pointer, and the third the immediate control field.
  // Returns CC value and chain.
  TBEGIN,
  TBEGIN_NOFLOAT,

  // Transaction end. Just the chain operand. Returns CC value and chain.
  TEND,

  // Create a vector constant by filling byte N of the result with bit
  // 15-N of the single operand.
  BYTE_MASK,

  // Create a vector constant by replicating an element-sized RISBG-style mask.
  // The first operand specifies the starting set bit and the second operand
  // specifies the ending set bit. Both operands count from the MSB of the
  // element.
  ROTATE_MASK,

  // Replicate a GPR scalar value into all elements of a vector.
  REPLICATE,

  // Create a vector from two i64 GPRs.
  JOIN_DWORDS,

  // Replicate one element of a vector into all elements. The first operand
  // is the vector and the second is the index of the element to replicate.
  SPLAT,

  // Interleave elements from the high half of operand 0 and the high half
  // of operand 1. (A worked example follows SHL_DOUBLE below.)
  MERGE_HIGH,

  // Likewise for the low halves.
  MERGE_LOW,

  // Concatenate the vectors in the first two operands, shift them left
  // by the third operand, and take the first half of the result.
  SHL_DOUBLE,
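
  // Worked example for MERGE_HIGH and MERGE_LOW above (illustrative): for
  // v4i32 operands A = <a0, a1, a2, a3> and B = <b0, b1, b2, b3>,
  // MERGE_HIGH produces <a0, b0, a1, b1> and MERGE_LOW produces
  // <a2, b2, a3, b3>.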

  // Take one element of the first v2i64 operand and one element of the
  // second v2i64 operand and concatenate them to form a v2i64 result.
  // The third operand is a 4-bit value of the form 0A0B, where A and B
  // are the element selectors for the first and second operands
  // respectively.
  PERMUTE_DWORDS,

  // Perform a general vector permute on vector operands 0 and 1.
  // Each byte of operand 2 controls the corresponding byte of the result,
  // in the same way as a byte-level VECTOR_SHUFFLE mask.
  PERMUTE,

  // Pack vector operands 0 and 1 into a single vector with half-sized elements.
  PACK,

  // Likewise, but saturate the result and set CC. PACKS_CC does signed
  // saturation and PACKLS_CC does unsigned saturation.
  PACKS_CC,
  PACKLS_CC,

  // Unpack the first half of vector operand 0 into double-sized elements.
  // UNPACK_HIGH sign-extends and UNPACKL_HIGH zero-extends.
  UNPACK_HIGH,
  UNPACKL_HIGH,

  // Likewise for the second half.
  UNPACK_LOW,
  UNPACKL_LOW,

  // Shift each element of vector operand 0 by the number of bits specified
  // by scalar operand 1.
  VSHL_BY_SCALAR,
  VSRL_BY_SCALAR,
  VSRA_BY_SCALAR,

  // For each element of the output type, sum across all sub-elements of
  // operand 0 belonging to the corresponding element, and add in the
  // rightmost sub-element of the corresponding element of operand 1.
  VSUM,

  // Compare integer vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VICMPE is for equality, VICMPH for "signed greater than"
  // and VICMPHL for "unsigned greater than".
  VICMPE,
  VICMPH,
  VICMPHL,

  // Likewise, but also set the condition codes on the result.
  VICMPES,
  VICMPHS,
  VICMPHLS,

  // Compare floating-point vector operands 0 and 1 to produce the usual 0/-1
  // vector result. VFCMPE is for "ordered and equal", VFCMPH for "ordered and
  // greater than" and VFCMPHE for "ordered and greater than or equal to".
  VFCMPE,
  VFCMPH,
  VFCMPHE,

  // Likewise, but also set the condition codes on the result.
  VFCMPES,
  VFCMPHS,
  VFCMPHES,

  // Test floating-point data class for vectors.
  VFTCI,

  // Extend the even f32 elements of vector operand 0 to produce a vector
  // of f64 elements.
  VEXTEND,

  // Round the f64 elements of vector operand 0 to f32s and store them in the
  // even elements of the result.
  VROUND,

  // AND the two vector operands together and set CC based on the result.
  VTM,

  // String operations that set CC as a side-effect.
  VFAE_CC,
  VFAEZ_CC,
  VFEE_CC,
  VFEEZ_CC,
  VFENE_CC,
  VFENEZ_CC,
  VISTR_CC,
  VSTRC_CC,
  VSTRCZ_CC,
  VSTRS_CC,
  VSTRSZ_CC,

  // Test Data Class.
  //
  // Operand 0: the value to test
  // Operand 1: the bit mask
  TDC,

  // Strict variants of scalar floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_FCMP = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCMPS,

  // Strict variants of vector floating-point comparisons.
  // Quiet and signaling versions.
  STRICT_VFCMPE,
  STRICT_VFCMPH,
  STRICT_VFCMPHE,
  STRICT_VFCMPES,
  STRICT_VFCMPHS,
  STRICT_VFCMPHES,

  // Strict variants of VEXTEND and VROUND.
  STRICT_VEXTEND,
  STRICT_VROUND,
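
  // Worked example for VEXTEND/VROUND and their strict variants above
  // (illustrative, derived from the definitions given there): applying
  // VEXTEND to the v4f32 vector <x0, x1, x2, x3> extends the even elements,
  // giving the v2f64 vector <x0, x2>; VROUND goes the other way, rounding a
  // v2f64 vector <y0, y1> to f32 and placing the results in the even f32
  // elements of the output.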

  // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
  // ATOMIC_LOAD_<op>.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the second operand of <op>, in the high bits of an i32
  //            for everything except ATOMIC_SWAPW
  // Operand 2: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 3: the negative of operand 2, for rotating the other way
  // Operand 4: the width of the field in bits (8 or 16)
  ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
  ATOMIC_LOADW_ADD,
  ATOMIC_LOADW_SUB,
  ATOMIC_LOADW_AND,
  ATOMIC_LOADW_OR,
  ATOMIC_LOADW_XOR,
  ATOMIC_LOADW_NAND,
  ATOMIC_LOADW_MIN,
  ATOMIC_LOADW_MAX,
  ATOMIC_LOADW_UMIN,
  ATOMIC_LOADW_UMAX,

  // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
  //
  // Operand 0: the address of the containing 32-bit-aligned field
  // Operand 1: the compare value, in the low bits of an i32
  // Operand 2: the swap value, in the low bits of an i32
  // Operand 3: how many bits to rotate the i32 left to bring the first
  //            operand into the high bits
  // Operand 4: the negative of operand 3, for rotating the other way
  // Operand 5: the width of the field in bits (8 or 16)
  ATOMIC_CMP_SWAPW,

  // Atomic compare-and-swap returning CC value.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP,

  // 128-bit atomic load.
  // Val, OUTCHAIN = ATOMIC_LOAD_128(INCHAIN, ptr)
  ATOMIC_LOAD_128,

  // 128-bit atomic store.
  // OUTCHAIN = ATOMIC_STORE_128(INCHAIN, val, ptr)
  ATOMIC_STORE_128,

  // 128-bit atomic compare-and-swap.
  // Val, CC, OUTCHAIN = ATOMIC_CMP_SWAP_128(INCHAIN, ptr, cmp, swap)
  ATOMIC_CMP_SWAP_128,

  // Byte swapping load/store. Same operands as regular load/store.
  LRV, STRV,

  // Element swapping load/store. Same operands as regular load/store.
  VLER, VSTER,

  // Prefetch from the second operand using the 4-bit control code in
  // the first operand. The code is 1 for a load prefetch and 2 for
  // a store prefetch.
  PREFETCH
};

// Return true if OPCODE is some kind of PC-relative address.
inline bool isPCREL(unsigned Opcode) {
  return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
} // end namespace SystemZISD

namespace SystemZICMP {
// Describes whether an integer comparison needs to be signed or unsigned,
// or whether either type is OK.
enum {
  Any,
  UnsignedOnly,
  SignedOnly
};
} // end namespace SystemZICMP

class SystemZSubtarget;

class SystemZTargetLowering : public TargetLowering {
public:
  explicit SystemZTargetLowering(const TargetMachine &TM,
                                 const SystemZSubtarget &STI);

  bool useSoftFloat() const override;

  // Override TargetLowering.
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }
  MVT getVectorIdxTy(const DataLayout &DL) const override {
    // Only the lower 12 bits of an element index are used, so we don't
    // want to clobber the upper 32 bits of a GPR unnecessarily.
    return MVT::i32;
  }
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
    const override {
    // Widen subvectors to the full width rather than promoting integer
    // elements. This is better because:
    //
    // (a) it means that we can handle the ABI for passing and returning
    //     sub-128 vectors without having to handle them as legal types.
    //
    // (b) we don't have instructions to extend on load and truncate on store,
    //     so promoting the integers is less efficient.
    //
    // (c) there are no multiplication instructions for the widest integer
    //     type (v2i64).
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }
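  // For instance (illustrative): under the widening policy above, a v4i8
  // value would be widened to the full-width v16i8 type (with undefined
  // extra lanes) rather than having its elements promoted to give v4i32.
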
  unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT) const override {
    // i128 inline assembly operand.
    if (VT == MVT::i128 && RegisterVT && *RegisterVT == MVT::Untyped)
      return 1;
    return TargetLowering::getNumRegisters(Context, VT);
  }
  bool isCheapToSpeculateCtlz(Type *) const override { return true; }
  bool preferZeroCompareBranch() const override { return true; }
  bool hasBitPreservingFPLogic(EVT VT) const override {
    EVT ScVT = VT.getScalarType();
    return ScVT == MVT::f32 || ScVT == MVT::f64 || ScVT == MVT::f128;
  }
  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override {
    ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
    return Mask && Mask->getValue().isIntN(16);
  }
  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &,
                         EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override {
    // Do not shrink 64-bit FP constant-pool entries since LDEB is slower than
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  bool hasInlineStackProbe(const MachineFunction &MF) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
                                      MachineMemOperand::Flags Flags,
                                      unsigned *Fast) const override;
  bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const override;
  EVT getOptimalMemOpType(const MemOp &Op,
                          const AttributeList &FuncAttributes) const override;
  bool isTruncateFree(Type *, Type *) const override;
  bool isTruncateFree(EVT, EVT) const override;

  bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                            bool MathUsed) const override {
    // Form add and sub with overflow intrinsics regardless of any extra
    // users of the math result.
    return VT == MVT::i32 || VT == MVT::i64;
  }
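  // For example (illustrative): IR such as
  //   %p = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  // is selected through the UADDO node above, whose condition-code result
  // supplies the overflow bit, even when the sum itself is also used.
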

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  const char *getTargetNodeName(unsigned Opcode) const override;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  TargetLowering::ConstraintType
  getConstraintType(StringRef Constraint) const override;
  TargetLowering::ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode.size() == 1) {
      switch (ConstraintCode[0]) {
      default:
        break;
      case 'o':
        return InlineAsm::Constraint_o;
      case 'Q':
        return InlineAsm::Constraint_Q;
      case 'R':
        return InlineAsm::Constraint_R;
      case 'S':
        return InlineAsm::Constraint_S;
      case 'T':
        return InlineAsm::Constraint_T;
      }
    } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') {
      switch (ConstraintCode[1]) {
      default:
        break;
      case 'Q':
        return InlineAsm::Constraint_ZQ;
      case 'R':
        return InlineAsm::Constraint_ZR;
      case 'S':
        return InlineAsm::Constraint_ZS;
      case 'T':
        return InlineAsm::Constraint_ZT;
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R6D;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return SystemZ::R7D;
  }

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override {
    return true;
  }
  void insertSSPDeclarations(Module &M) const override {
  }

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                             SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
  bool allowTruncateForTailCall(Type *, Type *) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC)
      const override;
  SDValue joinRegisterPartsIntoValue(
      SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
      unsigned NumParts, MVT PartVT, EVT ValueVT,
      std::optional<CallingConv::ID> CC) const override;
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  std::pair<SDValue, SDValue>
  makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName,
                   EVT RetVT, ArrayRef<SDValue> Ops, CallingConv::ID CallConv,
                   bool IsSigned, SDLoc DL, bool DoesNotReturn,
                   bool IsReturnValueUsed) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Determine which of the bits specified in Mask are known to be either
  /// zero or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  /// Determine the number of bits in the operation that are sign bits.
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::ANY_EXTEND;
  }
  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::ZERO_EXTEND;
  }

  bool supportSwiftError() const override {
    return true;
  }

  unsigned getStackProbeSize(const MachineFunction &MF) const;

private:
  const SystemZSubtarget &Subtarget;

  // Implement LowerOperation for individual opcodes.
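  //
  // Conceptually, LowerOperation() switches on the opcode and forwards to
  // one of the helpers below (a sketch, not the verbatim implementation in
  // SystemZISelLowering.cpp):
  //
  //   switch (Op.getOpcode()) {
  //   case ISD::SETCC:     return lowerSETCC(Op, DAG);
  //   case ISD::BR_CC:     return lowerBR_CC(Op, DAG);
  //   case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG);
  //   // ...
  //   default:             llvm_unreachable("Unexpected node to lower");
  //   }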
  SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                       const SDLoc &DL, EVT VT,
                       SDValue CmpOp0, SDValue CmpOp1, SDValue Chain) const;
  SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL,
                           EVT VT, ISD::CondCode CC,
                           SDValue CmpOp0, SDValue CmpOp1,
                           SDValue Chain = SDValue(),
                           bool IsSignaling = false) const;
  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTRICT_FSETCC(SDValue Op, SelectionDAG &DAG,
                             bool IsSignaling) const;
  SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
                             SelectionDAG &DAG) const;
  SDValue lowerTLSGetOffset(GlobalAddressSDNode *Node,
                            SelectionDAG &DAG, unsigned Opcode,
                            SDValue GOTOffset) const;
  SDValue lowerThreadPointer(const SDLoc &DL, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(BlockAddressSDNode *Node,
                            SelectionDAG &DAG) const;
  SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_ELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerXALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
                              unsigned Opcode) const;
  SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  bool isVectorElementLoad(SDValue Op) const;
  SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SmallVectorImpl<SDValue> &Elems) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShift(SDValue Op, SelectionDAG &DAG, unsigned ByScalar) const;
  SDValue lowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  bool canTreatAsByteVector(EVT VT) const;
  SDValue combineExtract(const SDLoc &DL, EVT ElemVT, EVT VecVT, SDValue OrigOp,
                         unsigned Index, DAGCombinerInfo &DCI,
                         bool Force) const;
  SDValue combineTruncateExtract(const SDLoc &DL, EVT TruncVT, SDValue Op,
                                 DAGCombinerInfo &DCI) const;
  SDValue combineZERO_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSIGN_EXTEND_INREG(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineMERGE(SDNode *N, DAGCombinerInfo &DCI) const;
  bool canLoadStoreByteSwapped(EVT VT) const;
  SDValue combineLOAD(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSTORE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineVECTOR_SHUFFLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineEXTRACT_VECTOR_ELT(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineJOIN_DWORDS(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_ROUND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFP_EXTEND(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINT_TO_FP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBSWAP(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineBR_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineSELECT_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineGET_CCMASK(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineIntDIVREM(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineINTRINSIC(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue unwrapAddress(SDValue N) const override;

  // If the last instruction before MBBI in MBB was some form of COMPARE,
  // try to replace it with a COMPARE AND BRANCH just before MBBI.
  // CCMask and Target are the BRC-like operands for the branch.
  // Return true if the change was made.
  bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  unsigned CCMask,
                                  MachineBasicBlock *Target) const;

  // Implement EmitInstrWithCustomInserter for individual operation types.
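  //
  // As with LowerOperation() above, EmitInstrWithCustomInserter()
  // conceptually switches on MI.getOpcode() and forwards to one of these
  // emit* helpers (a sketch, not the verbatim implementation); for example,
  // the select pseudo instructions are expanded by emitSelect() into a
  // conditional-branch diamond over the two source operands.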
  MachineBasicBlock *emitSelect(MachineInstr &MI, MachineBasicBlock *BB) const;
  MachineBasicBlock *emitCondStore(MachineInstr &MI, MachineBasicBlock *BB,
                                   unsigned StoreOpcode, unsigned STOCOpcode,
                                   bool Invert) const;
  MachineBasicBlock *emitPair128(MachineInstr &MI,
                                 MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitExt128(MachineInstr &MI, MachineBasicBlock *MBB,
                                bool ClearEven) const;
  MachineBasicBlock *emitAtomicLoadBinary(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          unsigned BinOpcode, unsigned BitSize,
                                          bool Invert = false) const;
  MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned CompareOpcode,
                                          unsigned KeepOldMask,
                                          unsigned BitSize) const;
  MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr &MI,
                                        MachineBasicBlock *BB) const;
  MachineBasicBlock *emitMemMemWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode,
                                       bool IsMemset = false) const;
  MachineBasicBlock *emitStringWrapper(MachineInstr &MI, MachineBasicBlock *BB,
                                       unsigned Opcode) const;
  MachineBasicBlock *emitTransactionBegin(MachineInstr &MI,
                                          MachineBasicBlock *MBB,
                                          unsigned Opcode, bool NoFloat) const;
  MachineBasicBlock *emitLoadAndTestCmp0(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode) const;
  MachineBasicBlock *emitProbedAlloca(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  SDValue getBackchainAddress(SDValue SP, SelectionDAG &DAG) const;

  MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const override;
  const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;
};

struct SystemZVectorConstantInfo {
private:
  APInt IntBits;             // The 128 bits as an integer.
  APInt SplatBits;           // Smallest splat value.
  APInt SplatUndef;          // Bits corresponding to undef operands of the BVN.
  unsigned SplatBitSize = 0;
  bool isFP128 = false;

public:
  unsigned Opcode = 0;
  SmallVector<unsigned, 2> OpVals;
  MVT VecVT;
  SystemZVectorConstantInfo(APInt IntImm);
  SystemZVectorConstantInfo(APFloat FPImm)
      : SystemZVectorConstantInfo(FPImm.bitcastToAPInt()) {
    isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
  }
  SystemZVectorConstantInfo(BuildVectorSDNode *BVN);
  bool isVectorConstantLegal(const SystemZSubtarget &Subtarget);
};

} // end namespace llvm

#endif