Lines Matching +full:high +full:- +full:vt

1 //===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
35 #define DEBUG_TYPE "systemz-lower"
47 // Chain if this is a strict floating-point comparison.
64 // Classify VT as either 32 or 64 bit.
65 static bool is32Bit(EVT VT) {
66 switch (VT.getSimpleVT().SimpleTy) {
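The search clips this helper mid-switch; the elided remainder is not quoted by the match, but from the comment above it the body can only be separating the two scalar cases, roughly (a sketch, reconstructed rather than copied):

    case MVT::i32:
      return true;
    case MVT::i64:
      return false;
    default:
      llvm_unreachable("Unsupported type");
    }
  }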
127 setStackPointerRegisterToSaveRestore(Regs->getStackPointerRegister());
129 // TODO: It may be better to default to latency-oriented scheduling, however
130 // LLVM's current latency-oriented scheduler can't handle physreg definitions
131 // such as SystemZ has with CC, so set this to the register-pressure
140 // Instructions are strings of 2-byte aligned 2-byte values.
142 // For performance reasons we prefer 16-byte alignment.
149 MVT VT = MVT::SimpleValueType(I);
150 if (isTypeLegal(VT)) {
151 // Lower SET_CC into an IPM-based sequence.
152 setOperationAction(ISD::SETCC, VT, Custom);
153 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
154 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
157 setOperationAction(ISD::SELECT, VT, Expand);
160 setOperationAction(ISD::SELECT_CC, VT, Custom);
161 setOperationAction(ISD::BR_CC, VT, Custom);
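As context for the "IPM-based sequence" comment: the compare sets the 2-bit condition code, IPM deposits it into a GPR, and a shift recovers it for the boolean computation. A simplified scalar model (illustrative only; the program-mask bits and the untouched low bits of the register are ignored here):

  #include <cstdint>

  // cc is the 2-bit condition code (0-3) left behind by the compare.
  uint32_t modelIPMSequence(uint32_t cc) {
    uint32_t gpr = cc << 28; // IPM: CC lands in bits 2-3 of the 32-bit word
    return gpr >> 28;        // SRL 28: raw CC value, ready to map to 0/1
  }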
176 MVT VT = MVT::SimpleValueType(I);
177 if (isTypeLegal(VT) && VT != MVT::i128) {
178 setOperationAction(ISD::ABS, VT, Legal);
181 setOperationAction(ISD::SDIV, VT, Expand);
182 setOperationAction(ISD::UDIV, VT, Expand);
183 setOperationAction(ISD::SREM, VT, Expand);
184 setOperationAction(ISD::UREM, VT, Expand);
185 setOperationAction(ISD::SDIVREM, VT, Custom);
186 setOperationAction(ISD::UDIVREM, VT, Custom);
189 setOperationAction(ISD::SADDO, VT, Custom);
190 setOperationAction(ISD::SSUBO, VT, Custom);
193 setOperationAction(ISD::UADDO, VT, Custom);
194 setOperationAction(ISD::USUBO, VT, Custom);
197 setOperationAction(ISD::UADDO_CARRY, VT, Custom);
198 setOperationAction(ISD::USUBO_CARRY, VT, Custom);
202 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
206 setOperationAction(ISD::CTPOP, VT, Custom);
208 setOperationAction(ISD::CTPOP, VT, Expand);
211 setOperationAction(ISD::CTTZ, VT, Expand);
212 setOperationAction(ISD::ROTR, VT, Expand);
215 setOperationAction(ISD::MULHS, VT, Expand);
216 setOperationAction(ISD::MULHU, VT, Expand);
217 setOperationAction(ISD::SMUL_LOHI, VT, Custom);
218 setOperationAction(ISD::UMUL_LOHI, VT, Custom);
225 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
229 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
231 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);
234 setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
236 setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
279 // Type legalization will convert 8- and 16-bit atomic operations into
280 // forms that operate on i32s (but still keeping the original memory VT).
310 // We can use the CC result of compare-and-swap to implement
322 // Handle unsigned 32-bit types as signed 64-bit types.
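A scalar model of that trick (illustrative, not the DAG code): zero-extend the unsigned 32-bit value to 64 bits and use the signed conversion, which is exact because the value then fits in 63 bits:

  #include <cstdint>

  double u32ToDouble(uint32_t X) {
    return static_cast<double>(static_cast<int64_t>(X)); // never negative
  }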
330 // We have native support for a 64-bit CTLZ, via FLOGR.
335 // On z15 we have native support for a 64-bit CTPOP.
341 // Give LowerOperation the chance to replace 64-bit ORs with subregs.
362 for (MVT VT : MVT::integer_valuetypes()) {
363 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
364 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
365 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
376 // 160-byte area at the bottom of the stack.
389 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
392 if (getOperationAction(Opcode, VT) == Legal)
393 setOperationAction(Opcode, VT, Expand);
397 setTruncStoreAction(VT, InnerVT, Expand);
398 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
399 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
400 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
403 if (isTypeLegal(VT)) {
408 setOperationAction(ISD::LOAD, VT, Legal);
409 setOperationAction(ISD::STORE, VT, Legal);
410 setOperationAction(ISD::VSELECT, VT, Legal);
411 setOperationAction(ISD::BITCAST, VT, Legal);
412 setOperationAction(ISD::UNDEF, VT, Legal);
416 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
417 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
422 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
423 if (isTypeLegal(VT)) {
425 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
426 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
427 setOperationAction(ISD::ADD, VT, Legal);
428 setOperationAction(ISD::SUB, VT, Legal);
429 if (VT != MVT::v2i64)
430 setOperationAction(ISD::MUL, VT, Legal);
431 setOperationAction(ISD::ABS, VT, Legal);
432 setOperationAction(ISD::AND, VT, Legal);
433 setOperationAction(ISD::OR, VT, Legal);
434 setOperationAction(ISD::XOR, VT, Legal);
436 setOperationAction(ISD::CTPOP, VT, Legal);
438 setOperationAction(ISD::CTPOP, VT, Custom);
439 setOperationAction(ISD::CTTZ, VT, Legal);
440 setOperationAction(ISD::CTLZ, VT, Legal);
443 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
446 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
447 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
451 setOperationAction(ISD::SHL, VT, Custom);
452 setOperationAction(ISD::SRA, VT, Custom);
453 setOperationAction(ISD::SRL, VT, Custom);
454 setOperationAction(ISD::ROTL, VT, Custom);
458 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
462 setOperationAction(ISD::SETCC, VT, Custom);
508 // Handle floating-point types.
512 MVT VT = MVT::SimpleValueType(I);
513 if (isTypeLegal(VT)) {
515 setOperationAction(ISD::FRINT, VT, Legal);
519 setOperationAction(ISD::FNEARBYINT, VT, Legal);
520 setOperationAction(ISD::FFLOOR, VT, Legal);
521 setOperationAction(ISD::FCEIL, VT, Legal);
522 setOperationAction(ISD::FTRUNC, VT, Legal);
523 setOperationAction(ISD::FROUND, VT, Legal);
527 setOperationAction(ISD::FSIN, VT, Expand);
528 setOperationAction(ISD::FCOS, VT, Expand);
529 setOperationAction(ISD::FSINCOS, VT, Expand);
530 setOperationAction(ISD::FREM, VT, Expand);
531 setOperationAction(ISD::FPOW, VT, Expand);
534 setOperationAction(ISD::IS_FPCLASS, VT, Custom);
536 // Handle constrained floating-point operations.
537 setOperationAction(ISD::STRICT_FADD, VT, Legal);
538 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
539 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
540 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
541 setOperationAction(ISD::STRICT_FMA, VT, Legal);
542 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
543 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
544 setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
545 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
547 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
548 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
549 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
550 setOperationAction(ISD::STRICT_FROUND, VT, Legal);
551 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
556 // Handle floating-point vector types.
558 // Scalar-to-vector conversion is just a subreg.
585 // Handle constrained floating-point operations.
651 // Handle constrained floating-point operations.
664 for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
666 setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
667 setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
668 setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
669 setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
673 // We only have fused f128 multiply-addition on vector registers.
684 // a load-and-extend of an f80 constant (in cases where the constant
686 for (MVT VT : MVT::fp_valuetypes())
687 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
695 // Floating-point truncation and stores need to be done separately.
700 // We have 64-bit FPR<->GPR moves, but need special handling for
701 // 32-bit forms.
707 // VASTART and VACOPY need to deal with the SystemZ-specific varargs
708 // structure, but VAEND is a no-op.
715 // Codes for which we want to perform some z-specific combinations.
747 // generated by target-independent code don't when the byte value is
749 // than "STC;MVC". Handle the choice in target-specific code instead.
753 // Default to having -disable-strictnode-mutation on
775 LLVMContext &, EVT VT) const {
776 if (!VT.isVector())
778 return VT.changeVectorElementTypeToInteger();
782 const MachineFunction &MF, EVT VT) const {
783 VT = VT.getScalarType();
785 if (!VT.isSimple())
788 switch (VT.getSimpleVT().SimpleTy) {
810 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
811 // preferred way of creating all-zero and all-one vectors so give it
832 auto tryValue = [&](uint64_t Value) -> bool {
844 if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
845 // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
847 // a SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
848 OpVals.push_back(Start - (64 - SplatBitSize));
849 OpVals.push_back(End - (64 - SplatBitSize));
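A worked renumbering example (values assumed for illustration): with SplatBitSize = 32, the 64-bit-numbered range [Start, End] = [40, 47] becomes [8, 15], where bit 0 now denotes 1 << 31:

  constexpr unsigned renumber(unsigned Bit64, unsigned SplatBitSize) {
    return Bit64 - (64 - SplatBitSize); // mirrors the push_back math above
  }
  static_assert(renumber(40, 32) == 8 && renumber(47, 32) == 15,
                "bit 40..47 of 64 == bit 8..15 of 32");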
860 // being able to use a sign-extended element value in VECTOR REPLICATE
873 // using a non-wraparound mask.
881 IntBits <<= (SystemZ::VectorBits - IntImm.getBitWidth());
906 assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
910 BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
914 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
918 bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
930 if (MF.getFunction().hasFnAttribute("probe-stack"))
931 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
932 "inline-asm";
949 if (RMW->getType()->isIntegerTy(8) || RMW->getType()->isIntegerTy(16))
954 (RMW->getType()->isIntegerTy(32) || RMW->getType()->isIntegerTy(64)) &&
955 (RMW->getOperation() == AtomicRMWInst::BinOp::Add ||
956 RMW->getOperation() == AtomicRMWInst::BinOp::Sub ||
957 RMW->getOperation() == AtomicRMWInst::BinOp::And ||
958 RMW->getOperation() == AtomicRMWInst::BinOp::Or ||
959 RMW->getOperation() == AtomicRMWInst::BinOp::Xor))
972 return isUInt<32>(Imm) || isUInt<32>(-Imm);
976 EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {
1001 // With vector support a Load->Store combination may be combined to either
1008 bool MVC = Ty->isIntegerTy(8);
1017 switch (II->getIntrinsicID()) {
1026 if (isa<LoadInst>(I) && I->hasOneUse()) {
1027 auto *SingleUser = cast<Instruction>(*I->user_begin());
1028 if (SingleUser->getParent() == I->getParent()) {
1030 if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
1031 if (C->getBitWidth() <= 64 &&
1032 (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
1036 // Load->Store
1037 return getLoadStoreAddrMode(HasVector, I->getType());
1040 if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
1041 if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
1042 // Load->Store
1043 return getLoadStoreAddrMode(HasVector, LoadI->getType());
1054 Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
1055 I->getOperand(0)->getType());
1056 bool IsFPAccess = MemAccessTy->isFloatingPointTy();
1057 bool IsVectorAccess = MemAccessTy->isVectorTy();
1062 Value *DataOp = I->getOperand(0);
1069 if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
1070 User *LoadUser = *I->user_begin();
1089 // Require a 20-bit signed offset.
1094 Subtarget.hasVector() && (Ty->isVectorTy() || Ty->isIntegerTy(128));
1119 if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
1135 if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
1137 unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
1138 unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedValue();
1150 //===----------------------------------------------------------------------===//
1152 //===----------------------------------------------------------------------===//
1160 case 'f': // Floating-point register
1161 case 'h': // High-part register
1162 case 'r': // General-purpose register
1166 case 'Q': // Memory with base and unsigned 12-bit displacement
1168 case 'S': // Memory with base and signed 20-bit displacement
1173 case 'I': // Unsigned 8-bit constant
1174 case 'J': // Unsigned 12-bit constant
1175 case 'K': // Signed 16-bit constant
1176 case 'L': // Signed 20-bit displacement (on all targets we support)
1185 case 'Q': // Address with base and unsigned 12-bit displacement
1187 case 'S': // Address with base and signed 20-bit displacement
1207 Type *type = CallOperandVal->getType();
1216 case 'h': // High-part register
1217 case 'r': // General-purpose register
1218 weight = CallOperandVal->getType()->isIntegerTy() ? CW_Register : CW_Default;
1221 case 'f': // Floating-point register
1223 weight = type->isFloatingPointTy() ? CW_Register : CW_Default;
1228 weight = (type->isVectorTy() || type->isFloatingPointTy()) ? CW_Register
1232 case 'I': // Unsigned 8-bit constant
1234 if (isUInt<8>(C->getZExtValue()))
1238 case 'J': // Unsigned 12-bit constant
1240 if (isUInt<12>(C->getZExtValue()))
1244 case 'K': // Signed 16-bit constant
1246 if (isInt<16>(C->getSExtValue()))
1250 case 'L': // Signed 20-bit displacement (on all targets we support)
1252 if (isInt<20>(C->getSExtValue()))
1258 if (C->getZExtValue() == 0x7fffffff)
1267 // Map maps 0-based register numbers to LLVM register numbers.
1271 assert(*(Constraint.end()-1) == '}' && "Missing '}'");
1275 Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
1284 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
1290 case 'r': // General-purpose register
1291 if (VT.getSizeInBits() == 64)
1293 else if (VT.getSizeInBits() == 128)
1298 if (VT == MVT::i64)
1300 else if (VT == MVT::i128)
1304 case 'h': // High-part register (an LLVM extension)
1307 case 'f': // Floating-point register
1309 if (VT.getSizeInBits() == 64)
1311 else if (VT.getSizeInBits() == 128)
1319 if (VT.getSizeInBits() == 32)
1321 if (VT.getSizeInBits() == 64)
1332 auto getVTSizeInBits = [&VT]() {
1333 return VT == MVT::Other ? 0 : VT.getSizeInBits();
1337 // because the interpretation depends on VT. The internal names of
1377 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1383 SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT,
1412 case 'I': // Unsigned 8-bit constant
1414 if (isUInt<8>(C->getZExtValue()))
1415 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1419 case 'J': // Unsigned 12-bit constant
1421 if (isUInt<12>(C->getZExtValue()))
1422 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1426 case 'K': // Signed 16-bit constant
1428 if (isInt<16>(C->getSExtValue()))
1429 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1433 case 'L': // Signed 20-bit displacement (on all targets we support)
1435 if (isInt<20>(C->getSExtValue()))
1436 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1442 if (C->getZExtValue() == 0x7fffffff)
1443 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1451 //===----------------------------------------------------------------------===//
1453 //===----------------------------------------------------------------------===//
1470 return CI->isTailCall();
1618 FuncInfo->setSizeOfFnParams(CCInfo.getStackSize());
1670 // FIXME: Pre-include call frame size in the offset, should not
1683 // passed as right-justified 8-byte values.
1715 // Save the number of non-varargs registers for later use by va_start, etc.
1716 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
1717 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
1723 // first stack vararg would be. The 1-byte size here is arbitrary.
1724 // FIXME: Pre-include call frame size in the offset, should not
1726 int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();
1728 FuncInfo->setVarArgsFrameIndex(FI);
1732 // Save the number of non-varargs registers for later use by va_start, etc.
1733 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
1734 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
1737 // first stack vararg would be. The 1-byte size here is arbitrary.
1739 FuncInfo->setVarArgsFrameIndex(
1742 // ...and a similar frame index for the caller-allocated save area
1745 -SystemZMC::ELFCallFrameSize + TFL->getRegSpillOffset(MF, SystemZ::R2D) - 16;
1747 FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
1754 unsigned Offset = TFL->getRegSpillOffset(MF, SystemZ::ELFArgFPRs[I]);
1756 MFI.CreateFixedObject(8, -SystemZMC::ELFCallFrameSize + Offset, true);
1767 SystemZ::ELFNumArgFPRs - NumFixedFPRs));
1777 MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
1778 FuncInfo->setADAVirtualRegister(ADAvReg);
1787 // needs the callee-saved argument register R6, or if the call uses
1788 // the callee-saved register arguments SwiftSelf and SwiftError.
1808 unsigned ADAvReg = MFI->getADAVirtualRegister();
1831 (isa<Function>(GV)) || (GA && isa<Function>(GA->getAliaseeObject()));
1832 bool IsInternal = (GV->hasInternalLinkage() || GV->hasPrivateLinkage());
1857 bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
1858 G->getGlobal()->hasPrivateLinkage());
1862 unsigned ADAvReg = MFI->getADAVirtualRegister();
1864 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
1869 G->getGlobal(), DL, PtrVT, 0, SystemZII::MO_ADA_DIRECT_FUNC_DESC);
1875 E->getSymbol(), PtrVT, SystemZII::MO_ADA_DIRECT_FUNC_DESC);
1919 // We don't support GuaranteedTailCallOpt, only automatically-detected
1951 SlotVT = Outs[I].VT;
1954 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1978 // In XPLINK64, for the 128-bit vararg case, ArgValue is bitcasted to a
1979 // MVT::i128 type. We decompose the 128-bit type to a pair of its high
1989 // floats are passed as right-justified 8-byte values.
1992 Regs->getStackPointerRegister(), PtrVT);
1993 unsigned Offset = Regs->getStackPointerBias() + Regs->getCallFrameSize() +
2005 // they are vararg (non-fixed arguments), if a long double or vector
2031 ->getAddressOfCalleeRegister();
2037 static_cast<SystemZXPLINK64Registers *>(Regs)->getADARegister(), ADA));
2040 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
2043 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
2052 // Build a sequence of copy-to-reg nodes, chained and glued together.
2070 // Add a register mask operand representing the call-preserved registers.
2072 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2408 Ops.reserve(NumOps - 1);
2413 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
2428 Ops.reserve(NumOps - 1);
2432 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
2437 // floating-point comparison. Return the condition code mask for
2439 // unsigned comparisons and clear for signed ones. In the floating-point
2471 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2474 int64_t Value = ConstOp1->getSExtValue();
2475 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
2476 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
2488 // For us to make any changes, it must be a comparison between a single-use
2495 // We must have an 8- or 16-bit load.
2497 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2499 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2505 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2507 uint64_t Value = ConstOp1->getZExtValue();
2508 uint64_t Mask = (1 << NumBits) - 1;
2509 if (Load->getExtensionType() == ISD::SEXTLOAD) {
2511 int64_t SignedValue = ConstOp1->getSExtValue();
2512 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
2515 // Unsigned comparison between two sign-extended values is equivalent
2516 // to unsigned comparison between two zero-extended values.
2522 // Test whether the high bit of the byte is set.
2525 // Test whether the high bit of the byte is clear.
2532 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
2545 Load->getExtensionType() != ExtType) {
2546 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
2547 Load->getBasePtr(), Load->getPointerInfo(),
2548 Load->getMemoryVT(), Load->getAlign(),
2549 Load->getMemOperand()->getFlags());
2556 Value != ConstOp1->getZExtValue())
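A worked instance of the rewrite (assumed i8 SEXTLOAD, NumBits = 8, Mask = 0xff): the signed constant is reduced modulo 2^NumBits so the same test can be run on the zero-extended value, e.g. "(sext i8 x) == -1" becomes "(zext i8 x) == 0xff":

  #include <cstdint>

  constexpr uint64_t reduceForZext(int64_t SignedValue, unsigned NumBits) {
    const uint64_t Mask = (uint64_t(1) << NumBits) - 1;
    return uint64_t(SignedValue) & Mask; // new RHS for the zext form
  }
  static_assert(reduceForZext(-1, 8) == 0xff,
                "(sext x) == -1 becomes (zext x) == 0xff");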
2561 // for integer register-memory comparisons of type ICmpType.
2566 if (Load->getMemoryVT() == MVT::i8)
2569 switch (Load->getExtensionType()) {
2591 // Always keep a floating-point constant second, since comparisons with
2600 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2608 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
2615 // The unsigned memory-immediate instructions can handle 16-bit
2618 isUInt<16>(ConstOp1->getZExtValue()))
2620 // The signed memory-immediate instructions can handle 16-bit
2623 isInt<16>(ConstOp1->getSExtValue()))
2642 // Check whether C tests for equality between X and Y and whether X - Y
2643 // or Y - X is also computed. In that case it's better to compare the
2649 for (SDNode *N : C.Op0->uses()) {
2650 if (N->getOpcode() == ISD::SUB &&
2651 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
2652 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
2655 SDNodeFlags Flags = N->getFlags();
2658 N->setFlags(Flags);
2660 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
2667 // Check whether C compares a floating-point value with zero and if that
2668 // floating-point value is also negated. In this case we can use the
2677 if (C1 && C1->isZero()) {
2678 for (SDNode *N : C.Op0->uses()) {
2679 if (N->getOpcode() == ISD::FNEG) {
2689 // also sign-extended. In that case it is better to test the result
2697 C.Op1.getOpcode() == ISD::Constant && C.Op1->getAsZExtVal() == 0) {
2699 if (C1 && C1->getZExtValue() == 32) {
2702 for (SDNode *N : ShlOp0->uses()) {
2703 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
2704 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
2721 cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
2722 C.Op1->getAsZExtVal() == 0) {
2724 if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
2726 unsigned Type = L->getExtensionType();
2736 // Return true if shift operation N has an in-range constant shift value.
2743 uint64_t Amount = Shift->getZExtValue();
2768 uint64_t High = llvm::bit_floor(Mask);
2802 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2808 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2816 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2822 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2829 // If there are just two bits, we can do equality checks for Low and High
2831 if (Mask == Low + High) {
2836 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2838 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
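A worked example of the two-bit case (mask value assumed for illustration): Mask = 0x90 gives Low = 0x10 (its lowest set bit) and High = 0x80 (bit_floor), so Mask == Low + High holds and "(x & 0x90) == 0x80" reduces to a single TEST UNDER MASK asking which of the two bits is set:

  static_assert((0x90 & -0x90) == 0x10, "Low: lowest set bit of the mask");
  static_assert(0x90 == 0x10 + 0x80, "exactly two mask bits");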
2857 if (Mask && Mask->getAPIntValue() == 0) {
2875 uint64_t CmpVal = ConstOp1->getZExtValue();
2887 MaskVal = Mask->getZExtValue();
2889 // There is no instruction to compare with a 64-bit immediate
2900 if (CmpVal == uint64_t(-1))
2907 MaskVal = -(CmpVal & -CmpVal);
2949 if (Mask && Mask->getZExtValue() == MaskVal)
2965 // (In-)Equality comparisons can be implemented via VCEQGS.
3010 if (!Mask || Mask->getValueSizeInBits(0) > 64)
3013 if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
3019 // Return a Comparison that tests the condition-code result of intrinsic
3022 // and CCValid is the set of possible condition-code results.
3031 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
3034 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
3038 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
3041 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
3045 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
3048 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
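The mask arithmetic, worked through: CC masks are 4-bit sets in which bit (3 - CC) stands for condition code CC, so "result == 2" selects only CC 2 while "result < 2" selects CCs 0 and 1:

  static_assert((1 << (3 - 2)) == 0b0010, "CCMask for result == 2");
  static_assert(((~0u << (4 - 2)) & 0xfu) == 0b1100, "CCMask for result < 2");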
3064 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
3067 CmpOp1->getAsZExtVal(), Cond);
3069 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
3072 CmpOp1->getAsZExtVal(), Cond);
3131 return SDValue(Node, Node->getNumValues() - 1);
3157 // Implement a 32-bit *MUL_LOHI operation by extending both operands to
3158 // 64 bits. Extend is the extension type to use. Store the high part
3172 // Lower a binary operation that produces two VT results, one in each
3173 // half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
3176 static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
3180 bool Is32Bit = is32Bit(VT);
3181 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
3182 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
3199 // for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet)
3200 // floating-point comparisons, and CmpMode::SignalingFP for strict signaling
3201 // floating-point comparisons.
3272 int Mask[] = { Start, -1, Start + 1, -1 };
3282 // producing a result of type VT. If Chain is nonnull, return the strict form.
3284 const SDLoc &DL, EVT VT,
3301 SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
3311 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
3314 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
3317 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
3321 // an integer mask of type VT. If Chain is nonnull, we have a strict
3322 // floating-point comparison. If in addition IsSignaling is true, we have
3323 // a strict signaling floating-point comparison.
3325 const SDLoc &DL, EVT VT,
3346 DL, VT, CmpOp1, CmpOp0, Chain);
3348 DL, VT, CmpOp0, CmpOp1, Chain);
3349 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
3363 DL, VT, CmpOp1, CmpOp0, Chain);
3365 DL, VT, CmpOp0, CmpOp1, Chain);
3366 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
3378 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
3382 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
3392 DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
3393 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
3406 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3408 EVT VT = Op.getValueType();
3409 if (VT.isVector())
3410 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
3423 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
3425 EVT VT = Op.getNode()->getValueType(0);
3426 if (VT.isVector()) {
3427 SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
3434 CCReg->setFlags(Op->getFlags());
3441 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
3481 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
3486 // Check for absolute and negative-absolute selections, including those
3487 // where the comparison value is sign-extended (for LPGFR and LNGFR).
3492 cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
3493 C.Op1->getAsZExtVal() == 0) {
3511 const GlobalValue *GV = Node->getGlobal();
3512 int64_t Offset = Node->getOffset();
3526 Offset -= Anchor;
3549 // If there was a non-zero offset that we didn't fold, create an explicit
3581 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
3582 Node->getValueType(0),
3590 // Add a register mask operand representing the call-preserved registers.
3593 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
3614 // The high part of the thread pointer is in access register 0.
3622 // Merge them into a single 64-bit address.
3633 const GlobalValue *GV = Node->getGlobal();
3647 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
3675 // of the module base offset. Count total number of local-dynamic
3679 MFI->incNumLocalDynamicTLSAccesses();
3681 // Add the per-symbol offset.
3724 const BlockAddress *BA = Node->getBlockAddress();
3725 int64_t Offset = Node->getOffset();
3737 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3749 if (CP->isMachineConstantPoolEntry())
3751 DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
3753 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
3754 CP->getOffset());
3775 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
3783 SDValue Offset = DAG.getConstant(TFL->getBackchainOffset(MF), DL, PtrVT);
3784 while (Depth--) {
3814 int Offset = TFL->getReturnAddressOffset(MF);
3822 // implicit live-in.
3824 Register LinkReg = MF.addLiveIn(CCR->getReturnFunctionAddressRegister(),
3841 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
3842 LoadN->getBasePtr(), LoadN->getMemOperand());
3899 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3900 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3914 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3920 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
3921 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
3922 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
3923 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
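For reference, the four values built here populate the s390x ELF va_list fields in declaration order (layout as in the zSeries ELF ABI; the field names below are paraphrased):

  struct VaListTag {
    long GprIndex;          // __gpr: number of the next GPR argument
    long FprIndex;          // __fpr: number of the next FPR argument
    void *OverflowArgArea;  // start of the stack (overflow) arguments
    void *RegSaveArea;      // caller-allocated register save area
  };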
3946 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3947 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3972 bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
3980 uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
3982 uint64_t StackAlign = TFI->getStackAlignment();
3984 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3997 EVT VT = Op.getValueType();
3999 makeExternalCall(Chain, DAG, "@@ALCAXP", VT, ArrayRef(NeededSpace),
4023 DAG.getConstant(~(RequiredAlign - 1), DL, PtrVT));
4035 bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
4045 uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
4047 uint64_t StackAlign = TFI->getStackAlignment();
4049 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4094 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
4114 EVT VT = Op.getValueType();
4117 if (is32Bit(VT))
4118 // Just do a normal 64-bit multiplication and extract the results.
4124 // the high result in the even register. ISD::SMUL_LOHI is defined to
4126 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
4129 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
4136 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
4141 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
4145 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
4146 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
4148 // the high result in the even register. ISD::SMUL_LOHI is defined to
4150 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
4152 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
4153 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
4154 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
4155 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
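Why the correction term is right (a sketch of the algebra): reading a 64-bit pattern v as signed gives the value v - 2^64 * s(v), where s(v) is its sign bit, so modulo 2^128 the signed product is ll*rl - 2^64 * (s(ll)*rl + s(rl)*ll). The arithmetic shift LH = LL >> 63 is all-ones exactly when LL is negative, making (LH & RL) = s(ll)*rl and (LL & RH) = s(rl)*ll; their sum is the NegSum subtracted from the high half above.

  #include <cstdint>

  // Sanity check of the sign-mask step on assumed values:
  static_assert((int64_t(-7) >> 63) == -1 && (int64_t(7) >> 63) == 0,
                "arithmetic shift yields the all-ones/zero sign mask");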
4162 EVT VT = Op.getValueType();
4165 if (is32Bit(VT))
4166 // Just do a normal 64-bit multiplication and extract the results.
4172 // the high result in the even register. ISD::UMUL_LOHI is defined to
4174 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
4183 EVT VT = Op.getValueType();
4186 // We use DSGF for 32-bit division. This means the first operand must
4187 // always be 64-bit, and the second operand should be 32-bit whenever
4189 if (is32Bit(VT))
4197 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
4203 EVT VT = Op.getValueType();
4209 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
4215 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
4217 // Get the known-zero masks for each operand.
4223 // other are known zero. They are the low and high operands respectively.
4226 unsigned High, Low;
4228 High = 1, Low = 0;
4230 High = 0, Low = 1;
4235 SDValue HighOp = Ops[High];
4237 // If the high part is a constant, we're better off using IILH.
4244 int64_t Value = int32_t(LowOp->getAsZExtVal());
4249 // Check whether the high part is an AND that doesn't change the
4250 // high 32 bits and just masks out low bits. We can skip it if so.
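An assumed example of the split: if known-bits analysis shows one operand only produces bits 32-63 and the other only bits 0-31, the OR is just an insertion of the 32-bit low half into the high operand (e.g. an insert-immediate of the low word when that half is constant), modeled here:

  #include <cstdint>

  constexpr uint64_t modelSplitOR(uint64_t HighOp, uint32_t LowOp) {
    return (HighOp & 0xffffffff00000000ull) | LowOp; // an insert, not a full OR
  }
  static_assert(modelSplitOR(0x1234000000000000ull, 0x5678u) ==
                0x1234000000005678ull, "low word inserted into high operand");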
4273 SDValue LHS = N->getOperand(0);
4274 SDValue RHS = N->getOperand(1);
4277 if (N->getValueType(0) == MVT::i128) {
4297 Flag = DAG.getZExtOrTrunc(Flag, DL, N->getValueType(1));
4301 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Flag);
4332 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
4336 if (N->getValueType(1) == MVT::i1)
4339 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
4359 MVT VT = N->getSimpleValueType(0);
4362 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
4365 SDValue LHS = N->getOperand(0);
4366 SDValue RHS = N->getOperand(1);
4370 if (VT == MVT::i128) {
4394 Flag = DAG.getZExtOrTrunc(Flag, DL, N->getValueType(1));
4398 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Flag);
4430 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4434 if (N->getValueType(1) == MVT::i1)
4437 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
4442 EVT VT = Op.getValueType();
4446 if (VT.getScalarSizeInBits() == 128) {
4451 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
4456 if (VT.isVector()) {
4459 switch (VT.getScalarSizeInBits()) {
4463 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
4465 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
4466 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
4467 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
4473 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
4480 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
4489 // Get the known-zero mask for the operand.
4493 return DAG.getConstant(0, DL, VT);
4495 // Skip known-zero high parts of the operand.
4496 int64_t OrigBitSize = VT.getSizeInBits();
4503 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
4505 // Add up per-byte counts in a binary tree. All bits of Op at
4508 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
4510 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
4511 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
4512 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
4515 // Extract overall result from high byte.
4517 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
4518 DAG.getConstant(BitSize - 8, DL, VT));
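A worked model of the byte-count tree (BitSize = 32 assumed): POPCNT leaves each byte holding its own bit count, the loop adds the value to itself shifted left by 16 and then by 8, funneling every count into the top byte, and the final SRL (BitSize - 8) extracts it:

  #include <cstdint>

  constexpr uint32_t sumByteCountsIntoTop(uint32_t PerByteCounts) {
    uint32_t Op = PerByteCounts;
    Op += Op << 16; // tree step I = BitSize / 2 = 16
    Op += Op << 8;  // tree step I = 8
    return Op >> 24; // extract overall result from the high byte
  }
  static_assert(sumByteCountsIntoTop(0x01020304) == 10, "1 + 2 + 3 + 4");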
4531 // The only fence that needs an instruction is a sequentially-consistent
4532 // cross-thread fence.
4540 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
4548 (Node->getMemoryVT() == MVT::i128 || Node->getMemoryVT() == MVT::f128) &&
4550 // Use same code to handle both legal and non-legal i128 types.
4566 DAG.getConstant(-4, DL, PtrVT));
4581 // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
4588 // 32-bit operations need no special handling.
4589 EVT NarrowVT = Node->getMemoryVT();
4595 SDValue ChainIn = Node->getChain();
4596 SDValue Addr = Node->getBasePtr();
4597 SDValue Src2 = Node->getVal();
4598 MachineMemOperand *MMO = Node->getMemOperand();
4605 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
4618 DAG.getConstant(32 - BitSize, DL, WideVT));
4622 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
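The two constants, worked through for an assumed BitSize = 8: the subword field is rotated to the top of its containing 32-bit word, so the shift amount is 32 - 8 = 24 and uint32_t(-1) >> 8 masks the bits that must be preserved around the field:

  #include <cstdint>

  static_assert((uint32_t(-1) >> 8) == 0x00ffffffu,
                "mask of the bits outside the 8-bit field at the top");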
4641 // Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations into
4642 // ATOMIC_LOADW_SUBs and convert 32- and 64-bit operations into additions.
4646 EVT MemVT = Node->getMemoryVT();
4648 // A full-width operation: negate and use LAA(G).
4652 SDValue Src2 = Node->getVal();
4657 Node->getChain(), Node->getBasePtr(), NegSrc2,
4658 Node->getMemOperand());
4664 // Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
4668 SDValue ChainIn = Node->getOperand(0);
4669 SDValue Addr = Node->getOperand(1);
4670 SDValue CmpVal = Node->getOperand(2);
4671 SDValue SwapVal = Node->getOperand(3);
4672 MachineMemOperand *MMO = Node->getMemOperand();
4675 if (Node->getMemoryVT() == MVT::i128) {
4676 // Use same code to handle both legal and non-legal i128 types.
4682 // We have native support for 32-bit and 64-bit compare and swap, but we
4684 EVT NarrowVT = Node->getMemoryVT();
4700 // Convert 8-bit and 16-bit compare and swap to a loop, implemented
4732 if (SI->isAtomic())
4735 if (LI->isAtomic())
4738 if (AI->isAtomic())
4741 if (AI->isAtomic())
4751 report_fatal_error("Variable-sized stack allocations are not supported "
4754 Regs->getStackPointerRegister(), Op.getValueType());
4764 report_fatal_error("Variable-sized stack allocations are not supported "
4774 Chain, DL, Regs->getStackPointerRegister(), MVT::i64);
4779 Chain = DAG.getCopyToReg(Chain, DL, Regs->getStackPointerRegister(), NewSP);
4802 Node->getVTList(), Ops,
4803 Node->getMemoryVT(), Node->getMemOperand());
4819 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
4835 if (Op->getNumValues() == 1)
4837 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
4838 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
4972 // VPDI V1, V2, 4 (low half of V1, high half of V2)
4975 // VPDI V1, V2, 1 (high half of V1, low half of V2)
4983 // operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4984 // OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
5001 // Bytes is a VPERM-like permute vector, except that -1 is used for
5012 int OpNos[] = { -1, -1 };
5017 // byte number. Only the operand numbers (the high bits) are
5019 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
5025 if (OpNos[ModelOpNo] == 1 - RealOpNo)
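A concrete permute vector for orientation (illustrative): byte indices 0-15 refer to operand 0, 16-31 to operand 1, and -1 means the result byte is a don't-care. Taking the high doubleword of each input looks like this, and a merge-high of doublewords is one of the fixed patterns matchPermute compares against:

  static const int MergeHighDoublewords[16] = {  0,  1,  2,  3,  4,  5,  6,  7,
                                                16, 17, 18, 19, 20, 21, 22, 23 };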
5042 // Bytes is a VPERM-like permute vector, except that -1 is used for
5044 // See whether redistributing the -1 bytes gives a shuffle that can be
5045 // implemented using P. If so, set Transform to a VPERM-like permute vector
5055 Transform[From] = -1;
5077 // Convert the mask of the given shuffle op into a byte-level mask,
5081 EVT VT = ShuffleOp.getValueType();
5082 unsigned NumElements = VT.getVectorNumElements();
5083 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
5086 Bytes.resize(NumElements * BytesPerElement, -1);
5088 int Index = VSN->getMaskElt(I);
5098 Bytes.resize(NumElements * BytesPerElement, -1);
5107 // Bytes is a VPERM-like permute vector, except that -1 is used for
5113 Base = -1;
5118 Base = Elem - I;
5122 } else if (unsigned(Base) != Elem - I)
5129 // Bytes is a VPERM-like permute vector, except that -1 is used for
5137 int OpNos[] = { -1, -1 };
5138 int Shift = -1;
5142 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
5151 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5189 if (N->getOpcode() == ISD::BITCAST)
5190 N = N->getOperand(0);
5191 if (N->getOpcode() == ISD::SPLAT_VECTOR)
5192 if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
5193 return Op->getZExtValue() == 0;
5205 // Bytes is a VPERM-like permute vector, except that -1 is used for
5226 int ZeroIdx = -1;
5242 if (ZeroIdx != -1) {
5280 // Describes a general N-operand vector shuffle.
5282 GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
5293 // Index I is -1 if byte I of the result is undefined. Otherwise the
5299 EVT VT;
5308 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
5310 Bytes.push_back(-1);
5320 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
5325 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
5334 (FromBytesPerElement - BytesPerElement));
5383 return DAG.getUNDEF(VT);
5393 // Try to redistribute the undefined elements of non-root nodes so that
5394 // the non-root shuffles match something like a pack or merge, then adjust
5403 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
5416 NewBytes[J] = -1;
5446 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
5462 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
5485 Log2_32_Ceil(Ops.size()) == Log2_32_Ceil(Ops.size() - 1))
5498 if (Bytes[Elt] != -1) {
5510 if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
5534 Bytes[B++] = -1;
5542 Bytes[I] -= SystemZ::VectorBytes;
5564 // Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
5572 // Return a vector of type VT that contains Value in the first element.
5574 static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
5580 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
5581 return DAG.getBuildVector(VT, DL, Ops);
5584 return DAG.getUNDEF(VT);
5585 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
5588 // Return a vector of type VT in which Op0 is in element 0 and Op1 is in
5590 static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, in buildMergeScalars() argument
5594 return DAG.getUNDEF(VT); in buildMergeScalars()
5595 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1); in buildMergeScalars()
5598 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0); in buildMergeScalars()
5599 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT, in buildMergeScalars()
5600 buildScalarToVector(DAG, DL, VT, Op0), in buildMergeScalars()
5601 buildScalarToVector(DAG, DL, VT, Op1)); in buildMergeScalars()
5625 // the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR would benefit from this representation and return it if so.
5629 EVT VT = BVN->getValueType(0); in tryBuildVectorShuffle() local
5630 unsigned NumElements = VT.getVectorNumElements(); in tryBuildVectorShuffle()
5632 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation in tryBuildVectorShuffle()
5633 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still in tryBuildVectorShuffle()
5636 GeneralShuffle GS(VT); in tryBuildVectorShuffle()
5640 SDValue Op = BVN->getOperand(I); in tryBuildVectorShuffle()
5654 ResidueOps.push_back(BVN->getOperand(I)); in tryBuildVectorShuffle()
5668 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps); in tryBuildVectorShuffle()
5677 if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed()) in isVectorElementLoad()
5680 if (AL->getOpcode() == ISD::ATOMIC_LOAD) in isVectorElementLoad()
5687 // Combine GPR scalar values Elems into a vector of type VT.
5689 SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, in buildVector() argument
5708 // - if the only defined element is a loaded one, the best sequence in buildVector()
5711 // - otherwise, if the only defined element is an i64 value, we will in buildVector()
5712 // end up with the same VLVGP sequence regardless of whether we short-cut in buildVector()
5715 // - otherwise, if the only defined element is an i32 or smaller value, in buildVector()
5720 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single); in buildVector()
5731 if (VT == MVT::v2i64 && !AllLoads) in buildVector()
5734 // Use a 64-bit merge high to combine two doubles. in buildVector()
5735 if (VT == MVT::v2f64 && !AllLoads) in buildVector()
5736 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); in buildVector()
5745 if (VT == MVT::v4f32 && !AllLoads) { in buildVector()
5746 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]); in buildVector()
5747 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]); in buildVector()
5753 // Merging identical replications is a no-op. in buildVector()
5760 return DAG.getNode(ISD::BITCAST, DL, VT, Op); in buildVector()
5786 Result = DAG.getBuildVector(VT, DL, Constants); in buildVector()
5805 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal); in buildVector()
5808 unsigned I1 = NumElements / 2 - 1; in buildVector()
5809 unsigned I2 = NumElements - 1; in buildVector()
5815 Result = DAG.getNode(ISD::BITCAST, DL, VT, in buildVector()
5820 Result = DAG.getUNDEF(VT); in buildVector()
5827 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I], in buildVector()
5836 EVT VT = Op.getValueType(); in lowerBUILD_VECTOR() local
5838 if (BVN->isConstant()) { in lowerBUILD_VECTOR()
5851 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op)) in lowerBUILD_VECTOR()
5852 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0)); in lowerBUILD_VECTOR()
5859 return buildVector(DAG, DL, VT, Ops); in lowerBUILD_VECTOR()
5866 EVT VT = Op.getValueType(); in lowerVECTOR_SHUFFLE() local
5867 unsigned NumElements = VT.getVectorNumElements(); in lowerVECTOR_SHUFFLE()
5869 if (VSN->isSplat()) { in lowerVECTOR_SHUFFLE()
5871 unsigned Index = VSN->getSplatIndex(); in lowerVECTOR_SHUFFLE()
5872 assert(Index < VT.getVectorNumElements() && in lowerVECTOR_SHUFFLE()
5877 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index)); in lowerVECTOR_SHUFFLE()
5878 // Otherwise keep it as a vector-to-vector operation. in lowerVECTOR_SHUFFLE()
5879 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0), in lowerVECTOR_SHUFFLE()
5883 GeneralShuffle GS(VT); in lowerVECTOR_SHUFFLE()
5885 int Elt = VSN->getMaskElt(I); in lowerVECTOR_SHUFFLE()
5906 // Handle insertions of floating-point values. in lowerINSERT_VECTOR_ELT()
5911 EVT VT = Op.getValueType(); in lowerINSERT_VECTOR_ELT() local
5916 if (VT == MVT::v2f64 && in lowerINSERT_VECTOR_ELT()
5920 uint64_t Index = Op2->getAsZExtVal(); in lowerINSERT_VECTOR_ELT()
5921 unsigned Mask = VT.getVectorNumElements() - 1; in lowerINSERT_VECTOR_ELT()
5927 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); in lowerINSERT_VECTOR_ELT()
5928 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements()); in lowerINSERT_VECTOR_ELT()
5932 return DAG.getNode(ISD::BITCAST, DL, VT, Res); in lowerINSERT_VECTOR_ELT()
5938 // Handle extractions of floating-point values. in lowerEXTRACT_VECTOR_ELT()
5942 EVT VT = Op.getValueType(); in lowerEXTRACT_VECTOR_ELT() local
5947 uint64_t Index = CIndexN->getZExtValue(); in lowerEXTRACT_VECTOR_ELT()
5948 unsigned Mask = VecVT.getVectorNumElements() - 1; in lowerEXTRACT_VECTOR_ELT()
5954 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits()); in lowerEXTRACT_VECTOR_ELT()
5958 return DAG.getNode(ISD::BITCAST, DL, VT, Res); in lowerEXTRACT_VECTOR_ELT()
5996 unsigned End = MaskElt + NumInPerOut - 1; in lowerZERO_EXTEND_VECTOR_INREG()
6011 EVT VT = Op.getValueType(); in lowerShift() local
6012 unsigned ElemBitSize = VT.getScalarSizeInBits(); in lowerShift()
6021 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, in lowerShift()
6026 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); in lowerShift()
6030 SDValue Splat = BVN->getSplatValue(&UndefElements); in lowerShift()
6032 // Since i32 is the smallest legal type, we either need a no-op or a truncation. in lowerShift()
6035 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); in lowerShift()
6042 if (VSN->isSplat()) { in lowerShift()
6043 SDValue VSNOp0 = VSN->getOperand(0); in lowerShift()
6044 unsigned Index = VSN->getSplatIndex(); in lowerShift()
6045 assert(Index < VT.getVectorNumElements() && in lowerShift()
6049 // Since i32 is the smallest legal type, we either need a no-op or a truncation. in lowerShift()
6053 return DAG.getNode(ByScalar, DL, VT, Op0, Shift); in lowerShift()
6103 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); in lowerREADCYCLECOUNTER()
6293 // only for 128-bit integer types).
6298 switch (N->getOpcode()) { in LowerOperationWrapper()
6302 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) }; in LowerOperationWrapper()
6303 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); in LowerOperationWrapper()
6308 if (N->getValueType(0) == MVT::f128) in LowerOperationWrapper()
6317 SDValue Val = N->getOperand(1); in LowerOperationWrapper()
6322 SDValue Ops[] = {N->getOperand(0), Val, N->getOperand(2)}; in LowerOperationWrapper()
6323 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); in LowerOperationWrapper()
6328 if (cast<AtomicSDNode>(N)->getSuccessOrdering() == in LowerOperationWrapper()
6338 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), in LowerOperationWrapper()
6339 lowerI128ToGR128(DAG, N->getOperand(2)), in LowerOperationWrapper()
6340 lowerI128ToGR128(DAG, N->getOperand(3)) }; in LowerOperationWrapper()
6341 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); in LowerOperationWrapper()
6346 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1)); in LowerOperationWrapper()
6353 SDValue Src = N->getOperand(0); in LowerOperationWrapper()
6354 if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 && in LowerOperationWrapper()
6512 // Return true if VT is a vector whose elements are a whole number of bytes
6514 bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const { in canTreatAsByteVector()
6518 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple(); in canTreatAsByteVector()
6543 // Get a VPERM-like permute mask and see whether the bytes covered in combineExtract()
6572 // Make sure that the least-significant bit of the extracted value in combineExtract()
6578 Op = Op.getOperand(End / OpBytesPerElement - 1); in combineExtract()
6580 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits()); in combineExtract() local
6581 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op); in combineExtract()
6584 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits()); in combineExtract() local
6585 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op); in combineExtract()
6586 if (VT != ResVT) { in combineExtract()
6603 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement; in combineExtract()
6610 Byte += SubByte - MinSubByte; in combineExtract()
6647 // splitting the original elements into Scale equal-sized pieces in combineTruncateExtract()
6648 // and for truncation purposes want the last (least-significant) in combineTruncateExtract()
6652 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1; in combineTruncateExtract()
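// Standalone sketch: with each original element split into Scale
// truncated-size pieces, numbered left to right as on big-endian SystemZ,
// the least-significant piece of element Index is the one computed above.
static unsigned lastPieceIndex(unsigned Index, unsigned Scale) {
  return (Index + 1) * Scale - 1; // e.g. element 1 of v2i64, seen as v4i32: 3
}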
6672 SDValue N0 = N->getOperand(0); in combineZERO_EXTEND()
6673 EVT VT = N->getValueType(0); in combineZERO_EXTEND() local
6679 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT), in combineZERO_EXTEND()
6680 DAG.getConstant(FalseOp->getZExtValue(), DL, VT), in combineZERO_EXTEND()
6682 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops); in combineZERO_EXTEND()
6700 if (VT.isScalarInteger() && VT.getSizeInBits() < X.getValueSizeInBits()) { in combineZERO_EXTEND()
6704 VT.getSizeInBits()); in combineZERO_EXTEND()
6706 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); in combineZERO_EXTEND()
6707 APInt Mask = N0.getConstantOperandAPInt(1).zext(VT.getSizeInBits()); in combineZERO_EXTEND()
6708 return DAG.getNode(ISD::XOR, SDLoc(N0), VT, in combineZERO_EXTEND()
6709 X, DAG.getConstant(Mask, SDLoc(N0), VT)); in combineZERO_EXTEND()
6721 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1) into (select_cc LHS, RHS, -1, 0, COND) in combineSIGN_EXTEND_INREG()
6723 SDValue N0 = N->getOperand(0); in combineSIGN_EXTEND_INREG()
6724 EVT VT = N->getValueType(0); in combineSIGN_EXTEND_INREG() local
6725 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); in combineSIGN_EXTEND_INREG()
6731 DAG.getAllOnesConstant(DL, VT), in combineSIGN_EXTEND_INREG()
6732 DAG.getConstant(0, DL, VT), N0.getOperand(2) }; in combineSIGN_EXTEND_INREG()
6733 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); in combineSIGN_EXTEND_INREG()
6744 SDValue N0 = N->getOperand(0); in combineSIGN_EXTEND()
6745 EVT VT = N->getValueType(0); in combineSIGN_EXTEND() local
6751 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits()); in combineSIGN_EXTEND()
6752 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra; in combineSIGN_EXTEND()
6753 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra; in combineSIGN_EXTEND()
6755 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT, in combineSIGN_EXTEND()
6757 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext, in combineSIGN_EXTEND()
6760 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, in combineSIGN_EXTEND()
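// Standalone sketch of the i32 -> i64 instance of the identity behind this
// rewrite, valid for C1, C2 < 32: sign-extending (X << C1) >>s C2 equals
// performing the widened shift pair on the 64-bit value.
#include <cassert>
#include <cstdint>
static void checkWidenedShiftPair(int32_t X, unsigned C1, unsigned C2) {
  int32_t Narrow = int32_t(uint32_t(X) << C1) >> C2; // i32 shl, then sra
  int64_t Wide =
      int64_t(uint64_t(int64_t(X)) << (C1 + 32)) >> (C2 + 32); // widened pair
  assert(int64_t(Narrow) == Wide);
}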
6772 unsigned Opcode = N->getOpcode(); in combineMERGE()
6773 SDValue Op0 = N->getOperand(0); in combineMERGE()
6774 SDValue Op1 = N->getOperand(1); in combineMERGE()
6778 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF for v4f32. in combineMERGE()
6780 if (Op1 == N->getOperand(0)) in combineMERGE()
6782 // (z_merge_? 0, X) -> (z_unpackl_? 0, X). in combineMERGE()
6783 EVT VT = Op1.getValueType(); in combineMERGE() local
6784 unsigned ElemBytes = VT.getVectorElementType().getStoreSize(); in combineMERGE()
6788 EVT InVT = VT.changeVectorElementTypeToInteger(); in combineMERGE()
6791 if (VT != InVT) { in combineMERGE()
6797 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op); in combineMERGE()
6808 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end(); in isI128MovedToParts()
6814 // Verify every user is a TRUNCATE to i64 of the low or high half. in isI128MovedToParts()
6817 if (User->getOpcode() == ISD::SRL && in isI128MovedToParts()
6818 User->getOperand(1).getOpcode() == ISD::Constant && in isI128MovedToParts()
6819 User->getConstantOperandVal(1) == 64 && User->hasOneUse()) { in isI128MovedToParts()
6820 User = *User->use_begin(); in isI128MovedToParts()
6823 if (User->getOpcode() != ISD::TRUNCATE || User->getValueType(0) != MVT::i64) in isI128MovedToParts()
6844 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end(); in isF128MovedToParts()
6850 // Verify every user is an EXTRACT_SUBREG of the low or high half. in isF128MovedToParts()
6852 if (!User->hasOneUse() || !User->isMachineOpcode() || in isF128MovedToParts()
6853 User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG) in isF128MovedToParts()
6856 switch (User->getConstantOperandVal(1)) { in isF128MovedToParts()
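// Standalone sketch (assumes a compiler providing __int128): the access
// pattern isI128MovedToParts recognizes above, an i128 value consumed only
// as its two i64 halves.
#include <cstdint>
static void i128ToParts(unsigned __int128 V, uint64_t &Hi, uint64_t &Lo) {
  Lo = uint64_t(V);       // TRUNCATE i128 -> i64
  Hi = uint64_t(V >> 64); // TRUNCATE (SRL V, 64) -> i64
}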
6877 EVT LdVT = N->getValueType(0); in combineLOAD()
6880 // Replace a 128-bit load that is used solely to move its value into GPRs by separate loads of both halves. in combineLOAD()
6883 if (LD->isSimple() && ISD::isNormalLoad(LD)) { in combineLOAD()
6891 HiPart->getValueType(0), DL, LD->getChain(), LD->getBasePtr(), in combineLOAD()
6892 LD->getPointerInfo(), LD->getOriginalAlign(), in combineLOAD()
6893 LD->getMemOperand()->getFlags(), LD->getAAInfo()); in combineLOAD()
6900 LoPart->getValueType(0), DL, LD->getChain(), in combineLOAD()
6901 DAG.getObjectPtrOffset(DL, LD->getBasePtr(), TypeSize::getFixed(8)), in combineLOAD()
6902 LD->getPointerInfo().getWithOffset(8), LD->getOriginalAlign(), in combineLOAD()
6903 LD->getMemOperand()->getFlags(), LD->getAAInfo()); in combineLOAD()
6927 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); in combineLOAD()
6929 if (UI->getOpcode() == SystemZISD::REPLICATE) { in combineLOAD()
6945 for (SDValue Op : U->ops()) in combineLOAD()
6952 bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const { in canLoadStoreByteSwapped()
6953 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) in canLoadStoreByteSwapped()
6956 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128) in canLoadStoreByteSwapped()
6961 static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) { in isVectorElementSwap() argument
6962 if (!VT.isVector() || !VT.isSimple() || in isVectorElementSwap()
6963 VT.getSizeInBits() != 128 || in isVectorElementSwap()
6964 VT.getScalarSizeInBits() % 8 != 0) in isVectorElementSwap()
6967 unsigned NumElts = VT.getVectorNumElements(); in isVectorElementSwap()
6970 if ((unsigned) M[i] != NumElts - 1 - i) in isVectorElementSwap()
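// Standalone sketch: the only mask accepted above is the full element
// reversal <N-1, ..., 1, 0> (e.g. <3,2,1,0> for four elements); negative
// entries are undefined and ignored.
#include <vector>
static bool isReversalMask(const std::vector<int> &M) {
  unsigned N = M.size();
  for (unsigned I = 0; I < N; ++I)
    if (M[I] >= 0 && M[I] != int(N - 1 - I))
      return false;
  return true;
}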
6978 for (auto *U : StoredVal->uses()) { in isOnlyUsedByStores()
6980 EVT CurrMemVT = ST->getMemoryVT().getScalarType(); in isOnlyUsedByStores()
6996 if (Val.getOpcode() != ISD::OR || !Val.getNode()->hasOneUse()) in isI128MovedFromParts()
7004 if (Op1.getOpcode() != ISD::SHL || !Op1.getNode()->hasOneUse() || in isI128MovedFromParts()
7010 if (Op0.getOpcode() != ISD::ZERO_EXTEND || !Op0.getNode()->hasOneUse() || in isI128MovedFromParts()
7013 if (Op1.getOpcode() != ISD::ANY_EXTEND || !Op1.getNode()->hasOneUse() || in isI128MovedFromParts()
7024 if (!Val.getNode()->hasOneUse() || !Val.isMachineOpcode() || in isF128MovedFromParts()
7028 if (Val->getNumOperands() != 5 || in isF128MovedFromParts()
7029 Val->getOperand(0)->getAsZExtVal() != SystemZ::FP128BitRegClassID || in isF128MovedFromParts()
7030 Val->getOperand(2)->getAsZExtVal() != SystemZ::subreg_l64 || in isF128MovedFromParts()
7031 Val->getOperand(4)->getAsZExtVal() != SystemZ::subreg_h64) in isF128MovedFromParts()
7034 LoPart = Val->getOperand(1); in isF128MovedFromParts()
7035 HiPart = Val->getOperand(3); in isF128MovedFromParts()
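// Standalone sketch (assumes __int128): the mirror-image pattern matched by
// isI128MovedFromParts above, an i128 assembled from two i64 GPR values as
// (zext Lo) | ((anyext Hi) << 64).
#include <cstdint>
static unsigned __int128 i128FromParts(uint64_t Hi, uint64_t Lo) {
  return ((unsigned __int128)Hi << 64) | Lo;
}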
7043 auto &Op1 = N->getOperand(1); in combineSTORE()
7044 EVT MemVT = SN->getMemoryVT(); in combineSTORE()
7049 if (MemVT.isInteger() && SN->isTruncatingStore()) { in combineSTORE()
7051 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) { in combineSTORE()
7055 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value, in combineSTORE()
7056 SN->getBasePtr(), SN->getMemoryVT(), in combineSTORE()
7057 SN->getMemOperand()); in combineSTORE()
7061 if (!SN->isTruncatingStore() && in combineSTORE()
7063 Op1.getNode()->hasOneUse() && in combineSTORE()
7072 N->getOperand(0), BSwapOp, N->getOperand(2) in combineSTORE()
7077 Ops, MemVT, SN->getMemOperand()); in combineSTORE()
7079 // Combine STORE (element-swap) into VSTER in combineSTORE()
7080 if (!SN->isTruncatingStore() && in combineSTORE()
7082 Op1.getNode()->hasOneUse() && in combineSTORE()
7085 ArrayRef<int> ShuffleMask = SVN->getMask(); in combineSTORE()
7088 N->getOperand(0), Op1.getOperand(0), N->getOperand(2) in combineSTORE()
7093 Ops, MemVT, SN->getMemOperand()); in combineSTORE()
7098 if (!SN->isTruncatingStore() && in combineSTORE()
7101 N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) { in combineSTORE()
7102 SDValue Ops[] = { Op1.getOperand(0), N->getOperand(2) }; in combineSTORE()
7105 Ops, MemVT, SN->getMemOperand()); in combineSTORE()
7108 // Transform a store of a 128-bit value moved from parts into two stores. in combineSTORE()
7109 if (SN->isSimple() && ISD::isNormalStore(SN)) { in combineSTORE()
7115 DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(), in combineSTORE()
7116 SN->getPointerInfo(), SN->getOriginalAlign(), in combineSTORE()
7117 SN->getMemOperand()->getFlags(), SN->getAAInfo()); in combineSTORE()
7119 DAG.getStore(SN->getChain(), DL, LoPart, in combineSTORE()
7120 DAG.getObjectPtrOffset(DL, SN->getBasePtr(), in combineSTORE()
7122 SN->getPointerInfo().getWithOffset(8), in combineSTORE()
7123 SN->getOriginalAlign(), in combineSTORE()
7124 SN->getMemOperand()->getFlags(), SN->getAAInfo()); in combineSTORE()
7132 // it is straight-forward to handle the zero-extend node in the initial in combineSTORE()
7144 if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() || in combineSTORE()
7145 isInt<16>(C->getSExtValue()) || MemVT.getStoreSize() <= 2) in combineSTORE()
7147 SystemZVectorConstantInfo VCI(APInt(TotBytes * 8, C->getZExtValue())); in combineSTORE()
7159 if (MulOp->getOpcode() == ISD::MUL && in combineSTORE()
7162 SDValue LHS = MulOp->getOperand(0); in combineSTORE()
7163 if (LHS->getOpcode() == ISD::ZERO_EXTEND) in combineSTORE()
7164 WordVT = LHS->getOperand(0).getValueType(); in combineSTORE()
7165 else if (LHS->getOpcode() == ISD::AssertZext) in combineSTORE()
7166 WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT(); in combineSTORE()
7170 if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) { in combineSTORE()
7172 APInt(MulVT.getSizeInBits(), C->getZExtValue())); in combineSTORE()
7176 Word = DAG.getZExtOrTrunc(LHS->getOperand(0), SDLoc(SN), WordVT); in combineSTORE()
7183 SDValue SplatVal = Op1->getOperand(0); in combineSTORE()
7201 return DAG.getStore(SN->getChain(), SDLoc(SN), SplatVal, in combineSTORE()
7202 SN->getBasePtr(), SN->getMemOperand()); in combineSTORE()
7212 // Combine element-swap (LOAD) into VLER in combineVECTOR_SHUFFLE()
7213 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && in combineVECTOR_SHUFFLE()
7214 N->getOperand(0).hasOneUse() && in combineVECTOR_SHUFFLE()
7217 ArrayRef<int> ShuffleMask = SVN->getMask(); in combineVECTOR_SHUFFLE()
7218 if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) { in combineVECTOR_SHUFFLE()
7219 SDValue Load = N->getOperand(0); in combineVECTOR_SHUFFLE()
7222 // Create the element-swapping load. in combineVECTOR_SHUFFLE()
7224 LD->getChain(), // Chain in combineVECTOR_SHUFFLE()
7225 LD->getBasePtr() // Ptr in combineVECTOR_SHUFFLE()
7229 DAG.getVTList(LD->getValueType(0), MVT::Other), in combineVECTOR_SHUFFLE()
7230 Ops, LD->getMemoryVT(), LD->getMemOperand()); in combineVECTOR_SHUFFLE()
7256 SDValue Op = N->getOperand(0); in combineEXTRACT_VECTOR_ELT()
7269 Op.getOperand(0), N->getOperand(1)); in combineEXTRACT_VECTOR_ELT()
7272 if (EltVT != N->getValueType(0)) { in combineEXTRACT_VECTOR_ELT()
7274 Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op); in combineEXTRACT_VECTOR_ELT()
7280 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) { in combineEXTRACT_VECTOR_ELT()
7281 SDValue Op0 = N->getOperand(0); in combineEXTRACT_VECTOR_ELT()
7283 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0, in combineEXTRACT_VECTOR_ELT()
7284 IndexN->getZExtValue(), DCI, false); in combineEXTRACT_VECTOR_ELT()
7293 if (N->getOperand(0) == N->getOperand(1)) in combineJOIN_DWORDS()
7294 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0), in combineJOIN_DWORDS()
7295 N->getOperand(0)); in combineJOIN_DWORDS()
7300 SDValue Chain1 = N1->getOperand(0); in MergeInputChains()
7301 SDValue Chain2 = N2->getOperand(0); in MergeInputChains()
7307 // FIXME - we could handle more complex cases via TokenFactor, assuming we can prove the merge would not create a cycle. in MergeInputChains()
7319 // (fpround (extract_vector_elt X 1)) -> in combineFP_ROUND()
7324 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; in combineFP_ROUND()
7326 SDValue Op0 = N->getOperand(OpNo); in combineFP_ROUND()
7327 if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() && in combineFP_ROUND()
7333 for (auto *U : Vec->uses()) { in combineFP_ROUND()
7334 if (U != Op0.getNode() && U->hasOneUse() && in combineFP_ROUND()
7335 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in combineFP_ROUND()
7336 U->getOperand(0) == Vec && in combineFP_ROUND()
7337 U->getOperand(1).getOpcode() == ISD::Constant && in combineFP_ROUND()
7338 U->getConstantOperandVal(1) == 1) { in combineFP_ROUND()
7339 SDValue OtherRound = SDValue(*U->use_begin(), 0); in combineFP_ROUND()
7340 if (OtherRound.getOpcode() == N->getOpcode() && in combineFP_ROUND()
7344 if (N->isStrictFPOpcode()) { in combineFP_ROUND()
7367 N->getVTList(), Extract0, Chain); in combineFP_ROUND()
7383 // (fpextend (extract_vector_elt X 2)) -> in combineFP_EXTEND()
7388 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0; in combineFP_EXTEND()
7390 SDValue Op0 = N->getOperand(OpNo); in combineFP_EXTEND()
7391 if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() && in combineFP_EXTEND()
7397 for (auto *U : Vec->uses()) { in combineFP_EXTEND()
7398 if (U != Op0.getNode() && U->hasOneUse() && in combineFP_EXTEND()
7399 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT && in combineFP_EXTEND()
7400 U->getOperand(0) == Vec && in combineFP_EXTEND()
7401 U->getOperand(1).getOpcode() == ISD::Constant && in combineFP_EXTEND()
7402 U->getConstantOperandVal(1) == 2) { in combineFP_EXTEND()
7403 SDValue OtherExtend = SDValue(*U->use_begin(), 0); in combineFP_EXTEND()
7404 if (OtherExtend.getOpcode() == N->getOpcode() && in combineFP_EXTEND()
7408 if (N->isStrictFPOpcode()) { in combineFP_EXTEND()
7431 N->getVTList(), Extract0, Chain); in combineFP_EXTEND()
7446 unsigned Opcode = N->getOpcode(); in combineINT_TO_FP()
7447 EVT OutVT = N->getValueType(0); in combineINT_TO_FP()
7449 SDValue Op = N->getOperand(0); in combineINT_TO_FP()
7450 unsigned OutScalarBits = OutLLVMTy->getScalarSizeInBits(); in combineINT_TO_FP()
7451 unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits(); in combineINT_TO_FP()
7453 // Insert an extension before type-legalization to avoid scalarization, e.g.: in combineINT_TO_FP()
7457 if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits && in combineINT_TO_FP()
7459 unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements(); in combineINT_TO_FP()
7461 Ctx, EVT::getIntegerVT(Ctx, OutLLVMTy->getScalarSizeInBits()), NumElts); in combineINT_TO_FP()
7474 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) && in combineBSWAP()
7475 N->getOperand(0).hasOneUse() && in combineBSWAP()
7476 canLoadStoreByteSwapped(N->getValueType(0))) { in combineBSWAP()
7477 SDValue Load = N->getOperand(0); in combineBSWAP()
7480 // Create the byte-swapping load. in combineBSWAP()
7482 LD->getChain(), // Chain in combineBSWAP()
7483 LD->getBasePtr() // Ptr in combineBSWAP()
7485 EVT LoadVT = N->getValueType(0); in combineBSWAP()
7491 Ops, LD->getMemoryVT(), LD->getMemOperand()); in combineBSWAP()
7495 if (N->getValueType(0) == MVT::i16) in combineBSWAP()
7511 SDValue Op = N->getOperand(0); in combineBSWAP()
7529 (canLoadStoreByteSwapped(N->getValueType(0)) && in combineBSWAP()
7531 EVT VecVT = N->getValueType(0); in combineBSWAP()
7532 EVT EltVT = N->getValueType(0).getVectorElementType(); in combineBSWAP()
7560 EVT VecVT = N->getValueType(0); in combineBSWAP()
7573 return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask()); in combineBSWAP()
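// Standalone sketch of the fact this combine relies on: BSWAP commutes with
// any element shuffle, because each lane is byte-swapped independently.
// Modeled on 32-bit lanes with defined (non-negative) mask entries.
#include <cstddef>
#include <cstdint>
#include <vector>
static std::vector<uint32_t> bswapOfShuffle(const std::vector<uint32_t> &A,
                                            const std::vector<uint32_t> &B,
                                            const std::vector<int> &M) {
  // bswap(shuffle(A, B, M)) == shuffle(bswap(A), bswap(B), M).
  std::vector<uint32_t> R;
  for (int Idx : M) {
    uint32_t V = size_t(Idx) < A.size() ? A[Idx] : B[size_t(Idx) - A.size()];
    R.push_back(__builtin_bswap32(V)); // GCC/Clang builtin
  }
  return R;
}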
7591 if (ICmp->getOpcode() != SystemZISD::ICMP) in combineCCMask()
7593 auto *CompareLHS = ICmp->getOperand(0).getNode(); in combineCCMask()
7594 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1)); in combineCCMask()
7599 if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) { in combineCCMask()
7608 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0)); in combineCCMask()
7611 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1)); in combineCCMask()
7614 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue()) in combineCCMask()
7616 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue()) in combineCCMask()
7620 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2)); in combineCCMask()
7621 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3)); in combineCCMask()
7624 CCValid = NewCCValid->getZExtValue(); in combineCCMask()
7625 CCMask = NewCCMask->getZExtValue(); in combineCCMask()
7630 CCReg = CompareLHS->getOperand(4); in combineCCMask()
7635 if (CompareLHS->getOpcode() == ISD::SRA) { in combineCCMask()
7636 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1)); in combineCCMask()
7637 if (!SRACount || SRACount->getZExtValue() != 30) in combineCCMask()
7639 auto *SHL = CompareLHS->getOperand(0).getNode(); in combineCCMask()
7640 if (SHL->getOpcode() != ISD::SHL) in combineCCMask()
7642 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1)); in combineCCMask()
7643 if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC) in combineCCMask()
7645 auto *IPM = SHL->getOperand(0).getNode(); in combineCCMask()
7646 if (IPM->getOpcode() != SystemZISD::IPM) in combineCCMask()
7650 if (!CompareLHS->hasOneUse()) in combineCCMask()
7653 if (CompareRHS->getZExtValue() != 0) in combineCCMask()
7660 CCReg = IPM->getOperand(0); in combineCCMask()
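// Standalone sketch: IPM deposits CC in bits 29:28 of its result
// (SystemZ::IPM_CC == 28), so the SHL-by-2 / SRA-by-30 pair unpicked above
// is a signed two-bit extract of CC.
#include <cstdint>
static int32_t signedCCFromIPM(uint32_t IPMVal) {
  return int32_t(IPMVal << 2) >> 30; // CC 0,1,2,3 -> 0,1,-2,-1
}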
7672 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); in combineBR_CCMASK()
7673 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); in combineBR_CCMASK()
7677 int CCValidVal = CCValid->getZExtValue(); in combineBR_CCMASK()
7678 int CCMaskVal = CCMask->getZExtValue(); in combineBR_CCMASK()
7679 SDValue Chain = N->getOperand(0); in combineBR_CCMASK()
7680 SDValue CCReg = N->getOperand(4); in combineBR_CCMASK()
7683 return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0), in combineBR_CCMASK()
7687 N->getOperand(3), CCReg); in combineBR_CCMASK()
7696 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2)); in combineSELECT_CCMASK()
7697 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3)); in combineSELECT_CCMASK()
7701 int CCValidVal = CCValid->getZExtValue(); in combineSELECT_CCMASK()
7702 int CCMaskVal = CCMask->getZExtValue(); in combineSELECT_CCMASK()
7703 SDValue CCReg = N->getOperand(4); in combineSELECT_CCMASK()
7706 return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0), in combineSELECT_CCMASK()
7707 N->getOperand(0), N->getOperand(1), in combineSELECT_CCMASK()
7719 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1)); in combineGET_CCMASK()
7720 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2)); in combineGET_CCMASK()
7723 int CCValidVal = CCValid->getZExtValue(); in combineGET_CCMASK()
7724 int CCMaskVal = CCMask->getZExtValue(); in combineGET_CCMASK()
7726 SDValue Select = N->getOperand(0); in combineGET_CCMASK()
7727 if (Select->getOpcode() == ISD::TRUNCATE) in combineGET_CCMASK()
7728 Select = Select->getOperand(0); in combineGET_CCMASK()
7729 if (Select->getOpcode() != SystemZISD::SELECT_CCMASK) in combineGET_CCMASK()
7732 auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2)); in combineGET_CCMASK()
7733 auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3)); in combineGET_CCMASK()
7736 int SelectCCValidVal = SelectCCValid->getZExtValue(); in combineGET_CCMASK()
7737 int SelectCCMaskVal = SelectCCMask->getZExtValue(); in combineGET_CCMASK()
7739 auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0)); in combineGET_CCMASK()
7740 auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1)); in combineGET_CCMASK()
7743 if (TrueVal->getZExtValue() == 1 && FalseVal->getZExtValue() == 0) in combineGET_CCMASK()
7745 else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1) in combineGET_CCMASK()
7755 return Select->getOperand(4); in combineGET_CCMASK()
7761 EVT VT = N->getValueType(0); in combineIntDIVREM() local
7769 if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) && in combineIntDIVREM()
7770 DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1))) in combineIntDIVREM()
7779 unsigned Id = N->getConstantOperandVal(1); in combineINTRINSIC()
7785 if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) in combineINTRINSIC()
7786 if (C->getZExtValue() >= 15) in combineINTRINSIC()
7787 return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0), in combineINTRINSIC()
7788 N->getOperand(3), MachinePointerInfo()); in combineINTRINSIC()
7793 if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3))) in combineINTRINSIC()
7794 if (C->getZExtValue() >= 15) in combineINTRINSIC()
7795 return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2), in combineINTRINSIC()
7796 N->getOperand(4), MachinePointerInfo()); in combineINTRINSIC()
7804 if (N->getOpcode() == SystemZISD::PCREL_WRAPPER) in unwrapAddress()
7805 return N->getOperand(0); in unwrapAddress()
7811 switch(N->getOpcode()) { in PerformDAGCombine()
7848 EVT VT = Op.getValueType(); in getDemandedSrcElements() local
7849 unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1); in getDemandedSrcElements()
7874 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH in getDemandedSrcElements()
7877 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH in getDemandedSrcElements()
7895 if (!DemandedElts[OpNo - 1]) in getDemandedSrcElements()
7898 unsigned MaskBit = ((OpNo - 1) ? 1 : 4); in getDemandedSrcElements()
7905 assert(VT == MVT::v16i8 && "Unexpected type."); in getDemandedSrcElements()
7908 unsigned NumSrc0Els = 16 - FirstIdx; in getDemandedSrcElements()
7920 SrcDemE = APInt(NumElts, -1); in getDemandedSrcElements()
7970 EVT VT = Op.getValueType(); in computeKnownBitsForTargetNode() local
7971 if (Op.getResNo() != 0 || VT == MVT::Untyped) in computeKnownBitsForTargetNode()
7973 assert (Known.getBitWidth() == VT.getScalarSizeInBits() && in computeKnownBitsForTargetNode()
7974 "KnownBits does not match VT in bitwidth"); in computeKnownBitsForTargetNode()
7975 assert ((!VT.isVector() || in computeKnownBitsForTargetNode()
7976 (DemandedElts.getBitWidth() == VT.getVectorNumElements())) && in computeKnownBitsForTargetNode()
7977 "DemandedElts does not match VT number of elements"); in computeKnownBitsForTargetNode()
8001 case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH in computeKnownBitsForTargetNode()
8009 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH in computeKnownBitsForTargetNode()
8062 EVT VT = Op.getValueType(); in computeNumSignBitsBinOp() local
8063 unsigned VTBits = VT.getScalarSizeInBits(); in computeNumSignBitsBinOp()
8065 unsigned SrcExtraBits = SrcBitWidth - VTBits; in computeNumSignBitsBinOp()
8067 return (Common - SrcExtraBits); in computeNumSignBitsBinOp()
8100 case Intrinsic::s390_vuphb: // VECTOR UNPACK HIGH in ComputeNumSignBitsForTargetNode()
8109 EVT VT = Op.getValueType(); in ComputeNumSignBitsForTargetNode() local
8110 unsigned VTBits = VT.getScalarSizeInBits(); in ComputeNumSignBitsForTargetNode()
8111 Tmp += VTBits - PackedOp.getScalarValueSizeInBits(); in ComputeNumSignBitsForTargetNode()
8133 switch (Op->getOpcode()) { in isGuaranteedNotToBeUndefOrPoisonForTargetNode()
8144 unsigned StackAlign = TFI->getStackAlignment(); in getStackProbeSize()
8148 // stack-probe-size attribute. in getStackProbeSize()
8150 MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size", 4096); in getStackProbeSize()
8152 StackProbeSize &= ~(StackAlign - 1); in getStackProbeSize()
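// Standalone sketch: the masking above rounds the requested probe size down
// to the stack alignment, which is assumed to be a power of two; e.g. 4095
// with 16-byte alignment yields 4080.
static unsigned roundDownToAlignment(unsigned Size, unsigned Align) {
  return Size & ~(Align - 1);
}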
8156 //===----------------------------------------------------------------------===//
8158 //===----------------------------------------------------------------------===//
8164 MachineFunction &MF = *MBB->getParent(); in forceReg()
8171 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::COPY), Reg) in forceReg()
8177 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) in forceReg()
8190 for (MachineBasicBlock::iterator miE = MBB->end(); miI != miE; ++miI) { in checkCCKill()
8195 break; // Should have kill-flag - update below. in checkCCKill()
8200 if (miI == MBB->end()) { in checkCCKill()
8201 for (const MachineBasicBlock *Succ : MBB->successors()) in checkCCKill()
8202 if (Succ->isLiveIn(SystemZ::CC)) in checkCCKill()
8209 // Return true if it is OK for this Select pseudo-opcode to be cascaded
8210 // together with other Select pseudo-opcodes into a single basic-block with
8237 MachineFunction *MF = TrueMBB->getParent(); in createPHIsForSelects()
8238 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); in createPHIsForSelects()
8241 unsigned CCValid = FirstMI->getOperand(3).getImm(); in createPHIsForSelects()
8242 unsigned CCMask = FirstMI->getOperand(4).getImm(); in createPHIsForSelects()
8244 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin(); in createPHIsForSelects()
8255 Register DestReg = MI->getOperand(0).getReg(); in createPHIsForSelects()
8256 Register TrueReg = MI->getOperand(1).getReg(); in createPHIsForSelects()
8257 Register FalseReg = MI->getOperand(2).getReg(); in createPHIsForSelects()
8262 if (MI->getOperand(4).getImm() == (CCValid ^ CCMask)) in createPHIsForSelects()
8271 DebugLoc DL = MI->getDebugLoc(); in createPHIsForSelects()
8272 BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg) in createPHIsForSelects()
8280 MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); in createPHIsForSelects()
8286 MachineFunction &MF = *BB->getParent(); in emitAdjCallStack()
8289 assert(TFL->hasReservedCallFrame(MF) && in emitAdjCallStack()
8290 "ADJSTACKDOWN and ADJSTACKUP should be no-ops"); in emitAdjCallStack()
8322 std::next(MachineBasicBlock::iterator(MI)), MBB->end())) { in emitSelect()
8338 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) { in emitSelect()
8352 bool CCKilled = (LastMI->killsRegister(SystemZ::CC, /*TRI=*/nullptr) || in emitSelect()
8359 // live-in to both FalseMBB and JoinMBB. in emitSelect()
8361 FalseMBB->addLiveIn(SystemZ::CC); in emitSelect()
8362 JoinMBB->addLiveIn(SystemZ::CC); in emitSelect()
8369 BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC)) in emitSelect()
8371 MBB->addSuccessor(JoinMBB); in emitSelect()
8372 MBB->addSuccessor(FalseMBB); in emitSelect()
8377 MBB->addSuccessor(JoinMBB); in emitSelect()
8385 SelMI->eraseFromParent(); in emitSelect()
8387 MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI(); in emitSelect()
8389 MBB->splice(InsertPos, StartMBB, DbgMI); in emitSelect()
8413 StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp); in emitCondStore()
8419 if (I->isStore()) { in emitCondStore()
8425 // order to avoid matching the index register, but the performance trade-offs in emitCondStore()
8431 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode)) in emitCondStore()
8452 // live-in to both FalseMBB and JoinMBB. in emitCondStore()
8455 FalseMBB->addLiveIn(SystemZ::CC); in emitCondStore()
8456 JoinMBB->addLiveIn(SystemZ::CC); in emitCondStore()
8463 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitCondStore()
8465 MBB->addSuccessor(JoinMBB); in emitCondStore()
8466 MBB->addSuccessor(FalseMBB); in emitCondStore()
8472 BuildMI(MBB, DL, TII->get(StoreOpcode)) in emitCondStore()
8478 MBB->addSuccessor(JoinMBB); in emitCondStore()
8489 MachineFunction &MF = *MBB->getParent(); in emitICmp128Hi()
8493 // Synthetic instruction to compare 128-bit values. in emitICmp128Hi()
8504 // Use VECTOR ELEMENT COMPARE [LOGICAL] to compare the high parts. in emitICmp128Hi()
8506 // CC 1 if high(Op0) > high(Op1) in emitICmp128Hi()
8507 // CC 2 if high(Op0) < high(Op1) in emitICmp128Hi()
8508 // CC 0 if high(Op0) == high(Op1) in emitICmp128Hi()
8517 BuildMI(MBB, MI.getDebugLoc(), TII->get(HiOpcode)) in emitICmp128Hi()
8519 BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::BRC)) in emitICmp128Hi()
8521 MBB->addSuccessor(JoinMBB); in emitICmp128Hi()
8522 MBB->addSuccessor(HiEqMBB); in emitICmp128Hi()
8526 // Otherwise, use VECTOR COMPARE HIGH LOGICAL. in emitICmp128Hi()
8527 // Since we already know the high parts are equal, the CC in emitICmp128Hi()
8536 BuildMI(MBB, MI.getDebugLoc(), TII->get(SystemZ::VCHLGS), Temp) in emitICmp128Hi()
8538 MBB->addSuccessor(JoinMBB); in emitICmp128Hi()
8540 // Mark CC as live-in to JoinMBB. in emitICmp128Hi()
8541 JoinMBB->addLiveIn(SystemZ::CC); in emitICmp128Hi()
8555 MachineFunction &MF = *MBB->getParent(); in emitAtomicLoadBinary()
8571 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); in emitAtomicLoadBinary()
8572 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); in emitAtomicLoadBinary()
8592 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); in emitAtomicLoadBinary()
8593 MBB->addSuccessor(LoopMBB); in emitAtomicLoadBinary()
8604 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) in emitAtomicLoadBinary()
8607 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) in emitAtomicLoadBinary()
8612 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2); in emitAtomicLoadBinary()
8614 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal) in emitAtomicLoadBinary()
8615 .addReg(Tmp).addImm(-1U << (32 - BitSize)); in emitAtomicLoadBinary()
8618 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal) in emitAtomicLoadBinary()
8624 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal) in emitAtomicLoadBinary()
8626 .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize); in emitAtomicLoadBinary()
8627 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) in emitAtomicLoadBinary()
8629 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) in emitAtomicLoadBinary()
8634 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitAtomicLoadBinary()
8636 MBB->addSuccessor(LoopMBB); in emitAtomicLoadBinary()
8637 MBB->addSuccessor(DoneMBB); in emitAtomicLoadBinary()
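// Standalone sketch (a model, not the emitted MachineIR): the loop built
// above is in essence a compare-and-swap retry loop on the aligned 32-bit
// word containing the sub-word operand, shown here for an 8-bit OR.
#include <atomic>
#include <cstdint>
static uint8_t atomicOr8Model(std::atomic<uint32_t> &Word, unsigned BitShift,
                              uint8_t Src2) {
  uint32_t Old = Word.load();
  uint32_t New;
  do {
    New = Old | (uint32_t(Src2) << BitShift);      // BinOpcode on the field
  } while (!Word.compare_exchange_weak(Old, New)); // the CS retry
  return uint8_t(Old >> BitShift); // Dest: the field's previous value
}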
8646 // minimum or maximum value. KeepOldMask is the BRC condition-code mask
8651 MachineFunction &MF = *MBB->getParent(); in emitAtomicLoadMinMax()
8666 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); in emitAtomicLoadMinMax()
8667 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); in emitAtomicLoadMinMax()
8690 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0); in emitAtomicLoadMinMax()
8691 MBB->addSuccessor(LoopMBB); in emitAtomicLoadMinMax()
8699 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) in emitAtomicLoadMinMax()
8702 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal) in emitAtomicLoadMinMax()
8704 BuildMI(MBB, DL, TII->get(CompareOpcode)) in emitAtomicLoadMinMax()
8706 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitAtomicLoadMinMax()
8708 MBB->addSuccessor(UpdateMBB); in emitAtomicLoadMinMax()
8709 MBB->addSuccessor(UseAltMBB); in emitAtomicLoadMinMax()
8715 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal) in emitAtomicLoadMinMax()
8718 MBB->addSuccessor(UpdateMBB); in emitAtomicLoadMinMax()
8728 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal) in emitAtomicLoadMinMax()
8731 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal) in emitAtomicLoadMinMax()
8733 BuildMI(MBB, DL, TII->get(CSOpcode), Dest) in emitAtomicLoadMinMax()
8738 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitAtomicLoadMinMax()
8740 MBB->addSuccessor(LoopMBB); in emitAtomicLoadMinMax()
8741 MBB->addSuccessor(DoneMBB); in emitAtomicLoadMinMax()
8752 MachineFunction &MF = *MBB->getParent(); in emitAtomicCmpSwapW()
8769 // Get the right opcodes for the displacement and zero-extension. in emitAtomicCmpSwapW()
8770 unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp); in emitAtomicCmpSwapW()
8771 unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp); in emitAtomicCmpSwapW()
8795 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal) in emitAtomicCmpSwapW()
8799 MBB->addSuccessor(LoopMBB); in emitAtomicCmpSwapW()
8807 // %RetrySwapVal = RISBG32 %SwapVal, %OldValRot, 32, 63-BitSize, 0 in emitAtomicCmpSwapW()
8808 // ^^ Replace the upper 32-BitSize bits of the swap value with those that we loaded and rotated. in emitAtomicCmpSwapW()
8815 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal) in emitAtomicCmpSwapW()
8818 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal) in emitAtomicCmpSwapW()
8821 BuildMI(MBB, DL, TII->get(SystemZ::RLL), OldValRot) in emitAtomicCmpSwapW()
8823 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal) in emitAtomicCmpSwapW()
8824 .addReg(SwapVal).addReg(OldValRot).addImm(32).addImm(63 - BitSize).addImm(0); in emitAtomicCmpSwapW()
8825 BuildMI(MBB, DL, TII->get(ZExtOpcode), Dest) in emitAtomicCmpSwapW()
8827 BuildMI(MBB, DL, TII->get(SystemZ::CR)) in emitAtomicCmpSwapW()
8829 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitAtomicCmpSwapW()
8832 MBB->addSuccessor(DoneMBB); in emitAtomicCmpSwapW()
8833 MBB->addSuccessor(SetMBB); in emitAtomicCmpSwapW()
8836 // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift) in emitAtomicCmpSwapW()
8842 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal) in emitAtomicCmpSwapW()
8843 .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize); in emitAtomicCmpSwapW()
8844 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal) in emitAtomicCmpSwapW()
8849 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitAtomicCmpSwapW()
8851 MBB->addSuccessor(LoopMBB); in emitAtomicCmpSwapW()
8852 MBB->addSuccessor(DoneMBB); in emitAtomicCmpSwapW()
8854 // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in in emitAtomicCmpSwapW()
8858 DoneMBB->addLiveIn(SystemZ::CC); in emitAtomicCmpSwapW()
8872 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest) in emitPair128()
8882 // if the high register of the GR128 value must be cleared or false if it's "don't care". in emitExt128()
8887 MachineFunction &MF = *MBB->getParent(); in emitExt128()
8896 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128); in emitExt128()
8901 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64) in emitExt128()
8903 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128) in emitExt128()
8907 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest) in emitExt128()
8918 MachineFunction &MF = *MBB->getParent(); in emitMemMemWrapper()
8929 auto foldDisplIfNeeded = [&](MachineOperand &Base, uint64_t &Disp) -> void { in emitMemMemWrapper()
8932 unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp); in emitMemMemWrapper()
8933 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode), Reg) in emitMemMemWrapper()
8958 unsigned Length) -> void { in emitMemMemWrapper()
8963 BuildMI(*InsMBB, InsPos, DL, TII->get(SystemZ::MVI)) in emitMemMemWrapper()
8966 BuildMI(*InsMBB, InsPos, DL, TII->get(SystemZ::STC)) in emitMemMemWrapper()
8968 if (--Length == 0) in emitMemMemWrapper()
8971 BuildMI(*MBB, InsPos, DL, TII->get(Opcode)) in emitMemMemWrapper()
8989 // A two-CLC sequence is a clear win over a loop, not least because in emitMemMemWrapper()
8990 // it needs only one branch. A three-CLC sequence needs the same number of branches as a loop. in emitMemMemWrapper()
9001 // to choose between straight-line code and looping code, since the in emitMemMemWrapper()
9020 TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256); in emitMemMemWrapper()
9023 BuildMI(*MBB, MI, DL, TII->get(SystemZ::SRLG), StartCountReg) in emitMemMemWrapper()
9030 auto loadZeroAddress = [&]() -> MachineOperand { in emitMemMemWrapper()
9032 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LGHI), Reg).addImm(0); in emitMemMemWrapper()
9070 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) in emitMemMemWrapper()
9071 .addReg(LenAdjReg).addImm(IsMemset ? -2 : -1); in emitMemMemWrapper()
9072 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9075 MBB->addSuccessor(AllDoneMBB); in emitMemMemWrapper()
9077 MBB->addSuccessor(StartMBB); in emitMemMemWrapper()
9084 MBB->addSuccessor(MemsetOneCheckMBB); in emitMemMemWrapper()
9086 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) in emitMemMemWrapper()
9087 .addReg(LenAdjReg).addImm(-1); in emitMemMemWrapper()
9088 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9091 MBB->addSuccessor(MemsetOneMBB, {10, 100}); in emitMemMemWrapper()
9092 MBB->addSuccessor(StartMBB, {90, 100}); in emitMemMemWrapper()
9097 insertMemMemOp(MBB, MBB->end(), in emitMemMemWrapper()
9101 BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(AllDoneMBB); in emitMemMemWrapper()
9102 MBB->addSuccessor(AllDoneMBB); in emitMemMemWrapper()
9108 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) in emitMemMemWrapper()
9110 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9113 MBB->addSuccessor(DoneMBB); in emitMemMemWrapper()
9114 MBB->addSuccessor(LoopMBB); in emitMemMemWrapper()
9124 MBB->addSuccessor(LoopMBB); in emitMemMemWrapper()
9130 // CC live-through into EndMBB, so add it as live-in. in emitMemMemWrapper()
9131 DoneMBB->addLiveIn(SystemZ::CC); in emitMemMemWrapper()
9147 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg) in emitMemMemWrapper()
9151 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg) in emitMemMemWrapper()
9154 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg) in emitMemMemWrapper()
9158 BuildMI(MBB, DL, TII->get(SystemZ::PFD)) in emitMemMemWrapper()
9160 .addReg(ThisDestReg).addImm(DestDisp - IsMemset + 768).addReg(0); in emitMemMemWrapper()
9161 insertMemMemOp(MBB, MBB->end(), in emitMemMemWrapper()
9165 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9168 MBB->addSuccessor(EndMBB); in emitMemMemWrapper()
9169 MBB->addSuccessor(NextMBB); in emitMemMemWrapper()
9175 // %NextCountReg = AGHI %ThisCountReg, -1 in emitMemMemWrapper()
9182 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg) in emitMemMemWrapper()
9185 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg) in emitMemMemWrapper()
9187 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg) in emitMemMemWrapper()
9188 .addReg(ThisCountReg).addImm(-1); in emitMemMemWrapper()
9189 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) in emitMemMemWrapper()
9191 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9194 MBB->addSuccessor(LoopMBB); in emitMemMemWrapper()
9195 MBB->addSuccessor(DoneMBB); in emitMemMemWrapper()
9209 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemDestReg) in emitMemMemWrapper()
9213 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RemSrcReg) in emitMemMemWrapper()
9217 insertMemMemOp(MBB, MBB->end(), in emitMemMemWrapper()
9221 BuildMI(MBB, DL, TII->get(SystemZ::EXRL_Pseudo)) in emitMemMemWrapper()
9226 MBB->addSuccessor(AllDoneMBB); in emitMemMemWrapper()
9231 MBB->addLiveIn(SystemZ::CC); in emitMemMemWrapper()
9237 // Handle any remaining bytes with straight-line code. in emitMemMemWrapper()
9240 // The previous iteration might have created out-of-range displacements. in emitMemMemWrapper()
9247 ImmLength -= ThisLength; in emitMemMemWrapper()
9252 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitMemMemWrapper()
9255 MBB->addSuccessor(EndMBB); in emitMemMemWrapper()
9256 MBB->addSuccessor(NextMBB); in emitMemMemWrapper()
9261 MBB->addSuccessor(EndMBB); in emitMemMemWrapper()
9263 MBB->addLiveIn(SystemZ::CC); in emitMemMemWrapper()
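// Standalone sketch: MVC/CLC/XC handle at most 256 bytes per execution, so
// the loop above runs Length / 256 times (the SRLG by 8) and the residue
// Length % 256 is covered by straight-line code or one EXRL-executed op.
#include <cstdint>
static void splitMemMemLength(uint64_t Length, uint64_t &LoopCount,
                              uint64_t &Residue) {
  LoopCount = Length >> 8; // full 256-byte blocks
  Residue = Length & 255;  // remaining bytes
}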
9270 // Decompose string pseudo-instruction MI into a loop that continually performs Opcode until CC != 3. in emitStringWrapper()
9274 MachineFunction &MF = *MBB->getParent(); in emitStringWrapper()
9295 MBB->addSuccessor(LoopMBB); in emitStringWrapper()
9301 // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L in emitStringWrapper()
9305 // The load of R0L can be hoisted by post-RA LICM. in emitStringWrapper()
9308 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg) in emitStringWrapper()
9311 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg) in emitStringWrapper()
9314 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg); in emitStringWrapper()
9315 BuildMI(MBB, DL, TII->get(Opcode)) in emitStringWrapper()
9318 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitStringWrapper()
9320 MBB->addSuccessor(LoopMBB); in emitStringWrapper()
9321 MBB->addSuccessor(DoneMBB); in emitStringWrapper()
9323 DoneMBB->addLiveIn(SystemZ::CC); in emitStringWrapper()
9333 MachineFunction &MF = *MBB->getParent(); in emitTransactionBegin()
9338 MI.setDesc(TII->get(Opcode)); in emitTransactionBegin()
9348 if (TFI->hasFP(MF)) in emitTransactionBegin()
9378 MachineFunction &MF = *MBB->getParent(); in emitLoadAndTestCmp0()
9386 const TargetRegisterClass *RC = MRI->getRegClass(SrcReg); in emitLoadAndTestCmp0()
9387 Register DstReg = MRI->createVirtualRegister(RC); in emitLoadAndTestCmp0()
9389 // Replace pseudo with a normal load-and-test that models the def as well. in emitLoadAndTestCmp0()
9391 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg) in emitLoadAndTestCmp0()
9401 MachineFunction &MF = *MBB->getParent(); in emitProbedAlloca()
9419 Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass); in emitProbedAlloca()
9420 Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass); in emitProbedAlloca()
9425 StartMBB->addSuccessor(LoopTestMBB); in emitProbedAlloca()
9427 BuildMI(MBB, DL, TII->get(SystemZ::PHI), PHIReg) in emitProbedAlloca()
9432 BuildMI(MBB, DL, TII->get(SystemZ::CLGFI)) in emitProbedAlloca()
9435 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitProbedAlloca()
9438 MBB->addSuccessor(LoopBodyMBB); in emitProbedAlloca()
9439 MBB->addSuccessor(TailTestMBB); in emitProbedAlloca()
9444 BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), IncReg) in emitProbedAlloca()
9447 BuildMI(MBB, DL, TII->get(SystemZ::SLGFI), SystemZ::R15D) in emitProbedAlloca()
9450 BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D) in emitProbedAlloca()
9451 .addReg(SystemZ::R15D).addImm(ProbeSize - 8).addReg(0) in emitProbedAlloca()
9453 BuildMI(MBB, DL, TII->get(SystemZ::J)).addMBB(LoopTestMBB); in emitProbedAlloca()
9454 MBB->addSuccessor(LoopTestMBB); in emitProbedAlloca()
9460 BuildMI(MBB, DL, TII->get(SystemZ::CGHI)) in emitProbedAlloca()
9463 BuildMI(MBB, DL, TII->get(SystemZ::BRC)) in emitProbedAlloca()
9466 MBB->addSuccessor(TailMBB); in emitProbedAlloca()
9467 MBB->addSuccessor(DoneMBB); in emitProbedAlloca()
9472 BuildMI(MBB, DL, TII->get(SystemZ::SLGR), SystemZ::R15D) in emitProbedAlloca()
9475 BuildMI(MBB, DL, TII->get(SystemZ::CG)).addReg(SystemZ::R15D) in emitProbedAlloca()
9476 .addReg(SystemZ::R15D).addImm(-8).addReg(PHIReg) in emitProbedAlloca()
9478 MBB->addSuccessor(DoneMBB); in emitProbedAlloca()
9482 BuildMI(*MBB, MBB->begin(), DL, TII->get(TargetOpcode::COPY), DstReg) in emitProbedAlloca()
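// Standalone sketch (a model, not the emitted MachineIR): the blocks above
// allocate in ProbeSize steps and touch each newly exposed page so a stack
// guard page cannot be jumped over.
#include <cstdint>
static void probedAllocaModel(uintptr_t &SP, uint64_t NumBytes,
                              uint64_t ProbeSize) {
  while (NumBytes > ProbeSize) { // LoopTestMBB / LoopBodyMBB
    SP -= ProbeSize;
    volatile char Probe = *(volatile char *)SP; // probe the new page
    (void)Probe;
    NumBytes -= ProbeSize;
  }
  if (NumBytes) { // TailTestMBB / TailMBB
    SP -= NumBytes;
    volatile char Probe = *(volatile char *)SP;
    (void)Probe;
  }
}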
9495 DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL)); in getBackchainAddress()
9657 // compiler from crashing when list-ilp is used.
9659 SystemZTargetLowering::getRepRegClassFor(MVT VT) const { in getRepRegClassFor()
9660 if (VT == MVT::Untyped) in getRepRegClassFor()
9662 return TargetLowering::getRepRegClassFor(VT); in getRepRegClassFor()
9669   The rounding method is in FPC Byte 3 bits 6-7, and has the following in lowerGET_ROUNDING()
9674   settings: 00 Round to nearest, 01 Round to 0, 10 Round to +inf, 11 Round to -inf. in lowerGET_ROUNDING()
9677   GET_ROUNDING, on the other hand, expects: -1 Undefined, 0 Round to 0, in lowerGET_ROUNDING()
9681   1 Round to nearest, 2 Round to +inf, 3 Round to -inf. in lowerGET_ROUNDING()
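// Standalone sketch: one way to compute the FPC-to-GET_ROUNDING translation
// as a bit trick rather than a table; flipping the low bit when the high
// bit is clear maps 00->1, 01->0, 10->2, 11->3.
static int fpcToGetRounding(unsigned FPCBits) { // FPCBits = FPC byte 3 & 3
  return int(FPCBits ^ ((~FPCBits >> 1) & 1));
}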
9707 EVT VT = Op.getValueType(); in lowerVECREDUCE_ADD() local
9716 SDValue Zero = DAG.getSplatBuildVector(OpVT, DL, DAG.getConstant(0, DL, VT)); in lowerVECREDUCE_ADD()
9736 ISD::EXTRACT_VECTOR_ELT, DL, VT, DAG.getBitcast(OpVT, Op), in lowerVECREDUCE_ADD()
9737 DAG.getConstant(OpVT.getVectorNumElements() - 1, DL, MVT::i32)); in lowerVECREDUCE_ADD()
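// Standalone sketch: the scalar meaning of this lowering, a modular sum of
// all lanes, with the hardware result then read back out of one element of
// the accumulator vector.
#include <cstdint>
#include <vector>
static uint32_t vecReduceAddModel(const std::vector<uint32_t> &Lanes) {
  uint32_t Sum = 0;
  for (uint32_t L : Lanes)
    Sum += L; // unsigned wrap-around matches vector integer addition
  return Sum;
}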