Lines matching "nand-style" in SelectionDAGBuilder.cpp

1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
119 /// LimitFloatPrecision - Generate low-precision inline sequences for
124 InsertAssertAlign("insert-assert-align", cl::init(true),
129 LimitFPPrecision("limit-float-precision",
130 cl::desc("Generate low-precision inline sequences "
136 "switch-peel-threshold", cl::Hidden, cl::init(66),
142 // DAG-based analysis from blowing up. For example, alias analysis and
149 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
163 /// getCopyFromParts - Create a value that contains the specified legal parts
218 // Assemble the trailing non-power-of-2 part. in getCopyFromParts()
219 unsigned OddParts = NumParts - RoundParts; in getCopyFromParts()
282 // zero or sign-extension. in getCopyFromParts()
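A note on the mechanism matched above: getCopyFromParts rebuilds a value from its register-sized parts, low part first, assembling any trailing non-power-of-2 remainder separately and then zero- or sign-extending the result. A minimal scalar sketch of the concatenation step (plain C++, not the SelectionDAG API; assembleFromParts is a hypothetical name):

    #include <cstdint>

    // Rebuild a 64-bit value from two 32-bit parts, low part first, as an
    // expanded integer is reassembled from its legal parts.
    uint64_t assembleFromParts(uint32_t Lo, uint32_t Hi) {
      return ((uint64_t)Hi << 32) | Lo;
    }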
330 if (CI->isInlineAsm()) in diagnosePossiblyInvalidConstraint()
336 /// getCopyFromPartsVector - Create a value that contains the specified legal
353 // Handle a multi-element vector. in getCopyFromPartsVector()
423 // have a vector widening case (e.g. <2 x float> -> <4 x float>). in getCopyFromPartsVector()
441 // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>). in getCopyFromPartsVector()
470 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion"); in getCopyFromPartsVector()
474 // Handle cases such as i8 -> <1 x i1> in getCopyFromPartsVector()
503 /// getCopyToParts - Create a series of nodes that contain the specified value
533 assert(NumParts == 1 && "No-op copy with multiple parts!"); in getCopyToParts()
574 // The value may have changed - recompute ValueVT. in getCopyToParts()
582 "scalar-to-vector conversion failed"); in getCopyToParts()
591 if (NumParts & (NumParts - 1)) { in getCopyToParts()
597 unsigned OddParts = NumParts - RoundParts; in getCopyToParts()
605 // The odd parts were reversed by getCopyToParts - unreverse them. in getCopyToParts()
655 // fixed/scalable properties. If a target needs to widen a fixed-length type in widenVectorToPartType()
677 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in in widenVectorToPartType()
682 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef); in widenVectorToPartType()
684 // FIXME: Use CONCAT for 2x -> 4x. in widenVectorToPartType()
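The widening path matched above pads the short operand list with undef elements instead of shuffling. A sketch under the assumption that a std::vector stands in for the BUILD_VECTOR operand list (widenWithUndef is a hypothetical name):

    #include <vector>

    // Append padding until the value has the part type's element count; the
    // real code appends DAG.getUNDEF(ElementVT) rather than a concrete value.
    std::vector<float> widenWithUndef(std::vector<float> Ops, size_t PartNumElts) {
      while (Ops.size() < PartNumElts)
        Ops.push_back(0.0f); // stand-in for an undef element
      return Ops;
    }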
688 /// getCopyToPartsVector - Create a series of nodes that contain the specified
704 // Bitconvert vector->vector case. in getCopyToPartsVector()
759 // Handle a multi-element vector. in getCopyToPartsVector()
794 // Bitconvert vector->vector case. in getCopyToPartsVector()
817 // This does something sensible for scalable vectors - see the in getCopyToPartsVector()
924 unsigned NumSignBits = LOI->NumSignBits; in getCopyFromRegs()
925 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros(); in getCopyFromRegs()
940 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits); in getCopyFromRegs()
944 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1); in getCopyFromRegs()
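The two computations above recover the narrowest integer type the register provably holds: NumZeroBits known leading zeros leave an unsigned value of width RegSize - NumZeroBits, and NumSignBits known sign-bit copies leave a signed value of width RegSize - NumSignBits + 1, since one copy of the sign bit must survive. A worked check of the signed case:

    #include <cassert>

    // E.g. a 64-bit register with 33 known sign bits holds a value that is
    // exactly representable as i32 (64 - 33 + 1 == 32).
    unsigned assertedSignedWidth(unsigned RegSize, unsigned NumSignBits) {
      assert(NumSignBits >= 1 && NumSignBits <= RegSize);
      return RegSize - NumSignBits + 1;
    }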
1015 Chain = Chains[NumRegs-1]; in getCopyToRegs()
1037 Flag.setRegClass(RC->getID()); in AddInlineAsmOperands()
1096 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout()); in init()
1130 assert(Pending[i].getNode()->getNumOperands() > 1); in updateRoot()
1131 if (Pending[i].getNode()->getOperand(0) == Root) in updateRoot()
1186 (Address->use_empty() && !isa<Argument>(Address))) { in handleDebugDeclare()
1189 << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n"); in handleDebugDeclare()
1193 bool IsParameter = Variable->isParameter() || isa<Argument>(Address); in handleDebugDeclare()
1202 Address = BCI->getOperand(0); in handleDebugDeclare()
1207 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(), in handleDebugDeclare()
1226 << " (could not emit func-arg dbg_value)\n"); in handleDebugDeclare()
1234 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}. in visitDbgInfo()
1237 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}. in visitDbgInfo()
1238 for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I); in visitDbgInfo()
1240 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID); in visitDbgInfo()
1241 dropDanglingDebugInfo(Var, It->Expr); in visitDbgInfo()
1242 if (It->Values.isKillLocation(It->Expr)) { in visitDbgInfo()
1243 handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder); in visitDbgInfo()
1246 SmallVector<Value *> Values(It->Values.location_ops()); in visitDbgInfo()
1247 if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder, in visitDbgInfo()
1248 It->Values.hasArgList())) { in visitDbgInfo()
1250 for (Value *V : It->Values.location_ops()) in visitDbgInfo()
1253 FnVarLocs->getDILocalVariable(It->VariableID), in visitDbgInfo()
1254 It->Expr, Vals.size() > 1, It->DL, SDNodeOrder); in visitDbgInfo()
1268 // If there is any debug-info attached to this instruction, in the form of in visitDbgInfo()
1269 // DbgRecord non-instruction debug-info records. in visitDbgInfo()
1272 assert(DLR->getLabel() && "Missing label"); in visitDbgInfo()
1274 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder); in visitDbgInfo()
1330 // Increase the SDNodeOrder if dealing with a non-debug instruction. in visit()
1357 DAG.addPCSections(It->second.getNode(), PCSectionsMD); in visit()
1359 DAG.addMMRAMetadata(It->second.getNode(), MMRA); in visit()
1364 << I.getModule()->getName() << "]\n"; in visit()
1398 auto *Undef = UndefValue::get(V->getType()); in handleDanglingVariadicDebugInfo()
1430 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) { in dropDanglingDebugInfo()
1451 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1459 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second; in resolveDanglingDebugInfo()
1462 unsigned ValSDNodeOrder = Val.getNode()->getIROrder(); in resolveDanglingDebugInfo()
1466 assert(Variable->isValidLocationForIntrinsic(DL) && in resolveDanglingDebugInfo()
1467 "Expected inlined-at fields to agree"); in resolveDanglingDebugInfo()
1498 auto Undef = UndefValue::get(V->getType()); in resolveDanglingDebugInfo()
1519 // Currently we consider only dbg.value intrinsics -- we tell the salvager in salvageUnresolvedDbgValue()
1528 // a non-instruction is seen, such as a constant expression or global in salvageUnresolvedDbgValue()
1536 Expr->getNumLocationOperands(), Ops, in salvageUnresolvedDbgValue()
1566 auto *Undef = UndefValue::get(OrigV->getType()); in salvageUnresolvedDbgValue()
1607 if (CE->getOpcode() == Instruction::IntToPtr) { in handleDebugValue()
1608 LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0))); in handleDebugValue()
1617 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second)); in handleDebugValue()
1628 // Only emit func arg dbg value for non-variadic dbg.values for now. in handleDebugValue()
1645 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex())); in handleDebugValue()
1659 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt(); in handleDebugValue()
1664 // We still want the value to appear for the user if possible -- if it has in handleDebugValue()
1668 unsigned Reg = VMI->second; in handleDebugValue()
1671 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, in handleDebugValue()
1672 V->getType(), std::nullopt); in handleDebugValue()
1679 if (auto VarSize = Var->getSizeInBits()) in handleDebugValue()
1681 if (auto Fragment = Expr->getFragmentInfo()) in handleDebugValue()
1682 BitsToDescribe = Fragment->SizeInBits; in handleDebugValue()
1690 ? BitsToDescribe - Offset in handleDebugValue()
1721 // Try to fixup any remaining dangling debug info -- and drop it if we can't. in resolveOrClearDbgInfo()
1728 /// getCopyFromRegs - If there was virtual register allocated for the value V
1735 Register InReg = It->second; in getCopyFromRegs()
1749 /// getValue - Return an SDValue for the given Value.
1759 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType())) in getValue()
1769 /// getNonRegisterValue - Return an SDValue for the given Value, but
1780 N->setDebugLoc(DebugLoc()); in getNonRegisterValue()
1792 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1798 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true); in getValueImpl()
1808 getValue(CPA->getPointer()), getValue(CPA->getKey()), in getValueImpl()
1809 getValue(CPA->getAddrDiscriminator()), in getValueImpl()
1810 getValue(CPA->getDiscriminator())); in getValueImpl()
1814 unsigned AS = V->getType()->getPointerAddressSpace(); in getValueImpl()
1825 if (isa<UndefValue>(C) && !V->getType()->isAggregateType()) in getValueImpl()
1829 visit(CE->getOpcode(), *CE); in getValueImpl()
1837 for (const Use &U : C->operands()) { in getValueImpl()
1843 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) in getValueImpl()
1853 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { in getValueImpl()
1854 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode(); in getValueImpl()
1857 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) in getValueImpl()
1861 if (isa<ArrayType>(CDS->getType())) in getValueImpl()
1866 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { in getValueImpl()
1871 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs); in getValueImpl()
1893 return getValue(Equiv->getGlobalValue()); in getValueImpl()
1896 return getValue(NC->getGlobalValue()); in getValueImpl()
1899 assert(C->isNullValue() && "Can only zero this target type!"); in getValueImpl()
1904 VectorType *VecTy = cast<VectorType>(V->getType()); in getValueImpl()
1910 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements(); in getValueImpl()
1912 Ops.push_back(getValue(CV->getOperand(i))); in getValueImpl()
1919 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType()); in getValueImpl()
1940 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType())); in getValueImpl()
1943 // If this is an instruction which fast-isel has deferred, select it now. in getValueImpl()
1948 Inst->getType(), std::nullopt); in getValueImpl()
1954 return DAG.getMDNode(cast<MDNode>(MD->getMetadata())); in getValueImpl()
1963 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); in visitCatchPad()
1969 CatchPadMBB->setIsEHScopeEntry(); in visitCatchPad()
1972 CatchPadMBB->setIsEHFuncletEntry(); in visitCatchPad()
1976 // Update machine-CFG edge. in visitCatchRet()
1978 FuncInfo.MBB->addSuccessor(TargetMBB); in visitCatchRet()
1979 TargetMBB->setIsEHCatchretTarget(true); in visitCatchRet()
1982 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); in visitCatchRet()
1985 // If this is not a fall-through branch or optimizations are switched off, in visitCatchRet()
2001 SuccessorColor = &FuncInfo.Fn->getEntryBlock(); in visitCatchRet()
2003 SuccessorColor = cast<Instruction>(ParentPad)->getParent(); in visitCatchRet()
2018 FuncInfo.MBB->setIsEHScopeEntry(); in visitCleanupPad()
2019 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); in visitCleanupPad()
2021 FuncInfo.MBB->setIsEHFuncletEntry(); in visitCleanupPad()
2022 FuncInfo.MBB->setIsCleanupFuncletEntry(); in visitCleanupPad()
2058 const Instruction *Pad = EHPadBB->getFirstNonPHI(); in findWasmUnwindDestinations()
2062 UnwindDests.back().first->setIsEHScopeEntry(); in findWasmUnwindDestinations()
2067 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { in findWasmUnwindDestinations()
2069 UnwindDests.back().first->setIsEHScopeEntry(); in findWasmUnwindDestinations()
2085 /// The passed-in Prob is the edge probability to EHPadBB.
2092 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); in findUnwindDestinations()
2106 const Instruction *Pad = EHPadBB->getFirstNonPHI(); in findUnwindDestinations()
2116 UnwindDests.back().first->setIsEHScopeEntry(); in findUnwindDestinations()
2117 UnwindDests.back().first->setIsEHFuncletEntry(); in findUnwindDestinations()
2121 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { in findUnwindDestinations()
2125 UnwindDests.back().first->setIsEHFuncletEntry(); in findUnwindDestinations()
2127 UnwindDests.back().first->setIsEHScopeEntry(); in findUnwindDestinations()
2129 NewEHPadBB = CatchSwitch->getUnwindDest(); in findUnwindDestinations()
2136 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB); in findUnwindDestinations()
2148 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest) in visitCleanupRet()
2152 UnwindDest.first->setIsEHPad(); in visitCleanupRet()
2155 FuncInfo.MBB->normalizeSuccProbs(); in visitCleanupRet()
2159 FuncInfo.MBBMap[I.getCleanupPad()->getParent()]; in visitCleanupRet()
2183 if (I.getParent()->getTerminatingDeoptimizeCall()) { in visitRet()
2190 const Function *F = I.getParent()->getParent(); in visitRet()
2197 PointerType::get(F->getContext(), in visitRet()
2207 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs, in visitRet()
2212 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType()); in visitRet()
2233 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs); in visitRet()
2238 const Function *F = I.getParent()->getParent(); in visitRet()
2241 I.getOperand(0)->getType(), F->getCallingConv(), in visitRet()
2245 if (F->getAttributes().hasRetAttr(Attribute::SExt)) in visitRet()
2247 else if (F->getAttributes().hasRetAttr(Attribute::ZExt)) in visitRet()
2250 LLVMContext &Context = F->getContext(); in visitRet()
2251 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg); in visitRet()
2259 CallingConv::ID CC = F->getCallingConv(); in visitRet()
2273 if (I.getOperand(0)->getType()->isPointerTy()) { in visitRet()
2276 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace()); in visitRet()
2281 if (j == NumValues - 1) in visitRet()
2304 const Function *F = I.getParent()->getParent(); in visitRet()
2306 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) { in visitRet()
2334 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2339 if (V->getType()->isEmptyTy()) in CopyToExportRegsIfNeeded()
2344 assert((!V->use_empty() || isa<CallBrInst>(V)) && in CopyToExportRegsIfNeeded()
2346 CopyValueToVirtualRegister(V, VMI->second); in CopyToExportRegsIfNeeded()
2350 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2370 if (VI->getParent() == FromBB) in isExportableFromCurrentBlock()
2380 if (FromBB->isEntryBlock()) in isExportableFromCurrentBlock()
2396 const BasicBlock *SrcBB = Src->getBasicBlock(); in getEdgeProbability()
2397 const BasicBlock *DstBB = Dst->getBasicBlock(); in getEdgeProbability()
2404 return BPI->getEdgeProbability(SrcBB, DstBB); in getEdgeProbability()
2411 Src->addSuccessorWithoutProb(Dst); in addSuccessorWithProb()
2415 Src->addSuccessor(Dst, Prob); in addSuccessorWithProb()
2421 return I->getParent() == BB; in InBlock()
2425 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2437 const BasicBlock *BB = CurBB->getBasicBlock(); in EmitBranchForMergedCondition()
2446 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) && in EmitBranchForMergedCondition()
2447 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) { in EmitBranchForMergedCondition()
2451 InvertCond ? IC->getInversePredicate() : IC->getPredicate(); in EmitBranchForMergedCondition()
2456 InvertCond ? FC->getInversePredicate() : FC->getPredicate(); in EmitBranchForMergedCondition()
2462 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr, in EmitBranchForMergedCondition()
2464 SL->SwitchCases.push_back(CB); in EmitBranchForMergedCondition()
2473 SL->SwitchCases.push_back(CB); in EmitBranchForMergedCondition()
2493 if (Necessary->contains(I)) in collectInstructionDeps()
2498 if (!Deps->try_emplace(I, false).second) in collectInstructionDeps()
2501 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx) in collectInstructionDeps()
2502 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary, in collectInstructionDeps()
2534 if (BPI->isEdgeHot(I.getParent(), IfTrue)) in shouldKeepJumpConditionsTogether()
2536 else if (BPI->isEdgeHot(I.getParent(), IfFalse)) in shouldKeepJumpConditionsTogether()
2547 CostThresh -= Params.UnlikelyBias; in shouldKeepJumpConditionsTogether()
2556 // Use map for stable iteration (to avoid non-determinism of iteration of in shouldKeepJumpConditionsTogether()
2579 for (const auto *U : Ins->users()) { in shouldKeepJumpConditionsTogether()
2635 InBlock(NotCond, CurBB->getBasicBlock())) { in FindMergedConditions()
2665 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse(); in FindMergedConditions()
2666 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() || in FindMergedConditions()
2667 !InBlock(BOpOp0, CurBB->getBasicBlock()) || in FindMergedConditions()
2668 !InBlock(BOpOp1, CurBB->getBasicBlock())) { in FindMergedConditions()
2677 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock()); in FindMergedConditions()
2678 CurBB->getParent()->insert(++BBI, TmpBB); in FindMergedConditions()
2765 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 in ShouldEmitAsBranches()
2766 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 in ShouldEmitAsBranches()
2770 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { in ShouldEmitAsBranches()
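The transform those comments describe, written out for scalar pointer bits (a sketch; eitherNonNull is a hypothetical name):

    #include <cstdint>

    // (X != null) | (Y != null) collapses into one compare of the OR'd
    // values, since X | Y is zero exactly when both inputs are zero.
    bool eitherNonNull(uintptr_t X, uintptr_t Y) {
      return (X | Y) != 0; // same truth table as (X != 0) | (Y != 0)
    }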
2783 // Update machine-CFG edges. in visitBr()
2787 // Update machine-CFG edges. in visitBr()
2788 BrMBB->addSuccessor(Succ0MBB); in visitBr()
2790 // If this is not a fall-through branch or optimizations are switched off, in visitBr()
2810 // As long as jumps are not expensive (exceptions for multi-use logic ops, in visitBr()
2827 BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) { in visitBr()
2850 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); in visitBr()
2853 if (ShouldEmitAsBranches(SL->SwitchCases)) { in visitBr()
2854 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) { in visitBr()
2855 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS); in visitBr()
2856 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS); in visitBr()
2860 visitSwitchCase(SL->SwitchCases[0], BrMBB); in visitBr()
2861 SL->SwitchCases.erase(SL->SwitchCases.begin()); in visitBr()
2867 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) in visitBr()
2868 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB); in visitBr()
2870 SL->SwitchCases.clear(); in visitBr()
2883 /// visitSwitchCase - Emits the necessary code to represent a single node in
2894 SwitchBB->normalizeSuccProbs(); in visitSwitchCase()
2903 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType()); in visitSwitchCase()
2920 // values are zero-extended. This breaks signed comparisons so truncate in visitSwitchCase()
2931 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue(); in visitSwitchCase()
2932 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue(); in visitSwitchCase()
2937 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { in visitSwitchCase()
2944 DAG.getConstant(High-Low, dl, VT), ISD::SETULE); in visitSwitchCase()
2954 SwitchBB->normalizeSuccProbs(); in visitSwitchCase()
2979 /// visitJumpTable - Emit JumpTable node in the current MBB
2983 assert(JT.Reg != -1U && "Should lower JT Header first!"); in visitJumpTable()
2992 /// visitJumpTableHeader - This function emits necessary code to produce index
3027 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT); in visitJumpTableHeader()
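Both emissions above (the case-range test in visitSwitchCase and the bounds check in visitJumpTableHeader) use the same rewrite: the two-sided test Low <= X <= High becomes a single unsigned compare after biasing by Low, and the subtract is skipped when Low is already the minimum value. A scalar sketch:

    #include <cstdint>

    // Bias by Low, then one unsigned compare (SETULE); the arithmetic is
    // done in unsigned form so the wraparound is well defined.
    bool inRange(int32_t X, int32_t Low, int32_t High) {
      return (uint32_t)X - (uint32_t)Low <= (uint32_t)High - (uint32_t)Low;
    }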
3088 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo(); in visitSPDescriptorParent()
3094 const Module &M = *ParentBB->getParent()->getFunction().getParent(); in visitSPDescriptorParent()
3112 FunctionType *FnTy = GuardCheckFn->getFunctionType(); in visitSPDescriptorParent()
3113 assert(FnTy->getNumParams() == 1 && "Invalid function signature"); in visitSPDescriptorParent()
3118 Entry.Ty = FnTy->getParamType(0); in visitSPDescriptorParent()
3119 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg)) in visitSPDescriptorParent()
3126 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(), in visitSPDescriptorParent()
3188 // WebAssembly needs an unreachable instruction after a non-returning call, in visitSPDescriptorFailure()
3197 /// visitBitTestHeader - This function emits necessary code to produce value
3238 SwitchBB->normalizeSuccProbs(); in visitBitTestHeader()
3260 /// visitBitTestCase - this function produces one "bit test"
3305 SwitchBB->normalizeSuccProbs(); in visitBitTestCase()
3341 else if (Fn && Fn->isIntrinsic()) { in visitInvoke()
3342 switch (Fn->getIntrinsicID()) { in visitInvoke()
3353 // so the dtor funclet is not removed by opts in visitInvoke()
3354 EHPadMBB->setMachineBlockAddressTaken(); in visitInvoke()
3401 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) in visitInvoke()
3408 UnwindDest.first->setIsEHPad(); in visitInvoke()
3411 InvokeMBB->normalizeSuccProbs(); in visitInvoke()
3441 Target->setIsInlineAsmBrIndirectTarget(); in visitCallBr()
3442 Target->setMachineBlockAddressTaken(); in visitCallBr()
3443 Target->setLabelMustBeEmitted(); in visitCallBr()
3448 CallBrMBB->normalizeSuccProbs(); in visitCallBr()
3461 assert(FuncInfo.MBB->isEHPad() && in visitLandingPad()
3467 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn(); in visitLandingPad()
3476 if (LP.getType()->isTokenTy()) in visitLandingPad()
3482 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); in visitLandingPad()
3484 // Get the two live-in registers as SDValues. The physregs have already been in visitLandingPad()
3511 for (JumpTableBlock &JTB : SL->JTCases) in UpdateSplitBlock()
3516 for (BitTestBlock &BTB : SL->BitTestCases) in UpdateSplitBlock()
3524 // Update machine-CFG edges with unique successors. in visitIndirectBr()
3535 IndirectBrMBB->normalizeSuccProbs(); in visitIndirectBr()
3548 Call && Call->doesNotReturn()) { in visitUnreachable()
3552 if (Call->isNonContinuableTrap()) in visitUnreachable()
3573 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap()); in visitBinary()
3574 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap()); in visitBinary()
3577 Flags.setExact(ExactOp->isExact()); in visitBinary()
3579 Flags.setDisjoint(DisjointOp->isDisjoint()); in visitBinary()
3599 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { in visitShift()
3613 nuw = OFBinOp->hasNoUnsignedWrap(); in visitShift()
3614 nsw = OFBinOp->hasNoSignedWrap(); in visitShift()
3618 exact = ExactOp->isExact(); in visitShift()
3635 cast<PossiblyExactOperator>(&I)->isExact()); in visitSDiv()
3648 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); in visitICmp()
3651 // are zero-extended. This breaks signed comparisons so truncate back to the in visitICmp()
3670 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath) in visitFCmp()
3685 return llvm::all_of(Cond->users(), [](const Value *V) { in hasOnlySelectUsers()
3732 // ValueTracking's select pattern matching does not account for -0.0, in visitSelect()
3734 // -0.0 is less than +0.0. in visitSelect()
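The caveat above in concrete terms: with X = -0.0 and Y = +0.0, the select pattern "X < Y ? X : Y" yields +0.0 because the compare is false, while FMINIMUM-style semantics order -0.0 below +0.0 and would yield -0.0, so the two forms are not interchangeable. A scalar illustration:

    // selectMin(-0.0, +0.0) returns +0.0; a minimum that specifies
    // -0.0 < +0.0 (as FMINIMUM does) would return -0.0 instead.
    double selectMin(double X, double Y) { return X < Y ? X : Y; }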
3803 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i); in visitSelect()
3816 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags); in visitSelect()
3825 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). in visitTrunc()
3833 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). in visitZExt()
3841 Flags.setNonNeg(PNI->hasNonNeg()); in visitZExt()
3856 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). in visitSExt()
3865 // FPTrunc is never a no-op cast, no need to check in visitFPTrunc()
3876 // FPExt is never a no-op cast, no need to check in visitFPExt()
3884 // FPToUI is never a no-op cast, no need to check in visitFPToUI()
3892 // FPToSI is never a no-op cast, no need to check in visitFPToSI()
3900 // UIToFP is never a no-op cast, no need to check in visitUIToFP()
3906 Flags.setNonNeg(PNI->hasNonNeg()); in visitUIToFP()
3912 // SIToFP is never a no-op cast, no need to check in visitSIToFP()
3921 // We can either truncate, zero extend, or no-op, accordingly. in visitPtrToInt()
3927 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); in visitPtrToInt()
3935 // We can either truncate, zero extend, or no-op, accordingly. in visitIntToPtr()
3952 // either a BITCAST or a no-op. in visitBitCast()
3961 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false, in visitBitCast()
3973 unsigned SrcAS = SV->getType()->getPointerAddressSpace(); in visitAddrSpaceCast()
3974 unsigned DestAS = I.getType()->getPointerAddressSpace(); in visitAddrSpaceCast()
4008 Mask = SVI->getShuffleMask(); in visitShuffleVector()
4027 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation in visitShuffleVector()
4028 // for targets that support a SPLAT_VECTOR for non-scalable vector types. in visitShuffleVector()
4050 SmallVector<int, 8> ConcatSrcs(NumConcat, -1); in visitShuffleVector()
4101 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1); in visitShuffleVector()
4105 Idx -= SrcNumElts - PaddedMaskNumElts; in visitShuffleVector()
4124 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from in visitShuffleVector()
4133 Idx -= SrcNumElts; in visitShuffleVector()
4168 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts; in visitShuffleVector()
4170 Idx -= StartIdx[0]; in visitShuffleVector()
4190 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts; in visitShuffleVector()
4207 Type *ValTy = Op1->getType(); in visitInsertValue()
4240 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex); in visitInsertValue()
4254 Type *AggTy = Op0->getType(); in visitExtractValue()
4277 Values[i - LinearIndex] = in visitExtractValue()
4279 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) : in visitExtractValue()
4290 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace(); in visitGetElementPtr()
4295 // Normalize Vector GEP - all scalar operands should be converted to the in visitGetElementPtr()
4297 bool IsVectorGEP = I.getType()->isVectorTy(); in visitGetElementPtr()
4299 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount() in visitGetElementPtr()
4312 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); in visitGetElementPtr()
4316 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field); in visitGetElementPtr()
4343 if (C && isa<VectorType>(C->getType())) in visitGetElementPtr()
4344 C = C->getSplatValue(); in visitGetElementPtr()
4347 if (CI && CI->isZero()) in visitGetElementPtr()
4350 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize); in visitGetElementPtr()
4432 return; // getValue will auto-populate this. in visitAlloca()
4462 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign(); in visitAlloca()
4466 const uint64_t StackAlignMask = StackAlign.value() - 1U; in visitAlloca()
4468 // by adding SA-1 to the size. This doesn't overflow because we're computing in visitAlloca()
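The rounding the comment describes, as a standalone helper (alignUp is a hypothetical name; StackAlign must be a power of two):

    #include <cstdint>

    // Adding StackAlign-1 and masking with its complement rounds Size up to
    // the next multiple of StackAlign.
    uint64_t alignUp(uint64_t Size, uint64_t StackAlign) {
      uint64_t Mask = StackAlign - 1;  // the StackAlignMask above
      return (Size + Mask) & ~Mask;    // e.g. alignUp(13, 16) == 16
    }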
4481 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)}; in visitAlloca()
4487 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects()); in visitAlloca()
4494 // transforms that are known not to be poison-safe, such as folding logical in getRangeMetadata()
4505 if (CB->hasRetAttr(Attribute::NoUndef)) in getRange()
4506 return CB->getRange(); in getRange()
4523 if (Arg->hasSwiftErrorAttr()) in visitLoad()
4528 if (Alloca->isSwiftError()) in visitLoad()
4558 AA->pointsToConstantMemory(MemoryLocation( in visitLoad()
4562 // Do not serialize (non-volatile) loads of constant memory with anything. in visitLoad()
4567 // Do not serialize non-volatile loads against each other. in visitLoad()
4584 // they are side-effect free or do not alias. The optimizer should really in visitLoad()
4633 SrcV->getType(), ValueVTs, &Offsets, 0); in visitStoreToSwiftError()
4661 !AA->pointsToConstantMemory(MemoryLocation( in visitLoadFromSwiftError()
4693 if (Arg->hasSwiftErrorAttr()) in visitStore()
4698 if (Alloca->isSwiftError()) in visitStore()
4706 SrcV->getType(), ValueVTs, &MemVTs, &Offsets); in visitStore()
4765 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue(); in visitMaskedStore()
4804 TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType()) in visitMaskedStore()
4825 // When the first GEP operand is a single pointer, it is the uniform base we
4826 // are looking for. If the first operand of the GEP is a splat vector, we
4833 SelectionDAG& DAG = SDB->DAG; in getUniformBase()
4837 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); in getUniformBase()
4841 C = C->getSplatValue(); in getUniformBase()
4845 Base = SDB->getValue(C); in getUniformBase()
4847 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); in getUniformBase()
4849 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT); in getUniformBase()
4851 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL)); in getUniformBase()
4856 if (!GEP || GEP->getParent() != CurBB) in getUniformBase()
4859 if (GEP->getNumOperands() != 2) in getUniformBase()
4862 const Value *BasePtr = GEP->getPointerOperand(); in getUniformBase()
4863 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1); in getUniformBase()
4866 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy()) in getUniformBase()
4869 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType()); in getUniformBase()
4878 Base = SDB->getValue(BasePtr); in getUniformBase()
4879 Index = SDB->getValue(IndexVal); in getUniformBase()
4883 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL)); in getUniformBase()
4896 ->getMaybeAlignValue() in visitMaskedScatter()
4907 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); in visitMaskedScatter()
4939 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue(); in visitMaskedLoad()
4970 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); in visitMaskedLoad()
4990 TTI.hasConditionalLoadStoreForType(Src0Operand->getType())) in visitMaskedLoad()
5012 ->getMaybeAlignValue() in visitMaskedGather()
5024 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); in visitMaskedGather()
5093 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; in visitAtomicRMW()
5197 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType()); in visitAtomicStore()
5223 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5231 bool HasChain = !F->doesNotAccessMemory(); in visitTargetIntrinsic()
5232 bool OnlyLoad = HasChain && F->onlyReadsMemory(); in visitTargetIntrinsic()
5236 if (HasChain) { // If this intrinsic has side-effects, chainify it. in visitTargetIntrinsic()
5267 EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true); in visitTargetIntrinsic()
5269 assert(CI->getBitWidth() <= 64 && in visitTargetIntrinsic()
5286 // Propagate fast-math-flags from IR to node(s). in visitTargetIntrinsic()
5296 auto *Token = Bundle->Inputs[0].get(); in visitTargetIntrinsic()
5322 } else if (!I.getType()->isVoidTy()) { in visitTargetIntrinsic()
5329 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); in visitTargetIntrinsic()
5336 if (!I.getType()->isVoidTy()) { in visitTargetIntrinsic()
5352 /// GetSignificand - Get the significand and build it into a floating-point
5366 /// GetExponent - Get the exponent:
5368 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
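Scalar models of the two helpers documented above, assuming IEEE-754 single precision and a normal, positive input (std::memcpy stands in for the DAG-level bitcast):

    #include <cstdint>
    #include <cstring>

    // GetExponent: isolate the 8 exponent bits and remove the bias of 127.
    float getExponentScalar(float Op) {
      uint32_t Bits;
      std::memcpy(&Bits, &Op, sizeof(Bits));
      return (float)((int)((Bits & 0x7f800000u) >> 23) - 127);
    }

    // GetSignificand: keep the 23 mantissa bits and force a zero exponent,
    // yielding a float in [1.0, 2.0).
    float getSignificandScalar(float Op) {
      uint32_t Bits;
      std::memcpy(&Bits, &Op, sizeof(Bits));
      uint32_t SigBits = (Bits & 0x007fffffu) | 0x3f800000u;
      float Out;
      std::memcpy(&Out, &SigBits, sizeof(Out));
      return Out;
    }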
5384 /// getF32Constant - Get 32-bit floating point constant.
5393 // TODO: What fast-math-flags should be set on the floating-point nodes? in getLimitedPrecisionExp2()
5398 // FractionalPartOfX = t0 - (float)IntegerPartOfX; in getLimitedPrecisionExp2()
5411 // For floating-point precision of 6: in getLimitedPrecisionExp2()
5426 // For floating-point precision of 12: in getLimitedPrecisionExp2()
5431 // (0.224338339f + 0.792043434e-1f * x) * x) * x; in getLimitedPrecisionExp2()
5445 // For floating-point precision of 18: in getLimitedPrecisionExp2()
5451 // (0.554906021e-1f + in getLimitedPrecisionExp2()
5452 // (0.961591928e-2f + in getLimitedPrecisionExp2()
5453 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; in getLimitedPrecisionExp2()
5454 // error 2.47208000*10^(-7), which is better than 18 bits in getLimitedPrecisionExp2()
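The overall shape of the expansion these comments belong to, as scalar C++: split x into integer and fractional parts, approximate 2^frac over [0,1) with a short Horner-form polynomial, and fold the integer part back in through the exponent. The polynomial coefficients below are illustrative placeholders, not LLVM's (most of the real ones are elided in this listing):

    #include <cmath>

    float limitedPrecisionExp2(float X) {
      int IntegerPartOfX = (int)std::floor(X);
      float x = X - (float)IntegerPartOfX;     // fractional part in [0, 1)
      // Placeholder degree-2 fit for 2^x on [0, 1); the real code uses the
      // higher-degree fits quoted in the comments above.
      float TwoToFrac = 1.0f + (0.6565f + 0.3435f * x) * x;
      return std::ldexp(TwoToFrac, IntegerPartOfX); // scale by 2^IntegerPartOfX
    }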
5482 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5483 /// limited-precision mode.
5494 // TODO: What fast-math-flags should be set here? in expandExp()
5504 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5505 /// limited-precision mode.
5508 // TODO: What fast-math-flags should be set on the floating-point nodes? in expandLog()
5520 // Get the significand and build it into a floating-point number with in expandLog()
5526 // For floating-point precision of 6: in expandLog()
5529 // -1.1609546f + in expandLog()
5530 // (1.4034025f - 0.23903021f * x) * x; in expandLog()
5541 // For floating-point precision of 12: in expandLog()
5544 // -1.7417939f + in expandLog()
5546 // (-1.4699568f + in expandLog()
5547 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; in expandLog()
5564 // For floating-point precision of 18: in expandLog()
5567 // -2.1072184f + in expandLog()
5569 // (-3.7029485f + in expandLog()
5571 // (-0.87823314f + in expandLog()
5572 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; in expandLog()
5603 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5604 /// limited-precision mode.
5607 // TODO: What fast-math-flags should be set on the floating-point nodes? in expandLog2()
5616 // Get the significand and build it into a floating-point number with in expandLog2()
5621 // floating-point for various degrees of accuracy over [1,2]. in expandLog2()
5624 // For floating-point precision of 6: in expandLog2()
5626 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; in expandLog2()
5637 // For floating-point precision of 12: in expandLog2()
5640 // -2.51285454f + in expandLog2()
5642 // (-2.12067489f + in expandLog2()
5643 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; in expandLog2()
5660 // For floating-point precision of 18: in expandLog2()
5663 // -3.0400495f + in expandLog2()
5665 // (-5.3420409f + in expandLog2()
5667 // (-1.2669343f + in expandLog2()
5668 // (0.27515199f - in expandLog2()
5669 // 0.25691327e-1f * x) * x) * x) * x) * x) * x; in expandLog2()
5700 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5701 /// limited-precision mode.
5704 // TODO: What fast-math-flags should be set on the floating-point nodes? in expandLog10()
5715 // Get the significand and build it into a floating-point number with in expandLog10()
5721 // For floating-point precision of 6: in expandLog10()
5724 // -0.50419619f + in expandLog10()
5725 // (0.60948995f - 0.10380950f * x) * x; in expandLog10()
5736 // For floating-point precision of 12: in expandLog10()
5739 // -0.64831180f + in expandLog10()
5741 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; in expandLog10()
5755 // For floating-point precision of 18: in expandLog10()
5758 // -0.84299375f + in expandLog10()
5760 // (-1.0688956f + in expandLog10()
5762 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; in expandLog10()
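All three expansions (expandLog, expandLog2, expandLog10) share one decomposition: write Op = m * 2^e with m in [1,2) via the exponent/significand helpers above, use log2(Op) = e + log2(m), and replace log2(m) by the quoted polynomial; the non-base-2 variants then scale by a constant. A sketch with std::log2 standing in for the elided polynomial:

    #include <cmath>

    float limitedPrecisionLog2(float Op) {
      int e;
      float m = std::frexp(Op, &e);   // Op = m * 2^e with m in [0.5, 1)
      m *= 2.0f;                      // renormalize m into [1, 2)
      e -= 1;
      return (float)e + std::log2(m); // the polynomial replaces std::log2
    }
    // ln(Op) = log2(Op) * ln(2) and log10(Op) = log2(Op) * log10(2).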
5790 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5791 /// limited-precision mode.
5802 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5803 /// limited-precision mode with x == 10.0f.
5812 IsExp10 = LHSC->isExactlyValue(Ten); in expandPow()
5816 // TODO: What fast-math-flags should be set on the FMUL node? in expandPow()
5832 /// ExpandPowI - Expand an llvm.powi intrinsic.
5839 unsigned Val = RHSC->getSExtValue(); in ExpandPowI()
5841 // powi(x, 0) -> 1.0 in ExpandPowI()
5849 Val = -Val; in ExpandPowI()
5856 // TODO: Intrinsics should have fast-math-flags that propagate to these in ExpandPowI()
5873 if (RHSC->getSExtValue() < 0) in ExpandPowI()
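A scalar model of the powi expansion (expandPowIScalar is a hypothetical name): exponent 0 folds to 1.0, a negative exponent computes the positive power and takes the reciprocal, and the power itself is built by square-and-multiply:

    double expandPowIScalar(double X, int Exp) {
      if (Exp == 0)
        return 1.0;                     // powi(x, 0) -> 1.0
      unsigned Val = Exp < 0 ? 0u - (unsigned)Exp : (unsigned)Exp;
      double Res = 1.0, Cur = X;
      while (Val) {
        if (Val & 1)
          Res *= Cur;                   // fold in this bit's power
        Cur *= Cur;                     // square for the next bit
        Val >>= 1;
      }
      return Exp < 0 ? 1.0 / Res : Res; // negative exponent -> reciprocal
    }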
5909 unsigned ScaleInt = Scale->getAsZExtVal(); in expandDivFix()
5944 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5952 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(), in getUnderlyingArgRegs()
5965 for (SDValue Op : N->op_values()) in getUnderlyingArgRegs()
5995 auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF); in EmitFuncArgumentDbgValue()
6012 auto &Inst = TII->get(TargetOpcode::DBG_VALUE); in EmitFuncArgumentDbgValue()
6021 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front(); in EmitFuncArgumentDbgValue()
6038 bool VariableIsFunctionInputArg = Variable->isParameter() && in EmitFuncArgumentDbgValue()
6039 !DL->getInlinedAt(); in EmitFuncArgumentDbgValue()
6076 unsigned ArgNo = Arg->getArgNo(); in EmitFuncArgumentDbgValue()
6116 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) in EmitFuncArgumentDbgValue()
6117 Op = MachineOperand::CreateFI(FINode->getIndex()); in EmitFuncArgumentDbgValue()
6130 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) { in EmitFuncArgumentDbgValue()
6131 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits; in EmitFuncArgumentDbgValue()
6139 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset; in EmitFuncArgumentDbgValue()
6150 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder); in EmitFuncArgumentDbgValue()
6166 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second, in EmitFuncArgumentDbgValue()
6167 V->getType(), std::nullopt); in EmitFuncArgumentDbgValue()
6173 Op = MachineOperand::CreateReg(VMI->second, false); in EmitFuncArgumentDbgValue()
6186 assert(Variable->isValidLocationForIntrinsic(DL) && in EmitFuncArgumentDbgValue()
6187 "Expected inlined-at fields to agree"); in EmitFuncArgumentDbgValue()
6190 if (Op->isReg()) in EmitFuncArgumentDbgValue()
6191 NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect); in EmitFuncArgumentDbgValue()
6193 NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op, in EmitFuncArgumentDbgValue()
6218 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(), in getDbgValue()
6261 ->getCalledFunction() in FindPreallocatedCall()
6262 ->getIntrinsicID() == Intrinsic::call_preallocated_setup && in FindPreallocatedCall()
6264 for (const auto *U : PreallocatedSetup->users()) { in FindPreallocatedCall()
6266 const Function *Fn = UseCall->getCalledFunction(); in FindPreallocatedCall()
6267 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) { in FindPreallocatedCall()
6280 if (!Expr->isEntryValue() || !hasSingleElement(Values)) in visitEntryValueDbgValue()
6285 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync)); in visitEntryValueDbgValue()
6294 Register ArgVReg = ArgIt->getSecond(); in visitEntryValueDbgValue()
6296 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins()) in visitEntryValueDbgValue()
6321 auto *Token = Bundle->Inputs[0].get(); in visitConvergenceControl()
6355 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); in visitVectorHistogram()
6437 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); in visitIntrinsicCall()
6450 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); in visitIntrinsicCall()
6556 Type *LengthTy = MI.getLength()->getType(); in visitIntrinsicCall()
6572 Type *LengthTy = MI.getLength()->getType(); in visitIntrinsicCall()
6588 Type *LengthTy = MI.getLength()->getType(); in visitIntrinsicCall()
6633 // it is non-variadic. in visitIntrinsicCall()
6716 MMI.setCurrentCallSite(CI->getZExtValue()); in visitIntrinsicCall()
6723 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); in visitIntrinsicCall()
6811 // clang-format off in visitIntrinsicCall()
6835 // clang-format on in visitIntrinsicCall()
6847 // clang-format off in visitIntrinsicCall()
6855 // clang-format on in visitIntrinsicCall()
6930 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata(); in visitIntrinsicCall()
6932 convertStrToRoundingMode(cast<MDString>(MD)->getString()); in visitIntrinsicCall()
6936 // Propagate fast-math-flags from IR to node(s). in visitIntrinsicCall()
6960 // TODO: Intrinsic calls should have fast-math-flags. in visitIntrinsicCall()
7007 EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType()); in visitIntrinsicCall()
7009 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue()); in visitIntrinsicCall()
7035 // Use GET_FPENV if it is legal or custom. Otherwise use memory-based node in visitIntrinsicCall()
7045 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex(); in visitIntrinsicCall()
7072 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex(); in visitIntrinsicCall()
7142 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, in visitIntrinsicCall()
7150 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, in visitIntrinsicCall()
7316 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType()); in visitIntrinsicCall()
7374 // artificial side-effects. in visitIntrinsicCall()
7381 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata(); in visitIntrinsicCall()
7389 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); in visitIntrinsicCall()
7413 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); in visitIntrinsicCall()
7417 GFI->addStackRoot(FI->getIndex(), TypeMap); in visitIntrinsicCall()
7438 I.getAttributes().getFnAttr("trap-func-name").getValueAsString(); in visitIntrinsicCall()
7451 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl, in visitIntrinsicCall()
7463 Args[0].Ty = Args[0].Val->getType(); in visitIntrinsicCall()
7514 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); in visitIntrinsicCall()
7544 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue(); in visitIntrinsicCall()
7562 const int FrameIndex = SI->second; in visitIntrinsicCall()
7566 Offset = -1; // Cannot determine offset from alloca to lifetime object. in visitIntrinsicCall()
7574 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(); in visitIntrinsicCall()
7575 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); in visitIntrinsicCall()
7576 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue(); in visitIntrinsicCall()
7641 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); in visitIntrinsicCall()
7651 TII->get(TargetOpcode::LOCAL_ESCAPE)) in visitIntrinsicCall()
7664 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); in visitIntrinsicCall()
7667 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max())); in visitIntrinsicCall()
7669 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal); in visitIntrinsicCall()
7818 GA->getGlobal(), sdl, Val.getValueType(), in visitIntrinsicCall()
7819 GA->getOffset())}); in visitIntrinsicCall()
7861 assert(cast<ConstantInt>(I.getOperand(4))->isZero() && in visitIntrinsicCall()
7862 "Non-zero flags not supported yet"); in visitIntrinsicCall()
7869 assert(RetTy->isVoidTy() && "Should not return"); in visitIntrinsicCall()
7881 Arg.Ty = I.getOperand(Idx)->getType(); in visitIntrinsicCall()
7912 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to in visitIntrinsicCall()
7914 // zero-extended up to 64 bits to match the pointer. in visitIntrinsicCall()
7916 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); in visitIntrinsicCall()
7918 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); in visitIntrinsicCall()
7956 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 && in visitIntrinsicCall()
7958 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue(); in visitIntrinsicCall()
7959 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne(); in visitIntrinsicCall()
8038 // If the zero-is-poison flag is set, we can assume the upper limit in visitIntrinsicCall()
8039 // of the result is VF-1. in visitIntrinsicCall()
8041 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero(); in visitIntrinsicCall()
8043 if (isa<ScalableVectorType>(I.getOperand(0)->getType())) in visitIntrinsicCall()
8080 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl); in visitIntrinsicCall()
8096 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl); in visitIntrinsicCall()
8150 assert(Result.getNode()->getNumValues() == 2); in visitConstrainedFPIntrinsic()
8163 // floating-point exception masks. in visitConstrainedFPIntrinsic()
8168 // floating-point exception masks or read floating-point exception flags. in visitConstrainedFPIntrinsic()
8224 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate()); in visitConstrainedFPIntrinsic()
8243 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); in getISDForVPIntrinsic()
8248 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); in getISDForVPIntrinsic()
8253 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); in getISDForVPIntrinsic()
8287 // Do not serialize variable-length loads of constant memory with in visitVPLoad()
8292 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); in visitVPLoad()
8317 PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); in visitVPGather()
8380 PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); in visitVPScatter()
8421 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); in visitVPStridedLoad()
8423 unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); in visitVPStridedLoad()
8446 unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); in visitVPStridedStore()
8467 bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy(); in visitVPCmp()
8469 // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan) in visitVPCmp()
8470 // flags, but calls that don't return floating-point types can't be in visitVPCmp()
8574 auto Constant = OpValues[1]->getAsZExtVal(); in visitVectorPredicationIntrinsic()
8597 VPIntrin.getOperand(0)->getType()); in visitVectorPredicationIntrinsic()
8657 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); in lowerEndEH()
8658 // There is a platform (e.g. wasm) that uses funclet-style IR but does not in lowerEndEH()
8659 // actually use outlined funclets or their style of LSDA info. in lowerEndEH()
8663 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel); in lowerEndEH()
8689 "Non-null chain expected with non-tail call!"); in lowerInvokable()
8729 // Avoid emitting tail calls in functions with the disable-tail-calls in LowerCallTo()
8731 auto *Caller = CB.getParent()->getParent(); in LowerCallTo()
8732 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() == in LowerCallTo()
8740 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) in LowerCallTo()
8749 if (V->getType()->isEmptyTy()) in LowerCallTo()
8753 Entry.Node = ArgNode; Entry.Ty = V->getType(); in LowerCallTo()
8755 Entry.setAttributes(&CB, I - CB.arg_begin()); in LowerCallTo()
8770 // might point to function-local memory), we can't meaningfully tail-call. in LowerCallTo()
8779 Value *V = Bundle->Inputs[0]; in LowerCallTo()
8782 Entry.Ty = V->getType(); in LowerCallTo()
8787 // Check if target-independent constraints permit a tail call here. in LowerCallTo()
8788 // Target-dependent constraints are checked within TLI->LowerCallTo. in LowerCallTo()
8803 CFIType = cast<ConstantInt>(Bundle->Inputs[0]); in LowerCallTo()
8804 assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type"); in LowerCallTo()
8810 auto *Token = Bundle->Inputs[0].get(); in LowerCallTo()
8842 // book-keeping. in LowerCallTo()
8860 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits()); in getMemCmpLoad()
8878 // Do not serialize (non-volatile) loads of constant memory with anything. in getMemCmpLoad()
8879 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) { in getMemCmpLoad()
8883 // Do not serialize non-volatile loads against each other. in getMemCmpLoad()
8917 if (CSize && CSize->getZExtValue() == 0) { in visitMemCmpBCmpCall()
8934 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 in visitMemCmpBCmpCall()
8935 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 in visitMemCmpBCmpCall()
8947 // TODO: Handle 5 byte compare as 4-byte + 1 byte. in visitMemCmpBCmpCall()
8948 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. in visitMemCmpBCmpCall()
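The equality-only expansion those comments describe, written for the 4-byte case (memcmp4NonZero is a hypothetical name; std::memcpy models the wide, possibly unaligned load the backend emits):

    #include <cstdint>
    #include <cstring>

    // memcmp(LHS, RHS, 4) != 0 becomes one 32-bit load per side and a
    // single compare; valid only because the caller tests the result
    // against zero, not for ordering.
    bool memcmp4NonZero(const void *LHS, const void *RHS) {
      uint32_t L, R;
      std::memcpy(&L, LHS, sizeof(L));
      std::memcpy(&R, RHS, sizeof(R));
      return L != R;
    }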
8950 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); in visitMemCmpBCmpCall()
8951 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); in visitMemCmpBCmpCall()
8965 unsigned NumBitsToCompare = CSize->getZExtValue() * 8; in visitMemCmpBCmpCall()
8990 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits()); in visitMemCmpBCmpCall()
9152 /// See if we can lower a unary floating-point operation into an SDNode with
9172 /// See if we can lower a binary floating-point operation into an SDNode with
9203 if (F->isDeclaration()) { in visitCall()
9204 // Is this an LLVM intrinsic or a target-specific intrinsic? in visitCall()
9205 unsigned IID = F->getIntrinsicID(); in visitCall()
9208 IID = II->getIntrinsicID(F); in visitCall()
9216 // Check for well-known libc/libm calls. If the function is internal, it in visitCall()
9220 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() && in visitCall()
9221 F->hasName() && LibInfo->getLibFunc(*F, Func) && in visitCall()
9222 LibInfo->hasOptimizedCodeGen(Func)) { in visitCall()
9452 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]); in LowerCallSiteWithPtrAuthBundle()
9453 const Value *Discriminator = PAB->Inputs[1]; in LowerCallSiteWithPtrAuthBundle()
9455 assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key"); in LowerCallSiteWithPtrAuthBundle()
9456 assert(Discriminator->getType()->isIntegerTy(64) && in LowerCallSiteWithPtrAuthBundle()
9462 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator, in LowerCallSiteWithPtrAuthBundle()
9464 return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(), in LowerCallSiteWithPtrAuthBundle()
9467 // Functions should never be ptrauth-called directly. in LowerCallSiteWithPtrAuthBundle()
9471 TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(), in LowerCallSiteWithPtrAuthBundle()
9480 /// AsmOperandInfo - This contains information for each constraint that we are
9484 /// CallOperand - If this is the result output operand or a clobber
9489 /// AssignedRegs - If this is a register or register class operand, this
9569 Type *Ty = OpVal->getType(); in getAddressForMemoryInput()
9576 StackID = TFI->getStackIDForScalableVectors(); in getAddressForMemoryInput()
9589 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9645 // refers to the input address rather than the pointed-to value. in getRegistersForValue()
9652 // i64, which can be passed with two i32 values on a 32-bit machine. in getRegistersForValue()
9684 TargetRegisterClass::iterator I = RC->begin(); in getRegistersForValue()
9689 I = std::find(I, RC->end(), AssignedReg); in getRegistersForValue()
9690 if (I == RC->end()) { in getRegistersForValue()
9697 for (; NumRegs; --NumRegs, ++I) { in getRegistersForValue()
9698 assert(I != RC->end() && "Ran out of registers to allocate!"); in getRegistersForValue()
9712 for (; OperandNo; --OperandNo) { in findMatchingInlineAsmOperand()
9714 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal(); in findMatchingInlineAsmOperand()
9732 if (IA->hasSideEffects()) in ExtraFlags()
9734 if (IA->isAlignStack()) in ExtraFlags()
9738 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; in ExtraFlags()
9743 // meaning of an Other constraint can be target-specific and we can't easily in update()
9765 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal()); in isFunction()
9767 // In a normal "call dllimport func" instruction (non-inlineasm) it forces in isFunction()
9772 if (Fn && !Fn->hasDLLImportStorageClass()) in isFunction()
9779 /// visitInlineAsm - Handle a call to an InlineAsm object.
9784 /// ConstraintOperands - Information about all of the constraints. in visitInlineAsm()
9793 bool HasSideEffect = IA->hasSideEffects(); in visitInlineAsm()
9845 int OpNo = -1; in visitInlineAsm()
9847 IA->collectAsmStrs(AsmStrs); in visitInlineAsm()
9882 // pc-related, but lea/mov of a function address may use the GOT. in visitInlineAsm()
9917 // AsmNodeOperands - The operands for the ISD::INLINEASM node. in visitInlineAsm()
9921 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout()))); in visitInlineAsm()
9934 // Third pass: Loop over operands to prepare DAG-level operands. As part of in visitInlineAsm()
9990 // C_RegisterClass, and a target-defined fashion for in visitInlineAsm()
10020 InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal()); in visitInlineAsm()
10023 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c in visitInlineAsm()
10035 Register TiedReg = R->getReg(); in visitInlineAsm()
10036 MVT RegVT = R->getSimpleValueType(0); in visitInlineAsm()
10135 AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(), in visitInlineAsm()
10137 GA->getOffset()); in visitInlineAsm()
10215 ResultTypes = StructResult->elements(); in visitInlineAsm()
10216 else if (!CallResultType->isVoidTy()) in visitInlineAsm()
10222 assert((*CurResultType)->isSized() && "Unexpected unsized type"); in visitInlineAsm()
10233 // class it is put in, e.g. a double in a general-purpose register on a in visitInlineAsm()
10234 // 32-bit machine. in visitInlineAsm()
10286 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); in visitInlineAsm()
10288 for (const SDValue &V : Val->op_values()) in visitInlineAsm()
10358 if (I.getType()->isPointerTy()) in visitVAArg()
10385 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped()) in lowerRangeToAssertZExt()
10388 APInt Lo = CR->getUnsignedMin(); in lowerRangeToAssertZExt()
10392 APInt Hi = CR->getUnsignedMax(); in lowerRangeToAssertZExt()
10402 unsigned NumVals = Op.getNode()->getNumValues(); in lowerRangeToAssertZExt()
10432 const Value *V = Call->getOperand(ArgI); in populateCallLoweringInfo()
10434 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); in populateCallLoweringInfo()
10438 Entry.Ty = V->getType(); in populateCallLoweringInfo()
10445 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args), in populateCallLoweringInfo()
10447 .setDiscardResult(Call->use_empty()) in populateCallLoweringInfo()
10450 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0); in populateCallLoweringInfo()
10477 // Things on the stack are pointer-typed, meaning that they are already in addStackMapLiveVars()
10480 Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType())); in addStackMapLiveVars()
10493 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value."); in visitStackmap()
10514 // Add the STACKMAP operands, starting with DAG house-keeping. in visitStackmap()
10525 DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType()); in visitStackmap()
10531 DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType()); in visitStackmap()
10546 // Set the root to the target-lowered call chain. in visitStackmap()
10550 FuncInfo.MF->getFrameInfo().setHasStackMap(); in visitStackmap()
10565 bool HasDef = !CB.getType()->isVoidTy(); in visitPatchpoint()
10571 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl, in visitPatchpoint()
10574 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(), in visitPatchpoint()
10576 SymbolicCallee->getValueType(0)); in visitPatchpoint()
10580 unsigned NumArgs = NArgVal->getAsZExtVal(); in visitPatchpoint()
10583 // Intrinsics include all meta-operands up to but not including CC. in visitPatchpoint()
10599 if (CallEnd->getOpcode() == ISD::EH_LABEL) in visitPatchpoint()
10600 CallEnd = CallEnd->getOperand(0).getNode(); in visitPatchpoint()
10601 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg)) in visitPatchpoint()
10602 CallEnd = CallEnd->getOperand(0).getNode(); in visitPatchpoint()
10606 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && in visitPatchpoint()
10608 SDNode *Call = CallEnd->getOperand(0).getNode(); in visitPatchpoint()
10609 bool HasGlue = Call->getGluedNode(); in visitPatchpoint()
10615 Ops.push_back(*(Call->op_begin())); in visitPatchpoint()
10619 Ops.push_back(*(Call->op_end() - 1)); in visitPatchpoint()
10623 Ops.push_back(*(Call->op_end() - 2)); in visitPatchpoint()
10625 Ops.push_back(*(Call->op_end() - 1)); in visitPatchpoint()
10629 Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64)); in visitPatchpoint()
10631 Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32)); in visitPatchpoint()
10639 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3); in visitPatchpoint()
10653 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1; in visitPatchpoint()
10654 Ops.append(Call->op_begin() + 2, e); in visitPatchpoint()
10698 FuncInfo.MF->getFrameInfo().setHasPatchPoint(); in visitPatchpoint()
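Read together, the preceding lines dismantle the lowered call node and rebuild it as a PATCHPOINT: the i64 <id> and i32 <numBytes> target constants, the callee (constant or symbolic), the register-argument count and calling convention, the call's own register operands, and the original chain/glue bookkeeping. This summary is reconstructed from the excerpt and is a sketch, not a normative operand layout.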
10788 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex, in getReturnAttrs()
10792 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10816 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT); in LowerCallTo()
10817 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT); in LowerCallTo()
10829 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(), in LowerCallTo()
10830 CLI.IsVarArg, Outs, CLI.RetTy->getContext()); in LowerCallTo()
10833 int DemoteStackIdx = -100; in LowerCallTo()
10866 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext()); in LowerCallTo()
10868 // sret demotion isn't compatible with tail-calls, since the sret argument in LowerCallTo()
10878 if (I == RetTys.size() - 1) in LowerCallTo()
10882 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
10884 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
10892 if (CLI.RetTy->isPointerTy()) { in LowerCallTo()
10895 cast<PointerType>(CLI.RetTy)->getAddressSpace()); in LowerCallTo()
10937 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext()); in LowerCallTo()
10948 if (Args[i].Ty->isPointerTy()) { in LowerCallTo()
10951 cast<PointerType>(Args[i].Ty)->getAddressSpace()); in LowerCallTo()
10959 // passed InReg - is surely an HVA in LowerCallTo()
11023 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
11025 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
11035 // Conservatively only handle 'returned' on non-vectors that can be lowered, in LowerCallTo()
11040 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() && in LowerCallTo()
11041 CLI.RetTy->getPointerAddressSpace() == in LowerCallTo()
11042 Args[i].Ty->getPointerAddressSpace())) && in LowerCallTo()
11075 if (j == NumParts - 1) in LowerCallTo()
11083 if (NeedsRegBlock && Value == NumValues - 1) in LowerCallTo()
11084 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); in LowerCallTo()
11102 // For a tail call, the return value is merely live-out and there aren't in LowerCallTo()
11125 PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace()); in LowerCallTo()
11166 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
11168 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), in LowerCallTo()
11203 if (N->getNumValues() == 1) { in LowerOperationWrapper()
11210 assert((N->getNumValues() == Res->getNumValues()) && in LowerOperationWrapper()
11214 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) in LowerOperationWrapper()
11227 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && in CopyValueToVirtualRegister()
11235 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(), in CopyValueToVirtualRegister()
11242 ExtendType = PreferredExtendIt->second; in CopyValueToVirtualRegister()
11250 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11255 // of virtual registers for all non-dead arguments. in isOnlyUsedInEntryBlock()
11257 return A->use_empty(); in isOnlyUsedInEntryBlock()
11259 const BasicBlock &Entry = A->getParent()->front(); in isOnlyUsedInEntryBlock()
11260 for (const User *U : A->users()) in isOnlyUsedInEntryBlock()
11261 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U)) in isOnlyUsedInEntryBlock()
11283 unsigned NumArgs = FuncInfo->Fn->arg_size(); in findArgumentCopyElisionCandidates()
11286 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * { in findArgumentCopyElisionCandidates()
11289 V = V->stripPointerCasts(); in findArgumentCopyElisionCandidates()
11291 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI)) in findArgumentCopyElisionCandidates()
11294 return &Iter.first->second; in findArgumentCopyElisionCandidates()
11299 // by the store. Any non-store use of an alloca escapes it and any subsequent in findArgumentCopyElisionCandidates()
11302 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) { in findArgumentCopyElisionCandidates()
11303 // Look for stores, and handle non-store uses conservatively. in findArgumentCopyElisionCandidates()
11323 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) in findArgumentCopyElisionCandidates()
11327 const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); in findArgumentCopyElisionCandidates()
11342 const Value *Val = SI->getValueOperand()->stripPointerCasts(); in findArgumentCopyElisionCandidates()
11344 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() || in findArgumentCopyElisionCandidates()
11345 Arg->getType()->isEmptyTy() || in findArgumentCopyElisionCandidates()
11346 DL.getTypeStoreSize(Arg->getType()) != in findArgumentCopyElisionCandidates()
11347 DL.getTypeAllocSize(AI->getAllocatedType()) || in findArgumentCopyElisionCandidates()
11348 !DL.typeSizeEqualsStoreSize(Arg->getType()) || in findArgumentCopyElisionCandidates()
11361 // Stop scanning if we've seen all arguments. This will happen early in -O0 in findArgumentCopyElisionCandidates()
11362 // builds, which is useful, because -O0 builds have large entry blocks and in findArgumentCopyElisionCandidates()
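The scan above looks for an entry-block store of an unmodified argument into a static alloca of the same store size; when found, the alloca can reuse the argument's incoming stack slot and the copy is elided. A hypothetical source pattern that typically produces exactly that store (illustrative C++, not taken from the source):

    struct Big { long a, b, c, d; };
    long use(Big &local);

    long caller(Big arg) {
      Big local = arg;   // entry-block store of the untouched argument
      return use(local); // 'local' may alias arg's incoming stack slot
    }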
11381 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()); in tryToElideArgumentCopy()
11390 const AllocaInst *AI = ArgCopyIter->second.first; in tryToElideArgumentCopy()
11391 int FixedIndex = FINode->getIndex(); in tryToElideArgumentCopy()
11394 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); in tryToElideArgumentCopy()
11401 Align RequiredAlignment = AI->getAlign(); in tryToElideArgumentCopy()
11426 const StoreInst *SI = ArgCopyIter->second.second; in tryToElideArgumentCopy()
11440 SelectionDAG &DAG = SDB->DAG; in LowerArguments()
11441 SDLoc dl = SDB->getCurSDLoc(); in LowerArguments()
11449 if (!FuncInfo->CanLowerReturn) { in LowerArguments()
11461 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]); in LowerArguments()
11484 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters( in LowerArguments()
11493 if (Arg.getType()->isPointerTy()) { in LowerArguments()
11496 cast<PointerType>(Arg.getType())->getAddressSpace()); in LowerArguments()
11504 // passed InReg - is surely an HVA in LowerArguments()
11550 TLI->getABIAlignmentForCallingConv(ArgTy, DL)); in LowerArguments()
11562 // For in-memory arguments, size and alignment should be passed from the frontend. in LowerArguments()
11570 MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL)); in LowerArguments()
11591 MVT RegisterVT = TLI->getRegisterTypeForCallingConv( in LowerArguments()
11592 *CurDAG->getContext(), F.getCallingConv(), VT); in LowerArguments()
11593 unsigned NumRegs = TLI->getNumRegistersForCallingConv( in LowerArguments()
11594 *CurDAG->getContext(), F.getCallingConv(), VT); in LowerArguments()
11607 if (i == NumRegs - 1) in LowerArguments()
11612 if (NeedsRegBlock && Value == NumValues - 1) in LowerArguments()
11613 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); in LowerArguments()
11620 SDValue NewRoot = TLI->LowerFormalArguments( in LowerArguments()
11642 if (!FuncInfo->CanLowerReturn) { in LowerArguments()
11651 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); in LowerArguments()
11657 MachineFunction& MF = SDB->DAG.getMachineFunction(); in LowerArguments()
11660 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); in LowerArguments()
11661 FuncInfo->DemoteRegister = SRetReg; in LowerArguments()
11663 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); in LowerArguments()
11687 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), in LowerArguments()
11698 TLI->supportSwiftError() && in LowerArguments()
11701 SDB->setUnusedArgValue(&Arg, InVals[i]); in LowerArguments()
11706 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); in LowerArguments()
11711 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), in LowerArguments()
11713 unsigned NumParts = TLI->getNumRegistersForCallingConv( in LowerArguments()
11714 *CurDAG->getContext(), F.getCallingConv(), VT); in LowerArguments()
11741 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); in LowerArguments()
11744 SDB->getCurSDLoc()); in LowerArguments()
11746 SDB->setValue(&Arg, Res); in LowerArguments()
11759 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) in LowerArguments()
11760 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); in LowerArguments()
11769 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); in LowerArguments()
11771 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(), in LowerArguments()
11781 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); in LowerArguments()
11783 FuncInfo->ValueMap[&Arg] = Reg; in LowerArguments()
11788 FuncInfo->InitializeRegForValue(&Arg); in LowerArguments()
11789 SDB->CopyToExportRegsIfNeeded(&Arg); in LowerArguments()
11806 MF->getInStackSlotVariableDbgInfo()) { in LowerArguments()
11809 VI.updateStackSlot(I->second); in LowerArguments()
11831 for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) { in HandlePHINodesInSuccessorBlocks()
11832 if (!isa<PHINode>(SuccBB->begin())) continue; in HandlePHINodesInSuccessorBlocks()
11840 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); in HandlePHINodesInSuccessorBlocks()
11842 // At this point we know that there is a 1-1 correspondence between LLVM PHI in HandlePHINodesInSuccessorBlocks()
11845 for (const PHINode &PN : SuccBB->phis()) { in HandlePHINodesInSuccessorBlocks()
11851 if (PN.getType()->isEmptyTy()) in HandlePHINodesInSuccessorBlocks()
11874 Reg = I->second; in HandlePHINodesInSuccessorBlocks()
11903 if (++I == FuncInfo.MF->end()) in NextBlock()
11927 if (++BBI != FuncInfo.MF->end()) in lowerWorkItem()
11930 unsigned Size = W.LastCluster - W.FirstCluster + 1; in lowerWorkItem()
11938 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)" in lowerWorkItem()
11947 const APInt &SmallValue = Small.Low->getValue(); in lowerWorkItem()
11948 const APInt &BigValue = Big.Low->getValue(); in lowerWorkItem()
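The merge named in the comment above is valid because 4 (0b100) and 6 (0b110) differ in exactly one bit; OR-ing that bit (2) into X sends both values, and only those two, to 6. A minimal standalone check of the identity:

    #include <cassert>

    int main() {
      // "if (X == 6 || X == 4)" -> "if ((X | 2) == 6)"
      for (unsigned X = 0; X < 256; ++X)
        assert(((X | 2) == 6) == (X == 6 || X == 4));
      return 0;
    }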
11971 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0)); in lowerWorkItem()
11992 // which case their relative ordering is non-deterministic. So we use Low in lowerWorkItem()
11993 // as a tie-breaker as clusters are guaranteed to never overlap. in lowerWorkItem()
11998 a.Low->getValue().slt(b.Low->getValue()); in lowerWorkItem()
12004 --I; in lowerWorkItem()
12005 if (I->Prob > W.LastCluster->Prob) in lowerWorkItem()
12007 if (I->Kind == CC_Range && I->MBB == NextMBB) { in lowerWorkItem()
12018 UnhandledProbs += I->Prob; in lowerWorkItem()
12028 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); in lowerWorkItem()
12030 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); in lowerWorkItem()
12031 CurMF->insert(BBI, Fallthrough); in lowerWorkItem()
12035 UnhandledProbs -= I->Prob; in lowerWorkItem()
12037 switch (I->Kind) { in lowerWorkItem()
12040 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; in lowerWorkItem()
12041 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; in lowerWorkItem()
12044 MachineBasicBlock *JumpMBB = JT->MBB; in lowerWorkItem()
12045 CurMF->insert(BBI, JumpMBB); in lowerWorkItem()
12047 auto JumpProb = I->Prob; in lowerWorkItem()
12053 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(), in lowerWorkItem()
12054 SE = JumpMBB->succ_end(); in lowerWorkItem()
12058 FallthroughProb -= DefaultProb / 2; in lowerWorkItem()
12059 JumpMBB->setSuccProbability(SI, DefaultProb / 2); in lowerWorkItem()
12060 JumpMBB->normalizeSuccProbs(); in lowerWorkItem()
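As a worked instance of the two probability updates above (numbers assumed for illustration): if the default probability is 1/4, the fallthrough edge keeps 1/8 and the jump-table block's edge to the default successor is set to the other 1/8, after which normalizeSuccProbs() rescales the block's outgoing edges to sum to one.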
12066 // JTH->FallthroughUnreachable which will use it to suppress the range in lowerWorkItem()
12071 // gadget - out-of-bounds inputs that are impossible in correct in lowerWorkItem()
12077 Function &CurFunc = CurMF->getFunction(); in lowerWorkItem()
12078 if (!CurFunc.hasFnAttribute("branch-target-enforcement")) in lowerWorkItem()
12079 JTH->FallthroughUnreachable = true; in lowerWorkItem()
12082 if (!JTH->FallthroughUnreachable) in lowerWorkItem()
12085 CurMBB->normalizeSuccProbs(); in lowerWorkItem()
12089 JTH->HeaderBB = CurMBB; in lowerWorkItem()
12090 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader. in lowerWorkItem()
12095 JTH->Emitted = true; in lowerWorkItem()
12101 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex]; in lowerWorkItem()
12104 for (BitTestCase &BTC : BTB->Cases) in lowerWorkItem()
12105 CurMF->insert(BBI, BTC.ThisBB); in lowerWorkItem()
12108 BTB->Parent = CurMBB; in lowerWorkItem()
12109 BTB->Default = Fallthrough; in lowerWorkItem()
12111 BTB->DefaultProb = UnhandledProbs; in lowerWorkItem()
12115 if (!BTB->ContiguousRange) { in lowerWorkItem()
12116 BTB->Prob += DefaultProb / 2; in lowerWorkItem()
12117 BTB->DefaultProb -= DefaultProb / 2; in lowerWorkItem()
12121 BTB->FallthroughUnreachable = true; in lowerWorkItem()
12126 BTB->Emitted = true; in lowerWorkItem()
12133 if (I->Low == I->High) { in lowerWorkItem()
12134 // Check Cond == I->Low. in lowerWorkItem()
12137 RHS = I->Low; in lowerWorkItem()
12140 // Check I->Low <= Cond <= I->High. in lowerWorkItem()
12142 LHS = I->Low; in lowerWorkItem()
12144 RHS = I->High; in lowerWorkItem()
12152 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, in lowerWorkItem()
12153 getCurSDLoc(), I->Prob, UnhandledProbs); in lowerWorkItem()
12158 SL->SwitchCases.push_back(CB); in lowerWorkItem()
12171 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) && in splitWorkItem()
12173 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!"); in splitWorkItem()
12176 SL->computeSplitWorkItemInfo(W); in splitWorkItem()
12178 // Use the first element on the right as pivot since we will make less-than in splitWorkItem()
12187 const ConstantInt *Pivot = PivotCluster->Low; in splitWorkItem()
12195 // between the known lower bound and Pivot - 1. in splitWorkItem()
12197 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range && in splitWorkItem()
12198 FirstLeft->Low == W.GE && in splitWorkItem()
12199 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) { in splitWorkItem()
12200 LeftMBB = FirstLeft->MBB; in splitWorkItem()
12202 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock()); in splitWorkItem()
12203 FuncInfo.MF->insert(BBI, LeftMBB); in splitWorkItem()
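A worked example of the reuse test above (values assumed): if the single left-hand cluster covers exactly [W.GE, 4] and Pivot == 5, then High + 1 == Pivot holds, so the less-than side of the split can branch straight to that cluster's existing block and no fresh LeftMBB is created.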
12214 if (FirstRight == LastRight && FirstRight->Kind == CC_Range && in splitWorkItem()
12215 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) { in splitWorkItem()
12216 RightMBB = FirstRight->MBB; in splitWorkItem()
12218 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock()); in splitWorkItem()
12219 FuncInfo.MF->insert(BBI, RightMBB); in splitWorkItem()
12233 SL->SwitchCases.push_back(CB); in splitWorkItem()
12262 SwitchMBB->getParent()->getFunction().hasMinSize()) in peelDominantCaseCluster()
12286 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock()); in peelDominantCaseCluster()
12287 FuncInfo.MF->insert(BBI, PeeledSwitchMBB); in peelDominantCaseCluster()
12316 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex()) in visitSwitch()
12337 SwitchMBB->addSuccessor(DefaultMBB); in visitSwitch()
12345 SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(), in visitSwitch()
12347 SL->findBitTestClusters(Clusters, &SI); in visitSwitch()
12357 C.Low->getValue().print(dbgs(), true); in visitSwitch()
12359 dbgs() << '-'; in visitSwitch()
12360 C.High->getValue().print(dbgs(), true); in visitSwitch()
12370 CaseClusterIt Last = Clusters.end() - 1; in visitSwitch()
12382 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; in visitSwitch()
12385 !DefaultMBB->getParent()->getFunction().hasMinSize()) { in visitSwitch()
12415 // Use VECTOR_SHUFFLE for the fixed-length vector in visitVectorReverse()
12420 Mask.push_back(NumElts - 1 - i); in visitVectorReverse()
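The loop above emits the reversal mask for the fixed-length path. A standalone sketch of the same index arithmetic (NumElts is an assumed parameter):

    #include <cstdio>
    #include <vector>

    int main() {
      // For NumElts == 4 this yields [3, 2, 1, 0]: element i of the
      // result reads element NumElts - 1 - i of the source.
      unsigned NumElts = 4;
      std::vector<int> Mask;
      for (unsigned i = 0; i != NumElts; ++i)
        Mask.push_back(NumElts - 1 - i);
      for (int M : Mask)
        std::printf("%d ", M);
      std::printf("\n");
      return 0;
    }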
12439 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing in visitVectorDeinterleave()
12464 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing in visitVectorInterleave()
12506 int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue(); in visitVectorSplice()
12519 // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors. in visitVectorSplice()
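On the fixed-length path above, the splice becomes a shuffle over the concatenation of the two inputs. A hedged sketch of the mask for a non-negative immediate (assumed semantics: take NumElts consecutive elements of concat(a, b) starting at index Imm, so NumElts = 4 and Imm = 1 selects [a1, a2, a3, b0]):

    #include <cstdio>

    int main() {
      unsigned NumElts = 4;
      int Imm = 1;
      for (unsigned i = 0; i != NumElts; ++i)
        std::printf("%d ", Imm + (int)i); // mask: 1 2 3 4
      std::printf("\n");
      return 0;
    }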
12529 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12548 MachineInstr *MI = MRI.def_begin(Reg)->getParent(); in FollowCopyChain()
12550 assert(MI->getOpcode() == TargetOpcode::COPY && in FollowCopyChain()
12552 Reg = MI->getOperand(1).getReg(); in FollowCopyChain()
12553 MI = MRI.def_begin(Reg)->getParent(); in FollowCopyChain()
12555 if (MI->getOpcode() == TargetOpcode::COPY) { in FollowCopyChain()
12557 Reg = MI->getOperand(1).getReg(); in FollowCopyChain()
12559 MI = MRI.def_begin(Reg)->getParent(); in FollowCopyChain()
12562 assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR && in FollowCopyChain()
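The loop above walks single-def COPYs back to the instruction that actually produces the register, asserting it is the INLINEASM_BR. A generic standalone sketch of that chain-following shape (illustrative types, not the MachineRegisterInfo API):

    // Follow copies until the defining node is no longer a copy.
    struct Node { bool IsCopy; Node *Src; };

    Node *followCopyChain(Node *N) {
      while (N && N->IsCopy)
        N = N->Src;  // step to the copied-from definition
      return N;      // expected: the INLINEASM_BR-like producer
    }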
12568 // setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12575 cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator()); in visitCallBrLandingPad()
12584 // Re-parse the asm constraints string. in visitCallBrLandingPad()
12607 FuncInfo.MBB->addLiveIn(OriginalDef); in visitCallBrLandingPad()