Lines Matching +full:fsin +full:- +full:output
1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
69 #define DEBUG_TYPE "hexagon-lowering"
71 static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
76 EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
79 static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
82 static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
87 MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
91 MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
95 MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
99 MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
104 MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
108 MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
111 static cl::opt<bool> AlignLoads("hexagon-align-loads",
116 DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
169 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
197 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
206 // CCValAssign - represents the assignment of the return value to locations. in LowerReturn()
209 // CCState - Info about the registers and stack slot. in LowerReturn()
222 // Copy the result values into the output registers. in LowerReturn()
265 return CI->isTailCall(); in mayBeEmittedAsTailCall()
344 /// LowerCallResult - Lower the result values of an ISD::CALL into the
400 /// LowerCall - Function arguments are copied from virtual regs to
421 unsigned NumParams = CLI.CB ? CLI.CB->getFunctionType()->getNumParams() : 0; in LowerCall()
423 Callee = DAG.getTargetGlobalAddress(GAN->getGlobal(), dl, MVT::i32); in LowerCall()
425 // Linux ABI treats var-arg calls the same way as regular ones. in LowerCall()
541 // Build a sequence of copy-to-reg nodes chained together with token in LowerCall()
576 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT, 0, Flags); in LowerCall()
579 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, Flags); in LowerCall()
606 // The target-independent code calls getFrameRegister before setting it, and in LowerCall()
626 /// form a post-indexed load / store.
633 EVT VT = LSN->getMemoryVT(); in getPostIndexedAddressParts()
644 if (Op->getOpcode() != ISD::ADD) in getPostIndexedAddressParts()
646 Base = Op->getOperand(0); in getPostIndexedAddressParts()
647 Offset = Op->getOperand(1); in getPostIndexedAddressParts()
652 int32_t V = cast<ConstantSDNode>(Offset.getNode())->getSExtValue(); in getPostIndexedAddressParts()
653 return Subtarget.getInstrInfo()->isValidAutoIncImm(VT, V); in getPostIndexedAddressParts()
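// A minimal standalone sketch (an assumption, not the real isValidAutoIncImm)
// of the kind of check used above: a post-increment offset is only usable if
// it is a multiple of the access size and, once scaled, fits the instruction's
// small signed immediate field (assumed here to be 4 bits for scalar accesses).
#include <cstdint>
bool looksLikeValidAutoInc(int64_t Offset, unsigned AccessBytes) {
  if (Offset % int64_t(AccessBytes) != 0)
    return false;
  int64_t Scaled = Offset / int64_t(AccessBytes);
  return Scaled >= -8 && Scaled <= 7;   // signed 4-bit range
}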
675 if (Op.getOperand(NumOps-1).getValueType() == MVT::Glue) in LowerINLINEASM()
676 --NumOps; // Ignore the flag operand. in LowerINLINEASM()
694 for (; NumVals; --NumVals, ++i) { in LowerINLINEASM()
695 Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg(); in LowerINLINEASM()
723 // Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
724 // is marked as having side-effects, while the register read on Hexagon does
735 // Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
736 // is marked as having side-effects, while the register read on Hexagon does
770 assert(AlignConst && "Non-constant Align in LowerDYNAMIC_STACKALLOC"); in LowerDYNAMIC_STACKALLOC()
772 unsigned A = AlignConst->getSExtValue(); in LowerDYNAMIC_STACKALLOC()
780 Size.getNode()->dump(&DAG); in LowerDYNAMIC_STACKALLOC()
800 // Linux ABI treats var-arg calls the same way as regular ones. in LowerFormalArguments()
807 MF.getFunction().getFunctionType()->getNumParams()); in LowerFormalArguments()
825 return Reg - Hexagon::R0 + 1; in LowerFormalArguments()
827 return (Reg - Hexagon::D0 + 1) * 2; in LowerFormalArguments()
829 return Reg - Hexagon::V0 + 1; in LowerFormalArguments()
831 return (Reg - Hexagon::W0 + 1) * 2; in LowerFormalArguments()
839 HMFI.setFirstNamedArgFrameIndex(-int(MFI.getNumFixedObjects())); in LowerFormalArguments()
847 // 1. 32- and 64-bit values and HVX vectors are passed directly, in LowerFormalArguments()
899 // If it's a pass-by-value aggregate, then do not dereference the stack in LowerFormalArguments()
917 HMFI.setFirstNamedArgFrameIndex(HMFI.getFirstNamedArgFrameIndex() - 1); in LowerFormalArguments()
918 HMFI.setLastNamedArgFrameIndex(-int(MFI.getNumFixedObjects())); in LowerFormalArguments()
921 int NumVarArgRegs = 6 - HFL.FirstVarArgSavedReg; in LowerFormalArguments()
931 RegAreaStart = (RegAreaStart + 7) & -8; in LowerFormalArguments()
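// Standalone note on the idiom above (plain arithmetic, nothing Hexagon
// specific assumed): adding 7 and masking with -8 rounds a byte offset up to
// the next multiple of 8, keeping the start of the register-save area
// 8-byte aligned.
unsigned roundUpTo8(unsigned Offset) {
  return (Offset + 7) & ~7u;            // same bits as (Offset + 7) & -8
}
// roundUpTo8(20) == 24, roundUpTo8(24) == 24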
968 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32); in LowerVASTART()
969 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); in LowerVASTART()
1036 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); in LowerVACOPY()
1037 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); in LowerVACOPY()
1051 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); in LowerSETCC()
1069 // Comparisons of short integers should use sign-extend, not zero-extend, in LowerSETCC()
1071 // The LLVM default is to use zero-extend arbitrarily in these cases. in LowerSETCC()
1075 // A sign-extend of a truncate of a sign-extend is free. in LowerSETCC()
1079 EVT OrigTy = cast<VTSDNode>(Op.getOperand(1))->getVT(); in LowerSETCC()
1082 // The type that was sign-extended to get the AssertSext must be in LowerSETCC()
1088 // We have sign-extended loads. in LowerSETCC()
1096 bool IsNegative = C && C->getAPIntValue().isNegative(); in LowerSETCC()
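// A small runnable illustration of why the extension choice above matters:
// with a negative 16-bit value, zero- and sign-extension to 32 bits order the
// operands differently, and since Hexagon has sign-extending loads the
// sign-extended form is the one that comes for free.
#include <cassert>
#include <cstdint>
void extensionDemo() {
  int16_t A = -1, B = 1;
  assert(int32_t(A) < int32_t(B));                       // sign-extended: -1 < 1
  assert(uint32_t(uint16_t(A)) > uint32_t(uint16_t(B))); // zero-extended: 0xFFFF > 1
}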
1135 if (auto *CV = dyn_cast<ConstantVector>(CPN->getConstVal())) { in LowerConstantPool()
1136 if (cast<VectorType>(CV->getType())->getElementType()->isIntegerTy(1)) { in LowerConstantPool()
1137 IRBuilder<> IRB(CV->getContext()); in LowerConstantPool()
1139 unsigned VecLen = CV->getNumOperands(); in LowerConstantPool()
1143 NewConst.push_back(IRB.getInt8(CV->getOperand(i)->isZeroValue())); in LowerConstantPool()
1149 Align Alignment = CPN->getAlign(); in LowerConstantPool()
1155 if (CPN->isMachineConstantPoolEntry()) in LowerConstantPool()
1156 T = DAG.getTargetConstantPool(CPN->getMachineCPVal(), ValTy, Alignment, in LowerConstantPool()
1161 T = DAG.getTargetConstantPool(CPN->getConstVal(), ValTy, Alignment, Offset, in LowerConstantPool()
1164 assert(cast<ConstantPoolSDNode>(T)->getTargetFlags() == TF && in LowerConstantPool()
1175 int Idx = cast<JumpTableSDNode>(Op)->getIndex(); in LowerJumpTable()
1206 // Return LR, which contains the return address. Mark it an implicit live-in. in LowerRETURNADDR()
1222 while (Depth--) in LowerFRAMEADDR()
1239 auto *GV = GAN->getGlobal(); in LowerGLOBALADDRESS()
1240 int64_t Offset = GAN->getOffset(); in LowerGLOBALADDRESS()
1247 const GlobalObject *GO = GV->getAliaseeObject(); in LowerGLOBALADDRESS()
1270 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); in LowerBlockAddress()
1301 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, in GetDynamicTLSAddr()
1302 GA->getValueType(0), in GetDynamicTLSAddr()
1303 GA->getOffset(), in GetDynamicTLSAddr()
1334 int64_t Offset = GA->getOffset(); in LowerToTLSInitialExecModel()
1345 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, in LowerToTLSInitialExecModel()
1376 int64_t Offset = GA->getOffset(); in LowerToTLSLocalExecModel()
1382 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset, in LowerToTLSLocalExecModel()
1398 int64_t Offset = GA->getOffset(); in LowerToTLSGeneralDynamicModel()
1402 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT, Offset, in LowerToTLSGeneralDynamicModel()
1435 switch (HTM.getTLSModel(GA->getGlobal())) { in LowerGlobalTLSAddress()
1447 //===----------------------------------------------------------------------===//
1449 //===----------------------------------------------------------------------===//
1503 // - indexed loads and stores (pre-/post-incremented), in HexagonTargetLowering()
1504 // - ANY_EXTEND_VECTOR_INREG, ATOMIC_CMP_SWAP_WITH_SUCCESS, CONCAT_VECTORS, in HexagonTargetLowering()
1609 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS, in HexagonTargetLowering()
1654 ISD::FREM, ISD::FNEG, ISD::FABS, ISD::FSQRT, ISD::FSIN, in HexagonTargetLowering()
1740 // in case it is a compile-time constant. This is a usability feature to in HexagonTargetLowering()
1748 // Custom-lower load/stores of boolean vectors. in HexagonTargetLowering()
1774 // Custom-lower bitcasts from i8 to v8i1. in HexagonTargetLowering()
1806 // Special handling for half-precision floating point conversions. in HexagonTargetLowering()
1826 // Subtarget-specific operation actions. in HexagonTargetLowering()
1878 // Prefix is: nothing for "slow-math", in HexagonTargetLowering()
1879 // "fast2_" for V5+ fast-math double-precision in HexagonTargetLowering()
1880 // (actually, keep fast-math and fast-math2 separate for now) in HexagonTargetLowering()
1970 unsigned Addr = CA->getZExtValue(); in validateConstPtrAlignment()
1985 return DI->getKind() == DK_MisalignedTrap; in validateConstPtrAlignment()
1999 DAG.getContext()->diagnose(DiagnosticInfoMisalignedTrap(O.str())); in validateConstPtrAlignment()
2008 assert(!LS->isIndexed() && "Not expecting indexed ops on constant address"); in replaceMemWithUndef()
2010 SDValue Chain = LS->getChain(); in replaceMemWithUndef()
2012 if (LS->getOpcode() == ISD::LOAD) in replaceMemWithUndef()
2017 // Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
2020 unsigned ID = cast<IntrinsicInst>(Inst)->getIntrinsicID(); in isBrevLdIntrinsic()
2029 // Bit-reverse Load Intrinsic: Crawl up and figure out the object from previous
2035 V = cast<Operator>(V)->getOperand(0); in getBrevLdObject()
2037 V = cast<Instruction>(V)->getOperand(0); in getBrevLdObject()
2041 // Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
2045 const BasicBlock *Parent = PN->getParent(); in returnEdge()
2046 int Idx = -1; in returnEdge()
2047 for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i) { in returnEdge()
2048 BasicBlock *Blk = PN->getIncomingBlock(i); in returnEdge()
2051 Value *BackEdgeVal = PN->getIncomingValue(i); in returnEdge()
2068 return PN->getIncomingValue(Idx); in returnEdge()
2071 // Bit-reverse Load Intrinsic: Figure out the underlying object the base
2072 // pointer points to, for the bit-reverse load intrinsic. Setting this to
2109 auto &Cont = I.getCalledFunction()->getParent()->getContext(); in getTgtMemIntrinsic()
2113 Type *ElTy = I.getCalledFunction()->getReturnType()->getStructElementType(0); in getTgtMemIntrinsic()
2136 const Module &M = *I.getParent()->getParent()->getParent(); in getTgtMemIntrinsic()
2138 Type *VecTy = I.getArgOperand(1)->getType(); in getTgtMemIntrinsic()
2190 // Non-HVX bool vectors are relatively cheap. in isExtractSubvectorCheap()
2221 // Widen non-power-of-2 vectors. Such types cannot be split right now, in getPreferredVectorAction()
2245 return { Addr.getOperand(0), CN->getSExtValue() }; in getBaseAndOffset()
2256 ArrayRef<int> AM = SVN->getMask(); in LowerVECTOR_SHUFFLE()
2269 // If the inputs are not the same as the output, bail. This is not an in LowerVECTOR_SHUFFLE()
2275 // Normalize the mask so that the first non-negative index comes from in LowerVECTOR_SHUFFLE()
2278 unsigned F = llvm::find_if(AM, [](int M) { return M >= 0; }) - AM.data(); in LowerVECTOR_SHUFFLE()
2292 ByteMask.push_back(-1); in LowerVECTOR_SHUFFLE()
2300 // All non-undef (non-negative) indexes are well within [0..127], so they in LowerVECTOR_SHUFFLE()
2301 // fit in a single byte. Build two 64-bit words: in LowerVECTOR_SHUFFLE()
2302 // - MaskIdx where each byte is the corresponding index (for non-negative in LowerVECTOR_SHUFFLE()
2304 // - MaskUnd that has 0xFF for each negative index. in LowerVECTOR_SHUFFLE()
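// Standalone sketch of the two words described above (assumes a byte mask of
// at most eight entries, so each word is a single 64-bit value): MaskIdx keeps
// the index in each byte position, MaskUnd marks undef (negative) positions
// with 0xFF.
#include <cstdint>
#include <vector>
void buildMaskWords(const std::vector<int> &ByteMask, uint64_t &MaskIdx,
                    uint64_t &MaskUnd) {
  MaskIdx = MaskUnd = 0;
  for (unsigned I = 0; I != ByteMask.size(); ++I) {
    MaskIdx |= (uint64_t(ByteMask[I]) & 0xFF) << (I * 8);
    if (ByteMask[I] < 0)                 // undef index
      MaskUnd |= uint64_t(0xFF) << (I * 8);
  }
}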
2381 if (SDValue S = cast<BuildVectorSDNode>(Op)->getSplatValue()) in getSplatValue()
2390 // Create a Hexagon-specific node for shifting a vector by an integer.
2432 // No instructions for shifts by non-scalars. in LowerVECTOR_SHIFT()
2506 const ConstantInt *CI = CN->getConstantIntValue(); in getBuildVectorConstInts()
2507 Consts[i] = ConstantInt::get(IntTy, CI->getValue().getSExtValue()); in getBuildVectorConstInts()
2509 const ConstantFP *CF = CN->getConstantFPValue(); in getBuildVectorConstInts()
2510 APInt A = CF->getValueAPF().bitcastToAPInt(); in getBuildVectorConstInts()
2537 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) in buildVector32()
2545 uint32_t V = (Consts[0]->getZExtValue() & 0xFFFF) | in buildVector32()
2546 Consts[1]->getZExtValue() << 16; in buildVector32()
2564 int32_t V = (Consts[0]->getZExtValue() & 0xFF) | in buildVector32()
2565 (Consts[1]->getZExtValue() & 0xFF) << 8 | in buildVector32()
2566 (Consts[2]->getZExtValue() & 0xFF) << 16 | in buildVector32()
2567 Consts[3]->getZExtValue() << 24; in buildVector32()
2628 llvm::all_of(Consts, [](ConstantInt *CI) { return CI->isZero(); })) in buildVector64()
2653 uint64_t Mask = (1ull << W) - 1; in buildVector64()
2655 Val = (Val << W) | (Consts[Num-1-i]->getZExtValue() & Mask); in buildVector64()
2660 // Build two 32-bit vectors and concatenate. in buildVector64()
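// Standalone model of the packing loop a few lines above (assumes element
// width W < 64, i.e. more than one element): constants are folded into one
// 64-bit immediate from last to first, so element 0 ends up in the least
// significant bits, matching Hexagon's little-endian lane order.
#include <cstdint>
#include <vector>
uint64_t packConstants(const std::vector<uint64_t> &Elems, unsigned W) {
  uint64_t Mask = (1ull << W) - 1;
  uint64_t Val = 0;
  for (unsigned I = Elems.size(); I != 0; --I)
    Val = (Val << W) | (Elems[I - 1] & Mask);
  return Val;
}
// packConstants({0x11, 0x22, 0x33, 0x44}, 16) == 0x0044003300220011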
2695 unsigned Off = IdxN->getZExtValue() * ElemWidth; in extractVector()
2738 // Extracting the lowest bit is a no-op, but it changes the type, in extractVectorPred()
2804 unsigned W = C->getZExtValue() * ValWidth; in insertVector()
2958 // Check if this is a special case or all-0 or all-1. in LowerBUILD_VECTOR()
2966 uint32_t C = CN->getZExtValue(); in LowerBUILD_VECTOR()
2976 // shifted by the index of the element into a general-purpose register, in LowerBUILD_VECTOR()
3019 // 32-bit integer, so keep them as i32 to use 32-bit inserts. in LowerCONCAT_VECTORS()
3023 for (SDValue P : Op.getNode()->op_values()) { in LowerCONCAT_VECTORS()
3092 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) in allowTruncateForTailCall()
3095 // FIXME: in principle up to 64-bit could be made safe, but it would be very in allowTruncateForTailCall()
3097 // liable to disallow tail calls involving i64 -> iN truncation in many cases. in allowTruncateForTailCall()
3098 return Ty1->getPrimitiveSizeInBits() <= 32; in allowTruncateForTailCall()
3106 MVT MemTy = LN->getMemoryVT().getSimpleVT(); in LowerLoad()
3107 ISD::LoadExtType ET = LN->getExtensionType(); in LowerLoad()
3112 LN->getAddressingMode(), ISD::ZEXTLOAD, MVT::i32, dl, LN->getChain(), in LowerLoad()
3113 LN->getBasePtr(), LN->getOffset(), LN->getPointerInfo(), in LowerLoad()
3114 /*MemoryVT*/ MVT::i8, LN->getAlign(), LN->getMemOperand()->getFlags(), in LowerLoad()
3115 LN->getAAInfo(), LN->getRanges()); in LowerLoad()
3119 Align ClaimAlign = LN->getAlign(); in LowerLoad()
3120 if (!validateConstPtrAlignment(LN->getBasePtr(), ClaimAlign, dl, DAG)) in LowerLoad()
3133 SDValue Ch = cast<LoadSDNode>(LU.getNode())->getChain(); in LowerLoad()
3143 SDValue Val = SN->getValue(); in LowerStore()
3149 SDValue NS = DAG.getTruncStore(SN->getChain(), dl, TR, SN->getBasePtr(), in LowerStore()
3150 MVT::i8, SN->getMemOperand()); in LowerStore()
3151 if (SN->isIndexed()) { in LowerStore()
3152 NS = DAG.getIndexedStore(NS, dl, SN->getBasePtr(), SN->getOffset(), in LowerStore()
3153 SN->getAddressingMode()); in LowerStore()
3158 Align ClaimAlign = SN->getAlign(); in LowerStore()
3159 if (!validateConstPtrAlignment(SN->getBasePtr(), ClaimAlign, dl, DAG)) in LowerStore()
3162 MVT StoreTy = SN->getMemoryVT().getSimpleVT(); in LowerStore()
3175 unsigned HaveAlign = LN->getAlign().value(); in LowerUnalignedLoad()
3184 // smaller legal loads, do the default (target-independent) expansion. in LowerUnalignedLoad()
3187 if (!LN->isUnindexed()) in LowerUnalignedLoad()
3191 if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(), in LowerUnalignedLoad()
3192 *LN->getMemOperand())) in LowerUnalignedLoad()
3201 allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand()); in LowerUnalignedLoad()
3216 SDValue Base = LN->getBasePtr(); in LowerUnalignedLoad()
3217 SDValue Chain = LN->getChain(); in LowerUnalignedLoad()
3226 BO.second -= BO.second % LoadLen; in LowerUnalignedLoad()
3238 if (MachineMemOperand *MMO = LN->getMemOperand()) { in LowerUnalignedLoad()
3241 MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen), in LowerUnalignedLoad()
3242 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(), in LowerUnalignedLoad()
3243 MMO->getSuccessOrdering(), MMO->getFailureOrdering()); in LowerUnalignedLoad()
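// Standalone little-endian model of the strategy above: round the address down
// to the load's natural alignment, read the two aligned words that cover the
// unaligned window, and shift the pair right by the misalignment (the real
// lowering does roughly this with valign; like it, the model touches the full
// 2*LoadLen aligned window, which is why the memory operand above is widened).
#include <cstdint>
#include <cstring>
uint32_t unalignedLoad32(const uint8_t *P) {
  unsigned Shift = unsigned(reinterpret_cast<uintptr_t>(P) % 4);
  const uint8_t *Aligned = P - Shift;    // base rounded down, as with BO.second
  uint32_t Lo;
  std::memcpy(&Lo, Aligned, 4);          // first aligned word
  if (Shift == 0)
    return Lo;                           // already aligned: one load suffices
  uint32_t Hi;
  std::memcpy(&Hi, Aligned + 4, 4);      // second aligned word
  uint64_t Pair = (uint64_t(Hi) << 32) | Lo;
  return uint32_t(Pair >> (Shift * 8));  // select the unaligned 4-byte window
}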
3265 SDVTList VTs = Op.getNode()->getVTList(); in LowerUAddSubO()
3271 uint64_t VY = CY->getZExtValue(); in LowerUAddSubO()
3273 // X +/- 1 in LowerUAddSubO()
3286 DAG.getConstant(-1, dl, ty(Op)), ISD::SETEQ); in LowerUAddSubO()
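// Standalone statement of the special case above: for unsigned X, X + 1
// overflows exactly when X is all-ones (i.e. compares equal to -1), and X - 1
// borrows exactly when X is zero, so the overflow bit is a single compare.
#include <cstdint>
struct AddSubO { uint32_t Result; bool Overflow; };
AddSubO uaddo1(uint32_t X) { return {X + 1u, X == UINT32_MAX}; }
AddSubO usubo1(uint32_t X) { return {X - 1u, X == 0u}; }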
3301 return DAG.getNode(HexagonISD::ADDC, dl, Op.getNode()->getVTList(), in LowerUAddSubOCarry()
3305 SDValue SubC = DAG.getNode(HexagonISD::SUBC, dl, Op.getNode()->getVTList(), in LowerUAddSubOCarry()
3323 FuncInfo->setHasEHReturn(); in LowerEH_RETURN()
3356 Op.getNode()->dumpr(&DAG); in LowerOperation()
3358 errs() << "Error: check for a non-legal type in this operation\n"; in LowerOperation()
3419 unsigned Opc = N->getOpcode(); in LowerOperationWrapper()
3427 // We are only custom-lowering stores to verify the alignment of the in LowerOperationWrapper()
3428 // address if it is a compile-time constant. Since a store can be in LowerOperationWrapper()
3429 // modified during type-legalization (the value being stored may need in LowerOperationWrapper()
3450 switch (N->getOpcode()) { in ReplaceNodeResults()
3457 if (N->getValueType(0) == MVT::i8) { in ReplaceNodeResults()
3458 if (N->getOperand(0).getValueType() == MVT::v8i1) { in ReplaceNodeResults()
3460 N->getOperand(0), DAG); in ReplaceNodeResults()
3484 // fold (truncate (build pair x, y)) -> (truncate x) or x in PerformDAGCombine()
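// Standalone arithmetic check of the fold above: a build_pair is a hi:lo
// concatenation (operand 0 is the low word), so truncating it to 32 bits or
// fewer only ever depends on the low word x and the high word can be dropped.
#include <cstdint>
uint32_t truncBuildPair(uint32_t X, uint32_t Y) {
  uint64_t Pair = (uint64_t(Y) << 32) | X;   // build_pair x (low), y (high)
  return uint32_t(Pair);                     // always equals X
}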
3504 return DCI.DAG.getConstant(-1, dl, ty(Op)); in PerformDAGCombine()
3513 // (vselect (xor x, ptrue), v0, v1) -> (vselect x, v1, v0) in PerformDAGCombine()
3515 if (Cond->getOpcode() == ISD::XOR) { in PerformDAGCombine()
3517 if (C1->getOpcode() == HexagonISD::PTRUE) { in PerformDAGCombine()
3525 // fold (truncate (build pair x, y)) -> (truncate x) or x in PerformDAGCombine()
3537 // fold (or (shl xx, s), (zext y)) -> (COMBINE (shl xx, s-32), y) in PerformDAGCombine()
3552 if (Amt && Amt->getZExtValue() >= 32 && ty(Z).getSizeInBits() <= 32) { in PerformDAGCombine()
3553 unsigned A = Amt->getZExtValue(); in PerformDAGCombine()
3556 DCI.DAG.getConstant(A - 32, dl, MVT::i32)); in PerformDAGCombine()
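// Standalone arithmetic check of the fold above (assumes 32 <= s < 64 and a
// 32-bit y, as tested in the code): with s >= 32 the low word of (xx << s) is
// zero, so or-ing in zext(y) just fills the low word, and the result is the
// register pair "high = xx << (s - 32), low = y" that COMBINE builds.
#include <cstdint>
uint64_t orShlZext(uint64_t XX, unsigned S, uint32_t Y) {
  return (XX << S) | uint64_t(Y);
}
uint64_t combineHighLow(uint64_t XX, unsigned S, uint32_t Y) {
  uint32_t High = uint32_t(XX << (S - 32));
  return (uint64_t(High) << 32) | Y;
}
// For 32 <= S < 64, orShlZext and combineHighLow agree for all XX and Y.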
3575 int Idx = cast<JumpTableSDNode>(Table)->getIndex(); in getPICJumpTableRelocBase()
3581 //===----------------------------------------------------------------------===//
3583 //===----------------------------------------------------------------------===//
3609 case 'r': // R0-R31 in getRegForInlineAsmConstraint()
3624 case 'a': // M0-M1 in getRegForInlineAsmConstraint()
3628 case 'q': // q0-q3 in getRegForInlineAsmConstraint()
3637 case 'v': // V0-V31 in getRegForInlineAsmConstraint()
3659 /// isFPImmLegal - Returns true if the target can instruction select the
3667 /// isLegalAddressingMode - Return true if the addressing mode represented by
3672 if (Ty->isSized()) { in isLegalAddressingMode()
3695 Scale = -Scale; in isLegalAddressingMode()
3712 /// isLegalICmpImmediate - Return true if the specified immediate is legal
3717 return Imm >= -512 && Imm <= 511; in isLegalICmpImmediate()
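// Standalone restatement of the bound above: the accepted range [-512, 511]
// is exactly the set of signed 10-bit immediates.
bool fitsSigned10Bit(long long Imm) {
  return Imm >= -(1LL << 9) && Imm <= (1LL << 9) - 1;
}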
3720 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3784 /// determined using generic target-independent logic.
3842 std::pair<SDValue,int> BO = getBaseAndOffset(L->getBasePtr()); in shouldReduceLoadWidth()
3843 // Small-data object, do not shrink. in shouldReduceLoadWidth()
3848 const auto *GO = dyn_cast_or_null<const GlobalObject>(GA->getGlobal()); in shouldReduceLoadWidth()
3849 return !GO || !HTM.getObjFileLowering()->isGlobalInSmallSection(GO, HTM); in shouldReduceLoadWidth()
3863 Module *M = BB->getParent()->getParent(); in emitLoadLinked()
3864 unsigned SZ = ValueTy->getPrimitiveSizeInBits(); in emitLoadLinked()
3865 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported"); in emitLoadLinked()
3875 /// Perform a store-conditional operation to Addr. Return the status of the
3876 /// store. This should be 0 if the store succeeded, non-zero otherwise.
3881 Module *M = BB->getParent()->getParent(); in emitStoreConditional()
3882 Type *Ty = Val->getType(); in emitStoreConditional()
3883 unsigned SZ = Ty->getPrimitiveSizeInBits(); in emitStoreConditional()
3886 assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported"); in emitStoreConditional()
3895 Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext())); in emitStoreConditional()
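// Standalone model (plain C++, not Hexagon code) of the retry structure these
// two hooks get stitched into by atomic expansion: load-locked the old value,
// compute the update, attempt the conditional store, and retry while the
// returned status is non-zero (0 meaning success, per the comment above).
#include <atomic>
static std::atomic<unsigned> Cell{0};
unsigned loadLockedModel() { return Cell.load(); }
unsigned storeConditionalModel(unsigned Expected, unsigned Desired) {
  // Models the "0 if the store succeeded" convention with a CAS.
  return Cell.compare_exchange_strong(Expected, Desired) ? 0u : 1u;
}
unsigned atomicAddModel(unsigned Inc) {
  unsigned Old, Status;
  do {
    Old = loadLockedModel();
    Status = storeConditionalModel(Old, Old + Inc);
  } while (Status != 0);
  return Old;
}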
3902 return LI->getType()->getPrimitiveSizeInBits() > 64 in shouldExpandAtomicLoadInIR()
3910 return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64 in shouldExpandAtomicStoreInIR()