1 //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
93 I->addAnnotationMetadata("auto-init"); in initializeAlloca()
96 /// getBuiltinLibFunction - Given a builtin id for a function like
106 // TODO: This list should be expanded or refactored after all GCC-compatible in getBuiltinLibFunction()
134 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit in getBuiltinLibFunction()
136 // if it is 64-bit 'long double' mode. in getBuiltinLibFunction()
146 if (FD->hasAttr<AsmLabelAttr>()) in getBuiltinLibFunction()
150 // PPC, after backend supports IEEE 128-bit style libcalls. in getBuiltinLibFunction()
165 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); in getBuiltinLibFunction()
172 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, in EmitToInt() argument
174 V = CGF.EmitToMemory(V, T); in EmitToInt()
176 if (V->getType()->isPointerTy()) in EmitToInt()
177 return CGF.Builder.CreatePtrToInt(V, IntType); in EmitToInt()
179 assert(V->getType() == IntType); in EmitToInt()
180 return V; in EmitToInt()
183 static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, in EmitFromInt() argument
185 V = CGF.EmitFromMemory(V, T); in EmitFromInt()
187 if (ResultType->isPointerTy()) in EmitFromInt()
188 return CGF.Builder.CreateIntToPtr(V, ResultType); in EmitFromInt()
190 assert(V->getType() == ResultType); in EmitFromInt()
191 return V; in EmitFromInt()
196 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0)); in CheckAtomicAlignment()
197 unsigned Bytes = Ptr.getElementType()->isPointerTy() in CheckAtomicAlignment()
199 : Ptr.getElementType()->getScalarSizeInBits() / 8; in CheckAtomicAlignment()
203 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned); in CheckAtomicAlignment()
204 // Force address to be at least naturally-aligned. in CheckAtomicAlignment()
216 QualType T = E->getType(); in MakeBinaryAtomicValue()
217 assert(E->getArg(0)->getType()->isPointerType()); in MakeBinaryAtomicValue()
219 E->getArg(0)->getType()->getPointeeType())); in MakeBinaryAtomicValue()
220 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); in MakeBinaryAtomicValue()
227 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1)); in MakeBinaryAtomicValue()
228 llvm::Type *ValueType = Val->getType(); in MakeBinaryAtomicValue()
237 Value *Val = CGF.EmitScalarExpr(E->getArg(0)); in EmitNontemporalStore()
238 Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1)); in EmitNontemporalStore()
240 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType()); in EmitNontemporalStore()
241 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType()); in EmitNontemporalStore()
248 Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0)); in EmitNontemporalLoad()
250 LValue LV = CGF.MakeAddrLValue(Addr, E->getType()); in EmitNontemporalLoad()
252 return CGF.EmitLoadOfScalar(LV, E->getExprLoc()); in EmitNontemporalLoad()
269 QualType T = E->getType(); in EmitBinaryAtomicPost()
270 assert(E->getArg(0)->getType()->isPointerType()); in EmitBinaryAtomicPost()
272 E->getArg(0)->getType()->getPointeeType())); in EmitBinaryAtomicPost()
273 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); in EmitBinaryAtomicPost()
280 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1)); in EmitBinaryAtomicPost()
281 llvm::Type *ValueType = Val->getType(); in EmitBinaryAtomicPost()
299 /// arg0 - address to operate on
300 /// arg1 - value to compare with
301 /// arg2 - new value
311 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType(); in MakeAtomicCmpXchgValue()
317 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1)); in MakeAtomicCmpXchgValue()
318 llvm::Type *ValueType = Cmp->getType(); in MakeAtomicCmpXchgValue()
320 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType); in MakeAtomicCmpXchgValue()
328 CGF.ConvertType(E->getType())); in MakeAtomicCmpXchgValue()
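A minimal usage sketch (illustrative, not part of CGBuiltin.cpp) of the two __sync compare-and-swap forms this helper covers; ReturnBool selects between them:

    int x = 0, expected = 0, desired = 1;
    int old = __sync_val_compare_and_swap(&x, expected, desired);   // yields the prior value of x
    bool ok = __sync_bool_compare_and_swap(&x, expected, desired);  // yields whether the swap happened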
351 assert(E->getArg(0)->getType()->isPointerType()); in EmitAtomicCmpXchgForMSIntrin()
353 E->getType(), E->getArg(0)->getType()->getPointeeType())); in EmitAtomicCmpXchgForMSIntrin()
354 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), in EmitAtomicCmpXchgForMSIntrin()
355 E->getArg(1)->getType())); in EmitAtomicCmpXchgForMSIntrin()
356 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), in EmitAtomicCmpXchgForMSIntrin()
357 E->getArg(2)->getType())); in EmitAtomicCmpXchgForMSIntrin()
361 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2)); in EmitAtomicCmpXchgForMSIntrin()
362 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); in EmitAtomicCmpXchgForMSIntrin()
375 Result->setVolatile(true); in EmitAtomicCmpXchgForMSIntrin()
379 // 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
388 // Note that Destination is assumed to be at least 16-byte aligned, despite
394 assert(E->getNumArgs() == 4); in EmitAtomicCmpXchg128ForMSIntrin()
395 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); in EmitAtomicCmpXchg128ForMSIntrin()
396 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1)); in EmitAtomicCmpXchg128ForMSIntrin()
397 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2)); in EmitAtomicCmpXchg128ForMSIntrin()
398 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3)); in EmitAtomicCmpXchg128ForMSIntrin()
400 assert(DestPtr->getType()->isPointerTy()); in EmitAtomicCmpXchg128ForMSIntrin()
401 assert(!ExchangeHigh->getType()->isPointerTy()); in EmitAtomicCmpXchg128ForMSIntrin()
402 assert(!ExchangeLow->getType()->isPointerTy()); in EmitAtomicCmpXchg128ForMSIntrin()
433 CXI->setVolatile(true); in EmitAtomicCmpXchg128ForMSIntrin()
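For reference, a hedged sketch of how the Microsoft intrinsic lowered here is commonly declared and called; the operand order mirrors the four arguments read above, and the comparand slot also serves as the output:

    __int64 volatile Dest[2] = {0, 0};   // assumed 16-byte aligned, per the note above
    __int64 Comparand[2] = {0, 0};       // {low, high}; overwritten with the original contents of Dest
    unsigned char ok = _InterlockedCompareExchange128(Dest, /*ExchangeHigh=*/1, /*ExchangeLow=*/2, Comparand);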
446 assert(E->getArg(0)->getType()->isPointerType()); in EmitAtomicIncrementValue()
448 auto *IntTy = CGF.ConvertType(E->getType()); in EmitAtomicIncrementValue()
458 assert(E->getArg(0)->getType()->isPointerType()); in EmitAtomicDecrementValue()
460 auto *IntTy = CGF.ConvertType(E->getType()); in EmitAtomicDecrementValue()
469 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); in EmitISOVolatileLoad()
470 QualType ElTy = E->getArg(0)->getType()->getPointeeType(); in EmitISOVolatileLoad()
475 Load->setVolatile(true); in EmitISOVolatileLoad()
481 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); in EmitISOVolatileStore()
482 Value *Value = CGF.EmitScalarExpr(E->getArg(1)); in EmitISOVolatileStore()
483 QualType ElTy = E->getArg(0)->getType()->getPointeeType(); in EmitISOVolatileStore()
487 Store->setVolatile(true); in EmitISOVolatileStore()
493 // floating-point intrinsic.
497 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitUnaryMaybeConstrainedFPBuiltin()
501 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); in emitUnaryMaybeConstrainedFPBuiltin()
504 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitUnaryMaybeConstrainedFPBuiltin()
510 // Depending on mode, this may be a constrained floating-point intrinsic.
514 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitBinaryMaybeConstrainedFPBuiltin()
515 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitBinaryMaybeConstrainedFPBuiltin()
519 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); in emitBinaryMaybeConstrainedFPBuiltin()
522 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitBinaryMaybeConstrainedFPBuiltin()
531 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitBinaryExpMaybeConstrainedFPBuiltin()
532 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitBinaryExpMaybeConstrainedFPBuiltin()
537 {Src0->getType(), Src1->getType()}); in emitBinaryExpMaybeConstrainedFPBuiltin()
542 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()}); in emitBinaryExpMaybeConstrainedFPBuiltin()
547 // Depending on mode, this may be a constrained floating-point intrinsic.
551 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitTernaryMaybeConstrainedFPBuiltin()
552 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitTernaryMaybeConstrainedFPBuiltin()
553 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); in emitTernaryMaybeConstrainedFPBuiltin()
557 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); in emitTernaryMaybeConstrainedFPBuiltin()
560 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitTernaryMaybeConstrainedFPBuiltin()
566 // Depending on mode, this may be a constrained floating-point intrinsic.
591 static_assert(N, "expect non-empty argument"); in emitBuiltinWithOneOverloadedType()
594 Args.push_back(CGF.EmitScalarExpr(E->getArg(I))); in emitBuiltinWithOneOverloadedType()
595 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Args[0]->getType()); in emitBuiltinWithOneOverloadedType()
603 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitFPIntBuiltin()
604 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitFPIntBuiltin()
606 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); in emitFPIntBuiltin()
615 llvm::Type *ResultType = CGF.ConvertType(E->getType()); in emitMaybeConstrainedFPToIntRoundBuiltin()
616 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitMaybeConstrainedFPToIntRoundBuiltin()
621 {ResultType, Src0->getType()}); in emitMaybeConstrainedFPToIntRoundBuiltin()
625 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()}); in emitMaybeConstrainedFPToIntRoundBuiltin()
632 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); in emitFrexpBuiltin()
633 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); in emitFrexpBuiltin()
635 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType(); in emitFrexpBuiltin()
638 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy}); in emitFrexpBuiltin()
648 /// EmitFAbs - Emit a call to @llvm.fabs().
649 static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { in EmitFAbs() argument
650 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType()); in EmitFAbs()
651 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V); in EmitFAbs()
652 Call->setDoesNotAccessMemory(); in EmitFAbs()
658 static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { in EmitSignBit() argument
661 llvm::Type *Ty = V->getType(); in EmitSignBit()
662 int Width = Ty->getPrimitiveSizeInBits(); in EmitSignBit()
664 V = CGF.Builder.CreateBitCast(V, IntTy); in EmitSignBit()
665 if (Ty->isPPC_FP128Ty()) { in EmitSignBit()
666 // We want the sign bit of the higher-order double. The bitcast we just in EmitSignBit()
667 // did works as if the double-double was stored to memory and then in EmitSignBit()
668 // read as an i128. The "store" will put the higher-order double in the in EmitSignBit()
669 // lower address in both little- and big-Endian modes, but the "load" in EmitSignBit()
671 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian in EmitSignBit()
676 V = CGF.Builder.CreateLShr(V, ShiftCst); in EmitSignBit()
678 // We are truncating value in order to extract the higher-order in EmitSignBit()
681 V = CGF.Builder.CreateTrunc(V, IntTy); in EmitSignBit()
684 return CGF.Builder.CreateICmpSLT(V, Zero); in EmitSignBit()
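A small sketch of the generic lowering above for an ordinary IEEE double; the ppc_fp128 path additionally shifts or truncates so the comparison sees the higher-order double:

    #include <cstdint>
    #include <cstring>
    bool sign_bit_set(double d) {              // hypothetical helper mirroring EmitSignBit
      std::int64_t bits;
      std::memcpy(&bits, &d, sizeof bits);     // the bitcast to a same-width integer
      return bits < 0;                         // CreateICmpSLT against zero
    }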
692 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot()); in emitLibraryCall()
695 if (unsigned BuiltinID = FD->getBuiltinID()) { in emitLibraryCall()
696 auto IsErrnoIntrinsic = [&]() -> unsigned { in emitLibraryCall()
735 assert(X->getType() == Y->getType() && in EmitOverflowIntrinsic()
739 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); in EmitOverflowIntrinsic()
750 Call->addRangeRetAttr(CR); in emitRangedBuiltin()
751 Call->addRetAttr(llvm::Attribute::AttrKind::NoUndef); in emitRangedBuiltin()
765 assert(Type->isIntegerType() && "Given type is not an integer."); in getIntegerWidthAndSignedness()
766 unsigned Width = Type->isBooleanType() ? 1 in getIntegerWidthAndSignedness()
767 : Type->isBitIntType() ? context.getIntWidth(Type) in getIntegerWidthAndSignedness()
769 bool Signed = Type->isSignedIntegerType(); in getIntegerWidthAndSignedness()
803 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}), in EmitVAStartEnd()
818 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); in getDefaultBuiltinObjectSizeResult()
827 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) in evaluateOrEmitBuiltinObjectSize()
839 if (RD->isImplicit()) in FindFlexibleArrayMemberFieldAndOffset()
842 for (const FieldDecl *FD : RD->fields()) { in FindFlexibleArrayMemberFieldAndOffset()
845 Ctx, FD, FD->getType(), StrictFlexArraysLevel, in FindFlexibleArrayMemberFieldAndOffset()
852 QualType Ty = FD->getType(); in FindFlexibleArrayMemberFieldAndOffset()
853 if (Ty->isRecordType()) { in FindFlexibleArrayMemberFieldAndOffset()
855 Ctx, Ty->getAsRecordDecl(), FAMDecl, Offset)) { in FindFlexibleArrayMemberFieldAndOffset()
862 if (!RD->isUnion()) in FindFlexibleArrayMemberFieldAndOffset()
872 for (const FieldDecl *FD : RD->fields()) { in CountCountedByAttrs()
873 if (FD->getType()->isCountAttributedType()) in CountCountedByAttrs()
876 QualType Ty = FD->getType(); in CountCountedByAttrs()
877 if (Ty->isRecordType()) in CountCountedByAttrs()
878 Num += CountCountedByAttrs(Ty->getAsRecordDecl()); in CountCountedByAttrs()
899 // __builtin_dynamic_object_size(p->array, 1) == in emitFlexibleArrayMemberSize()
900 // p->count * sizeof(*p->array) in emitFlexibleArrayMemberSize()
904 // __builtin_dynamic_object_size(&p->array[42], 1) == in emitFlexibleArrayMemberSize()
905 // (p->count - 42) * sizeof(*p->array) in emitFlexibleArrayMemberSize()
911 // offsetof(struct s, array) + p->count * sizeof(*p->array)) in emitFlexibleArrayMemberSize()
914 const Expr *Base = E->IgnoreParenImpCasts(); in emitFlexibleArrayMemberSize()
918 UO && UO->getOpcode() == UO_AddrOf) { in emitFlexibleArrayMemberSize()
919 Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts(); in emitFlexibleArrayMemberSize()
921 Base = ASE->getBase()->IgnoreParenImpCasts(); in emitFlexibleArrayMemberSize()
922 Idx = ASE->getIdx()->IgnoreParenImpCasts(); in emitFlexibleArrayMemberSize()
925 int64_t Val = IL->getValue().getSExtValue(); in emitFlexibleArrayMemberSize()
944 const ValueDecl *VD = ME->getMemberDecl(); in emitFlexibleArrayMemberSize()
945 OuterRD = VD->getDeclContext()->getOuterLexicalRecordContext(); in emitFlexibleArrayMemberSize()
951 QualType Ty = DRE->getDecl()->getType(); in emitFlexibleArrayMemberSize()
952 if (Ty->isPointerType()) in emitFlexibleArrayMemberSize()
953 Ty = Ty->getPointeeType(); in emitFlexibleArrayMemberSize()
954 OuterRD = Ty->getAsRecordDecl(); in emitFlexibleArrayMemberSize()
987 // We call FindFlexibleArrayMemberAndOffset even if FAMDecl is non-null to in emitFlexibleArrayMemberSize()
994 if (!FAMDecl || !FAMDecl->getType()->isCountAttributedType()) in emitFlexibleArrayMemberSize()
1023 bool IsSigned = CountedByFD->getType()->isSignedIntegerType(); in emitFlexibleArrayMemberSize()
1033 if (Idx->HasSideEffects(getContext())) in emitFlexibleArrayMemberSize()
1034 // We can't have side-effects. in emitFlexibleArrayMemberSize()
1037 bool IdxSigned = Idx->getType()->isSignedIntegerType(); in emitFlexibleArrayMemberSize()
1048 const ArrayType *ArrayTy = Ctx.getAsArrayType(FAMDecl->getType()); in emitFlexibleArrayMemberSize()
1049 CharUnits Size = Ctx.getTypeSizeInChars(ArrayTy->getElementType()); in emitFlexibleArrayMemberSize()
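An illustrative declaration (hypothetical struct) for the counted_by case this function handles, restating the formulas from the comment above:

    struct s {
      int count;
      char array[] __attribute__((counted_by(count)));
    };
    // __builtin_dynamic_object_size(p->array, 1)      == p->count * sizeof(*p->array)
    // __builtin_dynamic_object_size(&p->array[42], 1) == (p->count - 42) * sizeof(*p->array)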
1068 /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1070 /// - A call to the @llvm.objectsize intrinsic
1072 /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1081 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { in emitBuiltinObjectSize()
1082 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl()); in emitBuiltinObjectSize()
1083 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>(); in emitBuiltinObjectSize()
1085 areBOSTypesCompatible(PS->getType(), Type)) { in emitBuiltinObjectSize()
1089 const ImplicitParamDecl *D = Iter->second; in emitBuiltinObjectSize()
1093 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false, in emitBuiltinObjectSize()
1094 getContext().getSizeType(), E->getBeginLoc()); in emitBuiltinObjectSize()
1101 if (Value *V = emitFlexibleArrayMemberSize(E, Type, ResType)) in emitBuiltinObjectSize() local
1102 return V; in emitBuiltinObjectSize()
1106 // evaluate E for side-effects. In either case, we shouldn't lower to in emitBuiltinObjectSize()
1108 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) in emitBuiltinObjectSize()
1112 assert(Ptr->getType()->isPointerTy() && in emitBuiltinObjectSize()
1113 "Non-pointer passed to __builtin_object_size?"); in emitBuiltinObjectSize()
1116 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()}); in emitBuiltinObjectSize()
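A brief sketch (hypothetical function) of the pass_object_size case described in the doc comment above, where the size comes from the hidden argument rather than a call to @llvm.objectsize:

    void fill(void *buf __attribute__((pass_object_size(0))), char c) {
      // __builtin_object_size(buf, 0) reads the implicit size parameter the
      // caller passes alongside buf instead of emitting @llvm.objectsize.
      __builtin_memset(buf, c, __builtin_object_size(buf, 0));
    }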
1163 // X86-specific 64-bit variants. in decodeBitTestBuiltin()
1177 // ARM/AArch64-specific ordering variants. in decodeBitTestBuiltin()
1230 CGF.getContext().getTypeSize(E->getArg(1)->getType())); in EmitX86BitTestIntrinsic()
1258 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0)); in EmitBitTestIntrinsic()
1259 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1)); in EmitBitTestIntrinsic()
1273 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx"); in EmitBitTestIntrinsic()
1303 // Emit a plain load for the non-interlocked intrinsics. in EmitBitTestIntrinsic()
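A rough C equivalent (illustrative only) of the non-interlocked _bittest path shown above: the bit position is split into a byte index and a bit index, and the byte is read with a plain load:

    unsigned char bittest_sketch(const unsigned char *base, long pos) {  // hypothetical helper
      unsigned char byte = base[pos >> 3];     // bittest.byteidx
      return (byte >> (pos & 7)) & 1;          // test the selected bit within that byte
    }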
1334 Value *Addr = CGF.EmitScalarExpr(E->getArg(0)); in emitPPCLoadReserveIntrinsic()
1376 CI->addParamAttr( in emitPPCLoadReserveIntrinsic()
1390 /// architecture except 32-bit x86, the frame address is passed. On x86, extra
1425 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy); in EmitMSVCRTSetJmp()
1428 CB->setAttributes(ReturnsTwiceAttr); in EmitMSVCRTSetJmp()
1816 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0))); in EmitMSVCBuiltinExpr()
1817 Value *ArgValue = EmitScalarExpr(E->getArg(1)); in EmitMSVCBuiltinExpr()
1819 llvm::Type *ArgType = ArgValue->getType(); in EmitMSVCBuiltinExpr()
1821 llvm::Type *ResultType = ConvertType(E->getType()); in EmitMSVCBuiltinExpr()
1828 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn); in EmitMSVCBuiltinExpr()
1834 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn); in EmitMSVCBuiltinExpr()
1836 Result->addIncoming(ResZero, Begin); in EmitMSVCBuiltinExpr()
1846 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth(); in EmitMSVCBuiltinExpr()
1847 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1); in EmitMSVCBuiltinExpr()
1856 Result->addIncoming(ResOne, NotZero); in EmitMSVCBuiltinExpr()
1954 // https://msdn.microsoft.com/en-us/library/dn774154.aspx in EmitMSVCBuiltinExpr()
1980 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0))); in EmitMSVCBuiltinExpr()
1981 CI->setAttributes(NoReturnAttr); in EmitMSVCBuiltinExpr()
2011 ArgValue, llvm::Constant::getNullValue(ArgValue->getType())); in EmitCheckedArgForBuiltin()
2014 {EmitCheckSourceLocation(E->getExprLoc()), in EmitCheckedArgForBuiltin()
2028 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0)); in EmitOverflowCheckedAbs()
2032 if (!VCI->isMinSignedValue()) in EmitOverflowCheckedAbs()
2038 Constant *Zero = Constant::getNullValue(ArgValue->getType()); in EmitOverflowCheckedAbs()
2045 // TODO: support -ftrapv-handler. in EmitOverflowCheckedAbs()
2049 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()), in EmitOverflowCheckedAbs()
2050 CGF.EmitCheckTypeDescriptor(E->getType())}, in EmitOverflowCheckedAbs()
2109 // attached to the function when compiling with -Oz. in generateBuiltinOSLogHelperFunction()
2115 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility); in generateBuiltinOSLogHelperFunction()
2118 Fn->setDoesNotThrow(); in generateBuiltinOSLogHelperFunction()
2120 // Attach 'noinline' at -Oz. in generateBuiltinOSLogHelperFunction()
2122 Fn->addFnAttr(llvm::Attribute::NoInline); in generateBuiltinOSLogHelperFunction()
2194 // expression is passed, push a lifetime-extended cleanup to extend its in emitBuiltinOSLogFormat()
2197 E = E->IgnoreParenCasts(); in emitBuiltinOSLogFormat()
2202 // created including arguments of non-ARC types (e.g., C++ in emitBuiltinOSLogFormat()
2209 if (TheExpr->getType()->isObjCRetainableType() && in emitBuiltinOSLogFormat()
2211 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar && in emitBuiltinOSLogFormat()
2215 QualType Ty = TheExpr->getType(); in emitBuiltinOSLogFormat()
2235 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); in emitBuiltinOSLogFormat()
2240 // If ArgVal has type x86_fp80, zero-extend ArgVal. in emitBuiltinOSLogFormat()
2281 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax); in EmitCheckedUnsignedMultiplySignedResult()
2287 ResultArg->getType()->getPointeeType().isVolatileQualified(); in EmitCheckedUnsignedMultiplySignedResult()
2294 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
2304 /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2305 /// the generic checked-binop irgen.
2314 "Not a mixed-sign multipliction we can specialize"); in EmitCheckedMixedSignMultiply()
2326 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext"); in EmitCheckedMixedSignMultiply()
2328 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext"); in EmitCheckedMixedSignMultiply()
2330 llvm::Type *OpTy = Signed->getType(); in EmitCheckedMixedSignMultiply()
2388 ResultArg->getType()->getPointeeType().isVolatileQualified(); in EmitCheckedMixedSignMultiply()
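A usage sketch of the mixed-sign case this specialization targets; __builtin_mul_overflow accepts operands of different signedness, which the generic checked-binop path handles less cheaply:

    int s = -3;
    unsigned u = 7u;
    int result;
    bool overflowed = __builtin_mul_overflow(s, u, &result);   // signed * unsigned with overflow check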
2400 const auto *Record = Ty->getAsCXXRecordDecl(); in TypeRequiresBuiltinLaunderImp()
2408 assert(Record->hasDefinition() && in TypeRequiresBuiltinLaunderImp()
2411 if (Record->isDynamicClass()) in TypeRequiresBuiltinLaunderImp()
2414 for (FieldDecl *F : Record->fields()) { in TypeRequiresBuiltinLaunderImp()
2415 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen)) in TypeRequiresBuiltinLaunderImp()
2431 llvm::Value *Src = EmitScalarExpr(E->getArg(0)); in emitRotate()
2432 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1)); in emitRotate()
2436 llvm::Type *Ty = Src->getType(); in emitRotate()
2439 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same. in emitRotate()
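An illustrative restatement (assuming 32-bit unsigned x) of the note above that a rotate is a funnel shift whose two data operands coincide:

    // __builtin_rotateleft32(x, n) lowers to llvm.fshl.i32(x, x, n), i.e.
    // rotl(x, n) == (x << (n & 31)) | (x >> ((32 - n) & 31))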
2445 // Map math builtins for long-double to f128 version.
2518 Value *V) { in tryUseTestFPKind() argument
2522 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM)) in tryUseTestFPKind()
2530 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported"; in EmitHipStdParUnsupportedBuiltin()
2531 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD); in EmitHipStdParUnsupportedBuiltin()
2532 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy); in EmitHipStdParUnsupportedBuiltin()
2535 for (auto &&FormalTy : FnTy->params()) in EmitHipStdParUnsupportedBuiltin()
2538 return RValue::get(CGF->Builder.CreateCall(UBF, Args)); in EmitHipStdParUnsupportedBuiltin()
2544 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); in EmitBuiltinExpr()
2546 // TODO: Extend this handling to all builtin calls that we can constant-fold. in EmitBuiltinExpr()
2548 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) && in EmitBuiltinExpr()
2558 // If current long-double semantics is IEEE 128-bit, replace math builtins in EmitBuiltinExpr()
2559 // of long-double with f128 equivalent. in EmitBuiltinExpr()
2561 // after backend supports IEEE 128-bit style libcalls. in EmitBuiltinExpr()
2571 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID; in EmitBuiltinExpr()
2574 // ErrnoOverriden is true if math-errno is overridden via the in EmitBuiltinExpr()
2575 // '#pragma float_control(precise, on)'. This pragma disables fast-math, in EmitBuiltinExpr()
2576 // which implies math-errno. in EmitBuiltinExpr()
2577 if (E->hasStoredFPFeatures()) { in EmitBuiltinExpr()
2578 FPOptionsOverride OP = E->getFPFeatures(); in EmitBuiltinExpr()
2583 // fast-math which implies math-errno. in EmitBuiltinExpr()
2584 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>(); in EmitBuiltinExpr()
2586 // True if we are compiling at -O2 and errno has been disabled in EmitBuiltinExpr()
2588 // attribute opt-none hasn't been seen. in EmitBuiltinExpr()
2630 // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is in EmitBuiltinExpr()
2632 // Math intrinsics are generated only when math-errno is disabled. Any pragmas in EmitBuiltinExpr()
2633 // or attributes that affect math-errno should prevent or allow math in EmitBuiltinExpr()
2635 // 1- In fast math mode, unless math-errno is overridden in EmitBuiltinExpr()
2638 // 2- If math-errno was enabled on command line but overridden in EmitBuiltinExpr()
2641 // 3- If we are compiling with optimization and errno has been disabled in EmitBuiltinExpr()
2837 // fmod() is a special-case. It maps to the frem instruction rather than an in EmitBuiltinExpr()
2848 Value *Arg1 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
2849 Value *Arg2 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3081 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD, in EmitBuiltinExpr()
3088 SourceLocation Loc = Arg->getExprLoc(); in EmitBuiltinExpr()
3091 if (CE->getCastKind() == CK_BitCast) in EmitBuiltinExpr()
3092 Arg = CE->getSubExpr(); in EmitBuiltinExpr()
3093 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(), in EmitBuiltinExpr()
3102 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); in EmitBuiltinExpr()
3108 ? EmitScalarExpr(E->getArg(0)) in EmitBuiltinExpr()
3109 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this), in EmitBuiltinExpr()
3113 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this); in EmitBuiltinExpr()
3114 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this); in EmitBuiltinExpr()
3115 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}), in EmitBuiltinExpr()
3130 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false); in EmitBuiltinExpr()
3134 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true); in EmitBuiltinExpr()
3146 Value *Real = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3147 Value *Imag = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3156 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
3159 Imag = Builder.CreateFNeg(Imag, "neg"); in EmitBuiltinExpr()
3168 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
3174 // is available as debuginfo is needed to preserve user-level in EmitBuiltinExpr()
3177 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); in EmitBuiltinExpr()
3178 return RValue::get(EmitScalarExpr(E->getArg(0))); in EmitBuiltinExpr()
3183 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); in EmitBuiltinExpr()
3184 return RValue::get(EmitScalarExpr(E->getArg(0))); in EmitBuiltinExpr()
3188 Value *Res = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3199 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
3206 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or in EmitBuiltinExpr()
3207 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3209 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3212 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
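Two worked values for the clrsb formula above, assuming a 32-bit int:

    // x = 1:  x >= 0, clz(1) = 31, so clrsb(1)  = 30  (30 redundant copies of the sign bit)
    // x = -2: x < 0, clz(~x) = clz(1) = 31, so clrsb(-2) = 30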
3229 E->getNumArgs() > 1; in EmitBuiltinExpr()
3232 HasFallback ? EmitScalarExpr(E->getArg(0)) in EmitBuiltinExpr()
3233 : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero); in EmitBuiltinExpr()
3235 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3238 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3242 if (Result->getType() != ResultType) in EmitBuiltinExpr()
3250 Value *FallbackValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3261 E->getNumArgs() > 1; in EmitBuiltinExpr()
3264 HasFallback ? EmitScalarExpr(E->getArg(0)) in EmitBuiltinExpr()
3265 : EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero); in EmitBuiltinExpr()
3267 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3270 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3274 if (Result->getType() != ResultType) in EmitBuiltinExpr()
3282 Value *FallbackValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3290 // ffs(x) -> x ? cttz(x) + 1 : 0 in EmitBuiltinExpr()
3291 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3293 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3296 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3303 if (Result->getType() != ResultType) in EmitBuiltinExpr()
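Worked values for the ffs mapping above:

    // __builtin_ffs(0) == 0          (no bit set, so the fallback 0 is returned)
    // __builtin_ffs(8) == 4          (cttz(8) = 3, plus 1)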
3311 // parity(x) -> ctpop(x) & 1 in EmitBuiltinExpr()
3312 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3314 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3317 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3320 if (Result->getType() != ResultType) in EmitBuiltinExpr()
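Worked values for the parity mapping above:

    // __builtin_parity(7) == 1       (ctpop(7) = 3, and 3 & 1 = 1)
    // __builtin_parity(6) == 0       (ctpop(6) = 2, and 2 & 1 = 0)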
3328 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3330 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3333 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3335 if (Result->getType() != ResultType) in EmitBuiltinExpr()
3347 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3349 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3352 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3354 if (Result->getType() != ResultType) in EmitBuiltinExpr()
3363 return RValue::get(EmitScalarExpr(E->getArg(0))); in EmitBuiltinExpr()
3366 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3367 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3369 Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3370 // Don't generate llvm.expect on -O0 as the backend won't use it for in EmitBuiltinExpr()
3372 // Note, we still IRGen ExpectedValue because it could have side-effects. in EmitBuiltinExpr()
3382 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3383 llvm::Type *ArgType = ArgValue->getType(); in EmitBuiltinExpr()
3385 Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3387 const Expr *ProbArg = E->getArg(2); in EmitBuiltinExpr()
3388 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext()); in EmitBuiltinExpr()
3394 llvm::Type *Ty = ConvertType(ProbArg->getType()); in EmitBuiltinExpr()
3396 // Don't generate llvm.expect.with.probability on -O0 as the backend in EmitBuiltinExpr()
3398 // Note, we still IRGen ExpectedValue because it could have side-effects. in EmitBuiltinExpr()
3409 const Expr *Ptr = E->getArg(0); in EmitBuiltinExpr()
3412 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; in EmitBuiltinExpr()
3414 Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3416 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) in EmitBuiltinExpr()
3417 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(), in EmitBuiltinExpr()
3427 if (E->getArg(0)->HasSideEffects(getContext())) in EmitBuiltinExpr()
3430 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3436 const Expr *Arg0 = E->getArg(0); in EmitBuiltinExpr()
3437 const Expr *Arg1 = E->getArg(1); in EmitBuiltinExpr()
3449 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString(); in EmitBuiltinExpr()
3464 QualType ArgType = E->getArg(0)->getType(); in EmitBuiltinExpr()
3465 if (ArgType->isComplexType()) { in EmitBuiltinExpr()
3467 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType(); in EmitBuiltinExpr()
3468 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
3475 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); in EmitBuiltinExpr()
3480 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3525 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
3527 const Expr *Arg = E->getArg(0); in EmitBuiltinExpr()
3528 QualType ArgType = Arg->getType(); in EmitBuiltinExpr()
3529 // FIXME: The allowance for Obj-C pointers and block pointers is historical in EmitBuiltinExpr()
3531 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() && in EmitBuiltinExpr()
3532 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType()) in EmitBuiltinExpr()
3537 if (Arg->HasSideEffects(getContext())) in EmitBuiltinExpr()
3539 // side-effects. in EmitBuiltinExpr()
3543 if (ArgType->isObjCObjectPointerType()) { in EmitBuiltinExpr()
3544 // Convert Objective-C objects to id because we cannot distinguish between in EmitBuiltinExpr()
3545 // LLVM types for Obj-C classes as they are opaque. in EmitBuiltinExpr()
3552 if (Result->getType() != ResultType) in EmitBuiltinExpr()
3559 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue(); in EmitBuiltinExpr()
3560 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType())); in EmitBuiltinExpr()
3565 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType, in EmitBuiltinExpr()
3569 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3571 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : in EmitBuiltinExpr()
3573 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : in EmitBuiltinExpr()
3576 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); in EmitBuiltinExpr()
3589 Value *Begin = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3590 Value *End = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3600 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor( in EmitBuiltinExpr()
3601 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()), in EmitBuiltinExpr()
3602 *E->getArg(1)->tryEvaluateString(getContext())); in EmitBuiltinExpr()
3613 EmitUnreachable(E->getExprLoc()); in EmitBuiltinExpr()
3624 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3625 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3632 Src0->getType()); in EmitBuiltinExpr()
3637 { Src0->getType(), Src1->getType() }); in EmitBuiltinExpr()
3662 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3663 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3687 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()))); in EmitBuiltinExpr()
3692 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3693 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V)) in EmitBuiltinExpr()
3696 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan), in EmitBuiltinExpr()
3697 ConvertType(E->getType()))); in EmitBuiltinExpr()
3702 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3704 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan), in EmitBuiltinExpr()
3705 ConvertType(E->getType()))); in EmitBuiltinExpr()
3710 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3711 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V)) in EmitBuiltinExpr()
3714 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf), in EmitBuiltinExpr()
3715 ConvertType(E->getType()))); in EmitBuiltinExpr()
3726 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3727 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V)) in EmitBuiltinExpr()
3730 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite), in EmitBuiltinExpr()
3731 ConvertType(E->getType()))); in EmitBuiltinExpr()
3736 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3738 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal), in EmitBuiltinExpr()
3739 ConvertType(E->getType()))); in EmitBuiltinExpr()
3744 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3746 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal), in EmitBuiltinExpr()
3747 ConvertType(E->getType()))); in EmitBuiltinExpr()
3752 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3754 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero), in EmitBuiltinExpr()
3755 ConvertType(E->getType()))); in EmitBuiltinExpr()
3760 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext())) in EmitBuiltinExpr()
3764 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
3765 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test), in EmitBuiltinExpr()
3766 ConvertType(E->getType()))); in EmitBuiltinExpr()
3770 llvm::Type *Ty = ConvertType(E->getArg(0)->getType()); in EmitBuiltinExpr()
3780 QualType QT = E->getArg(0)->getType(); in EmitBuiltinExpr()
3782 if (auto *VecTy = QT->getAs<VectorType>()) in EmitBuiltinExpr()
3783 QT = VecTy->getElementType(); in EmitBuiltinExpr()
3784 if (QT->isIntegerType()) in EmitBuiltinExpr()
3786 llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
3875 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3876 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3878 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected"); in EmitBuiltinExpr()
3879 QualType Ty = E->getArg(0)->getType(); in EmitBuiltinExpr()
3880 if (auto *VecTy = Ty->getAs<VectorType>()) in EmitBuiltinExpr()
3881 Ty = VecTy->getElementType(); in EmitBuiltinExpr()
3882 bool IsSigned = Ty->isSignedIntegerType(); in EmitBuiltinExpr()
3893 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3894 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3896 if (Op0->getType()->isIntOrIntVectorTy()) { in EmitBuiltinExpr()
3897 QualType Ty = E->getArg(0)->getType(); in EmitBuiltinExpr()
3898 if (auto *VecTy = Ty->getAs<VectorType>()) in EmitBuiltinExpr()
3899 Ty = VecTy->getElementType(); in EmitBuiltinExpr()
3900 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType() in EmitBuiltinExpr()
3909 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3910 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
3912 if (Op0->getType()->isIntOrIntVectorTy()) { in EmitBuiltinExpr()
3913 QualType Ty = E->getArg(0)->getType(); in EmitBuiltinExpr()
3914 if (auto *VecTy = Ty->getAs<VectorType>()) in EmitBuiltinExpr()
3915 Ty = VecTy->getElementType(); in EmitBuiltinExpr()
3916 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType() in EmitBuiltinExpr()
3927 if (auto *VecTy = QT->getAs<VectorType>()) in EmitBuiltinExpr()
3928 QT = VecTy->getElementType(); in EmitBuiltinExpr()
3929 else if (QT->isSizelessVectorType()) in EmitBuiltinExpr()
3930 QT = QT->getSizelessVectorEltType(CGM.getContext()); in EmitBuiltinExpr()
3932 if (QT->isSignedIntegerType()) in EmitBuiltinExpr()
3934 if (QT->isUnsignedIntegerType()) in EmitBuiltinExpr()
3936 assert(QT->isFloatingType() && "must have a float here"); in EmitBuiltinExpr()
3940 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min")); in EmitBuiltinExpr()
3945 if (auto *VecTy = QT->getAs<VectorType>()) in EmitBuiltinExpr()
3946 QT = VecTy->getElementType(); in EmitBuiltinExpr()
3947 else if (QT->isSizelessVectorType()) in EmitBuiltinExpr()
3948 QT = QT->getSizelessVectorEltType(CGM.getContext()); in EmitBuiltinExpr()
3950 if (QT->isSignedIntegerType()) in EmitBuiltinExpr()
3952 if (QT->isUnsignedIntegerType()) in EmitBuiltinExpr()
3954 assert(QT->isFloatingType() && "must have a float here"); in EmitBuiltinExpr()
3959 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min")); in EmitBuiltinExpr()
3979 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>(); in EmitBuiltinExpr()
3980 Value *MatValue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
3982 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(), in EmitBuiltinExpr()
3983 MatrixTy->getNumColumns()); in EmitBuiltinExpr()
3990 Value *Stride = EmitScalarExpr(E->getArg(3)); in EmitBuiltinExpr()
3991 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>(); in EmitBuiltinExpr()
3992 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>(); in EmitBuiltinExpr()
3994 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
3996 Address Src = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
3998 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, in EmitBuiltinExpr()
4003 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix"); in EmitBuiltinExpr()
4009 Value *Matrix = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4010 Address Dst = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4011 Value *Stride = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4013 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>(); in EmitBuiltinExpr()
4014 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>(); in EmitBuiltinExpr()
4016 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
4019 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, in EmitBuiltinExpr()
4024 MatrixTy->getNumRows(), MatrixTy->getNumColumns()); in EmitBuiltinExpr()
4029 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0 in EmitBuiltinExpr()
4031 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. in EmitBuiltinExpr()
4032 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4035 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); in EmitBuiltinExpr()
4038 llvm::Type *IntTy = ConvertType(E->getType()); in EmitBuiltinExpr()
4041 Value *NegativeOne = ConstantInt::get(IntTy, -1); in EmitBuiltinExpr()
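Worked values for the isinf_sign mapping above:

    // __builtin_isinf_sign(-INFINITY) == -1
    // __builtin_isinf_sign( INFINITY) ==  1
    // __builtin_isinf_sign(3.0)       ==  0   (NaN also yields 0, since fabs(NaN) != infinity)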
4050 llvm::Type *ResultType = ConvertType(E->getType()); in EmitBuiltinExpr()
4052 if (Result->getType() != ResultType) in EmitBuiltinExpr()
4061 Value *V = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr() local
4062 Builder.CreateCall(F, V); in EmitBuiltinExpr()
4068 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here. in EmitBuiltinExpr()
4069 Value *V = EmitScalarExpr(E->getArg(5)); in EmitBuiltinExpr() local
4070 llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); in EmitBuiltinExpr()
4074 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); in EmitBuiltinExpr()
4077 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, in EmitBuiltinExpr()
4080 // if (V==0) return FP_ZERO in EmitBuiltinExpr()
4082 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), in EmitBuiltinExpr()
4084 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); in EmitBuiltinExpr()
4085 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); in EmitBuiltinExpr()
4087 Result->addIncoming(ZeroLiteral, Begin); in EmitBuiltinExpr()
4089 // if (V != V) return FP_NAN in EmitBuiltinExpr()
4091 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); in EmitBuiltinExpr()
4092 Value *NanLiteral = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4093 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); in EmitBuiltinExpr()
4095 Result->addIncoming(NanLiteral, NotZero); in EmitBuiltinExpr()
4097 // if (fabs(V) == infinity) return FP_INFINITY in EmitBuiltinExpr()
4099 Value *VAbs = EmitFAbs(*this, V); in EmitBuiltinExpr()
4101 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), in EmitBuiltinExpr()
4103 Value *InfLiteral = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4104 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); in EmitBuiltinExpr()
4106 Result->addIncoming(InfLiteral, NotNan); in EmitBuiltinExpr()
4108 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL in EmitBuiltinExpr()
4111 getContext().getFloatTypeSemantics(E->getArg(5)->getType())); in EmitBuiltinExpr()
4113 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), in EmitBuiltinExpr()
4116 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), in EmitBuiltinExpr()
4117 EmitScalarExpr(E->getArg(3))); in EmitBuiltinExpr()
4119 Result->addIncoming(NormalResult, NotInf); in EmitBuiltinExpr()
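A C-level sketch of the ladder emitted above; __builtin_fpclassify takes the five classification results first and the operand last (hypothetical wrapper, assuming <math.h> for the FP_* macros):

    int classify(double v) {
      return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
                                  FP_SUBNORMAL, FP_ZERO, v);
      // expands roughly as: v == 0 ? FP_ZERO
      //                   : v != v ? FP_NAN
      //                   : fabs(v) == INFINITY ? FP_INFINITE
      //                   : fabs(v) >= smallest_normal ? FP_NORMAL : FP_SUBNORMAL
    }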
4136 Value *Size = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4144 AI->setAlignment(SuitableAlignmentInBytes); in EmitBuiltinExpr()
4148 LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); in EmitBuiltinExpr()
4150 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType()); in EmitBuiltinExpr()
4159 Value *Size = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4160 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4162 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue(); in EmitBuiltinExpr()
4166 AI->setAlignment(AlignmentInBytes); in EmitBuiltinExpr()
4170 LangAS EAS = E->getType()->getPointeeType().getAddressSpace(); in EmitBuiltinExpr()
4172 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType()); in EmitBuiltinExpr()
4181 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4182 Value *SizeVal = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4183 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(), in EmitBuiltinExpr()
4184 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
4191 Address Src = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4192 Address Dest = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4193 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4195 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, in EmitBuiltinExpr()
4198 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD, in EmitBuiltinExpr()
4208 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4209 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4210 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4211 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0); in EmitBuiltinExpr()
4212 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1); in EmitBuiltinExpr()
4223 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4224 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4226 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); in EmitBuiltinExpr()
4227 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0); in EmitBuiltinExpr()
4228 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1); in EmitBuiltinExpr()
4240 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || in EmitBuiltinExpr()
4241 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) in EmitBuiltinExpr()
4247 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4248 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4255 Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4256 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4257 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4266 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || in EmitBuiltinExpr()
4267 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) in EmitBuiltinExpr()
4273 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4274 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4282 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4283 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
4284 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4285 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0); in EmitBuiltinExpr()
4286 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1); in EmitBuiltinExpr()
4292 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4293 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
4295 Value *SizeVal = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4296 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(), in EmitBuiltinExpr()
4297 E->getArg(0)->getExprLoc(), FD, 0); in EmitBuiltinExpr()
4302 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4304 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); in EmitBuiltinExpr()
4306 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue(); in EmitBuiltinExpr()
4308 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, in EmitBuiltinExpr()
4316 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) || in EmitBuiltinExpr()
4317 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) in EmitBuiltinExpr()
4323 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4324 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), in EmitBuiltinExpr()
4337 Value *Str = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4338 Value *Chr = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4339 Value *Size = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4349 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2); in EmitBuiltinExpr()
4350 StrPhi->addIncoming(Str, Entry); in EmitBuiltinExpr()
4352 SizePhi->addIncoming(Size, Entry); in EmitBuiltinExpr()
4366 StrPhi->addIncoming(NextStr, Next); in EmitBuiltinExpr()
4367 SizePhi->addIncoming(NextSize, Next); in EmitBuiltinExpr()
4370 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3); in EmitBuiltinExpr()
4371 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry); in EmitBuiltinExpr()
4372 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next); in EmitBuiltinExpr()
4373 Ret->addIncoming(FoundChr, CmpEq); in EmitBuiltinExpr()
4384 Value *Dst = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4385 Value *Src = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4386 Value *Size = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
4397 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2); in EmitBuiltinExpr()
4398 DstPhi->addIncoming(Dst, Entry); in EmitBuiltinExpr()
4399 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2); in EmitBuiltinExpr()
4400 SrcPhi->addIncoming(Src, Entry); in EmitBuiltinExpr()
4402 SizePhi->addIncoming(Size, Entry); in EmitBuiltinExpr()
4421 DstPhi->addIncoming(NextDst, Next); in EmitBuiltinExpr()
4422 SrcPhi->addIncoming(NextSrc, Next); in EmitBuiltinExpr()
4423 SizePhi->addIncoming(NextSize, Next); in EmitBuiltinExpr()
4427 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry); in EmitBuiltinExpr()
4428 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT); in EmitBuiltinExpr()
4429 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT); in EmitBuiltinExpr()
4430 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next); in EmitBuiltinExpr()
4441 // this instead of hard-coding 0, which is correct for most targets. in EmitBuiltinExpr()
4449 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), in EmitBuiltinExpr()
4459 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), in EmitBuiltinExpr()
4465 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4470 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4476 = cast<llvm::IntegerType>(ConvertType(E->getType())); in EmitBuiltinExpr()
4478 if (Column == -1) { in EmitBuiltinExpr()
4485 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4488 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType()))); in EmitBuiltinExpr()
4491 Value *Int = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4492 Value *Ptr = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4494 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType()); in EmitBuiltinExpr()
4495 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) && in EmitBuiltinExpr()
4496 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants"); in EmitBuiltinExpr()
4498 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 in EmitBuiltinExpr()
4518 // doesn't implicitly ignore high-order bits when doing in EmitBuiltinExpr()
4522 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html in EmitBuiltinExpr()
4525 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4529 if (IntPtrTy->getBitWidth() == 64) in EmitBuiltinExpr()
4540 Address Buf = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4550 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType()); in EmitBuiltinExpr()
4560 Value *Buf = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4574 const Expr *Arg = E->getArg(0); in EmitBuiltinExpr()
4575 QualType ArgTy = Arg->getType()->getPointeeType(); in EmitBuiltinExpr()
4724 QualType ElTy = E->getArg(0)->getType()->getPointeeType(); in EmitBuiltinExpr()
4730 Store->setAtomic(llvm::AtomicOrdering::Release); in EmitBuiltinExpr()
4735 // We assume this is supposed to correspond to a C++0x-style in EmitBuiltinExpr()
4736 // sequentially-consistent fence (i.e. this is only usable for in EmitBuiltinExpr()
4740 // to use it with non-atomic loads and stores to get acquire/release in EmitBuiltinExpr()
4753 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since in EmitBuiltinExpr()
4754 // _Atomic(T) is always properly-aligned. in EmitBuiltinExpr()
4757 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))), in EmitBuiltinExpr()
4760 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))), in EmitBuiltinExpr()
4766 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); in EmitBuiltinExpr()
4776 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); in EmitBuiltinExpr()
4778 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
4781 EmitPointerWithAlignment(E->getArg(0)).withElementType(Int8Ty); in EmitBuiltinExpr()
4784 Value *Order = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4786 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
4814 Result->setVolatile(Volatile); in EmitBuiltinExpr()
4842 RMW->setVolatile(Volatile); in EmitBuiltinExpr()
4843 Result->addIncoming(RMW, BBs[i]); in EmitBuiltinExpr()
4847 SI->addCase(Builder.getInt32(0), BBs[0]); in EmitBuiltinExpr()
4848 SI->addCase(Builder.getInt32(1), BBs[1]); in EmitBuiltinExpr()
4849 SI->addCase(Builder.getInt32(2), BBs[1]); in EmitBuiltinExpr()
4850 SI->addCase(Builder.getInt32(3), BBs[2]); in EmitBuiltinExpr()
4851 SI->addCase(Builder.getInt32(4), BBs[3]); in EmitBuiltinExpr()
4852 SI->addCase(Builder.getInt32(5), BBs[4]); in EmitBuiltinExpr()
4859 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); in EmitBuiltinExpr()
4861 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
4863 Address Ptr = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
4866 Value *Order = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
4868 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
4873 Store->setOrdering(llvm::AtomicOrdering::Monotonic); in EmitBuiltinExpr()
4876 Store->setOrdering(llvm::AtomicOrdering::Release); in EmitBuiltinExpr()
4879 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); in EmitBuiltinExpr()
4902 Store->setOrdering(Orders[i]); in EmitBuiltinExpr()
4906 SI->addCase(Builder.getInt32(0), BBs[0]); in EmitBuiltinExpr()
4907 SI->addCase(Builder.getInt32(3), BBs[1]); in EmitBuiltinExpr()
4908 SI->addCase(Builder.getInt32(5), BBs[2]); in EmitBuiltinExpr()
4924 Value *Order = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
4926 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in EmitBuiltinExpr()
4961 SI->addCase(Builder.getInt32(1), AcquireBB); in EmitBuiltinExpr()
4962 SI->addCase(Builder.getInt32(2), AcquireBB); in EmitBuiltinExpr()
4967 SI->addCase(Builder.getInt32(3), ReleaseBB); in EmitBuiltinExpr()
4972 SI->addCase(Builder.getInt32(4), AcqRelBB); in EmitBuiltinExpr()
4977 SI->addCase(Builder.getInt32(5), SeqCstBB); in EmitBuiltinExpr()
4987 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), in EmitBuiltinExpr()
4988 ConvertType(E->getType()))); in EmitBuiltinExpr()
4993 // Re-encode each wide string to UTF8 and make an MDString. in EmitBuiltinExpr()
4995 for (const Expr *Arg : E->arguments()) { in EmitBuiltinExpr()
4996 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts()); in EmitBuiltinExpr()
4997 assert(Str->getCharByteWidth() == 2); in EmitBuiltinExpr()
4998 StringRef WideBytes = Str->getBytes(); in EmitBuiltinExpr()
5002 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument"); in EmitBuiltinExpr()
5016 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5019 {AnnVal->getType(), CGM.ConstGlobalsPtrTy}); in EmitBuiltinExpr()
5022 // non-wide string literal, potentially casted, so the cast<> is safe. in EmitBuiltinExpr()
5023 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts(); in EmitBuiltinExpr()
5024 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString(); in EmitBuiltinExpr()
5026 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr)); in EmitBuiltinExpr()
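// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): this
// is the source-level form of the builtin handled above; the string literal
// becomes the annotation operand of the llvm.annotation call.
static int tag_value(int V) {
  return __builtin_annotation(V, "observed-here");
}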
5057 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5058 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5059 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
5060 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3)); in EmitBuiltinExpr()
5090 X->getType()); in EmitBuiltinExpr()
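// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// X / Y / Carryin / CarryOutPtr operands above belong to clang's
// multiprecision carry builtins, e.g. a 128-bit add built from two chained
// 64-bit adds.
static void add128(unsigned long long ALo, unsigned long long AHi,
                   unsigned long long BLo, unsigned long long BHi,
                   unsigned long long *RLo, unsigned long long *RHi) {
  unsigned long long Carry, Ignored;
  *RLo = __builtin_addcll(ALo, BLo, 0, &Carry);       // carry-in of 0
  *RHi = __builtin_addcll(AHi, BHi, Carry, &Ignored); // carry chained through
}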
5098 const clang::Expr *LeftArg = E->getArg(0); in EmitBuiltinExpr()
5099 const clang::Expr *RightArg = E->getArg(1); in EmitBuiltinExpr()
5100 const clang::Expr *ResultArg = E->getArg(2); in EmitBuiltinExpr()
5103 ResultArg->getType()->castAs<PointerType>()->getPointeeType(); in EmitBuiltinExpr()
5106 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType()); in EmitBuiltinExpr()
5108 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType()); in EmitBuiltinExpr()
5112 // Handle mixed-sign multiplication as a special case, because adding in EmitBuiltinExpr()
5184 ResultArg->getType()->getPointeeType().isVolatileQualified(); in EmitBuiltinExpr()
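// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// mixed-sign special case mentioned above is what lets a call like this avoid
// widening to a larger intermediate type.
static bool scale_bytes(long Count, unsigned long ElemSize, long *Out) {
  // signed * unsigned with a signed result pointer: the mixed-sign path.
  return __builtin_mul_overflow(Count, ElemSize, Out); // true on overflow
}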
5212 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5213 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5214 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2)); in EmitBuiltinExpr()
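// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// fixed-signature overflow builtins take the two operands plus an out-pointer,
// matching X / Y / SumOutPtr above, and typically lower to
// llvm.sadd.with.overflow and friends.
static bool next_index(int I, int *Out) {
  return __builtin_sadd_overflow(I, 1, Out);
}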
5262 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); in EmitBuiltinExpr()
5265 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()))); in EmitBuiltinExpr()
5268 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false); in EmitBuiltinExpr()
5271 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true); in EmitBuiltinExpr()
5285 const CallExpr *Call = cast<CallExpr>(E->getArg(0)); in EmitBuiltinExpr()
5286 const Expr *Chain = E->getArg(1); in EmitBuiltinExpr()
5287 return EmitCall(Call->getCallee()->getType(), in EmitBuiltinExpr()
5288 EmitCallee(Call->getCallee()), Call, ReturnValue, in EmitBuiltinExpr()
5301 getLLVMContext(), getContext().getTypeSize(E->getType())); in EmitBuiltinExpr()
5305 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5306 RTy = Exchange->getType(); in EmitBuiltinExpr()
5310 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); in EmitBuiltinExpr()
5318 Result->setVolatile(true); in EmitBuiltinExpr()
5381 // affected by the -fms-volatile setting. in EmitBuiltinExpr()
5394 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); in EmitBuiltinExpr()
5404 for (auto argExpr : E->arguments()) in EmitBuiltinExpr()
5408 llvm::Type *OrigValueType = Args[0]->getType(); in EmitBuiltinExpr()
5409 if (OrigValueType->isPointerTy()) in EmitBuiltinExpr()
5414 if (Args[4]->getType()->isPointerTy()) in EmitBuiltinExpr()
5420 if (Args[2]->getType()->isPointerTy()) in EmitBuiltinExpr()
5425 if (Args[1]->getType()->isPointerTy()) in EmitBuiltinExpr()
5435 auto IntrinsicID = [&]() -> unsigned { in EmitBuiltinExpr()
5457 OrigValueType->isPointerTy()) { in EmitBuiltinExpr()
5473 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && in EmitBuiltinExpr()
5474 E->getArg(0)->getType()->isPointerType()) in EmitBuiltinExpr()
5478 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 && in EmitBuiltinExpr()
5479 E->getArg(0)->getType()->isPointerType()) { in EmitBuiltinExpr()
5494 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this)); in EmitBuiltinExpr()
5497 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) in EmitBuiltinExpr()
5536 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions in EmitBuiltinExpr()
5539 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
5540 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5542 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); in EmitBuiltinExpr()
5543 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); in EmitBuiltinExpr()
5551 if (2U == E->getNumArgs()) { in EmitBuiltinExpr()
5556 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty}; in EmitBuiltinExpr()
5564 assert(4 == E->getNumArgs() && in EmitBuiltinExpr()
5569 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, in EmitBuiltinExpr()
5571 Value *Arg2 = EmitScalarExpr(E->getArg(2)), in EmitBuiltinExpr()
5572 *Arg3 = EmitScalarExpr(E->getArg(3)); in EmitBuiltinExpr()
5578 if (Arg2->getType() != Int32Ty) in EmitBuiltinExpr()
5585 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write in EmitBuiltinExpr()
5608 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
5609 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5612 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); in EmitBuiltinExpr()
5613 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); in EmitBuiltinExpr()
5616 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty}; in EmitBuiltinExpr()
5621 if (Arg1->getType() != Int32Ty) in EmitBuiltinExpr()
5626 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write in EmitBuiltinExpr()
5648 Value *Arg0 = EmitScalarExpr(E->getArg(0)), in EmitBuiltinExpr()
5649 *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5651 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); in EmitBuiltinExpr()
5652 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); in EmitBuiltinExpr()
5655 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty}; in EmitBuiltinExpr()
5663 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions in EmitBuiltinExpr()
5667 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>(); in EmitBuiltinExpr()
5673 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo"); in EmitBuiltinExpr()
5676 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5678 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0)); in EmitBuiltinExpr()
5679 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0)); in EmitBuiltinExpr()
5680 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty}; in EmitBuiltinExpr()
5688 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. in EmitBuiltinExpr()
5692 auto Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5699 E->getType()->getPointeeType().getAddressSpace())); in EmitBuiltinExpr()
5702 if (Arg0->getType()->getPointerAddressSpace() != in EmitBuiltinExpr()
5703 NewArgT->getPointerAddressSpace()) in EmitBuiltinExpr()
5707 auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); in EmitBuiltinExpr()
5711 ConvertType(E->getType()))); in EmitBuiltinExpr()
5714 // OpenCL v2.0, s6.13.17 - Enqueue kernel function. in EmitBuiltinExpr()
5724 unsigned NumArgs = E->getNumArgs(); in EmitBuiltinExpr()
5730 llvm::Value *Queue = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5731 llvm::Value *Flags = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
5732 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2)); in EmitBuiltinExpr()
5746 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); in EmitBuiltinExpr()
5760 RTCall->setAttributes(ByValAttrSet); in EmitBuiltinExpr()
5768 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> { in EmitBuiltinExpr()
5769 llvm::APInt ArraySize(32, NumArgs - First); in EmitBuiltinExpr()
5783 auto *Index = llvm::ConstantInt::get(IntTy, I - First); in EmitBuiltinExpr()
5788 auto *V = in EmitBuiltinExpr() local
5789 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy); in EmitBuiltinExpr()
5791 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy)); in EmitBuiltinExpr()
5797 if (E->getArg(3)->getType()->isBlockPointerType()) { in EmitBuiltinExpr()
5801 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3)); in EmitBuiltinExpr()
5812 Block, ConstantInt::get(IntTy, NumArgs - 4), in EmitBuiltinExpr()
5816 GenericVoidPtrTy, IntTy, ElemPtr->getType()}; in EmitBuiltinExpr()
5832 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty); in EmitBuiltinExpr()
5838 if (E->getArg(4)->isNullPointerConstant( in EmitBuiltinExpr()
5843 E->getArg(4)->getType()->isArrayType() in EmitBuiltinExpr()
5844 ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this) in EmitBuiltinExpr()
5845 : EmitScalarExpr(E->getArg(4)); in EmitBuiltinExpr()
5850 if (E->getArg(5)->isNullPointerConstant( in EmitBuiltinExpr()
5855 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy); in EmitBuiltinExpr()
5859 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6)); in EmitBuiltinExpr()
5884 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7)); in EmitBuiltinExpr()
5891 ArgTys.push_back(ElemPtr->getType()); in EmitBuiltinExpr()
5904 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block in EmitBuiltinExpr()
5910 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); in EmitBuiltinExpr()
5925 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0)); in EmitBuiltinExpr()
5940 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0)); in EmitBuiltinExpr()
5943 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1)); in EmitBuiltinExpr()
5954 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy}, in EmitBuiltinExpr()
5961 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
5962 Address Address = EmitPointerWithAlignment(E->getArg(1)); in EmitBuiltinExpr()
5968 Address Address = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
5973 Address Address = EmitPointerWithAlignment(E->getArg(0)); in EmitBuiltinExpr()
6004 // Fall through - it's already mapped to the intrinsic by ClangBuiltin. in EmitBuiltinExpr()
6018 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) in EmitBuiltinExpr()
6019 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents()) in EmitBuiltinExpr()
6023 auto FTy = F->getFunctionType(); in EmitBuiltinExpr()
6024 auto Arg0 = E->getArg(0); in EmitBuiltinExpr()
6026 auto Arg0Ty = Arg0->getType(); in EmitBuiltinExpr()
6027 auto PTy0 = FTy->getParamType(0); in EmitBuiltinExpr()
6028 if (PTy0 != Arg0Val->getType()) { in EmitBuiltinExpr()
6029 if (Arg0Ty->isArrayType()) in EmitBuiltinExpr()
6034 auto Arg1 = EmitScalarExpr(E->getArg(1)); in EmitBuiltinExpr()
6035 auto PTy1 = FTy->getParamType(1); in EmitBuiltinExpr()
6036 if (PTy1 != Arg1->getType()) in EmitBuiltinExpr()
6052 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>()) in EmitBuiltinExpr()
6053 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents()) in EmitBuiltinExpr()
6057 auto FTy = F->getFunctionType(); in EmitBuiltinExpr()
6058 auto Arg0 = EmitScalarExpr(E->getArg(0)); in EmitBuiltinExpr()
6059 auto PTy0 = FTy->getParamType(0); in EmitBuiltinExpr()
6060 if (PTy0 != Arg0->getType()) in EmitBuiltinExpr()
6062 auto Arg1 = E->getArg(1); in EmitBuiltinExpr()
6064 auto Arg1Ty = Arg1->getType(); in EmitBuiltinExpr()
6065 auto PTy1 = FTy->getParamType(1); in EmitBuiltinExpr()
6066 if (PTy1 != Arg1Val->getType()) { in EmitBuiltinExpr()
6067 if (Arg1Ty->isArrayType()) in EmitBuiltinExpr()
6072 auto Arg2 = EmitScalarExpr(E->getArg(2)); in EmitBuiltinExpr()
6073 auto PTy2 = FTy->getParamType(2); in EmitBuiltinExpr()
6074 if (PTy2 != Arg2->getType()) in EmitBuiltinExpr()
6082 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this), in EmitBuiltinExpr()
6088 // alone, because it's legal to do this from a System V ABI function. in EmitBuiltinExpr()
6092 Address DestAddr = EmitMSVAListRef(E->getArg(0)); in EmitBuiltinExpr()
6093 Address SrcAddr = EmitMSVAListRef(E->getArg(1)); in EmitBuiltinExpr()
6104 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl()); in EmitBuiltinExpr()
6124 // This is down here to avoid non-target specific builtins, however, if in EmitBuiltinExpr()
6160 llvm::FunctionType *FTy = F->getFunctionType(); in EmitBuiltinExpr()
6162 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { in EmitBuiltinExpr()
6166 llvm::Type *PTy = FTy->getParamType(i); in EmitBuiltinExpr()
6167 if (PTy != ArgValue->getType()) { in EmitBuiltinExpr()
6168 // XXX - vector of pointers? in EmitBuiltinExpr()
6170 if (PtrTy->getAddressSpace() != in EmitBuiltinExpr()
6171 ArgValue->getType()->getPointerAddressSpace()) { in EmitBuiltinExpr()
6174 PtrTy->getAddressSpace())); in EmitBuiltinExpr()
6180 if (PTy->isX86_AMXTy()) in EmitBuiltinExpr()
6182 {ArgValue->getType()}, {ArgValue}); in EmitBuiltinExpr()
6190 Value *V = Builder.CreateCall(F, Args); in EmitBuiltinExpr() local
6191 QualType BuiltinRetType = E->getType(); in EmitBuiltinExpr()
6194 if (!BuiltinRetType->isVoidType()) in EmitBuiltinExpr()
6197 if (RetTy != V->getType()) { in EmitBuiltinExpr()
6198 // XXX - vector of pointers? in EmitBuiltinExpr()
6200 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { in EmitBuiltinExpr()
6201 V = Builder.CreateAddrSpaceCast( in EmitBuiltinExpr()
6202 V, llvm::PointerType::get(getLLVMContext(), in EmitBuiltinExpr()
6203 PtrTy->getAddressSpace())); in EmitBuiltinExpr()
6209 if (V->getType()->isX86_AMXTy()) in EmitBuiltinExpr()
6210 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy}, in EmitBuiltinExpr()
6211 {V}); in EmitBuiltinExpr()
6213 V = Builder.CreateBitCast(V, RetTy); in EmitBuiltinExpr()
6216 if (RetTy->isVoidTy()) in EmitBuiltinExpr()
6219 return RValue::get(V); in EmitBuiltinExpr()
6222 // Some target-specific builtins can have aggregate return values, e.g. in EmitBuiltinExpr()
6224 // ReturnValue to be non-null, so that the target-specific emission code can in EmitBuiltinExpr()
6226 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType()); in EmitBuiltinExpr()
6228 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp"); in EmitBuiltinExpr()
6232 // Now see if we can emit a target-specific builtin. in EmitBuiltinExpr()
6233 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { in EmitBuiltinExpr() local
6236 if (V->getType()->isVoidTy()) in EmitBuiltinExpr()
6238 return RValue::get(V); in EmitBuiltinExpr()
6249 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E)) in EmitBuiltinExpr() local
6250 return RValue::get(V); in EmitBuiltinExpr()
6258 return GetUndefRValue(E->getType()); in EmitBuiltinExpr()
6271 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice && in EmitTargetArchBuiltinExpr()
6272 Arch != CGF->getTarget().getTriple().getArch()) in EmitTargetArchBuiltinExpr()
6280 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch); in EmitTargetArchBuiltinExpr()
6284 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch); in EmitTargetArchBuiltinExpr()
6287 return CGF->EmitBPFBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6290 return CGF->EmitX86BuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6295 return CGF->EmitPPCBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6298 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6300 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6303 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6306 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6308 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6311 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue); in EmitTargetArchBuiltinExpr()
6313 if (CGF->getTarget().getTriple().getOS() != llvm::Triple::OSType::AMDHSA) in EmitTargetArchBuiltinExpr()
6315 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E); in EmitTargetArchBuiltinExpr()
6328 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); in EmitTargetBuiltinExpr()
6344 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad)); in GetNeonType()
6347 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
6350 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
6352 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
6355 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
6357 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); in GetNeonType()
6359 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad)); in GetNeonType()
6362 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad)); in GetNeonType()
6367 return llvm::FixedVectorType::get(CGF->Int8Ty, 16); in GetNeonType()
6369 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad)); in GetNeonType()
6371 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad)); in GetNeonType()
6381 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad)); in GetFloatNeonType()
6383 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad)); in GetFloatNeonType()
6385 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad)); in GetFloatNeonType()
6387 llvm_unreachable("Type can't be converted to floating-point!"); in GetFloatNeonType()
6391 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C, in EmitNeonSplat() argument
6394 return Builder.CreateShuffleVector(V, V, SV, "lane"); in EmitNeonSplat()
6397 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { in EmitNeonSplat() argument
6398 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount(); in EmitNeonSplat()
6399 return EmitNeonSplat(V, C, EC); in EmitNeonSplat()
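// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented):
// EmitNeonSplat broadcasts one lane across a vector via a shufflevector with a
// constant mask - the same operation the vdupq_lane_* intrinsics perform
// (assuming <arm_neon.h> on an ARM/AArch64 target).
#include <arm_neon.h>
static int32x4_t broadcast_lane1(int32x2_t V) {
  return vdupq_lane_s32(V, 1);
}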
6406 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); in EmitNeonCall()
6408 if (F->isConstrainedFPIntrinsic()) in EmitNeonCall()
6409 if (ai->getType()->isMetadataTy()) in EmitNeonCall()
6412 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); in EmitNeonCall()
6414 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); in EmitNeonCall()
6417 if (F->isConstrainedFPIntrinsic()) in EmitNeonCall()
6423 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty, in EmitNeonShiftVector() argument
6424 bool neg) { in EmitNeonShiftVector() argument
6425 int SV = cast<ConstantInt>(V)->getSExtValue(); in EmitNeonShiftVector()
6426 return ConstantInt::get(Ty, neg ? -SV : SV); in EmitNeonShiftVector()
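// Scalar model, not part of CGBuiltin.cpp (names invented): when 'neg' is set
// the constant shift amount is negated, matching the NEON convention where a
// negative count in a shift-left style operation means a right shift.
static unsigned long long shift_model(unsigned long long V,
                                      int Amount /* < 0 means shift right */) {
  // Assumes |Amount| < 64 so both shifts are well defined.
  return Amount >= 0 ? V << Amount : V >> -Amount;
}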
6429 // Right-shift a vector by a constant.
6435 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue(); in EmitNeonRShiftImm()
6436 int EltSize = VTy->getScalarSizeInBits(); in EmitNeonRShiftImm()
6444 // Right-shifting an unsigned value by its size yields 0. in EmitNeonRShiftImm()
6447 // Right-shifting a signed value by its size is equivalent in EmitNeonRShiftImm()
6448 // to a shift of size-1. in EmitNeonRShiftImm()
6449 --ShiftAmt; in EmitNeonRShiftImm()
6450 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt); in EmitNeonRShiftImm()
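// Scalar model, not part of CGBuiltin.cpp (names invented), of the clamping
// above: a right shift by the full element width becomes 0 for unsigned
// elements and a shift by (width - 1) for signed ones.
static unsigned ushr_model(unsigned V, int Amt) { // 32-bit unsigned element
  return Amt == 32 ? 0u : V >> Amt;
}
static int sshr_model(int V, int Amt) {           // 32-bit signed element
  return V >> (Amt == 32 ? 31 : Amt);             // arithmetic shift assumed
}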
7552 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID) in findARMVectorIntrinsicInMap()
7571 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); in LookupNeonLLVMIntrinsic()
7574 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); in LookupNeonLLVMIntrinsic()
7581 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1; in LookupNeonLLVMIntrinsic()
7628 const Expr *Arg = E->getArg(0); in EmitCommonNeonSISDBuiltinExpr()
7629 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType()); in EmitCommonNeonSISDBuiltinExpr()
7634 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); in EmitCommonNeonSISDBuiltinExpr()
7636 llvm::Type *ArgTy = ai->getType(); in EmitCommonNeonSISDBuiltinExpr()
7637 if (Ops[j]->getType()->getPrimitiveSizeInBits() == in EmitCommonNeonSISDBuiltinExpr()
7638 ArgTy->getPrimitiveSizeInBits()) in EmitCommonNeonSISDBuiltinExpr()
7641 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); in EmitCommonNeonSISDBuiltinExpr()
7645 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType()); in EmitCommonNeonSISDBuiltinExpr()
7651 llvm::Type *ResultType = CGF.ConvertType(E->getType()); in EmitCommonNeonSISDBuiltinExpr()
7652 if (ResultType->getPrimitiveSizeInBits().getFixedValue() < in EmitCommonNeonSISDBuiltinExpr()
7653 Result->getType()->getPrimitiveSizeInBits().getFixedValue()) in EmitCommonNeonSISDBuiltinExpr()
7665 const Expr *Arg = E->getArg(E->getNumArgs() - 1); in EmitCommonNeonBuiltinExpr()
7667 Arg->getIntegerConstantExpr(getContext()); in EmitCommonNeonBuiltinExpr()
7672 NeonTypeFlags Type(NeonTypeConst->getZExtValue()); in EmitCommonNeonBuiltinExpr()
7685 auto getAlignmentValue32 = [&](Address addr) -> Value* { in EmitCommonNeonBuiltinExpr()
7699 auto NumElements = VTy->getElementCount(); in EmitCommonNeonBuiltinExpr()
7711 if (VTy->getElementType()->isFloatingPointTy() && in EmitCommonNeonBuiltinExpr()
7717 if (VTy->getElementType()->isFloatingPointTy()) in EmitCommonNeonBuiltinExpr()
7739 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); in EmitCommonNeonBuiltinExpr()
7756 switch (VTy->getScalarSizeInBits()) { in EmitCommonNeonBuiltinExpr()
7768 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements()); in EmitCommonNeonBuiltinExpr()
7795 // We generate a target-independent intrinsic, which needs a second argument in EmitCommonNeonBuiltinExpr()
7916 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty}; in EmitCommonNeonBuiltinExpr()
7922 int CV = cast<ConstantInt>(Ops[2])->getSExtValue(); in EmitCommonNeonBuiltinExpr()
7924 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitCommonNeonBuiltinExpr()
7979 Value *V = PoisonValue::get(Ty); in EmitCommonNeonBuiltinExpr() local
7980 PtrOp0 = PtrOp0.withElementType(VTy->getElementType()); in EmitCommonNeonBuiltinExpr()
7983 Ops[0] = Builder.CreateInsertElement(V, Ld, CI); in EmitCommonNeonBuiltinExpr()
7994 for (unsigned I = 2; I < Ops.size() - 1; ++I) in EmitCommonNeonBuiltinExpr()
8026 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); in EmitCommonNeonBuiltinExpr()
8030 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); in EmitCommonNeonBuiltinExpr()
8037 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); in EmitCommonNeonBuiltinExpr()
8040 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); in EmitCommonNeonBuiltinExpr()
8059 RTy = llvm::FixedVectorType::get(RTy->getElementType(), in EmitCommonNeonBuiltinExpr()
8060 RTy->getNumElements() * 2); in EmitCommonNeonBuiltinExpr()
8087 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic; in EmitCommonNeonBuiltinExpr()
8198 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2); in EmitCommonNeonBuiltinExpr()
8212 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitCommonNeonBuiltinExpr()
8239 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitCommonNeonBuiltinExpr()
8261 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitCommonNeonBuiltinExpr()
8276 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); in EmitCommonNeonBuiltinExpr()
8283 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); in EmitCommonNeonBuiltinExpr()
8290 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); in EmitCommonNeonBuiltinExpr()
8297 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); in EmitCommonNeonBuiltinExpr()
8304 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); in EmitCommonNeonBuiltinExpr()
8311 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); in EmitCommonNeonBuiltinExpr()
8317 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); in EmitCommonNeonBuiltinExpr()
8324 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); in EmitCommonNeonBuiltinExpr()
8331 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16); in EmitCommonNeonBuiltinExpr()
8349 llvm::Type *ResultType = ConvertType(E->getType()); in EmitCommonNeonBuiltinExpr()
8350 // AArch64 intrinsic one-element vector type cast to in EmitCommonNeonBuiltinExpr()
8358 llvm::Type *OTy = Op->getType(); in EmitAArch64CompareBuiltinExpr()
8365 OTy = BI->getOperand(0)->getType(); in EmitAArch64CompareBuiltinExpr()
8368 if (OTy->getScalarType()->isFloatingPointTy()) { in EmitAArch64CompareBuiltinExpr()
8389 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType()); in packTBLDVectorList()
8390 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { in packTBLDVectorList()
8395 int PairPos = 0, End = Ops.size() - 1; in packTBLDVectorList()
8403 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits in packTBLDVectorList()
8404 // of the 128-bit lookup table with zero. in packTBLDVectorList()
8488 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) || in EmitSpecialRegisterBuiltin()
8489 RegisterType->isIntegerTy(128)) && in EmitSpecialRegisterBuiltin()
8497 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts(); in EmitSpecialRegisterBuiltin()
8498 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString(); in EmitSpecialRegisterBuiltin()
8507 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); in EmitSpecialRegisterBuiltin()
8508 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) in EmitSpecialRegisterBuiltin()
8509 && "Can't fit 64-bit value in 32-bit register"); in EmitSpecialRegisterBuiltin()
8523 if (ValueType->isPointerTy()) in EmitSpecialRegisterBuiltin()
8531 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1)); in EmitSpecialRegisterBuiltin()
8538 if (ValueType->isPointerTy()) { in EmitSpecialRegisterBuiltin()
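// Illustrative sketch, not part of CGBuiltin.cpp (helper names invented):
// EmitSpecialRegisterBuiltin backs the ACLE system-register intrinsics; the
// register name is taken from the string literal handled above (assuming an
// AArch64 target with <arm_acle.h>).
#include <arm_acle.h>
static unsigned long long read_virtual_counter(void) {
  return __arm_rsr64("cntvct_el0"); // 64-bit system register read
}
static void set_thread_pointer(unsigned long long V) {
  __arm_wsr64("tpidr_el0", V);      // matching 64-bit write
}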
8603 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) in EmitARMBuiltinExpr()
8619 Value *Option = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8624 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8625 Value *RW = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8626 Value *IsData = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
8631 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); in EmitARMBuiltinExpr()
8636 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8638 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); in EmitARMBuiltinExpr()
8643 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8644 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType()); in EmitARMBuiltinExpr()
8653 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8657 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
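// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// bit-manipulation builtins above map to single generic intrinsics, e.g.
// (assuming an ARM target):
static unsigned reversed_leading_zeros(unsigned X) {
  unsigned Rev = __builtin_arm_rbit(X); // llvm.bitreverse, as emitted above
  return X ? __builtin_clz(Rev) : 32u;  // llvm.ctlz; clz(0) is undefined
}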
8663 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); in EmitARMBuiltinExpr()
8664 const FunctionDecl *FD = E->getDirectCallee(); in EmitARMBuiltinExpr()
8667 Ops[i] = EmitScalarExpr(E->getArg(i)); in EmitARMBuiltinExpr()
8668 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); in EmitARMBuiltinExpr()
8670 StringRef Name = FD->getName(); in EmitARMBuiltinExpr()
8695 Value *Coproc = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8696 Value *Opc1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8697 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
8698 Value *CRm = EmitScalarExpr(E->getArg(3)); in EmitARMBuiltinExpr()
8722 Value *Coproc = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8723 Value *Opc1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8724 Value *CRm = EmitScalarExpr(E->getArg(2)); in EmitARMBuiltinExpr()
8739 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType())); in EmitARMBuiltinExpr()
8745 getContext().getTypeSize(E->getType()) == 64) || in EmitARMBuiltinExpr()
8761 Value *LdPtr = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8772 return Builder.CreateBitCast(Val, ConvertType(E->getType())); in EmitARMBuiltinExpr()
8777 Value *LoadAddr = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8779 QualType Ty = E->getType(); in EmitARMBuiltinExpr()
8789 Val->addParamAttr( in EmitARMBuiltinExpr()
8792 if (RealResTy->isPointerTy()) in EmitARMBuiltinExpr()
8805 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) { in EmitARMBuiltinExpr()
8811 Address Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitARMBuiltinExpr()
8812 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8820 Value *StPtr = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8826 Value *StoreVal = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8827 Value *StoreAddr = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8829 QualType Ty = E->getArg(0)->getType(); in EmitARMBuiltinExpr()
8833 if (StoreVal->getType()->isPointerTy()) in EmitARMBuiltinExpr()
8838 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); in EmitARMBuiltinExpr()
8846 StoreAddr->getType()); in EmitARMBuiltinExpr()
8849 CI->addParamAttr( in EmitARMBuiltinExpr()
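// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// exclusive-access builtins handled above are normally used as a retry loop
// (assuming an ARM/AArch64 target where __builtin_arm_ldrex and
// __builtin_arm_strex are available).
static void exclusive_add(volatile int *P, int N) {
  int Old;
  do {
    Old = __builtin_arm_ldrex(P);            // load-exclusive
  } while (__builtin_arm_strex(Old + N, P)); // store-exclusive, 0 on success
}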
8879 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
8880 Value *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitARMBuiltinExpr()
8953 // Some intrinsics are equivalent - if they are, use the base intrinsic ID. in EmitARMBuiltinExpr()
8958 BuiltinID = It->second; in EmitARMBuiltinExpr()
8967 auto getAlignmentValue32 = [&](Address addr) -> Value* { in EmitARMBuiltinExpr()
8975 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); in EmitARMBuiltinExpr()
9003 PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); in EmitARMBuiltinExpr()
9030 PtrOp1 = EmitPointerWithAlignment(E->getArg(1)); in EmitARMBuiltinExpr()
9059 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitARMBuiltinExpr()
9060 llvm::Type *Tys[] = {Arg->getType()}; in EmitARMBuiltinExpr()
9110 const Expr *Arg = E->getArg(E->getNumArgs()-1); in EmitARMBuiltinExpr()
9112 Arg->getIntegerConstantExpr(getContext()); in EmitARMBuiltinExpr()
9126 bool usgn = Result->getZExtValue() == 1; in EmitARMBuiltinExpr()
9135 NeonTypeFlags Type = Result->getZExtValue(); in EmitARMBuiltinExpr()
9153 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, in EmitARMBuiltinExpr()
9154 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch); in EmitARMBuiltinExpr()
9160 // Handle 64-bit integer elements as a special case. Use shuffles of in EmitARMBuiltinExpr()
9161 // one-element vectors to avoid poor code for i64 in the backend. in EmitARMBuiltinExpr()
9162 if (VTy->getElementType()->isIntegerTy(64)) { in EmitARMBuiltinExpr()
9165 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue(); in EmitARMBuiltinExpr()
9166 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); in EmitARMBuiltinExpr()
9168 // Load the value as a one-element vector. in EmitARMBuiltinExpr()
9169 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1); in EmitARMBuiltinExpr()
9175 int Indices[] = {1 - Lane, Lane}; in EmitARMBuiltinExpr()
9181 PtrOp0 = PtrOp0.withElementType(VTy->getElementType()); in EmitARMBuiltinExpr()
9230 // Handle 64-bit integer elements as a special case. Use a shuffle to get in EmitARMBuiltinExpr()
9231 // a one-element vector and avoid poor code for i64 in the backend. in EmitARMBuiltinExpr()
9232 if (VTy->getElementType()->isIntegerTy(64)) { in EmitARMBuiltinExpr()
9237 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; in EmitARMBuiltinExpr()
9246 PtrOp0.withElementType(Ops[1]->getType())); in EmitARMBuiltinExpr()
9277 return E->getIntegerConstantExpr(Context)->getExtValue(); in GetIntegerConstantValue()
9280 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V, in SignOrZeroExtend() argument
9282 // Helper function called by Tablegen-constructed ARM MVE builtin codegen, in SignOrZeroExtend()
9284 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T); in SignOrZeroExtend()
9287 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V, in MVEImmediateShr() argument
9294 unsigned LaneBits = cast<llvm::VectorType>(V->getType()) in MVEImmediateShr()
9295 ->getElementType() in MVEImmediateShr()
9296 ->getPrimitiveSizeInBits(); in MVEImmediateShr()
9302 return llvm::Constant::getNullValue(V->getType()); in MVEImmediateShr()
9304 --Shift; in MVEImmediateShr()
9306 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift); in MVEImmediateShr()
9309 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { in ARMMVEVectorSplat() argument
9310 // MVE-specific helper function for a vector splat, which infers the element in ARMMVEVectorSplat()
9313 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits(); in ARMMVEVectorSplat()
9314 return Builder.CreateVectorSplat(Elements, V); in ARMMVEVectorSplat()
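// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): MVE
// vectors are always 128 bits wide, so the element count is inferred as
// 128 / element-size; splatting an int16_t therefore yields an 8-lane vector
// (assuming <arm_mve.h> on an MVE target).
#include <arm_mve.h>
static int16x8_t splat16(int16_t X) {
  return vdupq_n_s16(X);
}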
9319 llvm::Value *V, in ARMMVEVectorReinterpret() argument
9321 // Convert one MVE vector type into another by reinterpreting its in-register in ARMMVEVectorReinterpret()
9324 // Little-endian, this is identical to a bitcast (which reinterprets the in ARMMVEVectorReinterpret()
9325 // memory format). But big-endian, they're not necessarily the same, because in ARMMVEVectorReinterpret()
9329 // We generate a bitcast whenever we can (if we're little-endian, or if the in ARMMVEVectorReinterpret()
9332 if (CGF->getTarget().isBigEndian() && in ARMMVEVectorReinterpret()
9333 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) { in ARMMVEVectorReinterpret()
9335 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq, in ARMMVEVectorReinterpret()
9336 {DestType, V->getType()}), in ARMMVEVectorReinterpret()
9337 V); in ARMMVEVectorReinterpret()
9339 return Builder.CreateBitCast(V, DestType); in ARMMVEVectorReinterpret()
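// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): at
// the source level this helper covers the MVE vreinterpretq_* intrinsics,
// which are free (a plain bitcast) on little-endian targets (assuming
// <arm_mve.h> on an MVE target).
#include <arm_mve.h>
static uint8x16_t view_as_bytes(uint32x4_t V) {
  return vreinterpretq_u8_u32(V);
}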
9343 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { in VectorUnzip() argument
9348 cast<llvm::FixedVectorType>(V->getType())->getNumElements(); in VectorUnzip()
9351 return Builder.CreateShuffleVector(V, Indices); in VectorUnzip()
9357 assert(V0->getType() == V1->getType() && "Can't zip different vector types"); in VectorZip()
9360 cast<llvm::FixedVectorType>(V0->getType())->getNumElements(); in VectorZip()
9370 // MVE-specific helper function to make a vector splat of a constant such as in ARMMVEConstantSplat()
9372 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType(); in ARMMVEConstantSplat()
9373 unsigned LaneBits = T->getPrimitiveSizeInBits(); in ARMMVEConstantSplat()
9374 uint32_t Value = HighBit << (LaneBits - 1); in ARMMVEConstantSplat()
9376 Value |= (1UL << (LaneBits - 1)) - 1; in ARMMVEConstantSplat()
9382 llvm::Value *V, in ARMMVEVectorElementReverse() argument
9384 // MVE-specific helper function which reverses the elements of a in ARMMVEVectorElementReverse()
9385 // vector within every (ReverseWidth)-bit collection of lanes. in ARMMVEVectorElementReverse()
9387 unsigned LaneSize = V->getType()->getScalarSizeInBits(); in ARMMVEVectorElementReverse()
9389 unsigned Mask = ReverseWidth / LaneSize - 1; in ARMMVEVectorElementReverse()
9392 return Builder.CreateShuffleVector(V, Indices); in ARMMVEVectorElementReverse()
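// Worked example, not part of CGBuiltin.cpp (helper name invented): reversing
// lanes within each group of 2^k lanes maps lane i to (i XOR Mask) inside its
// group. With 8-bit lanes and ReverseWidth = 32, Mask = 32/8 - 1 = 3, giving
// the shuffle mask {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.
static int reversed_lane(int I, int Mask) { // Mask = lanes-per-group - 1
  return I ^ Mask; // valid because Mask is of the form 2^k - 1
}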
9422 auto MvecCType = E->getType(); in EmitARMMVEBuiltinExpr()
9424 assert(MvecLType->isStructTy() && in EmitARMMVEBuiltinExpr()
9426 assert(MvecLType->getStructNumElements() == 1 && in EmitARMMVEBuiltinExpr()
9427 "Return-type struct for vld[24]q should have one element"); in EmitARMMVEBuiltinExpr()
9428 auto MvecLTypeInner = MvecLType->getStructElementType(0); in EmitARMMVEBuiltinExpr()
9429 assert(MvecLTypeInner->isArrayTy() && in EmitARMMVEBuiltinExpr()
9430 "Return-type struct for vld[24]q should contain an array"); in EmitARMMVEBuiltinExpr()
9431 assert(MvecLTypeInner->getArrayNumElements() == NumVectors && in EmitARMMVEBuiltinExpr()
9432 "Array member of return-type struct vld[24]q has wrong length"); in EmitARMMVEBuiltinExpr()
9433 auto VecLType = MvecLTypeInner->getArrayElementType(); in EmitARMMVEBuiltinExpr()
9437 auto Addr = E->getArg(0); in EmitARMMVEBuiltinExpr()
9439 Tys.push_back(ConvertType(Addr->getType())); in EmitARMMVEBuiltinExpr()
9459 auto Addr = E->getArg(0); in EmitARMMVEBuiltinExpr()
9461 Tys.push_back(ConvertType(Addr->getType())); in EmitARMMVEBuiltinExpr()
9463 auto MvecCType = E->getArg(1)->getType(); in EmitARMMVEBuiltinExpr()
9465 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct"); in EmitARMMVEBuiltinExpr()
9466 assert(MvecLType->getStructNumElements() == 1 && in EmitARMMVEBuiltinExpr()
9467 "Data-type struct for vst2q should have one element"); in EmitARMMVEBuiltinExpr()
9468 auto MvecLTypeInner = MvecLType->getStructElementType(0); in EmitARMMVEBuiltinExpr()
9469 assert(MvecLTypeInner->isArrayTy() && in EmitARMMVEBuiltinExpr()
9470 "Data-type struct for vst2q should contain an array"); in EmitARMMVEBuiltinExpr()
9471 assert(MvecLTypeInner->getArrayNumElements() == NumVectors && in EmitARMMVEBuiltinExpr()
9472 "Array member of data-type struct for vst2q has wrong length"); in EmitARMMVEBuiltinExpr()
9473 auto VecLType = MvecLTypeInner->getArrayElementType(); in EmitARMMVEBuiltinExpr()
9478 EmitAggExpr(E->getArg(1), MvecSlot); in EmitARMMVEBuiltinExpr()
9545 assert(E->getNumArgs() >= 3); in EmitAArch64TblBuiltinExpr()
9548 const Expr *Arg = E->getArg(E->getNumArgs() - 1); in EmitAArch64TblBuiltinExpr()
9550 Arg->getIntegerConstantExpr(CGF.getContext()); in EmitAArch64TblBuiltinExpr()
9555 NeonTypeFlags Type = Result->getZExtValue(); in EmitAArch64TblBuiltinExpr()
9653 Value *V = PoisonValue::get(VTy); in vectorWrapScalar16() local
9655 Op = Builder.CreateInsertElement(V, Op, CI); in vectorWrapScalar16()
9659 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
9794 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits(); in getSVEVectorForElementType()
9803 if (isa<TargetExtType>(Pred->getType()) && in EmitSVEPredicateCast()
9804 cast<TargetExtType>(Pred->getType())->getName() == "aarch64.svcount") in EmitSVEPredicateCast()
9808 if (Pred->getType() == RTy) in EmitSVEPredicateCast()
9813 switch (VTy->getMinNumElements()) { in EmitSVEPredicateCast()
9825 IntrinsicTy = Pred->getType(); in EmitSVEPredicateCast()
9831 assert(C->getType() == RTy && "Unexpected return type!"); in EmitSVEPredicateCast()
9843 if (Ops[1]->getType()->isVectorTy()) in EmitSVEGatherLoad()
9845 // map this built-in to an LLVM IR intrinsic, we need both the return type in EmitSVEGatherLoad()
9847 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()}); in EmitSVEGatherLoad()
9851 // return type in order to uniquely map this built-in to an LLVM IR in EmitSVEGatherLoad()
9863 Ops[0], cast<llvm::ScalableVectorType>(F->getArg(0)->getType())); in EmitSVEGatherLoad()
9869 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset"); in EmitSVEGatherLoad()
9875 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) { in EmitSVEGatherLoad()
9877 OverloadedTy->getElementType()->getScalarSizeInBits() / 8; in EmitSVEGatherLoad()
9901 if (Ops[2]->getType()->isVectorTy()) in EmitSVEScatterStore()
9903 // map this built-in to an LLVM IR intrinsic, we need both the return type in EmitSVEScatterStore()
9905 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()}); in EmitSVEScatterStore()
9909 // return type in order to uniquely map this built-in to an LLVM IR in EmitSVEScatterStore()
9917 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset"); in EmitSVEScatterStore()
9933 Ops[1], cast<llvm::ScalableVectorType>(F->getArg(1)->getType())); in EmitSVEScatterStore()
9937 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) { in EmitSVEScatterStore()
9939 OverloadedTy->getElementType()->getScalarSizeInBits() / 8; in EmitSVEScatterStore()
9949 // The gather prefetches are overloaded on the vector input - this can either in EmitSVEGatherPrefetch()
9951 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType()); in EmitSVEGatherPrefetch()
9953 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType()); in EmitSVEGatherPrefetch()
9959 if (Ops[1]->getType()->isVectorTy()) { in EmitSVEGatherPrefetch()
9969 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8; in EmitSVEGatherPrefetch()
10005 auto RetTy = llvm::VectorType::get(VTy->getElementType(), in EmitSVEStructLoad()
10006 VTy->getElementCount() * N); in EmitSVEStructLoad()
10017 unsigned MinElts = VTy->getMinNumElements(); in EmitSVEStructLoad()
10064 for (unsigned I = Ops.size() - N; I < Ops.size(); ++I) in EmitSVEStructStore()
10084 // The pair-wise function has a narrower overloaded type. in EmitSVEPMull()
10085 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType()); in EmitSVEPMull()
10116 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType()); in EmitSVEPrefetchLoad()
10125 QualType LangPTy = E->getArg(1)->getType(); in EmitSVEMaskedLoad()
10127 LangPTy->castAs<PointerType>()->getPointeeType()); in EmitSVEMaskedLoad()
10159 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType()); in EmitSVEMaskedLoad()
10172 QualType LangPTy = E->getArg(1)->getType(); in EmitSVEMaskedStore()
10174 LangPTy->castAs<PointerType>()->getPointeeType()); in EmitSVEMaskedStore()
10178 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType()); in EmitSVEMaskedStore()
10211 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType()); in EmitSVEMaskedStore()
10283 cast<llvm::VectorType>(Ty)->getElementCount(), Scalar); in EmitSVEDupX()
10287 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType())); in EmitSVEDupX()
10292 // intrinsic that is code-generated as a no-op, because the LLVM bitcast in EmitSVEReinterpret()
10296 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian. in EmitSVEReinterpret()
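// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): at
// the source level these are the ACLE svreinterpret calls, which cost nothing
// on little-endian targets (assuming <arm_sve.h> on an SVE target).
#include <arm_sve.h>
static svuint8_t view_sve_as_bytes(svuint32_t V) {
  return svreinterpret_u8_u32(V);
}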
10322 return {DefaultType, Ops[1]->getType()}; in getSVEOverloadTypes()
10325 return {getSVEPredType(TypeFlags), Ops[0]->getType()}; in getSVEOverloadTypes()
10328 return {Ops[0]->getType(), Ops.back()->getType()}; in getSVEOverloadTypes()
10330 if (TypeFlags.isReductionQV() && !ResultType->isScalableTy() && in getSVEOverloadTypes()
10331 ResultType->isVectorTy()) in getSVEOverloadTypes()
10332 return {ResultType, Ops[1]->getType()}; in getSVEOverloadTypes()
10344 unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue(); in EmitSVETupleSetOrGet()
10346 TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty); in EmitSVETupleSetOrGet()
10352 I * SingleVecTy->getMinNumElements()); in EmitSVETupleSetOrGet()
10364 auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType()); in EmitSVETupleCreate()
10369 unsigned MinElts = SrcTy->getMinNumElements(); in EmitSVETupleCreate()
10380 // Multi-vector results should be combined into a single (wide) result in FormSVEBuiltinResult()
10382 auto *StructTy = dyn_cast<StructType>(Call->getType()); in FormSVEBuiltinResult()
10386 auto *VTy = dyn_cast<ScalableVectorType>(StructTy->getTypeAtIndex(0U)); in FormSVEBuiltinResult()
10389 unsigned N = StructTy->getNumElements(); in FormSVEBuiltinResult()
10392 bool IsPredTy = VTy->getElementType()->isIntegerTy(1); in FormSVEBuiltinResult()
10393 unsigned MinElts = IsPredTy ? 16 : VTy->getMinNumElements(); in FormSVEBuiltinResult()
10396 ScalableVectorType::get(VTy->getElementType(), MinElts * N); in FormSVEBuiltinResult()
10400 assert(SRet->getType() == VTy && "Unexpected type for result value"); in FormSVEBuiltinResult()
10427 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { in GetAArch64SVEProcessedOperands()
10429 Value *Arg = EmitScalarExpr(E->getArg(i)); in GetAArch64SVEProcessedOperands()
10435 E->getArg(i)->getIntegerConstantExpr(getContext()); in GetAArch64SVEProcessedOperands()
10441 *Result = Result->extOrTrunc(32); in GetAArch64SVEProcessedOperands()
10446 if (IsTupleGetOrSet || !isa<ScalableVectorType>(Arg->getType())) { in GetAArch64SVEProcessedOperands()
10451 auto *VTy = cast<ScalableVectorType>(Arg->getType()); in GetAArch64SVEProcessedOperands()
10452 unsigned MinElts = VTy->getMinNumElements(); in GetAArch64SVEProcessedOperands()
10453 bool IsPred = VTy->getElementType()->isIntegerTy(1); in GetAArch64SVEProcessedOperands()
10454 unsigned N = (MinElts * VTy->getScalarSizeInBits()) / (IsPred ? 16 : 128); in GetAArch64SVEProcessedOperands()
10464 ScalableVectorType::get(VTy->getElementType(), MinElts / N); in GetAArch64SVEProcessedOperands()
10472 llvm::Type *Ty = ConvertType(E->getType()); in EmitAArch64SVEBuiltinExpr()
10475 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitAArch64SVEBuiltinExpr()
10483 SVETypeFlags TypeFlags(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10487 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic, in EmitAArch64SVEBuiltinExpr()
10490 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10492 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10494 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10496 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10498 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10500 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10502 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SVEBuiltinExpr()
10509 else if (Builtin->LLVMIntrinsic != 0) { in EmitAArch64SVEBuiltinExpr()
10525 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType())) in EmitAArch64SVEBuiltinExpr()
10526 if (PredTy->getElementType()->isIntegerTy(1)) in EmitAArch64SVEBuiltinExpr()
10548 llvm::Type *OpndTy = Ops[1]->getType(); in EmitAArch64SVEBuiltinExpr()
10553 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, in EmitAArch64SVEBuiltinExpr()
10558 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType())) in EmitAArch64SVEBuiltinExpr()
10559 if (PredTy->getScalarType()->isIntegerTy(1)) in EmitAArch64SVEBuiltinExpr()
10592 bool IsSVCount = isa<TargetExtType>(Ops[0]->getType()); in EmitAArch64SVEBuiltinExpr()
10593 assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() == in EmitAArch64SVEBuiltinExpr()
10603 auto OverloadedTy = getSVEType(SVETypeFlags(Builtin->TypeModifier)); in EmitAArch64SVEBuiltinExpr()
10613 SVETypeFlags TypeFlags(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10621 SVETypeFlags TypeFlags(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10664 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType())); in EmitAArch64SVEBuiltinExpr()
10691 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1); in EmitAArch64SVEBuiltinExpr()
10696 llvm::Type *EltTy = Ops[0]->getType(); in EmitAArch64SVEBuiltinExpr()
10717 SVETypeFlags TypeFlags(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10751 SVETypeFlags TF(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10754 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue()); in EmitAArch64SVEBuiltinExpr()
10772 SVETypeFlags TF(Builtin->TypeModifier); in EmitAArch64SVEBuiltinExpr()
10861 SVETypeFlags TypeFlags(Builtin->TypeModifier); in EmitAArch64SMEBuiltinExpr()
10865 return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SMEBuiltinExpr()
10867 return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SMEBuiltinExpr()
10870 return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SMEBuiltinExpr()
10875 return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic); in EmitAArch64SMEBuiltinExpr()
10877 // Handle builtins which require their multi-vector operands to be swapped in EmitAArch64SMEBuiltinExpr()
10881 if (Builtin->LLVMIntrinsic == 0) in EmitAArch64SMEBuiltinExpr()
10886 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType())) in EmitAArch64SMEBuiltinExpr()
10887 if (PredTy->getElementType()->isIntegerTy(1)) in EmitAArch64SMEBuiltinExpr()
10892 ? CGM.getIntrinsic(Builtin->LLVMIntrinsic) in EmitAArch64SMEBuiltinExpr()
10893 : CGM.getIntrinsic(Builtin->LLVMIntrinsic, {getSVEType(TypeFlags)}); in EmitAArch64SMEBuiltinExpr()
10913 unsigned HintID = static_cast<unsigned>(-1); in EmitAArch64BuiltinExpr()
10941 if (HintID != static_cast<unsigned>(-1)) { in EmitAArch64BuiltinExpr()
10948 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
10960 CI->setAttributes(Attrs); in EmitAArch64BuiltinExpr()
10961 CI->setCallingConv( in EmitAArch64BuiltinExpr()
10965 EmitPointerWithAlignment(E->getArg(0))); in EmitAArch64BuiltinExpr()
10967 EmitPointerWithAlignment(E->getArg(1))); in EmitAArch64BuiltinExpr()
10971 assert((getContext().getTypeSize(E->getType()) == 32) && in EmitAArch64BuiltinExpr()
10973 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
10975 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); in EmitAArch64BuiltinExpr()
10978 assert((getContext().getTypeSize(E->getType()) == 64) && in EmitAArch64BuiltinExpr()
10980 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
10982 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit"); in EmitAArch64BuiltinExpr()
10987 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
10988 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType()); in EmitAArch64BuiltinExpr()
10996 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11001 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11008 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11009 llvm::Type *Ty = Arg->getType(); in EmitAArch64BuiltinExpr()
11016 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11017 llvm::Type *Ty = Arg->getType(); in EmitAArch64BuiltinExpr()
11024 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11025 llvm::Type *Ty = Arg->getType(); in EmitAArch64BuiltinExpr()
11032 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11033 llvm::Type *Ty = Arg->getType(); in EmitAArch64BuiltinExpr()
11039 assert((getContext().getTypeSize(E->getType()) == 32) && in EmitAArch64BuiltinExpr()
11041 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11050 llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11051 llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11101 Address MemAddress = EmitPointerWithAlignment(E->getArg(0)); in EmitAArch64BuiltinExpr()
11108 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments"); in EmitAArch64BuiltinExpr()
11109 const FunctionDecl *FD = E->getDirectCallee(); in EmitAArch64BuiltinExpr()
11112 Ops[i] = EmitScalarExpr(E->getArg(i)); in EmitAArch64BuiltinExpr()
11113 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); in EmitAArch64BuiltinExpr()
11115 StringRef Name = FD->getName(); in EmitAArch64BuiltinExpr()
11121 getContext().getTypeSize(E->getType()) == 128) { in EmitAArch64BuiltinExpr()
11127 Value *LdPtr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11139 return Builder.CreateBitCast(Val, ConvertType(E->getType())); in EmitAArch64BuiltinExpr()
11142 Value *LoadAddr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11144 QualType Ty = E->getType(); in EmitAArch64BuiltinExpr()
11155 Val->addParamAttr( in EmitAArch64BuiltinExpr()
11158 if (RealResTy->isPointerTy()) in EmitAArch64BuiltinExpr()
11169 getContext().getTypeSize(E->getArg(0)->getType()) == 128) { in EmitAArch64BuiltinExpr()
11176 Address Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitAArch64BuiltinExpr()
11177 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true); in EmitAArch64BuiltinExpr()
11184 Value *StPtr = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11190 Value *StoreVal = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11191 Value *StoreAddr = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11193 QualType Ty = E->getArg(0)->getType(); in EmitAArch64BuiltinExpr()
11197 if (StoreVal->getType()->isPointerTy()) in EmitAArch64BuiltinExpr()
11202 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType())); in EmitAArch64BuiltinExpr()
11211 StoreAddr->getType()); in EmitAArch64BuiltinExpr()
11213 CI->addParamAttr( in EmitAArch64BuiltinExpr()
11220 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) in EmitAArch64BuiltinExpr()
11238 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext())) in EmitAArch64BuiltinExpr()
11242 return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))}); in EmitAArch64BuiltinExpr()
11276 Value *Arg0 = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11277 Value *Arg1 = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11280 llvm::Type *DataTy = F->getFunctionType()->getParamType(1); in EmitAArch64BuiltinExpr()
11288 Value *Dst = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11289 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11290 Value *Size = EmitScalarExpr(E->getArg(2)); in EmitAArch64BuiltinExpr()
11316 llvm::Type *T = ConvertType(E->getType()); in EmitAArch64BuiltinExpr()
11319 Value *Pointer = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11320 Value *Mask = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11329 Value *Pointer = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11330 Value *TagOffset = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11339 Value *Pointer = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11340 Value *ExcludedMask = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11347 // Although it is possible to supply a different return in EmitAArch64BuiltinExpr()
11351 Value *TagAddress = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11357 // Although it is possible to supply a different tag (to set) in EmitAArch64BuiltinExpr()
11358 // to this intrinsic (as first arg), for now we supply in EmitAArch64BuiltinExpr()
11361 Value *TagAddress = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11367 Value *PointerA = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11368 Value *PointerB = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
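// Illustrative sketch, not part of CGBuiltin.cpp (helper name invented): the
// MTE builtins above surface through the <arm_acle.h> memory-tagging
// intrinsics (assuming an AArch64 target compiled with MTE support).
#include <arm_acle.h>
static void *pick_fresh_tag(void *P) {
  return __arm_mte_create_random_tag(P, 0); // irg with no excluded tags
}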
11425 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); in EmitAArch64BuiltinExpr()
11449 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11467 llvm::Type *ResType = ConvertType(E->getType()); in EmitAArch64BuiltinExpr()
11472 Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned); in EmitAArch64BuiltinExpr()
11474 Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned); in EmitAArch64BuiltinExpr()
11504 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty); in EmitAArch64BuiltinExpr()
11506 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
11515 llvm::Type *IntTy = ConvertType(E->getType()); in EmitAArch64BuiltinExpr()
11528 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty); in EmitAArch64BuiltinExpr()
11538 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11539 llvm::Type *RetTy = ConvertType(E->getType()); in EmitAArch64BuiltinExpr()
11547 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11548 llvm::Type *ArgType = Arg->getType(); in EmitAArch64BuiltinExpr()
11565 Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11579 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11580 llvm::Type *ArgType = ArgValue->getType(); in EmitAArch64BuiltinExpr()
11590 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11594 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); in EmitAArch64BuiltinExpr()
11600 Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))}); in EmitAArch64BuiltinExpr()
11613 // Some intrinsics are equivalent - if they are, use the base intrinsic ID. in EmitAArch64BuiltinExpr()
11618 BuiltinID = It->second; in EmitAArch64BuiltinExpr()
11629 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { in EmitAArch64BuiltinExpr()
11648 PtrOp0 = EmitPointerWithAlignment(E->getArg(0)); in EmitAArch64BuiltinExpr()
11661 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1))); in EmitAArch64BuiltinExpr()
11667 const Expr *Arg = E->getArg(E->getNumArgs()-1); in EmitAArch64BuiltinExpr()
11670 Arg->getIntegerConstantExpr(getContext())) in EmitAArch64BuiltinExpr()
11672 Type = NeonTypeFlags(Result->getZExtValue()); in EmitAArch64BuiltinExpr()
11677 // Handle non-overloaded intrinsics first. in EmitAArch64BuiltinExpr()
11681 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11685 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11694 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11700 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr); in EmitAArch64BuiltinExpr()
11708 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11709 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64; in EmitAArch64BuiltinExpr()
11725 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11728 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64) in EmitAArch64BuiltinExpr()
11730 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32) in EmitAArch64BuiltinExpr()
11753 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11788 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11809 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11826 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11842 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11854 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11866 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitAArch64BuiltinExpr()
11880 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11882 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
11888 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11890 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
11896 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11898 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
11904 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11906 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
11912 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11914 Ops[0], ConvertType(E->getCallReturnType(getContext())), in EmitAArch64BuiltinExpr()
11918 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
11938 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11961 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
11984 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12017 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12025 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12045 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12051 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12057 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12064 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12070 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12076 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12082 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12088 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12093 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12099 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12105 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12110 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12116 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12121 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12126 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12132 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12138 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12141 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12144 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12147 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12150 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12156 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]}); in EmitAArch64BuiltinExpr()
12158 Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh"); in EmitAArch64BuiltinExpr() local
12163 {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]}); in EmitAArch64BuiltinExpr()
12167 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd"); in EmitAArch64BuiltinExpr()
12170 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd"); in EmitAArch64BuiltinExpr()
12175 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); in EmitAArch64BuiltinExpr()
12188 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12198 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12207 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12208 int SV = cast<ConstantInt>(Ops[1])->getSExtValue(); in EmitAArch64BuiltinExpr()
12209 Ops[1] = ConstantInt::get(Int64Ty, -SV); in EmitAArch64BuiltinExpr()
12218 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); in EmitAArch64BuiltinExpr()
12225 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12227 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n"); in EmitAArch64BuiltinExpr()
12230 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12233 Amt->getZExtValue())), in EmitAArch64BuiltinExpr()
12237 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12238 uint64_t ShiftAmt = Amt->getZExtValue(); in EmitAArch64BuiltinExpr()
12239 // Right-shifting an unsigned value by its size yields 0. in EmitAArch64BuiltinExpr()
12246 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12249 Amt->getZExtValue())), in EmitAArch64BuiltinExpr()
12254 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12255 uint64_t ShiftAmt = Amt->getZExtValue(); in EmitAArch64BuiltinExpr()
12256 // Right-shifting an unsigned value by its size yields 0. in EmitAArch64BuiltinExpr()
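A minimal model of the special case the two comments above guard (an illustration, not the emitter code; the helper name is mine): the AArch64 scalar builtins define a right shift by the full 64-bit width to yield 0, but an lshr by 64 would be poison in LLVM IR (and undefined behaviour in C++), so a constant full-width shift is folded away before any shift instruction is emitted.

#include <cstdint>

// Hypothetical helper: clamp the shift instead of emitting an oversize lshr.
uint64_t vshrd_n_u64_model(uint64_t V, unsigned N) {
  return N >= 64 ? 0 : V >> N;
}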
12268 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), in EmitAArch64BuiltinExpr()
12290 ProductOps.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12304 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)), in EmitAArch64BuiltinExpr()
12323 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12329 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), in EmitAArch64BuiltinExpr()
12336 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAArch64BuiltinExpr()
12356 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, in EmitAArch64BuiltinExpr()
12357 Builtin->NameHint, Builtin->TypeModifier, E, Ops, in EmitAArch64BuiltinExpr()
12360 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) in EmitAArch64BuiltinExpr() local
12361 return V; in EmitAArch64BuiltinExpr()
12391 ? llvm::FixedVectorType::get(VTy->getElementType(), in EmitAArch64BuiltinExpr()
12392 VTy->getNumElements() / 2) in EmitAArch64BuiltinExpr()
12395 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst); in EmitAArch64BuiltinExpr()
12407 if (VTy && VTy->getElementType() == DoubleTy) { in EmitAArch64BuiltinExpr()
12423 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(), in EmitAArch64BuiltinExpr()
12424 VTy->getNumElements() * 2); in EmitAArch64BuiltinExpr()
12426 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), in EmitAArch64BuiltinExpr()
12450 Ops.push_back(EmitScalarExpr(E->getArg(3))); in EmitAArch64BuiltinExpr()
12451 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); in EmitAArch64BuiltinExpr()
12466 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; in EmitAArch64BuiltinExpr()
12469 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12477 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; in EmitAArch64BuiltinExpr()
12480 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12488 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; in EmitAArch64BuiltinExpr()
12492 unsigned ArgElts = VTy->getNumElements(); in EmitAArch64BuiltinExpr()
12493 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType()); in EmitAArch64BuiltinExpr()
12494 unsigned BitWidth = EltTy->getBitWidth(); in EmitAArch64BuiltinExpr()
12503 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType()); in EmitAArch64BuiltinExpr()
12510 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; in EmitAArch64BuiltinExpr()
12516 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; in EmitAArch64BuiltinExpr()
12523 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12531 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12535 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12540 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12544 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitAArch64BuiltinExpr()
12563 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12577 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12584 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12598 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12612 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12619 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12633 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12647 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12657 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12665 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12673 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12681 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12804 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitAArch64BuiltinExpr()
12825 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd"); in EmitAArch64BuiltinExpr()
12827 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh"); in EmitAArch64BuiltinExpr()
12839 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12867 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12879 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12891 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12903 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12912 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12921 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12930 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12939 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12948 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12957 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12966 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12975 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12984 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
12993 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13002 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13011 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13020 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13029 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13038 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13047 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13056 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13065 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13074 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13083 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13092 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13101 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13110 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13119 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13125 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy); in EmitAArch64BuiltinExpr()
13133 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13142 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13150 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13159 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13167 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13176 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13184 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13193 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitAArch64BuiltinExpr()
13235 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], in EmitAArch64BuiltinExpr()
13243 VTy->getElementType(), Ops[0], PtrOp0.getAlignment()); in EmitAArch64BuiltinExpr()
13244 LI->setAtomic(llvm::AtomicOrdering::Acquire); in EmitAArch64BuiltinExpr()
13250 Value *V = PoisonValue::get(Ty); in EmitAArch64BuiltinExpr() local
13251 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], in EmitAArch64BuiltinExpr()
13254 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); in EmitAArch64BuiltinExpr()
13268 SI->setAtomic(llvm::AtomicOrdering::Release); in EmitAArch64BuiltinExpr()
13315 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
13326 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
13338 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; in EmitAArch64BuiltinExpr()
13352 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; in EmitAArch64BuiltinExpr()
13360 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; in EmitAArch64BuiltinExpr()
13367 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; in EmitAArch64BuiltinExpr()
13375 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; in EmitAArch64BuiltinExpr()
13382 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; in EmitAArch64BuiltinExpr()
13390 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; in EmitAArch64BuiltinExpr()
13402 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitAArch64BuiltinExpr()
13420 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) in EmitAArch64BuiltinExpr()
13437 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { in EmitAArch64BuiltinExpr()
13509 const Expr *Arg = E->getArg(0); in EmitBPFBuiltinExpr()
13510 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField; in EmitBPFBuiltinExpr()
13513 CGM.Error(E->getExprLoc(), in EmitBPFBuiltinExpr()
13514 "using __builtin_preserve_field_info() without -g"); in EmitBPFBuiltinExpr()
13526 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitBPFBuiltinExpr()
13527 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue()); in EmitBPFBuiltinExpr()
13532 {FieldAddr->getType()}); in EmitBPFBuiltinExpr()
13538 CGM.Error(E->getExprLoc(), "using builtin function without -g"); in EmitBPFBuiltinExpr()
13542 const Expr *Arg0 = E->getArg(0); in EmitBPFBuiltinExpr()
13543 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( in EmitBPFBuiltinExpr()
13544 Arg0->getType(), Arg0->getExprLoc()); in EmitBPFBuiltinExpr()
13546 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitBPFBuiltinExpr()
13547 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); in EmitBPFBuiltinExpr()
13558 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); in EmitBPFBuiltinExpr()
13563 CGM.Error(E->getExprLoc(), "using builtin function without -g"); in EmitBPFBuiltinExpr()
13567 const Expr *Arg0 = E->getArg(0); in EmitBPFBuiltinExpr()
13568 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType( in EmitBPFBuiltinExpr()
13569 Arg0->getType(), Arg0->getExprLoc()); in EmitBPFBuiltinExpr()
13572 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens()); in EmitBPFBuiltinExpr()
13573 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr()); in EmitBPFBuiltinExpr()
13574 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr()); in EmitBPFBuiltinExpr()
13575 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl()); in EmitBPFBuiltinExpr()
13577 auto InitVal = Enumerator->getInitVal(); in EmitBPFBuiltinExpr()
13583 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr; in EmitBPFBuiltinExpr()
13586 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1))); in EmitBPFBuiltinExpr()
13587 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue()); in EmitBPFBuiltinExpr()
13594 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo); in EmitBPFBuiltinExpr()
13602 assert((Ops.size() & (Ops.size() - 1)) == 0 && in BuildVector()
13603 "Not a power-of-two sized vector!"); in BuildVector()
13618 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size())); in BuildVector()
13632 cast<IntegerType>(Mask->getType())->getBitWidth()); in getMaskVecValue()
13653 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements()); in EmitX86MaskedStore()
13660 llvm::Type *Ty = Ops[1]->getType(); in EmitX86MaskedLoad()
13664 CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements()); in EmitX86MaskedLoad()
13671 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType()); in EmitX86ExpandLoad()
13675 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements()); in EmitX86ExpandLoad()
13685 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); in EmitX86CompressExpand()
13687 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); in EmitX86CompressExpand()
13697 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType()); in EmitX86CompressStore()
13700 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); in EmitX86CompressStore()
13710 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86MaskLogic()
13718 Ops[0]->getType()); in EmitX86MaskLogic()
13723 llvm::Type *Ty = Op0->getType(); in EmitX86FunnelShift()
13726 // Funnel shift amounts are treated as modulo and types are all power-of-2 so in EmitX86FunnelShift()
13728 if (Amt->getType() != Ty) { in EmitX86FunnelShift()
13729 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements(); in EmitX86FunnelShift()
13730 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); in EmitX86FunnelShift()
13743 llvm::Type *Ty = Op0->getType(); in EmitX86vpcom()
13744 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; in EmitX86vpcom()
13784 if (C->isAllOnesValue()) in EmitX86Select()
13788 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements()); in EmitX86Select()
13797 if (C->isAllOnesValue()) in EmitX86ScalarSelect()
13801 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth()); in EmitX86ScalarSelect()
13811 if (!C || !C->isAllOnesValue()) in EmitX86MaskedCompareResult()
13822 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); in EmitX86MaskedCompareResult()
13835 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86MaskedCompare()
13866 Value *Zero = Constant::getNullValue(In->getType()); in EmitX86ConvertToMask()
13872 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue(); in EmitX86ConvertIntToFp()
13873 llvm::Type *Ty = Ops[1]->getType(); in EmitX86ConvertIntToFp()
13879 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); in EmitX86ConvertIntToFp()
13958 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 || in EmitX86FMAExpr()
13963 llvm::Type *Ty = A->getType(); in EmitX86FMAExpr()
13992 MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); in EmitX86FMAExpr()
14022 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); in EmitScalarFMAExpr()
14034 switch (Ops[0]->getType()->getPrimitiveSizeInBits()) { in EmitScalarFMAExpr()
14052 Intrinsic::experimental_constrained_fma, Ops[0]->getType()); in EmitScalarFMAExpr()
14055 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType()); in EmitScalarFMAExpr()
14060 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) in EmitScalarFMAExpr()
14076 llvm::Type *Ty = Ops[0]->getType(); in EmitX86Muldq()
14079 Ty->getPrimitiveSizeInBits() / 64); in EmitX86Muldq()
14105 llvm::Type *Ty = Ops[0]->getType(); in EmitX86Ternlog()
14107 unsigned VecWidth = Ty->getPrimitiveSizeInBits(); in EmitX86Ternlog()
14108 unsigned EltWidth = Ty->getScalarSizeInBits(); in EmitX86Ternlog()
14134 cast<llvm::FixedVectorType>(DstTy)->getNumElements(); in EmitX86SExtMask()
14140 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); in EmitX86CpuIs()
14141 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); in EmitX86CpuIs()
14153 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) { in EmitX86CvtF16ToFloatExpr()
14159 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements(); in EmitX86CvtF16ToFloatExpr()
14164 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) { in EmitX86CvtF16ToFloatExpr()
14174 // Perform the fp-extension. in EmitX86CvtF16ToFloatExpr()
14186 // Matching the struct layout from the compiler-rt/libgcc structure that is in EmitX86CpuIs()
14197 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); in EmitX86CpuIs()
14231 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts(); in EmitX86CpuSupports()
14232 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString(); in EmitX86CpuSupports()
14246 // Matching the struct layout from the compiler-rt/libgcc structure that is in EmitX86CpuSupports()
14257 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true); in EmitX86CpuSupports()
14277 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true); in EmitX86CpuSupports()
14282 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)}; in EmitX86CpuSupports()
14300 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); in EmitAArch64CpuInit()
14302 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); in EmitAArch64CpuInit()
14311 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true); in EmitX86CpuInit()
14313 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); in EmitX86CpuInit()
14318 const Expr *ArgExpr = E->getArg(0)->IgnoreParenCasts(); in EmitAArch64CpuSupports()
14319 StringRef ArgStr = cast<StringLiteral>(ArgExpr)->getString(); in EmitAArch64CpuSupports()
14344 cast<llvm::GlobalValue>(AArch64CPUFeatures)->setDSOLocal(true); in EmitAArch64CpuSupports()
14382 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { in EmitX86BuiltinExpr()
14411 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
14422 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1); in EmitX86BuiltinExpr()
14423 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3); in EmitX86BuiltinExpr()
14425 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); in EmitX86BuiltinExpr()
14456 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); in EmitX86BuiltinExpr()
14462 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); in EmitX86BuiltinExpr()
14473 return llvm::Constant::getNullValue(ConvertType(E->getType())); in EmitX86BuiltinExpr()
14490 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
14491 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
14492 Index &= NumElts - 1; in EmitX86BuiltinExpr()
14506 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
14507 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
14508 Index &= NumElts - 1; in EmitX86BuiltinExpr()
14515 RawAddress Tmp = CreateMemTemp(E->getArg(0)->getType()); in EmitX86BuiltinExpr()
14522 RawAddress Tmp = CreateMemTemp(E->getType()); in EmitX86BuiltinExpr()
14613 llvm::Type *ResultType = ConvertType(E->getType()); in EmitX86BuiltinExpr()
14629 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType())); in EmitX86BuiltinExpr()
14669 Constant::getNullValue(Ops[0]->getType())); in EmitX86BuiltinExpr()
14730 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); in EmitX86BuiltinExpr()
14771 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign()); in EmitX86BuiltinExpr()
14955 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(), in EmitX86BuiltinExpr()
14956 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements()); in EmitX86BuiltinExpr()
15064 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(), in EmitX86BuiltinExpr()
15065 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements()); in EmitX86BuiltinExpr()
15087 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType())); in EmitX86BuiltinExpr()
15088 unsigned NumElts = DstTy->getNumElements(); in EmitX86BuiltinExpr()
15090 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15092 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15094 Index &= SubVectors - 1; // Remove any extra bits. in EmitX86BuiltinExpr()
15126 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15128 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15130 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
15132 Index &= SubVectors - 1; // Remove any extra bits. in EmitX86BuiltinExpr()
15144 Indices[i] = (i - Index) + DstNumElts; in EmitX86BuiltinExpr()
15154 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType()); in EmitX86BuiltinExpr()
15161 if (C->isAllOnesValue()) in EmitX86BuiltinExpr()
15162 return Builder.CreateTrunc(Ops[0], Ops[1]->getType()); in EmitX86BuiltinExpr()
15190 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15191 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
15205 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15206 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15207 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15209 // Splat the 8 bits of the immediate 4 times to help the loop wrap around. in EmitX86BuiltinExpr()
15228 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15229 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15230 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15232 // Splat the 8 bits of the immediate 4 times to help the loop wrap around. in EmitX86BuiltinExpr()
15257 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15258 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15259 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15260 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; in EmitX86BuiltinExpr()
15263 // Splat the 8 bits of the immediate 4 times to help the loop wrap around. in EmitX86BuiltinExpr()
15283 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
15284 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15285 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15286 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; in EmitX86BuiltinExpr()
15289 // Splat the 8 bits of the immediate 4 times to help the loop wrap around. in EmitX86BuiltinExpr()
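A sketch of the index computation this splat trick enables in the shuffle emitters above (illustration only, assuming four 2-bit selectors per 128-bit lane as in pshufd-style shuffles; the helper name is mine): multiplying the 8-bit immediate by 0x01010101 repeats it in every byte, so one running value can be divided down selector by selector and wraps around correctly when the loop moves to the next lane.

#include <cstdint>
#include <vector>

std::vector<int> pshufdLikeIndices(unsigned NumElts, uint32_t Imm) {
  const unsigned NumLaneElts = 4;        // four elements per 128-bit lane
  Imm = (Imm & 0xff) * 0x01010101u;      // splat the immediate into all 4 bytes
  std::vector<int> Indices(NumElts);
  for (unsigned l = 0; l != NumElts; l += NumLaneElts)
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      Indices[l + i] = (Imm % NumLaneElts) + l;  // low 2 bits pick the source element
      Imm /= NumLaneElts;                        // consume them
    }
  return Indices;
}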
15310 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15311 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15312 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15314 // These intrinsics operate on 256-bit lanes of four 64-bit elements. in EmitX86BuiltinExpr()
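A sketch of the selection rule described above (illustration; helper name mine): within each group of four 64-bit elements, result element i takes its source from the 2-bit field at bits [2i+1:2i] of the immediate, offset back into the same group.

#include <vector>

std::vector<int> permdiLikeIndices(unsigned NumElts, unsigned Imm) {
  std::vector<int> Indices(NumElts);
  for (unsigned l = 0; l != NumElts; l += 4)
    for (unsigned i = 0; i != 4; ++i)
      Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);  // 2-bit selector per element
  return Indices;
}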
15326 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15329 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15335 return llvm::Constant::getNullValue(ConvertType(E->getType())); in EmitX86BuiltinExpr()
15340 ShiftVal -= 16; in EmitX86BuiltinExpr()
15342 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
15346 // 256-bit palignr operates on 128-bit lanes so we need to handle that in EmitX86BuiltinExpr()
15351 Idx += NumElts - 16; // End of lane, switch operand. in EmitX86BuiltinExpr()
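A sketch of the per-lane byte selection this implements for the wide palignr forms (illustration only; helper name mine): each 16-byte lane takes 16 consecutive bytes starting at ShiftVal from the concatenation of the two operands' matching lanes, and any index that runs past the lane's end jumps over to the other operand.

#include <vector>

std::vector<int> palignrIndices(unsigned NumElts, unsigned ShiftVal) {
  std::vector<int> Indices(NumElts);
  for (unsigned l = 0; l != NumElts; l += 16)
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (Idx >= 16)
        Idx += NumElts - 16;   // end of lane: take the byte from the other operand
      Indices[l + i] = Idx + l;
    }
  return Indices;
}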
15366 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15367 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15370 ShiftVal &= NumElts - 1; in EmitX86BuiltinExpr()
15387 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
15388 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15389 unsigned NumElts = Ty->getNumElements(); in EmitX86BuiltinExpr()
15390 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; in EmitX86BuiltinExpr()
15412 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); in EmitX86BuiltinExpr()
15414 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
15426 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType()); in EmitX86BuiltinExpr()
15450 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15451 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15453 unsigned NumElts = ResultType->getNumElements() * 8; in EmitX86BuiltinExpr()
15460 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that in EmitX86BuiltinExpr()
15463 unsigned Idx = NumElts + i - ShiftVal; in EmitX86BuiltinExpr()
15464 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. in EmitX86BuiltinExpr()
15474 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); in EmitX86BuiltinExpr()
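A byte-level model of the per-lane behaviour being emulated here (illustration only, not the emitter itself; helper name mine): pslldq shifts each 16-byte lane left by ShiftVal bytes and fills the vacated low bytes with zero, which is why the shuffle above pulls its "missing" bytes from a zero vector.

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint8_t> pslldqModel(const std::vector<uint8_t> &Bytes, unsigned ShiftVal) {
  std::vector<uint8_t> Out(Bytes.size(), 0);          // zeros shift in from the low end
  for (std::size_t l = 0; l + 16 <= Bytes.size(); l += 16)
    for (unsigned i = ShiftVal; i < 16; ++i)          // ShiftVal >= 16 leaves the lane zero
      Out[l + i] = Bytes[l + i - ShiftVal];
  return Out;
}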
15479 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15480 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType()); in EmitX86BuiltinExpr()
15482 unsigned NumElts = ResultType->getNumElements() * 8; in EmitX86BuiltinExpr()
15489 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that in EmitX86BuiltinExpr()
15493 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. in EmitX86BuiltinExpr()
15509 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15510 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15513 return llvm::Constant::getNullValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
15519 Indices[i] = NumElts + i - ShiftVal; in EmitX86BuiltinExpr()
15521 Value *Zero = llvm::Constant::getNullValue(In->getType()); in EmitX86BuiltinExpr()
15524 return Builder.CreateBitCast(SV, Ops[0]->getType()); in EmitX86BuiltinExpr()
15530 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff; in EmitX86BuiltinExpr()
15531 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15534 return llvm::Constant::getNullValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
15542 Value *Zero = llvm::Constant::getNullValue(In->getType()); in EmitX86BuiltinExpr()
15545 return Builder.CreateBitCast(SV, Ops[0]->getType()); in EmitX86BuiltinExpr()
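A bit-level model of what these two mask-shuffle sequences compute (illustration; helper names mine): shifting an AVX-512 k-register left or right is just an integer shift of the mask bits, with zeros filling in and any shift of the full width or more producing 0.

#include <cstdint>

uint16_t kshiftli16Model(uint16_t Mask, unsigned ShiftVal) {
  return ShiftVal >= 16 ? 0 : uint16_t(Mask << ShiftVal);
}

uint16_t kshiftri16Model(uint16_t Mask, unsigned ShiftVal) {
  return ShiftVal >= 16 ? 0 : uint16_t(Mask >> ShiftVal);
}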
15564 SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node); in EmitX86BuiltinExpr()
15565 SI->setAlignment(llvm::Align(1)); in EmitX86BuiltinExpr()
15568 // Rotate is a special case of funnel shift - the first two args are the same. in EmitX86BuiltinExpr()
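Why the rotate builtins can reuse the funnel-shift path (a small illustration; helper name mine): with the same value supplied as both funnel inputs, fshl(X, X, N) is exactly a left rotate by N, and fshr likewise gives a right rotate.

#include <cstdint>

uint32_t rotl32Model(uint32_t X, unsigned N) {
  N &= 31;                                    // funnel shift amounts are modulo the width
  return N ? (X << N) | (X >> (32 - N)) : X;  // avoid an out-of-range shift when N == 0
}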
15649 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; in EmitX86BuiltinExpr()
15664 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7; in EmitX86BuiltinExpr()
15683 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
15685 return Builder.CreateZExt(Cmp, ConvertType(E->getType())); in EmitX86BuiltinExpr()
15692 Value *C = llvm::Constant::getNullValue(Ops[0]->getType()); in EmitX86BuiltinExpr()
15694 return Builder.CreateZExt(Cmp, ConvertType(E->getType())); in EmitX86BuiltinExpr()
15734 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15762 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15767 return Builder.CreateBitCast(Res, Ops[0]->getType()); in EmitX86BuiltinExpr()
15798 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15801 Ops[0]->getType()); in EmitX86BuiltinExpr()
15810 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15812 return Builder.CreateBitCast(Res, Ops[0]->getType()); in EmitX86BuiltinExpr()
15818 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); in EmitX86BuiltinExpr()
15833 return Builder.CreateBitCast(Res, Ops[0]->getType()); in EmitX86BuiltinExpr()
15842 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); in EmitX86BuiltinExpr()
15852 A->getType()); in EmitX86BuiltinExpr()
15855 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); in EmitX86BuiltinExpr()
15863 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue(); in EmitX86BuiltinExpr()
15889 A->getType()); in EmitX86BuiltinExpr()
15892 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType()); in EmitX86BuiltinExpr()
15909 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue(); in EmitX86BuiltinExpr()
15934 Ops[0]->getType()); in EmitX86BuiltinExpr()
15937 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType()); in EmitX86BuiltinExpr()
16021 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType()); in EmitX86BuiltinExpr()
16032 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType()); in EmitX86BuiltinExpr()
16043 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType()); in EmitX86BuiltinExpr()
16054 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType()); in EmitX86BuiltinExpr()
16132 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
16179 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
16238 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
16303 // This is only possible if fp-model is not strict and FENV_ACCESS is off. in EmitX86BuiltinExpr()
16307 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f; in EmitX86BuiltinExpr()
16314 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling in EmitX86BuiltinExpr()
16336 // Invert the signalling behavior for 16-31. in EmitX86BuiltinExpr()
16396 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
16413 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(); in EmitX86BuiltinExpr()
16466 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType())); in EmitX86BuiltinExpr()
16473 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements()); in EmitX86BuiltinExpr()
16498 Value *FuncId = EmitScalarExpr(E->getArg(1)); in EmitX86BuiltinExpr()
16500 ? EmitScalarExpr(E->getArg(2)) in EmitX86BuiltinExpr()
16513 // x86-64 uses %rbx as the base register, so preserve it. in EmitX86BuiltinExpr()
16523 Value *BasePtr = EmitScalarExpr(E->getArg(0)); in EmitX86BuiltinExpr()
16548 llvm::Type *ResType = ConvertType(E->getType()); in EmitX86BuiltinExpr()
16568 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2)); in EmitX86BuiltinExpr()
16582 // Flip low/high ops and zero-extend amount to matching type. in EmitX86BuiltinExpr()
16583 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt) in EmitX86BuiltinExpr()
16584 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt) in EmitX86BuiltinExpr()
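A model of the mapping stated in the two comment lines above (illustration; helper name mine): __shiftleft128 returns the high 64 bits of the 128-bit value High:Low shifted left by the masked amount, which is exactly what llvm.fshl.i64(High, Low, Amt) computes.

#include <cstdint>

uint64_t shiftleft128Model(uint64_t Low, uint64_t High, unsigned char Amt) {
  unsigned N = Amt & 63;                              // fshl treats the amount modulo 64
  return N ? (High << N) | (Low >> (64 - N)) : High;  // high half of (High:Low) << N
}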
16602 // We treat __stosb as a volatile memset - it may not generate "rep stosb" in EmitX86BuiltinExpr()
16618 CI->setAttributes(NoReturnAttr); in EmitX86BuiltinExpr()
16625 llvm::Type *IntTy = ConvertType(E->getType()); in EmitX86BuiltinExpr()
16629 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); in EmitX86BuiltinExpr()
16630 Load->setVolatile(true); in EmitX86BuiltinExpr()
16637 llvm::Type *IntTy = ConvertType(E->getType()); in EmitX86BuiltinExpr()
16641 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType())); in EmitX86BuiltinExpr()
16642 Load->setVolatile(true); in EmitX86BuiltinExpr()
16702 createBasicBlock(BlockName + "_no_error", this->CurFn); in EmitX86BuiltinExpr()
16703 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); in EmitX86BuiltinExpr()
16704 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); in EmitX86BuiltinExpr()
16716 Constant *Zero = llvm::Constant::getNullValue(Out->getType()); in EmitX86BuiltinExpr()
16759 createBasicBlock(BlockName + "_no_error", this->CurFn); in EmitX86BuiltinExpr()
16760 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn); in EmitX86BuiltinExpr()
16761 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn); in EmitX86BuiltinExpr()
16770 Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i); in EmitX86BuiltinExpr()
16778 Constant *Zero = llvm::Constant::getNullValue(Out->getType()); in EmitX86BuiltinExpr()
16779 Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i); in EmitX86BuiltinExpr()
16819 CGM.getIntrinsic(Intrinsic::prefetch, Ops[0]->getType()), in EmitX86BuiltinExpr()
16832 // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)), in EmitPPCBuiltinExpr()
16833 // EmitScalarExpr(E->getArg(1)), "swdiv"); in EmitPPCBuiltinExpr()
16835 // Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
16836 // Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
16844 unsigned OpValue) -> Value * { in EmitPPCBuiltinExpr()
16846 return llvm::ConstantInt::getFalse(ConvertType(E->getType())); in EmitPPCBuiltinExpr()
16849 return llvm::ConstantInt::getTrue(ConvertType(E->getType())); in EmitPPCBuiltinExpr()
16881 llvm::Type *ValueType = FieldValue->getType(); in EmitPPCBuiltinExpr()
16882 bool IsValueType64Bit = ValueType->isIntegerTy(64); in EmitPPCBuiltinExpr()
16884 (IsValueType64Bit || ValueType->isIntegerTy(32)) && in EmitPPCBuiltinExpr()
16885 "Only 32/64-bit integers are supported in GenAIXPPCBuiltinCpuExpr()."); in EmitPPCBuiltinExpr()
16896 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); in EmitPPCBuiltinExpr()
16897 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); in EmitPPCBuiltinExpr()
16926 return llvm::ConstantInt::getFalse(ConvertType(E->getType())); in EmitPPCBuiltinExpr()
16936 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts(); in EmitPPCBuiltinExpr()
16937 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString(); in EmitPPCBuiltinExpr()
16979 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we in EmitPPCBuiltinExpr()
17000 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitPPCBuiltinExpr()
17001 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitPPCBuiltinExpr()
17068 Ops.push_back(EmitScalarExpr(E->getArg(0))); in EmitPPCBuiltinExpr()
17069 Ops.push_back(EmitScalarExpr(E->getArg(1))); in EmitPPCBuiltinExpr()
17070 Ops.push_back(EmitScalarExpr(E->getArg(2))); in EmitPPCBuiltinExpr()
17120 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17121 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17122 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue(); in EmitPPCBuiltinExpr()
17123 llvm::Type *ResTy = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17136 RevMask.push_back(15 - Idx); in EmitPPCBuiltinExpr()
17145 Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1)); in EmitPPCBuiltinExpr()
17153 Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType()); in EmitPPCBuiltinExpr()
17158 int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1) in EmitPPCBuiltinExpr()
17159 : 16 - (NumBytes - Idx); in EmitPPCBuiltinExpr()
17173 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17174 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17175 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17176 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue(); in EmitPPCBuiltinExpr()
17186 RevMask.push_back(15 - Idx); in EmitPPCBuiltinExpr()
17190 StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1))); in EmitPPCBuiltinExpr()
17232 Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1); in EmitPPCBuiltinExpr()
17233 RemainingBytes -= 8; in EmitPPCBuiltinExpr()
17237 Result = StoreSubVec(4, NumBytes - Stored - 4, in EmitPPCBuiltinExpr()
17238 IsLE ? (Stored >> 2) : 3 - (Stored >> 2)); in EmitPPCBuiltinExpr()
17239 RemainingBytes -= 4; in EmitPPCBuiltinExpr()
17243 Result = StoreSubVec(2, NumBytes - Stored - 2, in EmitPPCBuiltinExpr()
17244 IsLE ? (Stored >> 1) : 7 - (Stored >> 1)); in EmitPPCBuiltinExpr()
17245 RemainingBytes -= 2; in EmitPPCBuiltinExpr()
17250 StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored); in EmitPPCBuiltinExpr()
17256 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17257 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17272 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17273 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17282 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17283 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17292 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17293 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17294 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17295 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17317 int64_t ConstArg = ArgCI->getSExtValue(); in EmitPPCBuiltinExpr()
17327 CGM.Error(E->getExprLoc(), RangeErrMsg); in EmitPPCBuiltinExpr()
17334 ConstArg = (Is32bit ? 12 : 8) - ConstArg; in EmitPPCBuiltinExpr()
17352 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17353 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17359 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17360 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17372 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17373 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17388 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17389 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17390 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17416 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17417 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17418 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17419 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
17420 // rldimi is a 64-bit instruction; expand the intrinsic before isel to in EmitPPCBuiltinExpr()
17424 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Op0->getType()); in EmitPPCBuiltinExpr()
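A plain-integer model of the rotate-and-insert operation being expanded here (an illustration under the documented rldimi semantics, not the emitted IR; helper name mine): rotate RS left by SH, then merge it into IS under Mask, which is why an fshl (rotate) followed by and/or is enough.

#include <cstdint>

uint64_t rldimiModel(uint64_t RS, uint64_t IS, unsigned SH, uint64_t Mask) {
  SH &= 63;
  uint64_t Rot = SH ? (RS << SH) | (RS >> (64 - SH)) : RS;  // rotl64(RS, SH)
  return (Rot & Mask) | (IS & ~Mask);                       // insert under Mask
}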
17437 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17438 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17439 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17445 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17446 llvm::Type *ArgType = Op0->getType(); in EmitPPCBuiltinExpr()
17450 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17452 if (Result->getType() != ResultType) in EmitPPCBuiltinExpr()
17458 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17459 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17498 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17499 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17500 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17516 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17517 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17551 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17552 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17564 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17565 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17569 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17581 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17582 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17584 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) in EmitPPCBuiltinExpr()
17585 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(), in EmitPPCBuiltinExpr()
17588 emitAlignmentAssumption(Op1, E->getArg(1), in EmitPPCBuiltinExpr()
17594 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17595 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17596 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17597 llvm::Type *Ty = Op0->getType(); in EmitPPCBuiltinExpr()
17605 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17620 llvm::Type *ResultType = ConvertType(E->getType()); in EmitPPCBuiltinExpr()
17621 Value *X = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17622 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17623 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17640 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); in EmitPPCBuiltinExpr()
17642 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); in EmitPPCBuiltinExpr()
17647 F, {X, Y, Builder.CreateFNeg(Z, "neg")}); in EmitPPCBuiltinExpr()
17649 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); in EmitPPCBuiltinExpr()
17657 F, {X, Y, Builder.CreateFNeg(Z, "neg")}), in EmitPPCBuiltinExpr()
17658 "neg"); in EmitPPCBuiltinExpr()
17664 return nullptr; // Suppress no-return warning in EmitPPCBuiltinExpr()
17668 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17669 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17670 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17679 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex); in EmitPPCBuiltinExpr()
17698 Index = MaxIndex - Index; in EmitPPCBuiltinExpr()
17708 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17709 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17721 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex); in EmitPPCBuiltinExpr()
17725 Index = MaxIndex - Index; in EmitPPCBuiltinExpr()
17741 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17742 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17743 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17747 unsigned Index = ArgCI->getZExtValue(); in EmitPPCBuiltinExpr()
17759 QualType BIRetType = E->getType(); in EmitPPCBuiltinExpr()
17765 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17766 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17767 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17770 unsigned Index = ArgCI->getZExtValue() & 0x3; in EmitPPCBuiltinExpr()
17780 // Little endian element N comes from element 8+N-Index of the in EmitPPCBuiltinExpr()
17783 ElemIdx0 = (8 - Index) % 8; in EmitPPCBuiltinExpr()
17784 ElemIdx1 = (9 - Index) % 8; in EmitPPCBuiltinExpr()
17785 ElemIdx2 = (10 - Index) % 8; in EmitPPCBuiltinExpr()
17786 ElemIdx3 = (11 - Index) % 8; in EmitPPCBuiltinExpr()
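A compact sketch of the word-index selection computed above (illustration; helper name mine): for little endian, result word N comes from word (8 + N - Index) % 8 of the concatenated eight-word vector, while big endian simply uses Index + N.

#include <array>

std::array<unsigned, 4> sldwiWordIndices(unsigned Index, bool IsLittleEndian) {
  std::array<unsigned, 4> Idx;
  for (unsigned N = 0; N != 4; ++N)
    Idx[N] = IsLittleEndian ? (8 + N - Index) % 8 : Index + N;  // Index is 0..3
  return Idx;
}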
17797 QualType BIRetType = E->getType(); in EmitPPCBuiltinExpr()
17803 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17804 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17807 llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->getType(), 2)); in EmitPPCBuiltinExpr()
17812 return Builder.CreateBitCast(Res, ConvertType(E->getType())); in EmitPPCBuiltinExpr()
17816 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17817 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17820 Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2)); in EmitPPCBuiltinExpr()
17824 ConstantInt::get(Index->getIntegerType(), 1 - Index->getZExtValue()); in EmitPPCBuiltinExpr()
17831 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17832 Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty); in EmitPPCBuiltinExpr()
17847 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) in EmitPPCBuiltinExpr()
17848 if (E->getArg(i)->getType()->isArrayType()) in EmitPPCBuiltinExpr()
17850 EmitArrayToPointerDecay(E->getArg(i)).emitRawPointer(*this)); in EmitPPCBuiltinExpr()
17852 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitPPCBuiltinExpr()
17867 Address Addr = EmitPointerWithAlignment(E->getArg(1)); in EmitPPCBuiltinExpr()
17916 Address Addr = EmitPointerWithAlignment(E->getArg(0)); in EmitPPCBuiltinExpr()
17929 Address Addr = EmitPointerWithAlignment(E->getArg(0)); in EmitPPCBuiltinExpr()
17930 Address OldValAddr = EmitPointerWithAlignment(E->getArg(1)); in EmitPPCBuiltinExpr()
17932 QualType AtomicTy = E->getArg(0)->getType()->getPointeeType(); in EmitPPCBuiltinExpr()
17934 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
17936 LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(), in EmitPPCBuiltinExpr()
17939 // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp in EmitPPCBuiltinExpr()
17976 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17984 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17985 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
17993 Value *ArgValue = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
17994 llvm::Type *ArgType = ArgValue->getType(); in EmitPPCBuiltinExpr()
18001 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18002 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18010 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18011 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18054 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18055 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18057 CGM.getIntrinsic(Intrinsic::ppc_test_data_class, Op0->getType()), in EmitPPCBuiltinExpr()
18061 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18062 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18063 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18064 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18069 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18070 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18071 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18072 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18077 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18078 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18079 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18080 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18085 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18086 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18087 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18088 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18093 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18094 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18095 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18096 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18101 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18102 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18103 Value *Op2 = EmitScalarExpr(E->getArg(2)); in EmitPPCBuiltinExpr()
18104 Value *Op3 = EmitScalarExpr(E->getArg(3)); in EmitPPCBuiltinExpr()
18110 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitPPCBuiltinExpr()
18111 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitPPCBuiltinExpr()
18116 {EmitScalarExpr(E->getArg(0))}); in EmitPPCBuiltinExpr()
18129 Call->addRetAttr( in EmitAMDGPUDispatchPtr()
18130 Attribute::getWithDereferenceableBytes(Call->getContext(), 64)); in EmitAMDGPUDispatchPtr()
18131 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4))); in EmitAMDGPUDispatchPtr()
18134 QualType BuiltinRetType = E->getType(); in EmitAMDGPUDispatchPtr()
18136 if (RetTy == Call->getType()) in EmitAMDGPUDispatchPtr()
18144 Call->addRetAttr( in EmitAMDGPUImplicitArgPtr()
18145 Attribute::getWithDereferenceableBytes(Call->getContext(), 256)); in EmitAMDGPUImplicitArgPtr()
18146 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8))); in EmitAMDGPUImplicitArgPtr()
18156 /// compiling device libraries in an ABI-agnostic way.
18214 LD->setMetadata(llvm::LLVMContext::MD_range, RNode); in EmitAMDGPUWorkGroupSize()
18215 LD->setMetadata(llvm::LLVMContext::MD_noundef, in EmitAMDGPUWorkGroupSize()
18217 LD->setMetadata(llvm::LLVMContext::MD_invariant_load, in EmitAMDGPUWorkGroupSize()
18231 LD->setMetadata(llvm::LLVMContext::MD_invariant_load, in EmitAMDGPUGridSize()
18239 // \p Order takes a C++11 compatible memory-ordering specifier and converts
18246 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); in ProcessOrderScopeAMDGCN()
18277 int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue(); in ProcessOrderScopeAMDGCN()
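A hedged sketch of the ordering translation the doc comment above describes (not the function itself, and it leaves out the synchronization-scope handling on the line above): the integer constant follows the C ABI encoding of memory_order_* and is mapped onto an LLVM atomic ordering.

#include "llvm/Support/AtomicOrdering.h"

llvm::AtomicOrdering mapCABIOrdering(int ord) {
  switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
  case llvm::AtomicOrderingCABI::relaxed: return llvm::AtomicOrdering::Monotonic;
  case llvm::AtomicOrderingCABI::consume:
  case llvm::AtomicOrderingCABI::acquire: return llvm::AtomicOrdering::Acquire;
  case llvm::AtomicOrderingCABI::release: return llvm::AtomicOrdering::Release;
  case llvm::AtomicOrderingCABI::acq_rel: return llvm::AtomicOrdering::AcquireRelease;
  default:                                return llvm::AtomicOrdering::SequentiallyConsistent;
  }
}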
18305 Arg = EmitScalarExpr(E->getArg(Idx)); in EmitScalarOrConstFoldImmArg()
18310 E->getArg(Idx)->getIntegerConstantExpr(getContext()); in EmitScalarOrConstFoldImmArg()
18318 if (QT->hasFloatingRepresentation()) { in getDotProductIntrinsic()
18328 if (QT->hasSignedIntegerRepresentation()) in getDotProductIntrinsic()
18331 assert(QT->hasUnsignedIntegerRepresentation()); in getDotProductIntrinsic()
18342 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18349 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18356 Value *OpX = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18357 Value *OpMin = EmitScalarExpr(E->getArg(1)); in EmitHLSLBuiltinExpr()
18358 Value *OpMax = EmitScalarExpr(E->getArg(2)); in EmitHLSLBuiltinExpr()
18360 QualType Ty = E->getArg(0)->getType(); in EmitHLSLBuiltinExpr()
18362 if (auto *VecTy = Ty->getAs<VectorType>()) in EmitHLSLBuiltinExpr()
18363 Ty = VecTy->getElementType(); in EmitHLSLBuiltinExpr()
18364 IsUnsigned = Ty->isUnsignedIntegerType(); in EmitHLSLBuiltinExpr()
18366 /*ReturnType=*/OpX->getType(), in EmitHLSLBuiltinExpr()
18371 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18372 Value *Op1 = EmitScalarExpr(E->getArg(1)); in EmitHLSLBuiltinExpr()
18373 llvm::Type *T0 = Op0->getType(); in EmitHLSLBuiltinExpr()
18374 llvm::Type *T1 = Op1->getType(); in EmitHLSLBuiltinExpr()
18375 if (!T0->isVectorTy() && !T1->isVectorTy()) { in EmitHLSLBuiltinExpr()
18376 if (T0->isFloatingPointTy()) in EmitHLSLBuiltinExpr()
18379 if (T0->isIntegerTy()) in EmitHLSLBuiltinExpr()
18387 assert(T0->isVectorTy() && T1->isVectorTy() && in EmitHLSLBuiltinExpr()
18391 assert(T0->getScalarType() == T1->getScalarType() && in EmitHLSLBuiltinExpr()
18394 auto *VecTy0 = E->getArg(0)->getType()->getAs<VectorType>(); in EmitHLSLBuiltinExpr()
18396 E->getArg(1)->getType()->getAs<VectorType>(); in EmitHLSLBuiltinExpr()
18398 assert(VecTy0->getNumElements() == VecTy1->getNumElements() && in EmitHLSLBuiltinExpr()
18402 /*ReturnType=*/T0->getScalarType(), in EmitHLSLBuiltinExpr()
18403 getDotProductIntrinsic(E->getArg(0)->getType(), in EmitHLSLBuiltinExpr()
18404 VecTy0->getNumElements()), in EmitHLSLBuiltinExpr()
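The asserts above spell out the contract: either both operands are scalars, in which case dot degenerates to a multiply, or both are vectors of the same element type and length, in which case the dot-product intrinsic reduces them to a scalar. A consolidated sketch of that dispatch, reusing Op0/Op1/T0/T1/VecTy0 and the getDotProductIntrinsic helper from the matched lines; it is not a verbatim copy of the source.

// Hedged sketch of the scalar-vs-vector split for the HLSL dot builtin.
if (!T0->isVectorTy() && !T1->isVectorTy()) {
  if (T0->isFloatingPointTy())
    return Builder.CreateFMul(Op0, Op1);  // dot(float, float) == x * y
  if (T0->isIntegerTy())
    return Builder.CreateMul(Op0, Op1);   // dot(int, int) == x * y
}
// Both operands are vectors of matching shape; the intrinsic reduces them
// to a single scalar of the common element type.
return Builder.CreateIntrinsic(
    /*ReturnType=*/T0->getScalarType(),
    getDotProductIntrinsic(E->getArg(0)->getType(), VecTy0->getNumElements()),
    {Op0, Op1});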
18408 Value *X = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18409 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitHLSLBuiltinExpr()
18410 Value *S = EmitScalarExpr(E->getArg(2)); in EmitHLSLBuiltinExpr()
18411 if (!E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18414 /*ReturnType=*/X->getType(), CGM.getHLSLRuntime().getLerpIntrinsic(), in EmitHLSLBuiltinExpr()
18418 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18419 if (!E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18422 /*ReturnType=*/Op0->getType(), Intrinsic::dx_frac, in EmitHLSLBuiltinExpr()
18426 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18427 llvm::Type *Xty = Op0->getType(); in EmitHLSLBuiltinExpr()
18428 llvm::Type *retType = llvm::Type::getInt1Ty(this->getLLVMContext()); in EmitHLSLBuiltinExpr()
18429 if (Xty->isVectorTy()) { in EmitHLSLBuiltinExpr()
18430 auto *XVecTy = E->getArg(0)->getType()->getAs<VectorType>(); in EmitHLSLBuiltinExpr()
18432 retType, ElementCount::getFixed(XVecTy->getNumElements())); in EmitHLSLBuiltinExpr()
18434 if (!E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18440 Value *M = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18441 Value *A = EmitScalarExpr(E->getArg(1)); in EmitHLSLBuiltinExpr()
18442 Value *B = EmitScalarExpr(E->getArg(2)); in EmitHLSLBuiltinExpr()
18443 if (E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18445 /*ReturnType*/ M->getType(), Intrinsic::fmuladd, in EmitHLSLBuiltinExpr()
18448 if (E->getArg(0)->getType()->hasSignedIntegerRepresentation()) { in EmitHLSLBuiltinExpr()
18451 /*ReturnType*/ M->getType(), Intrinsic::dx_imad, in EmitHLSLBuiltinExpr()
18457 assert(E->getArg(0)->getType()->hasUnsignedIntegerRepresentation()); in EmitHLSLBuiltinExpr()
18460 /*ReturnType=*/M->getType(), Intrinsic::dx_umad, in EmitHLSLBuiltinExpr()
18467 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18468 if (!E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18470 llvm::Type *Ty = Op0->getType(); in EmitHLSLBuiltinExpr()
18471 llvm::Type *EltTy = Ty->getScalarType(); in EmitHLSLBuiltinExpr()
18472 Constant *One = Ty->isVectorTy() in EmitHLSLBuiltinExpr()
18475 cast<FixedVectorType>(Ty)->getNumElements()), in EmitHLSLBuiltinExpr()
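The rcp lowering above builds a constant 1.0, splatted for vector operands, and the matched lines stop just before the division. A hedged sketch of the remainder, assuming the locals Op0/Ty/EltTy from the lines above:

// Hedged sketch: rcp(x) is emitted as 1.0 / x, element-wise for vectors.
Constant *One =
    Ty->isVectorTy()
        ? ConstantVector::getSplat(
              ElementCount::getFixed(
                  cast<FixedVectorType>(Ty)->getNumElements()),
              ConstantFP::get(EltTy, 1.0))
        : ConstantFP::get(EltTy, 1.0);
return Builder.CreateFDiv(One, Op0);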
18481 Value *Op0 = EmitScalarExpr(E->getArg(0)); in EmitHLSLBuiltinExpr()
18482 if (!E->getArg(0)->getType()->hasFloatingRepresentation()) in EmitHLSLBuiltinExpr()
18485 /*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getRsqrtIntrinsic(), in EmitHLSLBuiltinExpr()
18499 constexpr const char *Tag = "amdgpu-as"; in AddAMDGPUFenceAddressSpaceMMRA()
18501 LLVMContext &Ctx = Inst->getContext(); in AddAMDGPUFenceAddressSpaceMMRA()
18503 for (unsigned K = 2; K < E->getNumArgs(); ++K) { in AddAMDGPUFenceAddressSpaceMMRA()
18504 llvm::Value *V = EmitScalarExpr(E->getArg(K)); in AddAMDGPUFenceAddressSpaceMMRA() local
18506 if (llvm::getConstantStringInfo(V, AS)) { in AddAMDGPUFenceAddressSpaceMMRA()
18511 CGM.Error(E->getExprLoc(), in AddAMDGPUFenceAddressSpaceMMRA()
18517 Inst->setMetadata(LLVMContext::MD_mmra, MMRAMetadata::getMD(Ctx, MMRAs)); in AddAMDGPUFenceAddressSpaceMMRA()
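For context, the extra constant-string arguments consumed by the loop above become ("amdgpu-as", <name>) MMRA tags on the emitted fence. A hypothetical device-side use follows; the trailing address-space names and their spelling are assumptions, not something these lines state.

// Hypothetical usage sketch: restrict the fence to named address spaces.
// "workgroup" is the synchronization scope; the trailing string is the
// assumed address-space hint consumed by the loop above. Strings that are
// not recognized are reported through CGM.Error as shown.
__builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup", "local");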
18530 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3)); in EmitAMDGPUBuiltinExpr()
18532 llvm::Value *X = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18533 llvm::Value *Y = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18534 llvm::Value *Z = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
18537 X->getType()); in EmitAMDGPUBuiltinExpr()
18552 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18553 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18554 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
18555 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); in EmitAMDGPUBuiltinExpr()
18558 Src0->getType()); in EmitAMDGPUBuiltinExpr()
18578 for (unsigned I = 0; I != E->getNumArgs(); ++I) { in EmitAMDGPUBuiltinExpr()
18583 Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType())); in EmitAMDGPUBuiltinExpr()
18585 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType()); in EmitAMDGPUBuiltinExpr()
18647 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18648 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18650 CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()}); in EmitAMDGPUBuiltinExpr()
18656 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18657 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18659 CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Int16Ty}); in EmitAMDGPUBuiltinExpr()
18669 Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18671 { Builder.getInt32Ty(), Src0->getType() }); in EmitAMDGPUBuiltinExpr()
18675 Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18677 { Builder.getInt16Ty(), Src0->getType() }); in EmitAMDGPUBuiltinExpr()
18696 llvm::Type *ResultType = ConvertType(E->getType()); in EmitAMDGPUBuiltinExpr()
18697 llvm::Value *Src = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18705 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18706 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18707 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
18709 // FIXME-GFX10: How should the 32-bit mask be handled? in EmitAMDGPUBuiltinExpr()
18711 { Builder.getInt64Ty(), Src0->getType() }); in EmitAMDGPUBuiltinExpr()
18716 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18717 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18718 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
18720 // FIXME-GFX10: How should the 32-bit mask be handled? in EmitAMDGPUBuiltinExpr()
18722 { Builder.getInt64Ty(), Src0->getType() }); in EmitAMDGPUBuiltinExpr()
18737 Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18738 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); in EmitAMDGPUBuiltinExpr()
18791 llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18792 llvm::Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18794 CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()}); in EmitAMDGPUBuiltinExpr()
18808 llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18809 llvm::Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18810 llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()}); in EmitAMDGPUBuiltinExpr()
18837 llvm::Type *LoadTy = ConvertType(E->getType()); in EmitAMDGPUBuiltinExpr()
18838 llvm::Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18850 llvm::Value *Env = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18863 llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
18864 llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
18865 llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
18866 llvm::Value *RayDir = EmitScalarExpr(E->getArg(3)); in EmitAMDGPUBuiltinExpr()
18867 llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4)); in EmitAMDGPUBuiltinExpr()
18868 llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5)); in EmitAMDGPUBuiltinExpr()
18880 {NodePtr->getType(), RayDir->getType()}); in EmitAMDGPUBuiltinExpr()
18887 for (int i = 0, e = E->getNumArgs(); i != e; ++i) in EmitAMDGPUBuiltinExpr()
18888 Args.push_back(EmitScalarExpr(E->getArg(i))); in EmitAMDGPUBuiltinExpr()
18894 llvm::Type *RetTy = ConvertType(E->getType()); in EmitAMDGPUBuiltinExpr()
18969 // On GFX12, the intrinsics with 16-bit accumulator use a packed layout. in EmitAMDGPUBuiltinExpr()
19115 for (int i = 0, e = E->getNumArgs(); i != e; ++i) in EmitAMDGPUBuiltinExpr()
19116 Args.push_back(EmitScalarExpr(E->getArg(i))); in EmitAMDGPUBuiltinExpr()
19122 ArgTypes.push_back(Args[ArgIdx]->getType()); in EmitAMDGPUBuiltinExpr()
19164 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
19165 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
19166 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); in EmitAMDGPUBuiltinExpr()
19167 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType()); in EmitAMDGPUBuiltinExpr()
19171 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)), in EmitAMDGPUBuiltinExpr()
19172 EmitScalarExpr(E->getArg(1)), AO, SSID); in EmitAMDGPUBuiltinExpr()
19174 if (E->getNumArgs() > 2) in EmitAMDGPUBuiltinExpr()
19215 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitAMDGPUBuiltinExpr()
19216 llvm::Type *OrigTy = Val->getType(); in EmitAMDGPUBuiltinExpr()
19217 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); in EmitAMDGPUBuiltinExpr()
19226 cast<ConstantInt>(EmitScalarExpr(E->getArg(4)))->getZExtValue(); in EmitAMDGPUBuiltinExpr()
19230 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified(); in EmitAMDGPUBuiltinExpr()
19233 if (E->getNumArgs() >= 4) { in EmitAMDGPUBuiltinExpr()
19235 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)), in EmitAMDGPUBuiltinExpr()
19236 EmitScalarExpr(E->getArg(3)), AO, SSID); in EmitAMDGPUBuiltinExpr()
19253 RMW->setVolatile(true); in EmitAMDGPUBuiltinExpr()
19258 llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); in EmitAMDGPUBuiltinExpr()
19259 llvm::Type *ResultType = ConvertType(E->getType()); in EmitAMDGPUBuiltinExpr()
19306 F, {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), in EmitAMDGPUBuiltinExpr()
19307 EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3))}); in EmitAMDGPUBuiltinExpr()
19315 /// to an int that receives the post-instruction CC value. At the LLVM level
19320 unsigned NumArgs = E->getNumArgs() - 1; in EmitSystemZIntrinsicWithCC()
19323 Args[I] = CGF.EmitScalarExpr(E->getArg(I)); in EmitSystemZIntrinsicWithCC()
19324 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs)); in EmitSystemZIntrinsicWithCC()
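As the doc comment fragment above says, the builtin's trailing pointer argument receives the post-instruction condition code, while at the LLVM level the intrinsic returns a {result, i32 cc} pair. A hedged sketch of how that tail plausibly looks; the helper name callAndStoreCC is illustrative and this is not the verbatim source.

// Hedged sketch: call the intrinsic, store the CC member through the
// user-supplied pointer, and hand back the result member.
static llvm::Value *callAndStoreCC(clang::CodeGen::CodeGenFunction &CGF,
                                   llvm::Function *F,
                                   llvm::ArrayRef<llvm::Value *> Args,
                                   clang::CodeGen::Address CCPtr) {
  llvm::Value *Call = CGF.Builder.CreateCall(F, Args);
  llvm::Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
  CGF.Builder.CreateStore(CC, CCPtr);
  return CGF.Builder.CreateExtractValue(Call, 0);
}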
19336 Value *TDB = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19342 Value *TDB = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19354 Value *Data = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19359 Value *Address = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19360 Value *Data = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19366 // to target-specific LLVM intrinsics. The ones handled specially here can in EmitSystemZBuiltinExpr()
19374 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19375 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19384 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19385 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19395 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19396 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19406 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19407 llvm::Value *Src = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19408 llvm::Value *Amt = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19410 unsigned NumElts = cast<llvm::FixedVectorType>(ResultType)->getNumElements(); in EmitSystemZBuiltinExpr()
19411 Amt = Builder.CreateIntCast(Amt, ResultType->getScalarType(), false); in EmitSystemZBuiltinExpr()
19421 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19422 llvm::Value *Src = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19423 llvm::Value *Amt = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19430 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19431 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19442 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19443 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19444 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19445 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
19456 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19457 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19458 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19459 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
19462 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); in EmitSystemZBuiltinExpr()
19465 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); in EmitSystemZBuiltinExpr()
19470 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19471 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19472 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19473 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
19476 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); in EmitSystemZBuiltinExpr()
19479 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); in EmitSystemZBuiltinExpr()
19484 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19485 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19486 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19487 Value *Z = EmitScalarExpr(E->getArg(2)); in EmitSystemZBuiltinExpr()
19494 Value *NegZ = Builder.CreateFNeg(Z, "neg"); in EmitSystemZBuiltinExpr()
19500 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19501 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19507 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19508 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19510 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg"); in EmitSystemZBuiltinExpr()
19514 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19515 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19516 // Constant-fold the M4 and M5 mask arguments. in EmitSystemZBuiltinExpr()
19517 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext()); in EmitSystemZBuiltinExpr()
19518 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext()); in EmitSystemZBuiltinExpr()
19525 case 0: // IEEE-inexact exception allowed in EmitSystemZBuiltinExpr()
19532 case 4: // IEEE-inexact exception suppressed in EmitSystemZBuiltinExpr()
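Only a few (M4, M5) combinations can be expressed with standard LLVM intrinsics, and the two case labels above cover M5 == 0. A hedged sketch of that selection, with the strict-FP variants omitted for brevity and the surrounding locals (M4, X, ResultType) assumed from the matched lines:

// Hedged sketch, assuming M5 == 0 (round using the current mode): the only
// difference between the two cases is whether the IEEE inexact exception
// may be raised.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
case 0: // IEEE-inexact exception allowed
  ID = Intrinsic::rint;
  break;
case 4: // IEEE-inexact exception suppressed
  ID = Intrinsic::nearbyint;
  break;
}
if (ID != Intrinsic::not_intrinsic)
  return Builder.CreateCall(CGM.getIntrinsic(ID, ResultType), X);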
19569 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19570 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19571 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19572 // Constant-fold the M4 mask argument. in EmitSystemZBuiltinExpr()
19573 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); in EmitSystemZBuiltinExpr()
19603 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19604 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19605 Value *Y = EmitScalarExpr(E->getArg(1)); in EmitSystemZBuiltinExpr()
19606 // Constant-fold the M4 mask argument. in EmitSystemZBuiltinExpr()
19607 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext()); in EmitSystemZBuiltinExpr()
19639 llvm::Type *ResultType = ConvertType(E->getType()); in EmitSystemZBuiltinExpr()
19640 Value *X = EmitScalarExpr(E->getArg(0)); in EmitSystemZBuiltinExpr()
19645 // Vector intrinsics that output the post-instruction CC value. in EmitSystemZBuiltinExpr()
19809 // Sub-integer MMA loads. in getNVPTXMmaLdstInfo()
19873 // Integer and sub-integer MMA stores. in getNVPTXMmaLdstInfo()
19909 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
19910 // over 'col' for layout. The index of non-satf variants is expected to match
19925 // clang-format off in getNVPTXMmaInfo()
19937 // Sub-integer MMA only supports row.col layout. in getNVPTXMmaInfo()
19966 // clang-format on in getNVPTXMmaInfo()
20010 // Sub-integer MMA in getNVPTXMmaInfo()
20045 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); in MakeLdgLdu()
20046 QualType ArgType = E->getArg(0)->getType(); in MakeLdgLdu()
20048 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType()); in MakeLdgLdu()
20050 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}), in MakeLdgLdu()
20056 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); in MakeScopedAtomic()
20058 CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); in MakeScopedAtomic()
20060 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}), in MakeScopedAtomic()
20061 {Ptr, CGF.EmitScalarExpr(E->getArg(1))}); in MakeScopedAtomic()
20067 return E->getNumArgs() == 3 in MakeCpAsync()
20069 {CGF.EmitScalarExpr(E->getArg(0)), in MakeCpAsync()
20070 CGF.EmitScalarExpr(E->getArg(1)), in MakeCpAsync()
20071 CGF.EmitScalarExpr(E->getArg(2))}) in MakeCpAsync()
20073 {CGF.EmitScalarExpr(E->getArg(0)), in MakeCpAsync()
20074 CGF.EmitScalarExpr(E->getArg(1))}); in MakeCpAsync()
20082 CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() + in MakeHalfType()
20093 auto *FTy = F->getFunctionType(); in MakeHalfType()
20098 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { in MakeHalfType()
20100 auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i)); in MakeHalfType()
20101 auto *PTy = FTy->getParamType(i); in MakeHalfType()
20102 if (PTy != ArgValue->getType()) in MakeHalfType()
20173 Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20174 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20181 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20182 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20184 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType()); in EmitNVPTXBuiltinExpr()
20189 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20190 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20192 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType()); in EmitNVPTXBuiltinExpr()
20358 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20360 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); in EmitNVPTXBuiltinExpr()
20363 Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}), in EmitNVPTXBuiltinExpr()
20364 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); in EmitNVPTXBuiltinExpr()
20369 Value *Ptr = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20371 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); in EmitNVPTXBuiltinExpr()
20374 Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}), in EmitNVPTXBuiltinExpr()
20375 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); in EmitNVPTXBuiltinExpr()
20379 Value *Mask = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20380 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20381 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2)); in EmitNVPTXBuiltinExpr()
20422 // Sub-integer MMA loads. in EmitNVPTXBuiltinExpr()
20445 Address Dst = EmitPointerWithAlignment(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20446 Value *Src = EmitScalarExpr(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20447 Value *Ldm = EmitScalarExpr(E->getArg(2)); in EmitNVPTXBuiltinExpr()
20449 E->getArg(3)->getIntegerConstantExpr(getContext()); in EmitNVPTXBuiltinExpr()
20452 bool isColMajor = isColMajorArg->getSExtValue(); in EmitNVPTXBuiltinExpr()
20459 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm}); in EmitNVPTXBuiltinExpr()
20492 Value *Dst = EmitScalarExpr(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20493 Address Src = EmitPointerWithAlignment(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20494 Value *Ldm = EmitScalarExpr(E->getArg(2)); in EmitNVPTXBuiltinExpr()
20496 E->getArg(3)->getIntegerConstantExpr(getContext()); in EmitNVPTXBuiltinExpr()
20499 bool isColMajor = isColMajorArg->getSExtValue(); in EmitNVPTXBuiltinExpr()
20505 CGM.getIntrinsic(IID, Dst->getType()); in EmitNVPTXBuiltinExpr()
20506 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); in EmitNVPTXBuiltinExpr()
20509 Value *V = Builder.CreateAlignedLoad( in EmitNVPTXBuiltinExpr() local
20514 Values.push_back(Builder.CreateBitCast(V, ParamType)); in EmitNVPTXBuiltinExpr()
20521 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) --> in EmitNVPTXBuiltinExpr()
20550 Address Dst = EmitPointerWithAlignment(E->getArg(0)); in EmitNVPTXBuiltinExpr()
20551 Address SrcA = EmitPointerWithAlignment(E->getArg(1)); in EmitNVPTXBuiltinExpr()
20552 Address SrcB = EmitPointerWithAlignment(E->getArg(2)); in EmitNVPTXBuiltinExpr()
20553 Address SrcC = EmitPointerWithAlignment(E->getArg(3)); in EmitNVPTXBuiltinExpr()
20555 E->getArg(4)->getIntegerConstantExpr(getContext()); in EmitNVPTXBuiltinExpr()
20558 int Layout = LayoutArg->getSExtValue(); in EmitNVPTXBuiltinExpr()
20566 E->getArg(5)->getIntegerConstantExpr(getContext())) in EmitNVPTXBuiltinExpr()
20578 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0); in EmitNVPTXBuiltinExpr()
20581 Value *V = Builder.CreateAlignedLoad( in EmitNVPTXBuiltinExpr() local
20586 Values.push_back(Builder.CreateBitCast(V, AType)); in EmitNVPTXBuiltinExpr()
20589 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA); in EmitNVPTXBuiltinExpr()
20591 Value *V = Builder.CreateAlignedLoad( in EmitNVPTXBuiltinExpr() local
20596 Values.push_back(Builder.CreateBitCast(V, BType)); in EmitNVPTXBuiltinExpr()
20600 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB); in EmitNVPTXBuiltinExpr()
20602 Value *V = Builder.CreateAlignedLoad( in EmitNVPTXBuiltinExpr() local
20607 Values.push_back(Builder.CreateBitCast(V, CType)); in EmitNVPTXBuiltinExpr()
20827 EmitScalarExpr(E->getArg(0))); in EmitNVPTXBuiltinExpr()
20831 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}); in EmitNVPTXBuiltinExpr()
20835 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}); in EmitNVPTXBuiltinExpr()
20839 EmitScalarExpr(E->getArg(0))); in EmitNVPTXBuiltinExpr()
20843 EmitScalarExpr(E->getArg(0))); in EmitNVPTXBuiltinExpr()
20870 QualType AstType = E->getArg(0)->getType(); in BuiltinAlignArgs()
20871 if (AstType->isArrayType()) in BuiltinAlignArgs()
20872 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF); in BuiltinAlignArgs()
20874 Src = CGF.EmitScalarExpr(E->getArg(0)); in BuiltinAlignArgs()
20875 SrcType = Src->getType(); in BuiltinAlignArgs()
20876 if (SrcType->isPointerTy()) { in BuiltinAlignArgs()
20881 assert(SrcType->isIntegerTy()); in BuiltinAlignArgs()
20884 Alignment = CGF.EmitScalarExpr(E->getArg(1)); in BuiltinAlignArgs()
20892 /// Generate (x & (y-1)) == 0.
20896 if (Args.SrcType->isPointerTy()) in EmitBuiltinIsAligned()
20904 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
20915 if (Args.Src->getType()->isPointerTy()) { in EmitBuiltinAlignTo()
20923 E->getExprLoc(), "over_boundary"); in EmitBuiltinAlignTo()
20931 if (Args.Src->getType()->isPointerTy()) { in EmitBuiltinAlignTo()
20938 assert(Result->getType() == Args.SrcType); in EmitBuiltinAlignTo()
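The two helpers above reduce to standard power-of-two bit tricks. A self-contained sketch of the same arithmetic on plain integers; the CodeGen versions additionally handle pointer operands, overflow checks, and alignment assumptions.

#include <cassert>
#include <cstdint>

// is_aligned: x is a multiple of the power-of-two `Align` iff the low bits
// selected by (Align - 1) are all zero.
static bool isAligned(std::uintptr_t X, std::uintptr_t Align) {
  assert(Align != 0 && (Align & (Align - 1)) == 0 && "power of two");
  return (X & (Align - 1)) == 0;
}

// align_down clears the low bits; align_up first moves X past the boundary,
// then clears the low bits, matching ((x + (y-1)) & ~(y-1)).
static std::uintptr_t alignDown(std::uintptr_t X, std::uintptr_t Align) {
  return X & ~(Align - 1);
}
static std::uintptr_t alignUp(std::uintptr_t X, std::uintptr_t Align) {
  return (X + (Align - 1)) & ~(Align - 1);
}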
20946 llvm::Type *ResultType = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
20947 Value *I = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
20953 llvm::Type *ResultType = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
20954 Value *Args[] = {EmitScalarExpr(E->getArg(0)), in EmitWebAssemblyBuiltinExpr()
20955 EmitScalarExpr(E->getArg(1))}; in EmitWebAssemblyBuiltinExpr()
20961 llvm::Type *ResultType = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
20966 llvm::Type *ResultType = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
20975 Value *Tag = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
20976 Value *Obj = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
20985 Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
20986 Value *Expected = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
20987 Value *Timeout = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
20992 Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
20993 Value *Expected = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
20994 Value *Timeout = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
20999 Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21000 Value *Count = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21008 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21009 llvm::Type *ResT = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
21011 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()}); in EmitWebAssemblyBuiltinExpr()
21018 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21019 llvm::Type *ResT = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
21021 {ResT, Src->getType()}); in EmitWebAssemblyBuiltinExpr()
21029 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21030 llvm::Type *ResT = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
21032 CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()}); in EmitWebAssemblyBuiltinExpr()
21040 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21041 llvm::Type *ResT = ConvertType(E->getType()); in EmitWebAssemblyBuiltinExpr()
21043 CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()}); in EmitWebAssemblyBuiltinExpr()
21051 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21052 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21054 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21062 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21063 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21065 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21071 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21072 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21074 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21080 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21081 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21083 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21115 Value *Value = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21116 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21128 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21129 Value *Indices = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21162 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21163 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21164 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21171 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21172 Value *Neg = Builder.CreateNeg(Vec, "neg"); in EmitWebAssemblyBuiltinExpr() local
21173 Constant *Zero = llvm::Constant::getNullValue(Vec->getType()); in EmitWebAssemblyBuiltinExpr()
21175 return Builder.CreateSelect(ICmp, Neg, Vec, "abs"); in EmitWebAssemblyBuiltinExpr()
21189 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21190 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21220 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21221 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21223 ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21227 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21228 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21236 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21251 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21255 Value *V1 = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21256 Value *V2 = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21257 Value *C = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21259 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21263 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21264 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21269 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21271 CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType())); in EmitWebAssemblyBuiltinExpr()
21293 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21294 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType()); in EmitWebAssemblyBuiltinExpr()
21301 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21303 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType()); in EmitWebAssemblyBuiltinExpr()
21308 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21309 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType()); in EmitWebAssemblyBuiltinExpr()
21314 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21315 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType()); in EmitWebAssemblyBuiltinExpr()
21322 Value *Low = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21323 Value *High = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21338 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()}); in EmitWebAssemblyBuiltinExpr()
21343 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21355 llvm::Type *SrcT = Vec->getType(); in EmitWebAssemblyBuiltinExpr()
21356 llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty()); in EmitWebAssemblyBuiltinExpr()
21365 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21366 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21369 E->getArg(OpIdx)->getIntegerConstantExpr(getContext()); in EmitWebAssemblyBuiltinExpr()
21382 Value *A = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21383 Value *B = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21384 Value *C = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21400 Function *Callee = CGM.getIntrinsic(IntNo, A->getType()); in EmitWebAssemblyBuiltinExpr()
21407 Value *A = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21408 Value *B = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21409 Value *C = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21411 CGM.getIntrinsic(Intrinsic::wasm_relaxed_laneselect, A->getType()); in EmitWebAssemblyBuiltinExpr()
21415 Value *Src = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21416 Value *Indices = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21424 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21425 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21439 Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType()); in EmitWebAssemblyBuiltinExpr()
21446 Value *Vec = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21468 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21469 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21474 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21475 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21481 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21482 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21483 Value *Acc = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21489 Value *LHS = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21490 Value *RHS = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21491 Value *Acc = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21497 Value *Addr = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21502 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21503 Value *Addr = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21508 Value *Val = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21513 Value *Vector = EmitScalarExpr(E->getArg(0)); in EmitWebAssemblyBuiltinExpr()
21514 Value *Index = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21519 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21520 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21521 Value *Index = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21523 if (E->getType().isWebAssemblyExternrefType()) in EmitWebAssemblyBuiltinExpr()
21525 else if (E->getType().isWebAssemblyFuncrefType()) in EmitWebAssemblyBuiltinExpr()
21533 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21534 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21535 Value *Index = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21536 Value *Val = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21538 if (E->getArg(2)->getType().isWebAssemblyExternrefType()) in EmitWebAssemblyBuiltinExpr()
21540 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType()) in EmitWebAssemblyBuiltinExpr()
21548 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21549 Value *Value = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21554 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21555 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21556 Value *Val = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21557 Value *NElems = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21560 if (E->getArg(1)->getType().isWebAssemblyExternrefType()) in EmitWebAssemblyBuiltinExpr()
21562 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType()) in EmitWebAssemblyBuiltinExpr()
21571 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21572 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21573 Value *Index = EmitScalarExpr(E->getArg(1)); in EmitWebAssemblyBuiltinExpr()
21574 Value *Val = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21575 Value *NElems = EmitScalarExpr(E->getArg(3)); in EmitWebAssemblyBuiltinExpr()
21578 if (E->getArg(2)->getType().isWebAssemblyExternrefType()) in EmitWebAssemblyBuiltinExpr()
21580 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType()) in EmitWebAssemblyBuiltinExpr()
21589 assert(E->getArg(0)->getType()->isArrayType()); in EmitWebAssemblyBuiltinExpr()
21590 Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21591 Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).emitRawPointer(*this); in EmitWebAssemblyBuiltinExpr()
21592 Value *DstIdx = EmitScalarExpr(E->getArg(2)); in EmitWebAssemblyBuiltinExpr()
21593 Value *SrcIdx = EmitScalarExpr(E->getArg(3)); in EmitWebAssemblyBuiltinExpr()
21594 Value *NElems = EmitScalarExpr(E->getArg(4)); in EmitWebAssemblyBuiltinExpr()
21655 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) in getIntrinsicForHexagonNonClangBuiltin()
21658 return {F->IntrinsicID, F->VecLen}; in getIntrinsicForHexagonNonClangBuiltin()
21669 Address A = EmitPointerWithAlignment(E->getArg(0)); in EmitHexagonBuiltinExpr()
21675 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start) in EmitHexagonBuiltinExpr()
21676 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start) in EmitHexagonBuiltinExpr()
21678 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start) in EmitHexagonBuiltinExpr()
21679 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start) in EmitHexagonBuiltinExpr()
21681 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) in EmitHexagonBuiltinExpr()
21682 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitHexagonBuiltinExpr()
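The comment block above lists how the circular-addressing builtins map onto their intrinsics, and the loop just matched forwards the trailing operands. A hedged sketch of the operand collection, assuming the locals A and E from the surrounding lines; the intrinsic call itself and the write-back of the updated base pointer are outside the matched lines and are not shown.

// Hedged sketch: forward the base pointer (arg 0) as a raw pointer and pass
// the remaining builtin operands through unchanged, in the order given by
// the builtin(...) -> intr(...) mappings listed above.
llvm::SmallVector<llvm::Value *, 5> Ops = {A.emitRawPointer(*this)};
for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
  Ops.push_back(EmitScalarExpr(E->getArg(i)));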
21689 llvm::Value *LV = EmitScalarExpr(E->getArg(0)); in EmitHexagonBuiltinExpr()
21690 Address Dest = EmitPointerWithAlignment(E->getArg(0)); in EmitHexagonBuiltinExpr()
21698 // Handle the conversion of bit-reverse load intrinsics to bit code. in EmitHexagonBuiltinExpr()
21705 llvm::Value *BaseAddress = EmitScalarExpr(E->getArg(0)); in EmitHexagonBuiltinExpr()
21710 Address DestAddr = EmitPointerWithAlignment(E->getArg(1)); in EmitHexagonBuiltinExpr()
21718 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))}); in EmitHexagonBuiltinExpr()
21737 {Vec, Builder.getInt32(-1)}); in EmitHexagonBuiltinExpr()
21743 {Pred, Builder.getInt32(-1)}); in EmitHexagonBuiltinExpr()
21754 // Get the type from the 0-th argument. in EmitHexagonBuiltinExpr()
21755 llvm::Type *VecType = ConvertType(E->getArg(0)->getType()); in EmitHexagonBuiltinExpr()
21757 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType); in EmitHexagonBuiltinExpr()
21760 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn}); in EmitHexagonBuiltinExpr()
21768 // input carry, only generate carry-out. Since they still produce two in EmitHexagonBuiltinExpr()
21774 // Get the type from the 0-th argument. in EmitHexagonBuiltinExpr()
21775 llvm::Type *VecType = ConvertType(E->getArg(0)->getType()); in EmitHexagonBuiltinExpr()
21777 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType); in EmitHexagonBuiltinExpr()
21779 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}); in EmitHexagonBuiltinExpr()
21796 const Expr *PredOp = E->getArg(0); in EmitHexagonBuiltinExpr()
21799 if (Cast->getCastKind() == CK_BitCast) in EmitHexagonBuiltinExpr()
21800 PredOp = Cast->getSubExpr(); in EmitHexagonBuiltinExpr()
21803 for (int i = 1, e = E->getNumArgs(); i != e; ++i) in EmitHexagonBuiltinExpr()
21804 Ops.push_back(EmitScalarExpr(E->getArg(i))); in EmitHexagonBuiltinExpr()
21853 llvm::Type *ResultType = ConvertType(E->getType()); in EmitRISCVBuiltinExpr()
21876 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) { in EmitRISCVBuiltinExpr()
21878 if (hasAggregateEvaluationKind(E->getArg(i)->getType())) { in EmitRISCVBuiltinExpr()
21879 LValue L = EmitAggExprToLValue(E->getArg(i)); in EmitRISCVBuiltinExpr()
21929 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); in EmitRISCVBuiltinExpr()
21931 if (Result->getType() != ResultType) in EmitRISCVBuiltinExpr()
21938 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType()); in EmitRISCVBuiltinExpr()
21940 if (Result->getType() != ResultType) in EmitRISCVBuiltinExpr()
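The clz/ctz lowerings above reuse the generic ctlz/cttz intrinsics and only need a width adjustment afterwards. A hedged sketch for the ctlz case, assuming the locals Ops and ResultType from the matched lines; the exact cast used by the source may differ.

// Hedged sketch: call llvm.ctlz with is_zero_poison = false, then adjust
// the width to the builtin's result type if the two differ. The count is
// always non-negative, so a zero-extending cast is sufficient here.
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getFalse()});
if (Result->getType() != ResultType)
  Result = Builder.CreateZExtOrTrunc(Result, ResultType);
return Result;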
22021 llvm::Type *ResTy = ConvertType(E->getType()); in EmitRISCVBuiltinExpr()
22024 DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue(); in EmitRISCVBuiltinExpr()
22033 if (ResTy->isScalableTy()) { in EmitRISCVBuiltinExpr()
22035 llvm::Type *ScalarTy = ResTy->getScalarType(); in EmitRISCVBuiltinExpr()
22036 Width = ScalarTy->getPrimitiveSizeInBits() * in EmitRISCVBuiltinExpr()
22037 SVTy->getElementCount().getKnownMinValue(); in EmitRISCVBuiltinExpr()
22039 Width = ResTy->getPrimitiveSizeInBits(); in EmitRISCVBuiltinExpr()
22043 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode); in EmitRISCVBuiltinExpr()
22044 Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"), in EmitRISCVBuiltinExpr()
22052 DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue(); in EmitRISCVBuiltinExpr()
22061 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode); in EmitRISCVBuiltinExpr()
22062 Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"), in EmitRISCVBuiltinExpr()
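The NontemporalNode and the domain node attached above are ordinary metadata nodes wrapping small integer constants. A hedged sketch of how such nodes can be built; the listing only shows that the nodes are attached, so treating the nontemporal payload as i32 1 (the LangRef convention) and the domain payload as the constant-folded DomainVal is an assumption.

// Hedged sketch: build the metadata nodes attached by the setMetadata calls
// above. The nontemporal node wraps i32 1; the riscv-nontemporal-domain node
// is assumed to wrap the constant-folded DomainVal in the same way.
llvm::MDNode *NontemporalNode = llvm::MDNode::get(
    getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
    getLLVMContext(),
    llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));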