Lines Matching full:call
173 /// Parses the name part of the demangled builtin call.
182 // Extract the builtin function name and types of arguments from the call in lookupBuiltinNameHelper()
210 /// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
219 /// \returns Wrapper around the demangled call and found builtin definition.
239 // If the initial lookup was unsuccessful and the demangled call takes at in lookupBuiltin()
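For orientation, here is the kind of input this lookup path consumes; the mangled and demangled strings below are illustrative, not taken from the file:

    // IR call site (hypothetical):  call spir_func i32 @_Z9atomic_orPU3AS1Vii(...)
    // Demangled call string:        "atomic_or(int volatile AS1*, int)"
    // Parsed name part:             "atomic_or"
    // The name part is the key into the SPIRVBuiltins.td-generated tables;
    // the match is returned wrapped in an IncomingCall together with the
    // return register/type and the argument registers.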
373 // the function call be an intrinsic, which it is not. Instead, we rely on being in getBlockStructType()
575 const SPIRV::IncomingCall *Call, in buildOpFromWrapper() argument
581 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
582 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
584 Register ArgReg = Call->Arguments[i];
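Pieced together from the matches above, the wrapper-lowering helper looks roughly like the sketch below. The ImmArgs parameter is inferred from the Sz computation and from call sites that pass immediates, and the register-class bookkeeping is omitted, so treat this as a reconstruction rather than the verbatim source:

    static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                                   const SPIRV::IncomingCall *Call, Register TypeReg,
                                   ArrayRef<uint32_t> ImmArgs = {}) {
      auto MIB = MIRBuilder.buildInstr(Opcode);
      if (TypeReg.isValid())  // Result-less ops (e.g. OpStore) pass Register(0).
        MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
      // Trailing immediates displace the same number of call arguments,
      // hence Sz = Arguments.size() - ImmArgs.size().
      unsigned Sz = Call->Arguments.size() - ImmArgs.size();
      for (unsigned i = 0; i < Sz; ++i) {
        Register ArgReg = Call->Arguments[i];
        MIB.addUse(ArgReg);
      }
      for (uint32_t ImmArg : ImmArgs)
        MIB.addImm(ImmArg);
      return true;
    }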
595 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, in buildAtomicInitInst() argument
597 if (Call->isSpirvOp()) in buildAtomicInitInst()
598 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0)); in buildAtomicInitInst()
600 assert(Call->Arguments.size() == 2 && in buildAtomicInitInst()
602 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in buildAtomicInitInst()
603 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in buildAtomicInitInst()
605 .addUse(Call->Arguments[0]) in buildAtomicInitInst()
606 .addUse(Call->Arguments[1]); in buildAtomicInitInst()
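SPIR-V has no dedicated atomic-init instruction, so the OpenCL builtin degenerates to a plain store of the two arguments, roughly:

    // OpenCL C:  atomic_init(obj, value);
    // SPIR-V:    OpStore %obj %value   (no scope/semantics operands)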
611 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, in buildAtomicLoadInst() argument
614 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType); in buildAtomicLoadInst()
615 if (Call->isSpirvOp()) in buildAtomicLoadInst()
616 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg); in buildAtomicLoadInst()
618 Register PtrRegister = Call->Arguments[0]; in buildAtomicLoadInst()
620 // TODO: if true, insert call to __translate_ocl_memory_scope before in buildAtomicLoadInst()
624 if (Call->Arguments.size() > 1) { in buildAtomicLoadInst()
625 ScopeRegister = Call->Arguments[1]; in buildAtomicLoadInst()
631 if (Call->Arguments.size() > 2) { in buildAtomicLoadInst()
632 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad. in buildAtomicLoadInst()
633 MemSemanticsReg = Call->Arguments[2]; in buildAtomicLoadInst()
643 .addDef(Call->ReturnRegister) in buildAtomicLoadInst()
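When the optional order/scope arguments are omitted, the lowering synthesizes the OpenCL defaults; a sketch of the one-argument form:

    // OpenCL C:  x = atomic_load(p);
    // SPIR-V:    %x = OpAtomicLoad %T %p %DeviceScope %SeqCstSemantics
    // Explicit Arguments[1]/Arguments[2] override the scope and semantics.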
652 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, in buildAtomicStoreInst() argument
655 if (Call->isSpirvOp()) in buildAtomicStoreInst()
656 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0)); in buildAtomicStoreInst()
660 Register PtrRegister = Call->Arguments[0]; in buildAtomicStoreInst()
666 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in buildAtomicStoreInst()
671 .addUse(Call->Arguments[1]); in buildAtomicStoreInst()
677 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, in buildAtomicCompareExchangeInst() argument
679 if (Call->isSpirvOp()) in buildAtomicCompareExchangeInst()
680 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in buildAtomicCompareExchangeInst()
681 GR->getSPIRVTypeID(Call->ReturnType)); in buildAtomicCompareExchangeInst()
683 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg"); in buildAtomicCompareExchangeInst()
686 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object). in buildAtomicCompareExchangeInst()
687 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected). in buildAtomicCompareExchangeInst()
688 Register Desired = Call->Arguments[2]; // Value (C desired). in buildAtomicCompareExchangeInst()
719 if (Call->Arguments.size() >= 4) { in buildAtomicCompareExchangeInst()
720 assert(Call->Arguments.size() >= 5 && in buildAtomicCompareExchangeInst()
723 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI)); in buildAtomicCompareExchangeInst()
725 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI)); in buildAtomicCompareExchangeInst()
729 MemSemEqualReg = Call->Arguments[3]; in buildAtomicCompareExchangeInst()
731 MemSemUnequalReg = Call->Arguments[4]; in buildAtomicCompareExchangeInst()
732 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass); in buildAtomicCompareExchangeInst()
733 MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass); in buildAtomicCompareExchangeInst()
742 if (Call->Arguments.size() >= 6) { in buildAtomicCompareExchangeInst()
743 assert(Call->Arguments.size() == 6 && in buildAtomicCompareExchangeInst()
746 getIConstVal(Call->Arguments[5], MRI)); in buildAtomicCompareExchangeInst()
749 ScopeReg = Call->Arguments[5]; in buildAtomicCompareExchangeInst()
750 MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass); in buildAtomicCompareExchangeInst()
761 : Call->ReturnRegister; in buildAtomicCompareExchangeInst()
778 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected); in buildAtomicCompareExchangeInst()
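OpAtomicCompareExchange yields the original value, while the OpenCL strong/weak builtins return a bool and report the original value through the expected pointer, which is what the trailing store-and-compare above implements; approximately:

    // bool r = atomic_compare_exchange_strong(obj, &expected, desired);
    //   %orig = OpAtomicCompareExchange %T %obj %scope %semEq %semNeq %desired %exp
    //   OpStore %expected_ptr %orig
    //   %r    = OpIEqual %bool %orig %exp
    // The plain cmpxchg flavor (line 683) skips the bool and returns %orig.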
784 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, in buildAtomicRMWInst() argument
787 if (Call->isSpirvOp()) in buildAtomicRMWInst()
788 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in buildAtomicRMWInst()
789 GR->getSPIRVTypeID(Call->ReturnType)); in buildAtomicRMWInst()
793 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register(); in buildAtomicRMWInst()
795 assert(Call->Arguments.size() <= 4 && in buildAtomicRMWInst()
800 Register PtrRegister = Call->Arguments[0]; in buildAtomicRMWInst()
804 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register(); in buildAtomicRMWInst()
807 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in buildAtomicRMWInst()
808 Register ValueReg = Call->Arguments[1]; in buildAtomicRMWInst()
809 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType); in buildAtomicRMWInst()
811 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) { in buildAtomicRMWInst()
821 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg, in buildAtomicRMWInst()
826 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder, in buildAtomicRMWInst()
832 .addDef(Call->ReturnRegister) in buildAtomicRMWInst()
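The float branch exists because SPIR-V has an extension opcode for atomic float add but none for subtract; assuming that is what the NegValueReg above implements, atomic_sub on floats becomes an add of the negated value:

    // float old = atomic_sub(p, v);   // no atomic float-subtract opcode
    //   %neg = fneg %v                // NegValueReg
    //   %old = OpAtomicFAddEXT %float %p %scope %sem %neg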
842 static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, in buildAtomicFloatingRMWInst() argument
846 assert(Call->Arguments.size() == 4 && in buildAtomicFloatingRMWInst()
851 Register PtrReg = Call->Arguments[0]; in buildAtomicFloatingRMWInst()
854 Register ScopeReg = Call->Arguments[1]; in buildAtomicFloatingRMWInst()
857 Register MemSemanticsReg = Call->Arguments[2]; in buildAtomicFloatingRMWInst()
860 Register ValueReg = Call->Arguments[3]; in buildAtomicFloatingRMWInst()
864 .addDef(Call->ReturnRegister) in buildAtomicFloatingRMWInst()
865 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in buildAtomicFloatingRMWInst()
875 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, in buildAtomicFlagInst() argument
879 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType); in buildAtomicFlagInst()
880 if (Call->isSpirvOp()) in buildAtomicFlagInst()
881 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in buildAtomicFlagInst()
885 Register PtrRegister = Call->Arguments[0]; in buildAtomicFlagInst()
888 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register(); in buildAtomicFlagInst()
898 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register(); in buildAtomicFlagInst()
904 MIB.addDef(Call->ReturnRegister).addUse(TypeReg); in buildAtomicFlagInst()
912 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, in buildBarrierInst() argument
915 if (Call->isSpirvOp()) in buildBarrierInst()
916 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0)); in buildBarrierInst()
919 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI); in buildBarrierInst()
933 static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI)); in buildBarrierInst()
941 MemSemanticsReg = Call->Arguments[0]; in buildBarrierInst()
949 if (Call->Arguments.size() >= 2) { in buildBarrierInst()
951 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) || in buildBarrierInst()
952 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) && in buildBarrierInst()
954 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2] in buildBarrierInst()
955 : Call->Arguments[1]; in buildBarrierInst()
964 ScopeReg = Call->Arguments[1]; in buildBarrierInst()
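A sketch of the common case, with the CLK_*_MEM_FENCE flags folded into memory-semantics constants (flag values per the OpenCL headers):

    // barrier(CLK_LOCAL_MEM_FENCE);
    //   OpControlBarrier %Workgroup %Workgroup
    //                    %(SequentiallyConsistent | WorkgroupMemory)
    // mem_fence(...) takes the OpMemoryBarrier path with the same mapping.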
1008 static bool generateExtInst(const SPIRV::IncomingCall *Call, in generateExtInst() argument
1012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateExtInst()
1019 .addDef(Call->ReturnRegister) in generateExtInst()
1020 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateExtInst()
1024 for (auto Argument : Call->Arguments) in generateExtInst()
1029 static bool generateRelationalInst(const SPIRV::IncomingCall *Call, in generateRelationalInst() argument
1033 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateRelationalInst()
1040 buildBoolRegister(MIRBuilder, Call->ReturnType, GR); in generateRelationalInst()
1047 for (auto Argument : Call->Arguments) in generateRelationalInst()
1051 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister, in generateRelationalInst()
1052 Call->ReturnType, GR); in generateRelationalInst()
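Relational builtins compute a SPIR-V bool (scalar or vector) and then materialize the OpenCL integer result, hence the bool-register/select pair above; for example:

    // int r = isnan(x);
    //   %b = OpIsNan %bool %x
    //   %r = OpSelect %int %b %const_1 %const_0   // vector forms select -1/0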
1055 static bool generateGroupInst(const SPIRV::IncomingCall *Call, in generateGroupInst() argument
1058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateGroupInst()
1063 if (Call->isSpirvOp()) { in generateGroupInst()
1065 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call, in generateGroupInst()
1066 GR->getSPIRVTypeID(Call->ReturnType)); in generateGroupInst()
1069 Register GroupOpReg = Call->Arguments[1]; in generateGroupInst()
1075 Register ScopeReg = Call->Arguments[0]; in generateGroupInst()
1079 .addDef(Call->ReturnRegister) in generateGroupInst()
1080 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateGroupInst()
1083 for (unsigned i = 2; i < Call->Arguments.size(); ++i) { in generateGroupInst()
1084 Register ArgReg = Call->Arguments[i]; in generateGroupInst()
1094 Register ConstRegister = Call->Arguments[0]; in generateGroupInst()
1100 if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() != in generateGroupInst()
1106 Register GroupResultRegister = Call->ReturnRegister; in generateGroupInst()
1107 SPIRVType *GroupResultType = Call->ReturnType; in generateGroupInst()
1118 buildBoolRegister(MIRBuilder, Call->ReturnType, GR); in generateGroupInst()
1132 if (Call->Arguments.size() > 0) { in generateGroupInst()
1133 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]); in generateGroupInst()
1134 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateGroupInst()
1135 for (unsigned i = 1; i < Call->Arguments.size(); i++) { in generateGroupInst()
1136 MIB.addUse(Call->Arguments[i]); in generateGroupInst()
1137 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass); in generateGroupInst()
1143 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister, in generateGroupInst()
1144 Call->ReturnType, GR); in generateGroupInst()
1148 static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, in generateIntelSubgroupsInst() argument
1151 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateIntelSubgroupsInst()
1164 if (Call->isSpirvOp()) { in generateIntelSubgroupsInst()
1167 return buildOpFromWrapper(MIRBuilder, OpCode, Call, in generateIntelSubgroupsInst()
1168 IsSet ? GR->getSPIRVTypeID(Call->ReturnType) in generateIntelSubgroupsInst()
1175 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) { in generateIntelSubgroupsInst()
1207 .addDef(Call->ReturnRegister) in generateIntelSubgroupsInst()
1208 .addUse(GR->getSPIRVTypeID(Call->ReturnType)); in generateIntelSubgroupsInst()
1209 for (size_t i = 0; i < Call->Arguments.size(); ++i) { in generateIntelSubgroupsInst()
1210 MIB.addUse(Call->Arguments[i]); in generateIntelSubgroupsInst()
1211 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass); in generateIntelSubgroupsInst()
1217 static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, in generateGroupUniformInst() argument
1220 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateGroupUniformInst()
1234 Register GroupResultReg = Call->ReturnRegister; in generateGroupUniformInst()
1238 Register ScopeReg = Call->Arguments[0]; in generateGroupUniformInst()
1242 Register ConstGroupOpReg = Call->Arguments[1]; in generateGroupUniformInst()
1255 Register ValueReg = Call->Arguments[2]; in generateGroupUniformInst()
1260 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateGroupUniformInst()
1268 static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, in generateKernelClockInst() argument
1271 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateKernelClockInst()
1282 Register ResultReg = Call->ReturnRegister; in generateKernelClockInst()
1295 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateKernelClockInst()
1327 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, in genWorkgroupQuery() argument
1332 Register IndexRegister = Call->Arguments[0]; in genWorkgroupQuery()
1333 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm(); in genWorkgroupQuery()
1341 Register ToTruncate = Call->ReturnRegister; in genWorkgroupQuery()
1350 Register DefaultReg = Call->ReturnRegister; in genWorkgroupQuery()
1368 Register Extracted = Call->ReturnRegister; in genWorkgroupQuery()
1403 Register SelectionResult = Call->ReturnRegister; in genWorkgroupQuery()
1421 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate); in genWorkgroupQuery()
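These queries return a per-dimension value with a defined fallback when the index is out of range (0 for IDs, 1 for sizes; the trailing 0/1 literal passed by the callers at 1434 and 1566). For a non-constant index the lowering is roughly:

    // size_t n = get_global_size(dim);
    //   %vec = load of the GlobalSize builtin variable (3 x iN)
    //   %v   = extractelement %vec, %dim
    //   %ok  = icmp ult %dim, 3
    //   %n0  = select %ok, %v, %default          // default = 1 here
    //   %n   = zext/trunc %n0 to ResultWidth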
1425 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, in generateBuiltinVar() argument
1429 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateBuiltinVar()
1434 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0); in generateBuiltinVar()
1437 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType); in generateBuiltinVar()
1439 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector) in generateBuiltinVar()
1441 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth); in generateBuiltinVar()
1445 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value, in generateBuiltinVar()
1446 LLType, Call->ReturnRegister); in generateBuiltinVar()
1449 static bool generateAtomicInst(const SPIRV::IncomingCall *Call, in generateAtomicInst() argument
1453 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateAtomicInst()
1459 return buildAtomicInitInst(Call, MIRBuilder); in generateAtomicInst()
1461 return buildAtomicLoadInst(Call, MIRBuilder, GR); in generateAtomicInst()
1463 return buildAtomicStoreInst(Call, MIRBuilder, GR); in generateAtomicInst()
1466 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder, in generateAtomicInst()
1474 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR); in generateAtomicInst()
1476 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR); in generateAtomicInst()
1479 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR); in generateAtomicInst()
1481 if (Call->isSpirvOp()) in generateAtomicInst()
1482 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in generateAtomicInst()
1483 GR->getSPIRVTypeID(Call->ReturnType)); in generateAtomicInst()
1488 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, in generateAtomicFloatingInst() argument
1492 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateAtomicFloatingInst()
1499 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR); in generateAtomicFloatingInst()
1505 static bool generateBarrierInst(const SPIRV::IncomingCall *Call, in generateBarrierInst() argument
1509 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateBarrierInst()
1513 return buildBarrierInst(Call, Opcode, MIRBuilder, GR); in generateBarrierInst()
1516 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, in generateCastToPtrInst() argument
1519 .addDef(Call->ReturnRegister) in generateCastToPtrInst()
1520 .addUse(Call->Arguments[0]); in generateCastToPtrInst()
1524 static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, in generateDotOrFMulInst() argument
1527 if (Call->isSpirvOp()) in generateDotOrFMulInst()
1528 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call, in generateDotOrFMulInst()
1529 GR->getSPIRVTypeID(Call->ReturnType)); in generateDotOrFMulInst()
1530 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode(); in generateDotOrFMulInst()
1534 .addDef(Call->ReturnRegister) in generateDotOrFMulInst()
1535 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateDotOrFMulInst()
1536 .addUse(Call->Arguments[0]) in generateDotOrFMulInst()
1537 .addUse(Call->Arguments[1]); in generateDotOrFMulInst()
1541 static bool generateWaveInst(const SPIRV::IncomingCall *Call, in generateWaveInst() argument
1544 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateWaveInst()
1549 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt); in generateWaveInst()
1550 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType)); in generateWaveInst()
1553 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister, in generateWaveInst()
1557 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, in generateGetQueryInst() argument
1562 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value; in generateGetQueryInst()
1566 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0); in generateGetQueryInst()
1569 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, in generateImageSizeQueryInst() argument
1573 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateImageSizeQueryInst()
1579 SPIRVType *RetTy = Call->ReturnType; in generateImageSizeQueryInst()
1584 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]); in generateImageSizeQueryInst()
1586 Register QueryResult = Call->ReturnRegister; in generateImageSizeQueryInst()
1587 SPIRVType *QueryResultType = Call->ReturnType; in generateImageSizeQueryInst()
1600 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateImageSizeQueryInst()
1604 .addUse(Call->Arguments[0]); in generateImageSizeQueryInst()
1615 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType); in generateImageSizeQueryInst()
1624 .addDef(Call->ReturnRegister) in generateImageSizeQueryInst()
1629 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder, in generateImageSizeQueryInst()
1634 .addDef(Call->ReturnRegister) in generateImageSizeQueryInst()
1635 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateImageSizeQueryInst()
1644 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, in generateImageMiscQueryInst() argument
1647 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt && in generateImageMiscQueryInst()
1651 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateImageMiscQueryInst()
1655 Register Image = Call->Arguments[0]; in generateImageMiscQueryInst()
1676 .addDef(Call->ReturnRegister) in generateImageMiscQueryInst()
1677 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateImageMiscQueryInst()
1715 const SPIRV::IncomingCall *Call, in generateReadImageInst() argument
1718 Register Image = Call->Arguments[0]; in generateReadImageInst()
1721 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateReadImageInst()
1725 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass); in generateReadImageInst()
1727 Register Sampler = Call->Arguments[1]; in generateReadImageInst()
1751 SPIRVType *TempType = Call->ReturnType; in generateReadImageInst()
1755 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder); in generateReadImageInst()
1764 .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister) in generateReadImageInst()
1767 .addUse(Call->Arguments[2]) // Coordinate. in generateReadImageInst()
1773 .addDef(Call->ReturnRegister) in generateReadImageInst()
1774 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateReadImageInst()
1779 .addDef(Call->ReturnRegister) in generateReadImageInst()
1780 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateReadImageInst()
1782 .addUse(Call->Arguments[1]) // Coordinate. in generateReadImageInst()
1784 .addUse(Call->Arguments[2]); in generateReadImageInst()
1787 .addDef(Call->ReturnRegister) in generateReadImageInst()
1788 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateReadImageInst()
1790 .addUse(Call->Arguments[1]); // Coordinate. in generateReadImageInst()
1795 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, in generateWriteImageInst() argument
1798 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateWriteImageInst()
1799 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateWriteImageInst()
1800 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass); in generateWriteImageInst()
1802 .addUse(Call->Arguments[0]) // Image. in generateWriteImageInst()
1803 .addUse(Call->Arguments[1]) // Coordinate. in generateWriteImageInst()
1804 .addUse(Call->Arguments[2]); // Texel. in generateWriteImageInst()
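The write path is the simplest image builtin: three operands, no result; approximately:

    // write_imagef(img, coord, texel);
    //   OpImageWrite %img %coord %texel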
1809 const SPIRV::IncomingCall *Call, in generateSampleImageInst() argument
1813 if (Call->Builtin->Name.contains_insensitive( in generateSampleImageInst()
1816 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI); in generateSampleImageInst()
1818 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask), in generateSampleImageInst()
1820 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType); in generateSampleImageInst()
1822 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) { in generateSampleImageInst()
1824 Register Image = Call->Arguments[0]; in generateSampleImageInst()
1829 Call->ReturnRegister.isValid() in generateSampleImageInst()
1830 ? Call->ReturnRegister in generateSampleImageInst()
1836 .addUse(Call->Arguments[1]); // Sampler. in generateSampleImageInst()
1838 } else if (Call->Builtin->Name.contains_insensitive( in generateSampleImageInst()
1847 Call->ReturnType in generateSampleImageInst()
1848 ? Call->ReturnType in generateSampleImageInst()
1855 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateSampleImageInst()
1856 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateSampleImageInst()
1857 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass); in generateSampleImageInst()
1860 .addDef(Call->ReturnRegister) in generateSampleImageInst()
1862 .addUse(Call->Arguments[0]) // Image. in generateSampleImageInst()
1863 .addUse(Call->Arguments[1]) // Coordinate. in generateSampleImageInst()
1865 .addUse(Call->Arguments[3]); in generateSampleImageInst()
1871 static bool generateSelectInst(const SPIRV::IncomingCall *Call, in generateSelectInst() argument
1873 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0], in generateSelectInst()
1874 Call->Arguments[1], Call->Arguments[2]); in generateSelectInst()
1878 static bool generateConstructInst(const SPIRV::IncomingCall *Call, in generateConstructInst() argument
1881 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call, in generateConstructInst()
1882 GR->getSPIRVTypeID(Call->ReturnType)); in generateConstructInst()
1885 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, in generateCoopMatrInst() argument
1888 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateCoopMatrInst()
1892 unsigned ArgSz = Call->Arguments.size(); in generateCoopMatrInst()
1901 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI)); in generateCoopMatrInst()
1902 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType); in generateCoopMatrInst()
1904 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]); in generateCoopMatrInst()
1908 .addDef(Call->ReturnRegister) in generateCoopMatrInst()
1913 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in generateCoopMatrInst()
1917 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, in generateSpecConstantInst() argument
1921 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateSpecConstantInst()
1930 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI)); in generateSpecConstantInst()
1931 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId, in generateSpecConstantInst()
1934 Register ConstRegister = Call->Arguments[1]; in generateSpecConstantInst()
1942 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) { in generateSpecConstantInst()
1949 .addDef(Call->ReturnRegister) in generateSpecConstantInst()
1950 .addUse(GR->getSPIRVTypeID(Call->ReturnType)); in generateSpecConstantInst()
1952 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) { in generateSpecConstantInst()
1962 .addDef(Call->ReturnRegister) in generateSpecConstantInst()
1963 .addUse(GR->getSPIRVTypeID(Call->ReturnType)); in generateSpecConstantInst()
1964 for (unsigned i = 0; i < Call->Arguments.size(); i++) in generateSpecConstantInst()
1965 MIB.addUse(Call->Arguments[i]); in generateSpecConstantInst()
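A sketch of the two shapes handled here, assuming the usual __spirv_SpecConstant form of (id, default-value):

    // x = __spirv_SpecConstant(7, 42);
    //   %x = OpSpecConstant %int 42
    //   OpDecorate %x SpecId 7
    // Bool defaults use OpSpecConstantTrue/False instead, and the composite
    // flavor (the argument loop at 1964) presumably emits
    // OpSpecConstantComposite over all of the call's arguments.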
1973 static bool buildNDRange(const SPIRV::IncomingCall *Call, in buildNDRange() argument
1977 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in buildNDRange()
1978 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]); in buildNDRange()
1988 unsigned NumArgs = Call->Arguments.size(); in buildNDRange()
1990 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2]; in buildNDRange()
1993 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3]; in buildNDRange()
1996 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1]; in buildNDRange()
2010 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2; in buildNDRange()
2041 .addUse(Call->Arguments[0]) in buildNDRange()
2061 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, in buildEnqueueKernel() argument
2066 bool IsSpirvOp = Call->isSpirvOp(); in buildEnqueueKernel()
2067 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp; in buildEnqueueKernel()
2074 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) { in buildEnqueueKernel()
2076 Register GepReg = Call->Arguments[LocalSizeArrayIdx]; in buildEnqueueKernel()
2107 .addDef(Call->ReturnRegister) in buildEnqueueKernel()
2113 MIB.addUse(Call->Arguments[i]); in buildEnqueueKernel()
2115 // If there are no event arguments in the original call, add dummy ones. in buildEnqueueKernel()
2124 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI); in buildEnqueueKernel()
2129 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1]; in buildEnqueueKernel()
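For reference, the operand layout of the instruction being assembled here, as defined by the SPIR-V specification (the local-size operands appear only for the varargs flavor):

    // %ret = OpEnqueueKernel %int %queue %flags %ndrange
    //        %numEvents %waitEvents %retEvent
    //        %invoke %blockLiteral %blockSize %blockAlign [%localSize ...]
    // Missing event arguments are filled with the dummy operands noted at 2115.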
2146 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, in generateEnqueueInst() argument
2150 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateEnqueueInst()
2157 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateEnqueueInst()
2158 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]); in generateEnqueueInst()
2162 .addDef(Call->ReturnRegister) in generateEnqueueInst()
2163 .addUse(GR->getSPIRVTypeID(Call->ReturnType)); in generateEnqueueInst()
2165 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateEnqueueInst()
2167 .addDef(Call->ReturnRegister) in generateEnqueueInst()
2168 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateEnqueueInst()
2169 .addUse(Call->Arguments[0]); in generateEnqueueInst()
2171 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateEnqueueInst()
2172 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateEnqueueInst()
2174 .addUse(Call->Arguments[0]) in generateEnqueueInst()
2175 .addUse(Call->Arguments[1]); in generateEnqueueInst()
2177 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateEnqueueInst()
2178 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateEnqueueInst()
2179 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass); in generateEnqueueInst()
2181 .addUse(Call->Arguments[0]) in generateEnqueueInst()
2182 .addUse(Call->Arguments[1]) in generateEnqueueInst()
2183 .addUse(Call->Arguments[2]); in generateEnqueueInst()
2185 return buildNDRange(Call, MIRBuilder, GR); in generateEnqueueInst()
2187 return buildEnqueueKernel(Call, MIRBuilder, GR); in generateEnqueueInst()
2193 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, in generateAsyncCopy() argument
2197 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateAsyncCopy()
2202 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType); in generateAsyncCopy()
2203 if (Call->isSpirvOp()) in generateAsyncCopy()
2204 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in generateAsyncCopy()
2212 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent in generateAsyncCopy()
2215 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType); in generateAsyncCopy()
2216 unsigned NumArgs = Call->Arguments.size(); in generateAsyncCopy()
2217 Register EventReg = Call->Arguments[NumArgs - 1]; in generateAsyncCopy()
2219 .addDef(Call->ReturnRegister) in generateAsyncCopy()
2222 .addUse(Call->Arguments[0]) in generateAsyncCopy()
2223 .addUse(Call->Arguments[1]) in generateAsyncCopy()
2224 .addUse(Call->Arguments[2]) in generateAsyncCopy()
2225 .addUse(Call->Arguments.size() > 4 in generateAsyncCopy()
2226 ? Call->Arguments[3] in generateAsyncCopy()
2230 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder, in generateAsyncCopy()
2237 .addUse(Call->Arguments[0]) in generateAsyncCopy()
2238 .addUse(Call->Arguments[1]); in generateAsyncCopy()
2245 const SPIRV::IncomingCall *Call, in generateConvertInst() argument
2250 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set); in generateConvertInst()
2252 if (!Builtin && Call->isSpirvOp()) { in generateConvertInst()
2253 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateConvertInst()
2256 return buildOpFromWrapper(MIRBuilder, Opcode, Call, in generateConvertInst()
2257 GR->getSPIRVTypeID(Call->ReturnType)); in generateConvertInst()
2261 buildOpDecorate(Call->ReturnRegister, MIRBuilder, in generateConvertInst()
2264 buildOpDecorate(Call->ReturnRegister, MIRBuilder, in generateConvertInst()
2271 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) { in generateConvertInst()
2273 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) { in generateConvertInst()
2281 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister, in generateConvertInst()
2291 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) == in generateConvertInst()
2292 GR->getScalarOrVectorComponentCount(Call->ReturnRegister); in generateConvertInst()
2300 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0], in generateConvertInst()
2303 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) { in generateConvertInst()
2312 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) == in generateConvertInst()
2313 GR->getScalarOrVectorComponentCount(Call->ReturnRegister); in generateConvertInst()
2319 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister, in generateConvertInst()
2343 .addDef(Call->ReturnRegister) in generateConvertInst()
2344 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateConvertInst()
2345 .addUse(Call->Arguments[0]); in generateConvertInst()
2349 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, in generateVectorLoadStoreInst() argument
2354 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name, in generateVectorLoadStoreInst()
2355 Call->Builtin->Set); in generateVectorLoadStoreInst()
2359 .addDef(Call->ReturnRegister) in generateVectorLoadStoreInst()
2360 .addUse(GR->getSPIRVTypeID(Call->ReturnType)) in generateVectorLoadStoreInst()
2363 for (auto Argument : Call->Arguments) in generateVectorLoadStoreInst()
2375 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, in generateLoadStoreInst() argument
2379 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin; in generateLoadStoreInst()
2386 MIB.addDef(Call->ReturnRegister); in generateLoadStoreInst()
2387 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType)); in generateLoadStoreInst()
2390 MIB.addUse(Call->Arguments[0]); in generateLoadStoreInst()
2392 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass); in generateLoadStoreInst()
2395 MIB.addUse(Call->Arguments[1]); in generateLoadStoreInst()
2396 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass); in generateLoadStoreInst()
2399 unsigned NumArgs = Call->Arguments.size(); in generateLoadStoreInst()
2401 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI)); in generateLoadStoreInst()
2402 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass); in generateLoadStoreInst()
2405 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI)); in generateLoadStoreInst()
2406 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass); in generateLoadStoreInst()
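These __spirv_Load/Store wrappers carry the optional memory-operand mask and alignment as constant arguments, which are re-emitted as immediates (the getConstFromIntrinsic calls above); a load sketch, assuming the usual argument convention:

    // x = __spirv_Load(p, /*MemOperand=*/0x2 /*Aligned*/, /*Align=*/4);
    //   %x = OpLoad %T %p Aligned 4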
2424 std::unique_ptr<const IncomingCall> Call = in mapBuiltinToOpcode() local
2426 if (!Call) in mapBuiltinToOpcode()
2429 switch (Call->Builtin->Group) { in mapBuiltinToOpcode()
2441 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set)) in mapBuiltinToOpcode()
2442 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0); in mapBuiltinToOpcode()
2445 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name, in mapBuiltinToOpcode()
2446 Call->Builtin->Set)) in mapBuiltinToOpcode()
2447 return std::make_tuple(Call->Builtin->Group, 0, R->Number); in mapBuiltinToOpcode()
2450 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name, in mapBuiltinToOpcode()
2451 Call->Builtin->Set)) in mapBuiltinToOpcode()
2455 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name)) in mapBuiltinToOpcode()
2456 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0); in mapBuiltinToOpcode()
2459 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name)) in mapBuiltinToOpcode()
2460 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0); in mapBuiltinToOpcode()
2463 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name)) in mapBuiltinToOpcode()
2464 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0); in mapBuiltinToOpcode()
2467 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name)) in mapBuiltinToOpcode()
2468 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0); in mapBuiltinToOpcode()
2471 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0); in mapBuiltinToOpcode()
2473 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0); in mapBuiltinToOpcode()
2475 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct, in mapBuiltinToOpcode()
2478 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0); in mapBuiltinToOpcode()
2491 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n"); in lowerBuiltin()
2507 std::unique_ptr<const IncomingCall> Call = in lowerBuiltin() local
2510 if (!Call) { in lowerBuiltin()
2516 assert(Args.size() >= Call->Builtin->MinNumArgs && in lowerBuiltin()
2518 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs) in lowerBuiltin()
2522 switch (Call->Builtin->Group) { in lowerBuiltin()
2524 return generateExtInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2526 return generateRelationalInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2528 return generateGroupInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2530 return generateBuiltinVar(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2532 return generateAtomicInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2534 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2536 return generateBarrierInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2538 return generateCastToPtrInst(Call.get(), MIRBuilder); in lowerBuiltin()
2540 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2542 return generateWaveInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2544 return generateGetQueryInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2546 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2548 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2550 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR); in lowerBuiltin()
2552 return generateWriteImageInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2554 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR); in lowerBuiltin()
2556 return generateSelectInst(Call.get(), MIRBuilder); in lowerBuiltin()
2558 return generateConstructInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2560 return generateSpecConstantInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2562 return generateEnqueueInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2564 return generateAsyncCopy(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2566 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR); in lowerBuiltin()
2568 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2570 return generateLoadStoreInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2572 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2574 return generateGroupUniformInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2576 return generateKernelClockInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2578 return generateCoopMatrInst(Call.get(), MIRBuilder, GR); in lowerBuiltin()
2595 // OpenCL builtin types in demangled call strings have the following format: in parseBuiltinCallArgumentBaseType()
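The comment is cut off by the match; the tokens it refers to look like the following in demangled signatures (examples, not quoted from the file):

    // "ocl_image2d_ro", "ocl_image3d_wo", "ocl_sampler", "ocl_event", ...
    // e.g. "write_imagef(ocl_image2d_wo, int2, float4)"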