Lines Matching refs:EEW
241 // List of EEW.
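The comment above heads the list of supported element widths. A minimal sketch of what such a list looks like in TableGen, assuming the standard RVV effective element widths of 8, 16, 32, and 64 bits (the variable name is illustrative, not necessarily the upstream spelling):

// Supported effective element widths, in bits.
defvar EEWList = [8, 16, 32, 64];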
762 int EEW> :
767 RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
778 int EEW> :
784 RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
796 int EEW> :
801 RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
812 int EEW> :
818 RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
830 int EEW> :
835 RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
846 int EEW> :
852 RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
865 int EEW,
874 RISCVVLX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
887 int EEW,
897 RISCVVLX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
910 int EEW> :
914 RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
923 int EEW> :
928 RISCVVSE</*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
937 int EEW> :
942 RISCVVSE</*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
951 int EEW> :
956 RISCVVSE</*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
1327 class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1333 RISCVVSX</*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1341 class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
1347 RISCVVSX</*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
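Each load/store pseudo class above forwards its EEW template argument through !logtwo(EEW) into the searchable-table base classes (RISCVVLE, RISCVVLX, RISCVVSE, RISCVVSX), so the generated tables carry a compact log2 encoding rather than the width in bits. A minimal TableGen sketch of that mapping; the class and record names here are illustrative, not taken from the upstream file:

// Sketch: !logtwo turns an element width in bits into its log2 encoding.
class Log2EEWSketch<int EEW> {
  bits<3> Log2EEW = !logtwo(EEW);  // 8 -> 3, 16 -> 4, 32 -> 5, 64 -> 6
}
def SketchE8  : Log2EEWSketch<8>;   // Log2EEW = 3
def SketchE64 : Log2EEWSketch<64>;  // Log2EEW = 6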
1585 int EEW,
1591 RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1602 int EEW,
1608 RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
1620 int EEW,
1626 RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1637 int EEW,
1643 RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/1, !logtwo(EEW), VLMul> {
1655 int EEW,
1661 RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1672 int EEW,
1679 RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, !logtwo(EEW), VLMul> {
1692 int EEW,
1700 RISCVVLXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1714 int EEW,
1723 RISCVVLXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1737 int EEW,
1742 RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
1751 int EEW,
1757 RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, !logtwo(EEW), VLMul> {
1766 int EEW,
1772 RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, !logtwo(EEW), VLMul> {
1781 int EEW,
1787 RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, !logtwo(EEW), VLMul> {
1797 int EEW,
1805 RISCVVSXSEG<NF, /*Masked*/0, Ordered, !logtwo(EEW), VLMul, LMUL> {
1815 int EEW,
1823 RISCVVSXSEG<NF, /*Masked*/1, Ordered, !logtwo(EEW), VLMul, LMUL> {
1872 def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1>,
1947 def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1>,
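The two mask pseudo defs just above pass EEW=1, which !logtwo maps to 0 in the tables. Consumers can recover the width in bits from the stored log2 value with a shift; a small sketch using TableGen's !shl operator, again with illustrative names:

// Sketch: reconstruct EEW in bits from the log2 encoding stored in the tables.
class EEWFromLog2Sketch<int Log2EEW> {
  int EEW = !shl(1, Log2EEW);  // 0 -> 1, 3 -> 8, 4 -> 16, 5 -> 32, 6 -> 64
}
def SketchLog2E5 : EEWFromLog2Sketch<5>;  // EEW = 32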
2294 // * The destination EEW is smaller than the source EEW and the overlap is
2297 // * The destination EEW is greater than the source EEW, the source EMUL is
2362 // "The destination EEW is smaller than the source EEW and the overlap is in the
2619 // The destination EEW is 1 since "For the purposes of register group overlap
2620 // constraints, mask elements have EEW=1."
2621 // The source EEW is 8, 16, 32, or 64.
2622 // When the destination EEW is different from source EEW, we need to use
2626 // "The destination EEW is smaller than the source EEW and the overlap is in the
6991 // vmulh, vmulhu, vmulhsu are not included for EEW=64 in Zve64*.
7082 // vsmul.vv and vsmul.vx are not included for EEW=64 in Zve64*.
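Both notes record restrictions from the Zve64* profiles: without the full V extension, the 64-bit high-half multiplies (vmulh, vmulhu, vmulhsu) and 64-bit vsmul are unavailable. A hedged sketch of gating the SEW=64 variants behind a subtarget predicate; the predicate and query names are assumptions and may not match the upstream mechanism:

// Sketch: only define/select the SEW=64 multiply variants when the subtarget
// reports full 64-bit vector multiply support (absent in Zve64*).
def SketchHasFullMul64 :
    Predicate<"Subtarget->hasVInstructionsFullMultiply()">;
// SEW=64 defs and patterns for vmulh/vmulhu/vmulhsu/vsmul would then be
// wrapped in:  let Predicates = [SketchHasFullMul64] in { ... }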