ipa_endpoint.c: changes from commit 4468a3448b6aa1c00f25ce1162c57d4a7c2e7ba2 (old, '-' lines) to commit 216b409d0914f68c9a51760658cf687f3a5f84ba (new, '+' lines)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>

--- 58 unchanged lines hidden ---

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)

-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
-        if (version < IPA_VERSION_4_5)
-                return field_max(aggr_byte_limit_fmask(true));
-
-        return field_max(aggr_byte_limit_fmask(false));
-}
-
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
        /* A hard aggregation limit will not be crossed; aggregation closes
         * if saving incoming data would cross the hard byte limit boundary.
         *
         * With a soft limit, aggregation closes *after* the size boundary
         * has been crossed. In that case the limit must leave enough space

--- 15 unchanged lines hidden ---

        struct device *dev = &ipa->pdev->dev;
        enum ipa_endpoint_name other_name;

        if (ipa_gsi_endpoint_data_empty(data))
                return true;

        if (!data->toward_ipa) {
                const struct ipa_endpoint_rx *rx_config;
+                const struct ipa_reg *reg;
                u32 buffer_size;
                u32 aggr_size;
                u32 limit;

                if (data->endpoint.filter_support) {
                        dev_err(dev, "filtering not supported for "
                                "RX endpoint %u\n",
                                data->endpoint_id);

--- 44 unchanged lines hidden ---

                /* For an endpoint supporting receive aggregation, the byte
                 * limit defines the point at which aggregation closes. This
                 * check ensures the receive buffer size doesn't result in a
                 * limit that exceeds what's representable in the aggregation
                 * byte limit field.
                 */
                aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
                                             rx_config->aggr_hard_limit);
-                limit = aggr_byte_limit_max(ipa->version);
+                reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+                limit = ipa_reg_field_max(reg, BYTE_LIMIT);
                if (aggr_size > limit) {
                        dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
                                data->endpoint_id, aggr_size, limit);

                        return false;
                }

                return true;    /* Nothing more to check for RX */
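To make the byte-limit check above concrete, here is a minimal standalone sketch with made-up numbers: an 8192-byte receive buffer, 64 bytes assumed for NET_SKB_PAD, and a hypothetical 5-bit BYTE_LIMIT field (field maximum 31, i.e. 31 KB). Only the hard-limit case is modeled; the soft-limit accounting done inside ipa_aggr_size_kb() falls in the hidden lines above.

#include <stdio.h>

int main(void)
{
        unsigned int buffer_size = 8192; /* stand-in for rx_config->buffer_size */
        unsigned int skb_pad = 64;       /* assumed NET_SKB_PAD value */
        unsigned int limit = 31;         /* hypothetical ipa_reg_field_max(reg, BYTE_LIMIT) */
        unsigned int aggr_size;

        /* Hard limit: the whole (padded) buffer, expressed in 1 KB units */
        aggr_size = (buffer_size - skb_pad) / 1024;  /* 8128 / 1024 = 7 KB */

        if (aggr_size > limit)
                printf("aggregated size too large (%u KB > %u KB)\n", aggr_size, limit);
        else
                printf("aggr_size %u KB fits the BYTE_LIMIT field\n", aggr_size);

        return 0;
}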

--- 581 unchanged lines hidden ---

        const struct ipa_reg *reg;
        u32 offset;
        u32 val;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        reg = ipa_reg(ipa, ENDP_INIT_MODE);
-        offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
        if (endpoint->config.dma_mode) {
                enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
-                u32 dma_endpoint_id;
+                u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;

-                dma_endpoint_id = ipa->name_map[name]->endpoint_id;
-
-                val = u32_encode_bits(IPA_DMA, MODE_FMASK);
-                val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+                val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
+                val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
        } else {
-                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+                val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
        }
        /* All other bits unspecified (and 0) */

+        offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
        iowrite32(val, ipa->reg_virt + offset);
}

-/* Encoded values for AGGR endpoint register fields */
-static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
-{
-        if (version < IPA_VERSION_4_5)
-                return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
-
-        return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
-}
-
/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
 * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
 * they're configured to have granularity 100 usec and 1 msec, respectively.
 *
 * The return value is the positive or negative Qtime value to use to
 * express the (microsecond) time provided. A positive return value
 * means pulse generator 0 can be used; otherwise use pulse generator 1.
 */

--- 9 unchanged lines hidden ---

        /* Have to use pulse generator 1 (millisecond granularity) */
        val = DIV_ROUND_CLOSEST(microseconds, 1000);
        WARN_ON(val > max);

        return (int)-val;
}

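The pulse-generator selection described in the comment above can be sketched in isolation. This is not the driver's ipa_qtime_val() (its first half falls in the hidden lines); it only models the behaviour the comment describes, with a hypothetical field maximum of 31 and the 100 usec / 1 msec granularities named in the comment.

#include <stdio.h>

static int qtime_val_sketch(unsigned int microseconds, unsigned int max)
{
        unsigned int val;

        /* Try pulse generator 0 (100 usec granularity) first */
        val = (microseconds + 50) / 100;        /* round to nearest */
        if (val <= max)
                return (int)val;                /* positive: generator 0 */

        /* Fall back to pulse generator 1 (1 msec granularity) */
        val = (microseconds + 500) / 1000;
        return -(int)val;                       /* negative: generator 1 */
}

int main(void)
{
        /* With a hypothetical 5-bit field (max = 31):
         * 2500 usec fits as 25 ticks of 100 usec (generator 0);
         * 40000 usec does not, so it becomes 40 ticks of 1 msec (generator 1).
         */
        printf("%d\n", qtime_val_sketch(2500, 31));     /* prints 25 */
        printf("%d\n", qtime_val_sketch(40000, 31));    /* prints -40 */
        return 0;
}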
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encode(enum ipa_version version, u32 microseconds)
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+                                  u32 microseconds)
{
-        u32 fmask;
+        u32 max;
        u32 val;

        if (!microseconds)
                return 0;       /* Nothing to compute if time limit is 0 */

-        if (version >= IPA_VERSION_4_5) {
+        max = ipa_reg_field_max(reg, TIME_LIMIT);
+        if (ipa->version >= IPA_VERSION_4_5) {
                u32 gran_sel;
                int ret;

                /* Compute the Qtime limit value to use */
-                fmask = aggr_time_limit_fmask(false);
-                ret = ipa_qtime_val(microseconds, field_max(fmask));
+                ret = ipa_qtime_val(microseconds, max);
                if (ret < 0) {
                        val = -ret;
-                        gran_sel = AGGR_GRAN_SEL_FMASK;
+                        gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
                } else {
                        val = ret;
                        gran_sel = 0;
                }

-                return gran_sel | u32_encode_bits(val, fmask);
+                return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
        }

-        /* We set aggregation granularity in ipa_hardware_config() */
-        fmask = aggr_time_limit_fmask(true);
+        /* We program aggregation granularity in ipa_hardware_config() */
        val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
-        WARN(val > field_max(fmask),
-             "aggr_time_limit too large (%u > %u usec)\n",
-             val, field_max(fmask) * IPA_AGGR_GRANULARITY);
+        WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+             microseconds, max * IPA_AGGR_GRANULARITY);

-        return u32_encode_bits(val, fmask);
+        return ipa_reg_encode(reg, TIME_LIMIT, val);
}

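Note that the reworked WARN() now reports both quantities in microseconds (the requested time versus max * IPA_AGGR_GRANULARITY), where the old message compared a granularity-unit count against microseconds. A standalone sketch of the pre-v4.5 path, assuming a 500 usec IPA_AGGR_GRANULARITY and a hypothetical 5-bit TIME_LIMIT field (both made-up values):

#include <stdio.h>

int main(void)
{
        unsigned int granularity = 500;  /* stand-in for IPA_AGGR_GRANULARITY */
        unsigned int max = 31;           /* stand-in for ipa_reg_field_max(reg, TIME_LIMIT) */
        unsigned int microseconds = 2000; /* requested aggregation time limit */
        unsigned int val;

        /* Round to the nearest number of granularity units */
        val = (microseconds + granularity / 2) / granularity;  /* 2000 usec -> 4 units */

        if (val > max)
                printf("aggr_time_limit too large (%u > %u usec)\n",
                       microseconds, max * granularity);
        else
                printf("TIME_LIMIT field value: %u (%u usec)\n", val, val * granularity);

        return 0;
}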
-static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
-{
-        u32 val = enabled ? 1 : 0;
-
-        if (version < IPA_VERSION_4_5)
-                return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
-
-        return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
-}
-
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        const struct ipa_reg *reg;
        u32 val = 0;

        reg = ipa_reg(ipa, ENDP_INIT_AGGR);
        if (endpoint->config.aggregation) {
                if (!endpoint->toward_ipa) {
                        const struct ipa_endpoint_rx *rx_config;
-                        enum ipa_version version = ipa->version;
                        u32 buffer_size;
-                        bool close_eof;
                        u32 limit;

                        rx_config = &endpoint->config.rx;
-                        val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
-                        val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+                        val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+                        val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);

                        buffer_size = rx_config->buffer_size;
                        limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
                                                 rx_config->aggr_hard_limit);
-                        val |= aggr_byte_limit_encoded(version, limit);
+                        val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);

                        limit = rx_config->aggr_time_limit;
-                        val |= aggr_time_limit_encode(version, limit);
+                        val |= aggr_time_limit_encode(ipa, reg, limit);

                        /* AGGR_PKT_LIMIT is 0 (unlimited) */

-                        close_eof = rx_config->aggr_close_eof;
-                        val |= aggr_sw_eof_active_encoded(version, close_eof);
+                        if (rx_config->aggr_close_eof)
+                                val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
                } else {
-                        val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
-                                               AGGR_EN_FMASK);
-                        val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+                        val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+                        val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
                        /* other fields ignored */
                }
                /* AGGR_FORCE_CLOSE is 0 */
                /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
        } else {
-                val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+                val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
                /* other fields ignored */
        }

        iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

/* The head-of-line blocking timer is defined as a tick count. For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the timeout period provided
 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 */
-static u32 hol_block_timer_encode(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+                                  u32 microseconds)
{
        u32 width;
        u32 scale;
        u64 ticks;
        u64 rate;
        u32 high;
        u32 val;

        if (!microseconds)
                return 0;       /* Nothing to compute if timer period is 0 */

        if (ipa->version >= IPA_VERSION_4_5) {
+                u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
                u32 gran_sel;
                int ret;

                /* Compute the Qtime limit value to use */
-                ret = ipa_qtime_val(microseconds, field_max(TIME_LIMIT_FMASK));
+                ret = ipa_qtime_val(microseconds, max);
                if (ret < 0) {
                        val = -ret;
-                        gran_sel = GRAN_SEL_FMASK;
+                        gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
                } else {
                        val = ret;
                        gran_sel = 0;
                }

-                return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+                return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
        }

-        /* Use 64 bit arithmetic to avoid overflow... */
+        /* Use 64 bit arithmetic to avoid overflow */
        rate = ipa_core_clock_rate(ipa);
        ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
-        /* ...but we still need to fit into a 32-bit register */
-        WARN_ON(ticks > U32_MAX);
+
+        /* We still need the result to fit into the field */
+        WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));

        /* IPA v3.5.1 through v4.1 just record the tick count */
        if (ipa->version < IPA_VERSION_4_2)
-                return (u32)ticks;
+                return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);

        /* For IPA v4.2, the tick count is represented by base and
         * scale fields within the 32-bit timer register, where:
         *      ticks = base << scale;
         * The best precision is achieved when the base value is as
         * large as possible. Find the highest set bit in the tick
         * count, and extract the number of bits in the base field
         * such that high bit is included.
         */
-        high = fls(ticks);              /* 1..32 */
-        width = HWEIGHT32(BASE_VALUE_FMASK);
+        high = fls(ticks);              /* 1..32 (or warning above) */
+        width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
        scale = high > width ? high - width : 0;
        if (scale) {
                /* If we're scaling, round up to get a closer result */
                ticks += 1 << (scale - 1);
                /* High bit was set, so rounding might have affected it */
                if (fls(ticks) != high)
                        scale++;
        }

-        val = u32_encode_bits(scale, SCALE_FMASK);
-        val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
+        val = ipa_reg_encode(reg, TIMER_SCALE, scale);
+        val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);

        return val;
}

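The base/scale encoding used for the IPA v4.2 branch can be worked through with a small standalone sketch. All numbers are made up: a 100 MHz core clock and a 5-bit TIMER_BASE_VALUE field are assumptions, not values taken from the driver (the real width comes from ipa_reg_fmask(reg, TIMER_BASE_VALUE)).

#include <stdio.h>

/* Position of the highest set bit, 1-based (like the kernel's fls()) */
static unsigned int fls_u32(unsigned int v)
{
        unsigned int n = 0;

        while (v) {
                n++;
                v >>= 1;
        }
        return n;
}

int main(void)
{
        unsigned long long rate = 100000000ULL; /* assumed core clock (Hz) */
        unsigned int microseconds = 10000;      /* requested timeout */
        unsigned int width = 5;                 /* assumed TIMER_BASE_VALUE width */
        unsigned long long ticks;
        unsigned int high, scale;

        /* One tick is 128 core-clock cycles; round to the nearest tick */
        ticks = (microseconds * rate + 64000000ULL) / 128000000ULL;    /* 7813 */

        high = fls_u32((unsigned int)ticks);            /* 13 */
        scale = high > width ? high - width : 0;        /* 8 */
        if (scale) {
                ticks += 1ULL << (scale - 1);           /* round up: 7941 */
                if (fls_u32((unsigned int)ticks) != high)
                        scale++;                        /* not needed here */
        }

        /* base = 31, representing 31 << 8 = 7936 ticks (7813 requested) */
        printf("scale=%u base=%llu -> %llu ticks\n",
               scale, ticks >> scale, (ticks >> scale) << scale);
        return 0;
}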
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
                                              u32 microseconds)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        const struct ipa_reg *reg;
        u32 val;

        /* This should only be changed when HOL_BLOCK_EN is disabled */
        reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
-        val = hol_block_timer_encode(ipa, microseconds);
+        val = hol_block_timer_encode(ipa, reg, microseconds);

        iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}

static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        const struct ipa_reg *reg;
        u32 offset;
        u32 val;

        reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
        offset = ipa_reg_n_offset(reg, endpoint_id);
-        val = enable ? HOL_BLOCK_EN_FMASK : 0;
+        val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;

        iowrite32(val, ipa->reg_virt + offset);

        /* When enabling, the register must be written twice for IPA v4.5+ */
        if (enable && ipa->version >= IPA_VERSION_4_5)
                iowrite32(val, ipa->reg_virt + offset);
}

--- 1004 unchanged lines hidden ---