Lines matching full:ipa in drivers/net/ipa/ipa_endpoint.c (each entry is prefixed with its source line number; only lines containing the identifier are shown)
16 #include "ipa.h"
40 /** enum ipa_status_opcode - IPA status opcode field hardware values */
51 /** enum ipa_status_exception - IPA status exception field hardware values */
67 /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
87 /* Special IPA filter/router rule field value indicating "rule miss" */
90 /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
92 /* enum ipa_status_field_id - IPA packet status structure field identifiers */
124 /* Size in bytes of an IPA packet status structure */
127 /* IPA status structure decoder; looks up field values for a structure */
128 static u32 ipa_status_extract(struct ipa *ipa, const void *data, in ipa_status_extract() argument
131 enum ipa_version version = ipa->version; in ipa_status_extract()
147 /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */ in ipa_status_extract()
148 /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */ in ipa_status_extract()
166 /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */ in ipa_status_extract()
209 /* Status word 7, bit 31 is reserved (not IPA v5.0+) */ in ipa_status_extract()
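The decode pattern behind ipa_status_extract() can be modeled in a few lines. This is a user-space sketch, not the driver's code: the only placement taken from the listing is ROUTER_TABLE in word 3, bits 14-21 on IPA v5.0+; the pre-v5.0 placement below is a hypothetical stand-in.

#include <stdint.h>

enum ipa_version_model { V4_5, V5_0 };  /* stand-in for the driver's enum */

static uint32_t get_field(const uint32_t *status, unsigned int word,
                          uint32_t mask, unsigned int shift)
{
        return (status[word] & mask) >> shift;
}

static uint32_t status_router_table(enum ipa_version_model version,
                                    const uint32_t *status)
{
        if (version >= V5_0)
                return get_field(status, 3, 0x003fc000, 14); /* bits 14-21 */

        /* Hypothetical pre-v5.0 placement, for illustration only */
        return get_field(status, 3, 0x00003fe0, 5);
}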
234 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
240 struct device *dev = ipa->dev; in ipa_endpoint_data_valid_one()
308 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_data_valid_one()
321 /* Starting with IPA v4.5 sequencer replication is obsolete */ in ipa_endpoint_data_valid_one()
322 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
390 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count, in ipa_endpoint_max() argument
394 struct device *dev = ipa->dev; in ipa_endpoint_max()
424 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_max()
436 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
446 * Note that suspend is not supported starting with IPA v4.0, and
447 * delay mode should not be used starting with IPA v4.2.
452 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
461 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
463 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
465 reg = ipa_reg(ipa, ENDP_INIT_CTRL); in ipa_endpoint_init_ctrl()
467 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
477 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
487 /* Delay mode should not be used for IPA v4.2+ */ in ipa_endpoint_program_delay()
488 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
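The control helpers above follow a plain read-modify-write of one flag bit per endpoint. A minimal sketch of that pattern, with ENDP_DELAY and ENDP_SUSPEND as hypothetical bit positions (the real positions come from the driver's register field definitions):

#include <stdbool.h>
#include <stdint.h>

#define ENDP_DELAY      (1u << 0)       /* hypothetical bit position */
#define ENDP_SUSPEND    (1u << 1)       /* hypothetical bit position */

/* Set or clear one control bit; the reads and writes below stand in
 * for ioread32()/iowrite32() on ipa->reg_virt + offset.
 */
static void endpoint_ctrl_update(volatile uint32_t *reg, bool delay_mode,
                                 bool enable)
{
        uint32_t bit = delay_mode ? ENDP_DELAY : ENDP_SUSPEND;
        uint32_t val = *reg;

        if (enable)
                val |= bit;
        else
                val &= ~bit;

        *reg = val;
}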
497 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
502 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_aggr_active()
504 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); in ipa_endpoint_aggr_active()
505 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_aggr_active()
514 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
518 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_force_close()
520 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); in ipa_endpoint_force_close()
521 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_force_close()
528 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
530 * issue in IPA version 3.5.1 where the suspend interrupt will not be
535 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
547 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
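Pieced together from the helpers shown above, the v3.5.1 workaround reduces to the sequence below (a condensed sketch; the real function also returns early when the endpoint has no aggregation configured):

static void endpoint_suspend_aggr_sketch(struct ipa_endpoint *endpoint)
{
        struct ipa *ipa = endpoint->ipa;

        /* Nothing to do unless an aggregation frame is currently open */
        if (!ipa_endpoint_aggr_active(endpoint))
                return;

        /* Force-close the frame, then simulate the suspend interrupt
         * that IPA v3.5.1 fails to generate on its own.
         */
        ipa_endpoint_force_close(endpoint);
        ipa_interrupt_simulate_suspend(ipa->interrupt);
}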
556 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
557 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
564 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
574 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
575 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
578 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
582 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_pause_all()
583 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_pause_all()
590 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
593 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
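Putting the comment above into code shape: a condensed sketch of the per-endpoint dispatch (non-modem endpoints are skipped by execution-environment ID):

        while (endpoint_id < ipa->endpoint_count) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];

                if (endpoint->ee_id != GSI_EE_MODEM)
                        continue;       /* only modem endpoints are paused */

                if (!endpoint->toward_ipa)
                        (void)ipa_endpoint_program_suspend(endpoint, enable);
                else if (ipa->version < IPA_VERSION_4_2)
                        ipa_endpoint_program_delay(endpoint, enable);
                else
                        gsi_modem_channel_flow_control(&ipa->gsi,
                                                       endpoint->channel_id,
                                                       enable);
        }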
600 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
609 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
610 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
612 dev_err(ipa->dev, in ipa_endpoint_modem_exception_reset_all()
617 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_modem_exception_reset_all()
623 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
627 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_modem_exception_reset_all()
641 ipa_cmd_pipeline_clear_wait(ipa); in ipa_endpoint_modem_exception_reset_all()
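The reset path batches one register write per modem TX endpoint into the command transaction allocated above, zeroing each endpoint's ENDP_STATUS register so status reporting is disabled there. A condensed sketch of the loop, assuming the usual ipa_cmd_register_write_add() helper (value 0, full mask):

        for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
                const struct reg *reg;
                u32 offset;

                /* Only modem TX endpoints get their status reset */
                if (endpoint->ee_id != GSI_EE_MODEM || !endpoint->toward_ipa)
                        continue;

                reg = ipa_reg(ipa, ENDP_STATUS);
                offset = reg_n_offset(reg, endpoint_id);

                /* Writing 0 with a full mask disables status on the
                 * endpoint (and clears the rest of the register too)
                 */
                ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
        }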
649 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg() local
654 reg = ipa_reg(ipa, ENDP_INIT_CFG); in ipa_endpoint_init_cfg()
657 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
680 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
686 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat() local
693 reg = ipa_reg(ipa, ENDP_INIT_NAT); in ipa_endpoint_init_nat()
696 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
734 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
756 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
770 * packet size field, and we have the IPA hardware populate both for each
788 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr() local
792 reg = ipa_reg(ipa, ENDP_INIT_HDR); in ipa_endpoint_init_hdr()
794 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
804 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
808 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
810 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
826 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
833 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext() local
837 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); in ipa_endpoint_init_hdr_ext()
861 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
864 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
878 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
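When a value outgrows its original register field on IPA v4.5+, the low bits stay in ENDP_INIT_HDR and the new most-significant bits land in ENDP_INIT_HDR_EXT, per the comments above. A standalone sketch of the split; the 6/2 field widths here are assumptions, not the driver's actual sizes:

#include <stdint.h>

#define HDR_LEN_WIDTH           6       /* assumed width of low field */
#define HDR_LEN_MSB_WIDTH       2       /* assumed width of upper field */

/* Split a header size into the low bits written to the HDR register
 * and the upper bits written to HDR_EXT (IPA v4.5+ only); the caller
 * shifts each value into its register field position.
 */
static void header_size_split(uint32_t header_size, uint32_t *hdr_bits,
                              uint32_t *hdr_ext_bits)
{
        *hdr_bits = header_size & ((1u << HDR_LEN_WIDTH) - 1);
        *hdr_ext_bits = header_size >> HDR_LEN_WIDTH;
}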
884 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask() local
892 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); in ipa_endpoint_init_hdr_metadata_mask()
899 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
904 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode() local
912 reg = ipa_reg(ipa, ENDP_INIT_MODE); in ipa_endpoint_init_mode()
915 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
925 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
928 /* For IPA v4.5+, times are expressed using Qtime. A time is represented
930 * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
934 * available to the AP; a third is available starting with IPA v5.0.
940 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select) in ipa_qtime_val() argument
956 if (ipa->version >= IPA_VERSION_5_0) { in ipa_qtime_val()
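A standalone sketch of the tick/selector computation, assuming pulse-generator periods of 100 microseconds, 1 millisecond, and (for the generator added in IPA v5.0) 10 milliseconds; the real periods are whatever ipa_qtime_config() programs:

#include <stdbool.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

/* Return a tick count for the interval, preferring the finest pulse
 * generator whose count still fits in a field holding values <= max;
 * *select identifies the pulse generator chosen.
 */
static uint32_t qtime_val(bool v5_0_plus, uint32_t microseconds,
                          uint32_t max, uint32_t *select)
{
        uint32_t ticks;

        ticks = DIV_ROUND_CLOSEST(microseconds, 100);   /* 100 us/tick */
        *select = 0;
        if (ticks <= max)
                return ticks;

        ticks = DIV_ROUND_CLOSEST(microseconds, 1000);  /* 1 ms/tick */
        *select = 1;
        if (ticks <= max || !v5_0_plus)
                return ticks;

        ticks = DIV_ROUND_CLOSEST(microseconds, 10000); /* 10 ms/tick */
        *select = 2;

        return ticks;
}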
968 /* Encode the aggregation timer limit (microseconds) based on IPA version */
969 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg, in aggr_time_limit_encode() argument
979 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
982 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in aggr_time_limit_encode()
999 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr() local
1003 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_init_aggr()
1020 val |= aggr_time_limit_encode(ipa, reg, limit); in ipa_endpoint_init_aggr()
1032 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ in ipa_endpoint_init_aggr()
1038 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
1042 * IPA version 4.5 the tick count is based on the Qtimer, which is
1043 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
1044 * each tick represents 128 cycles of the IPA core clock.
1049 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg, in hol_block_timer_encode() argument
1062 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
1067 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in hol_block_timer_encode()
1074 rate = ipa_core_clock_rate(ipa); in hol_block_timer_encode()
1080 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
1081 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
1084 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
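Two steps are involved before IPA v4.5: convert microseconds to ticks of 128 core-clock cycles, then (on IPA v4.2 only) pack the count into base and scale fields such that ticks = base << scale, keeping base as large as possible for precision. A standalone sketch, with the field layout and widths assumed:

#include <stdint.h>

#define USEC_PER_SEC            1000000ULL
#define BASE_WIDTH              5       /* assumed width of base field */
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

/* Convert microseconds to ticks of 128 core-clock cycles each */
static uint32_t hol_ticks(uint64_t core_clock_hz, uint32_t microseconds)
{
        return DIV_ROUND_CLOSEST(microseconds * core_clock_hz,
                                 128 * USEC_PER_SEC);
}

/* IPA v4.2 only: encode ticks as base << scale (truncation here; the
 * driver rounds).  Field positions assumed: scale above base.
 */
static uint32_t hol_base_scale_encode(uint32_t ticks)
{
        uint32_t scale = 0;

        while ((ticks >> scale) > ((1u << BASE_WIDTH) - 1))
                scale++;

        return (scale << BASE_WIDTH) | (ticks >> scale);
}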
1114 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
1119 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); in ipa_endpoint_init_hol_block_timer()
1120 val = hol_block_timer_encode(ipa, reg, microseconds); in ipa_endpoint_init_hol_block_timer()
1122 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
1129 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en() local
1134 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); in ipa_endpoint_init_hol_block_en()
1138 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1140 /* When enabling, the register must be written twice for IPA v4.5+ */ in ipa_endpoint_init_hol_block_en()
1141 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
1142 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1158 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
1162 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_hol_block_clear_all()
1163 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_hol_block_clear_all()
1176 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr() local
1183 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); in ipa_endpoint_init_deaggr()
1189 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1196 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp() local
1200 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); in ipa_endpoint_init_rsrc_grp()
1203 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1209 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq() local
1216 reg = ipa_reg(ipa, ENDP_INIT_SEQ); in ipa_endpoint_init_seq()
1222 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1226 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1275 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
1279 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_status()
1287 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1291 /* STATUS_LOCATION is 0, meaning IPA packet status in ipa_endpoint_status()
1292 * precedes the packet (not present for IPA v4.5+) in ipa_endpoint_status()
1297 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1331 * The IPA hardware can hold a fixed number of receive buffers for an RX
1375 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1385 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
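The replenish machinery keeps the channel stocked with receive buffers for as long as it has free transactions. Condensed from the driver's flow (replenish flags and doorbell batching omitted):

        while (gsi_channel_trans_idle(&endpoint->ipa->gsi,
                                      endpoint->channel_id)) {
                struct gsi_trans *trans;

                trans = ipa_endpoint_trans_alloc(endpoint, 1);
                if (!trans)
                        break;          /* try again later */

                /* Attach one receive buffer to the transaction */
                if (ipa_endpoint_replenish_one(endpoint, trans)) {
                        gsi_trans_free(trans);
                        break;
                }
                gsi_trans_commit(trans, false);
        }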
1448 /* The format of an IPA packet status structure is the same for several
1467 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_skip() local
1471 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE); in ipa_endpoint_status_skip()
1475 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT); in ipa_endpoint_status_skip()
1487 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag_valid() local
1490 status_mask = ipa_status_extract(ipa, data, STATUS_MASK); in ipa_endpoint_status_tag_valid()
1499 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT); in ipa_endpoint_status_tag_valid()
1500 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag_valid()
1502 complete(&ipa->completion); in ipa_endpoint_status_tag_valid()
1504 dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n", in ipa_endpoint_status_tag_valid()
1516 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_drop() local
1524 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION); in ipa_endpoint_status_drop()
1529 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX); in ipa_endpoint_status_drop()
1540 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_parse() local
1541 struct device *dev = ipa->dev; in ipa_endpoint_status_parse()
1557 length = ipa_status_extract(ipa, data, STATUS_LENGTH); in ipa_endpoint_status_parse()
1622 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1625 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1639 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1644 reg = ipa_reg(ipa, ROUTE); in ipa_endpoint_default_route_set()
1652 iowrite32(val, ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_default_route_set()
1655 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1657 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1666 * taken to ensure the IPA pipeline is properly cleared.
1672 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1673 struct device *dev = ipa->dev; in ipa_endpoint_reset_rx_aggr()
1674 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1759 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1763 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1767 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1772 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1775 dev_err(ipa->dev, in ipa_endpoint_reset()
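Condensed from the lines above, the reset path chooses between the special RX-aggregation sequence and a plain channel reset:

        u32 channel_id = endpoint->channel_id;

        special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
                  endpoint->config.aggregation;
        if (special && ipa_endpoint_aggr_active(endpoint))
                ret = ipa_endpoint_reset_rx_aggr(endpoint);
        else
                gsi_channel_reset(&ipa->gsi, channel_id, true);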
1783 /* Newer versions of IPA use GSI channel flow control in ipa_endpoint_program()
1789 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1817 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1818 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1823 dev_err(ipa->dev, in ipa_endpoint_enable_one()
1831 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id); in ipa_endpoint_enable_one()
1835 __set_bit(endpoint_id, ipa->enabled); in ipa_endpoint_enable_one()
1843 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1844 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1847 if (!test_bit(endpoint_id, ipa->enabled)) in ipa_endpoint_disable_one()
1850 __clear_bit(endpoint_id, endpoint->ipa->enabled); in ipa_endpoint_disable_one()
1854 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id); in ipa_endpoint_disable_one()
1860 dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n", in ipa_endpoint_disable_one()
1866 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_suspend_one()
1867 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1870 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_suspend_one()
1886 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_resume_one()
1887 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1890 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_resume_one()
1904 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1906 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1909 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1910 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1912 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1913 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1916 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1918 if (!ipa->setup_complete) in ipa_endpoint_resume()
1921 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1922 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1924 if (ipa->modem_netdev) in ipa_endpoint_resume()
1925 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1930 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1950 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_setup_one()
1955 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_teardown_one()
1963 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1967 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_setup()
1968 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1971 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1975 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count) in ipa_endpoint_teardown()
1976 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1979 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1981 ipa->available_count = 0; in ipa_endpoint_deconfig()
1982 bitmap_free(ipa->available); in ipa_endpoint_deconfig()
1983 ipa->available = NULL; in ipa_endpoint_deconfig()
1986 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1988 struct device *dev = ipa->dev; in ipa_endpoint_config()
1998 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported. in ipa_endpoint_config()
2008 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
2009 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL); in ipa_endpoint_config()
2010 if (!ipa->available) in ipa_endpoint_config()
2012 ipa->available_count = IPA_ENDPOINT_MAX; in ipa_endpoint_config()
2014 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX); in ipa_endpoint_config()
2022 reg = ipa_reg(ipa, FLAVOR_0); in ipa_endpoint_config()
2023 val = ioread32(ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_config()
2025 /* Our RX is an IPA producer; our TX is an IPA consumer. */ in ipa_endpoint_config()
2037 /* Until IPA v5.0, the max endpoint ID was 32 */ in ipa_endpoint_config()
2038 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1; in ipa_endpoint_config()
2046 ipa->available = bitmap_zalloc(limit, GFP_KERNEL); in ipa_endpoint_config()
2047 if (!ipa->available) in ipa_endpoint_config()
2049 ipa->available_count = limit; in ipa_endpoint_config()
2052 bitmap_set(ipa->available, 0, tx_count); in ipa_endpoint_config()
2053 bitmap_set(ipa->available, rx_base, rx_count); in ipa_endpoint_config()
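Reading the two bitmap_set() calls together: TX endpoint IDs occupy one contiguous run starting at 0, RX IDs another starting at rx_base, possibly leaving a gap of unavailable IDs between them. A runnable toy illustration with made-up counts:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical FLAVOR_0-derived values, for illustration only */
#define TX_COUNT        12      /* TX endpoint IDs are 0..TX_COUNT-1 */
#define RX_BASE         16      /* first RX endpoint ID */
#define RX_COUNT        10      /* RX endpoint IDs start at RX_BASE */

int main(void)
{
        uint64_t available = 0;
        unsigned int i;

        for (i = 0; i < TX_COUNT; i++)
                available |= 1ULL << i;
        for (i = 0; i < RX_COUNT; i++)
                available |= 1ULL << (RX_BASE + i);

        /* IDs 12-15 stay unavailable: the gap between the two ranges */
        printf("available = %#llx\n", (unsigned long long)available);

        return 0;
}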
2055 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_config()
2064 if (!test_bit(endpoint_id, ipa->available)) { in ipa_endpoint_config()
2071 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
2086 ipa_endpoint_deconfig(ipa); in ipa_endpoint_config()
2091 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
2096 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
2099 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
2100 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
2102 endpoint->ipa = ipa; in ipa_endpoint_init_one()
2109 __set_bit(endpoint->endpoint_id, ipa->defined); in ipa_endpoint_init_one()
2114 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined); in ipa_endpoint_exit_one()
2119 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
2123 ipa->filtered = 0; in ipa_endpoint_exit()
2125 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_exit()
2126 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
2128 bitmap_free(ipa->enabled); in ipa_endpoint_exit()
2129 ipa->enabled = NULL; in ipa_endpoint_exit()
2130 bitmap_free(ipa->set_up); in ipa_endpoint_exit()
2131 ipa->set_up = NULL; in ipa_endpoint_exit()
2132 bitmap_free(ipa->defined); in ipa_endpoint_exit()
2133 ipa->defined = NULL; in ipa_endpoint_exit()
2135 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
2136 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
2140 int ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
2149 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; in ipa_endpoint_init()
2150 if (!ipa->endpoint_count) in ipa_endpoint_init()
2154 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2155 if (!ipa->defined) in ipa_endpoint_init()
2158 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2159 if (!ipa->set_up) in ipa_endpoint_init()
2162 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2163 if (!ipa->enabled) in ipa_endpoint_init()
2171 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
2176 ipa->modem_tx_count++; in ipa_endpoint_init()
2180 if (!ipa_filtered_valid(ipa, filtered)) { in ipa_endpoint_init()
2181 ipa_endpoint_exit(ipa); in ipa_endpoint_init()
2186 ipa->filtered = filtered; in ipa_endpoint_init()
2191 bitmap_free(ipa->set_up); in ipa_endpoint_init()
2192 ipa->set_up = NULL; in ipa_endpoint_init()
2194 bitmap_free(ipa->defined); in ipa_endpoint_init()
2195 ipa->defined = NULL; in ipa_endpoint_init()