Lines Matching "ipa-ap-to-modem"
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2024 Linaro Ltd.
10 #include <linux/dma-direction.h>
16 #include "ipa.h"
30 #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
33 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
35 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
40 /** enum ipa_status_opcode - IPA status opcode field hardware values */
51 /** enum ipa_status_exception - IPA status exception field hardware values */
67 /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
87 /* Special IPA filter/router rule field value indicating "rule miss" */
88 #define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
90 /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
92 /* enum ipa_status_field_id - IPA packet status structure field identifiers */
114 STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
115 STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
124 /* Size in bytes of an IPA packet status structure */
127 /* IPA status structure decoder; looks up field values for a structure */
128 static u32 ipa_status_extract(struct ipa *ipa, const void *data, in ipa_status_extract() argument
131 enum ipa_version version = ipa->version; in ipa_status_extract()
147 /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */ in ipa_status_extract()
148 /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */ in ipa_status_extract()
153 /* Status word 1, bits 29-31 are reserved */ in ipa_status_extract()
166 /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */ in ipa_status_extract()
208 /* Status word 7, bits 16-30 are reserved */ in ipa_status_extract()
209 /* Status word 7, bit 31 is reserved (not IPA v5.0+) */ in ipa_status_extract()
216 /* Compute the aggregation size value to use for a given buffer size */
224 * after that limit to receive a full MTU of data plus overhead. in ipa_aggr_size_kb()
227 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
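
The soft-limit adjustment on the line above can be checked with a small stand-alone computation. This is only a sketch: the MTU and per-buffer overhead figures below are made-up example values rather than the driver's constants, and the kilobyte conversion is assumed from the surrounding code.

#include <stdio.h>

#define EXAMPLE_MTU		1500U	/* assumed MTU, for illustration */
#define EXAMPLE_RX_OVERHEAD	320U	/* assumed skb/page overhead */

/* With a soft limit, leave room for a full MTU plus overhead past the
 * limit, then express the result in kilobytes.
 */
static unsigned int aggr_size_kb(unsigned int rx_buffer_size, int hard_limit)
{
	if (!hard_limit)
		rx_buffer_size -= EXAMPLE_MTU + EXAMPLE_RX_OVERHEAD;

	return rx_buffer_size / 1024;
}

int main(void)
{
	/* (32768 - 1500 - 320) / 1024 = 30 */
	printf("soft aggregation limit for a 32 KiB buffer: %u KB\n",
	       aggr_size_kb(32768, 0));

	return 0;
}
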
234 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
240 struct device *dev = ipa->dev; in ipa_endpoint_data_valid_one()
245 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
252 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
255 data->endpoint_id); in ipa_endpoint_data_valid_one()
259 /* Nothing more to check for non-AP RX */ in ipa_endpoint_data_valid_one()
260 if (data->ee_id != GSI_EE_AP) in ipa_endpoint_data_valid_one()
263 rx_config = &data->endpoint.config.rx; in ipa_endpoint_data_valid_one()
266 buffer_size = rx_config->buffer_size; in ipa_endpoint_data_valid_one()
270 data->endpoint_id, buffer_size, limit); in ipa_endpoint_data_valid_one()
274 if (!data->endpoint.config.aggregation) { in ipa_endpoint_data_valid_one()
278 if (rx_config->aggr_time_limit) { in ipa_endpoint_data_valid_one()
281 data->endpoint_id); in ipa_endpoint_data_valid_one()
285 if (rx_config->aggr_hard_limit) { in ipa_endpoint_data_valid_one()
287 data->endpoint_id); in ipa_endpoint_data_valid_one()
291 if (rx_config->aggr_close_eof) { in ipa_endpoint_data_valid_one()
293 data->endpoint_id); in ipa_endpoint_data_valid_one()
297 return result; /* Nothing more to check */ in ipa_endpoint_data_valid_one()
306 aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_data_valid_one()
307 rx_config->aggr_hard_limit); in ipa_endpoint_data_valid_one()
308 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_data_valid_one()
313 data->endpoint_id, aggr_size, limit); in ipa_endpoint_data_valid_one()
318 return true; /* Nothing more to check for RX */ in ipa_endpoint_data_valid_one()
321 /* Starting with IPA v4.5 sequencer replication is obsolete */ in ipa_endpoint_data_valid_one()
322 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
323 if (data->endpoint.config.tx.seq_rep_type) { in ipa_endpoint_data_valid_one()
324 			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n", in ipa_endpoint_data_valid_one()
325 data->endpoint_id); in ipa_endpoint_data_valid_one()
330 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
331 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
335 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
344 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
348 /* ...and has to be an RX endpoint... */ in ipa_endpoint_data_valid_one()
349 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
352 data->endpoint_id); in ipa_endpoint_data_valid_one()
356 /* ...and if it's to be an AP endpoint... */ in ipa_endpoint_data_valid_one()
357 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
359 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
362 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
368 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
369 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
373 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
381 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
390 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count, in ipa_endpoint_max() argument
394 struct device *dev = ipa->dev; in ipa_endpoint_max()
414 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_max()
418 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_max()
424 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_max()
426 max = max_t(u32, max, dp->endpoint_id); in ipa_endpoint_max()
432 /* Allocate a transaction to use on a non-command endpoint */
436 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
437 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
440 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
446 * Note that suspend is not supported starting with IPA v4.0, and
447 * delay mode should not be used starting with IPA v4.2.
452 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
460 if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
461 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
463 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
465 reg = ipa_reg(ipa, ENDP_INIT_CTRL); in ipa_endpoint_init_ctrl()
466 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
467 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
469 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; in ipa_endpoint_init_ctrl()
477 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
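
The register update above is a plain read-modify-write of one bit: read the endpoint's CTRL register, set or clear the DELAY or SUSPEND field, and write the value back. Below is a minimal model of that pattern using an ordinary variable in place of the memory-mapped register; the names are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_ctrl_reg;		/* stands in for the mapped register */

static void ctrl_bit_update(uint32_t bit, bool enable)
{
	uint32_t val = fake_ctrl_reg;	/* ioread32() in the driver */

	if (enable)
		val |= bit;
	else
		val &= ~bit;

	fake_ctrl_reg = val;		/* iowrite32() in the driver */
}

int main(void)
{
	ctrl_bit_update(1u << 0, true);		/* e.g. set the delay bit */
	printf("control register is now %#x\n", fake_ctrl_reg);

	return 0;
}
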
487 /* Delay mode should not be used for IPA v4.2+ */ in ipa_endpoint_program_delay()
488 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
489 WARN_ON(!endpoint->toward_ipa); in ipa_endpoint_program_delay()
496 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_aggr_active()
497 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
502 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_aggr_active()
504 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); in ipa_endpoint_aggr_active()
505 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_aggr_active()
512 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_force_close()
514 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
518 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_force_close()
520 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); in ipa_endpoint_force_close()
521 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_force_close()
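
The unit and mask used by the two functions above come from a simple split of the endpoint ID: the aggregation-state and force-close registers each cover 32 endpoints, so the ID selects a register unit (id / 32) and a bit within it (id % 32). That split is an assumption taken from how recent versions of this driver compute the values; a quick stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int endpoint_id = 37;			/* example ID beyond 31 (IPA v5.0+) */
	unsigned int unit = endpoint_id / 32;		/* which 32-endpoint register */
	unsigned int mask = 1u << (endpoint_id % 32);	/* bit within that register */

	printf("endpoint %u -> unit %u, mask %#010x\n", endpoint_id, unit, mask);

	return 0;	/* endpoint 37 -> unit 1, mask 0x00000020 */
}
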
525 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
526 * @endpoint: Endpoint on which to emulate a suspend
528 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
529 * with an open aggregation frame. This is to work around a hardware
530 * issue in IPA version 3.5.1 where the suspend interrupt will not be
535 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
537 if (!endpoint->config.aggregation) in ipa_endpoint_suspend_aggr()
540 /* Nothing to do if the endpoint doesn't have aggregation open */ in ipa_endpoint_suspend_aggr()
547 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
556 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
557 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
559 WARN_ON(endpoint->toward_ipa); in ipa_endpoint_program_suspend()
564 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
573 /* Put all modem RX endpoints into suspend mode, and stop transmission
574 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
575 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
578 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
582 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_pause_all()
583 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_pause_all()
585 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
588 if (!endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
590 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
593 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
594 endpoint->channel_id, in ipa_endpoint_modem_pause_all()
599 /* Reset all modem endpoints to use the default exception endpoint */
600 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
606 /* We need one command per modem TX endpoint, plus the commands in ipa_endpoint_modem_exception_reset_all()
609 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
610 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
612 dev_err(ipa->dev, in ipa_endpoint_modem_exception_reset_all()
613 "no transaction to reset modem exception endpoints\n"); in ipa_endpoint_modem_exception_reset_all()
614 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
617 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_modem_exception_reset_all()
622 /* We only reset modem TX endpoints */ in ipa_endpoint_modem_exception_reset_all()
623 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
624 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
627 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_modem_exception_reset_all()
641 ipa_cmd_pipeline_clear_wait(ipa); in ipa_endpoint_modem_exception_reset_all()
648 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_cfg()
649 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg() local
654 reg = ipa_reg(ipa, ENDP_INIT_CFG); in ipa_endpoint_init_cfg()
656 if (endpoint->config.checksum) { in ipa_endpoint_init_cfg()
657 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
659 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
662 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
680 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
685 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_nat()
686 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat() local
690 if (!endpoint->toward_ipa) in ipa_endpoint_init_nat()
693 reg = ipa_reg(ipa, ENDP_INIT_NAT); in ipa_endpoint_init_nat()
696 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
705 if (!endpoint->config.checksum) in ipa_qmap_header_size()
709 /* Checksum header inserted for AP TX endpoints only */ in ipa_qmap_header_size()
710 if (endpoint->toward_ipa) in ipa_qmap_header_size()
727 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_header_size_encode()
734 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
749 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_metadata_offset_encode()
756 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
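
Both encode helpers above rely on the same trick: because the original field is n bits wide, its maximum value 2^n - 1 doubles as a mask, so a wider value keeps its low n bits in the original field and its remaining high bits go into the separate MSB field added in IPA v4.5. A small illustration; the 6-bit width is an assumption for the example, not the real register layout.

#include <stdio.h>

int main(void)
{
	unsigned int header_size = 0x45;		/* needs more than 6 bits */
	unsigned int field_max = 0x3f;			/* 2^6 - 1, usable as a mask */
	unsigned int low = header_size & field_max;	/* original field */
	unsigned int msb = header_size >> 6;		/* v4.5+ MSB field */

	printf("HDR_LEN %#x, HDR_LEN_MSB %#x\n", low, msb);	/* 0x5, 0x1 */

	return 0;
}
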
765 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
769 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
770 * packet size field, and we have the IPA hardware populate both for each
772 * to use big endian format.
777 * The mux_id comes from a 4-byte metadata value supplied with each packet
778 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
779 * value that we want, in its low-order byte. A bitmask defined in the
780 * endpoint's METADATA_MASK register defines which byte within the modem
787 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr()
788 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr() local
792 reg = ipa_reg(ipa, ENDP_INIT_HDR); in ipa_endpoint_init_hdr()
793 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr()
794 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
800 /* Define how to fill fields in a received QMAP header */ in ipa_endpoint_init_hdr()
801 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
804 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
808 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
810 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
817 /* For QMAP TX, metadata offset is 0 (modem assumes this) */ in ipa_endpoint_init_hdr()
826 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
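
As the ipa_endpoint_init_hdr() comment above explains, a received QMAP header carries a one-byte mux_id and a two-byte big-endian length, and on the modem RX path the mux_id arrives in the low-order byte of a 32-bit metadata word selected by a byte mask. A host-side sketch of those two facts; the struct layout and mask value are illustrative, not taken from the driver headers.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct qmap_hdr {
	uint8_t flags;		/* pad length, command/data flag, etc. */
	uint8_t mux_id;
	uint16_t pkt_len;	/* big endian on the wire */
};

int main(void)
{
	uint32_t metadata = 0x00000003;	/* modem-supplied; mux_id in low byte */
	uint32_t mask = 0x000000ff;	/* selects the mux_id byte */
	struct qmap_hdr hdr = {
		.mux_id = metadata & mask,
		.pkt_len = htons(1500),
	};

	printf("mux_id %u, packet length %u\n", hdr.mux_id, ntohs(hdr.pkt_len));

	return 0;
}
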
831 u32 pad_align = endpoint->config.rx.pad_align; in ipa_endpoint_init_hdr_ext()
832 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_ext()
833 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext() local
837 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); in ipa_endpoint_init_hdr_ext()
838 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr_ext()
849 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
858 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
861 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
864 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
866 if (endpoint->config.qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
878 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
883 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
884 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask() local
889 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
892 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); in ipa_endpoint_init_hdr_metadata_mask()
896 if (endpoint->config.qmap) in ipa_endpoint_init_hdr_metadata_mask()
899 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
904 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode() local
909 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
912 reg = ipa_reg(ipa, ENDP_INIT_MODE); in ipa_endpoint_init_mode()
913 if (endpoint->config.dma_mode) { in ipa_endpoint_init_mode()
914 enum ipa_endpoint_name name = endpoint->config.dma_endpoint; in ipa_endpoint_init_mode()
915 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
924 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_mode()
925 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
928 /* For IPA v4.5+, times are expressed using Qtime. A time is represented
930 * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
934 * available to the AP; a third is available starting with IPA v5.0.
936 * represents the time period provided, and returns the tick count to
937 * use to represent that time.
940 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select) in ipa_qtime_val() argument
956 if (ipa->version >= IPA_VERSION_5_0) { in ipa_qtime_val()
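
The comment above describes a Qtime value as a tick count plus a pulse-generator selection, where the finest granularity whose tick count still fits the field is chosen. Below is a stand-alone sketch of that selection for the two AP pulse generators (100 microseconds and 1 millisecond); the round-up division and the field width are illustrative assumptions, not the driver's exact arithmetic.

#include <stdio.h>

/* Pick a pulse generator (0 = 100 us, 1 = 1 ms) and a tick count that
 * fits within "max", the largest value the register field can hold.
 */
static unsigned int qtime_ticks(unsigned int microseconds, unsigned int max,
				unsigned int *select)
{
	unsigned int which = 0;
	unsigned int ticks;

	ticks = (microseconds + 99) / 100;	/* 100 microsecond granularity */
	if (ticks <= max)
		goto out;

	which = 1;
	ticks = (microseconds + 999) / 1000;	/* millisecond granularity */
out:
	*select = which;

	return ticks;
}

int main(void)
{
	unsigned int select;
	unsigned int ticks;

	ticks = qtime_ticks(3000, 31, &select);		/* fits at 100 us */
	printf("3 ms -> select %u, %u ticks\n", select, ticks);

	ticks = qtime_ticks(5000, 31, &select);		/* needs 1 ms */
	printf("5 ms -> select %u, %u ticks\n", select, ticks);

	return 0;
}
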
968 /* Encode the aggregation timer limit (microseconds) based on IPA version */
969 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg, in aggr_time_limit_encode() argument
976 return 0; /* Nothing to compute if time limit is 0 */ in aggr_time_limit_encode()
979 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
982 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in aggr_time_limit_encode()
998 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_aggr()
999 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr() local
1003 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_init_aggr()
1004 if (endpoint->config.aggregation) { in ipa_endpoint_init_aggr()
1005 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
1010 rx_config = &endpoint->config.rx; in ipa_endpoint_init_aggr()
1014 buffer_size = rx_config->buffer_size; in ipa_endpoint_init_aggr()
1015 limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_init_aggr()
1016 rx_config->aggr_hard_limit); in ipa_endpoint_init_aggr()
1019 limit = rx_config->aggr_time_limit; in ipa_endpoint_init_aggr()
1020 val |= aggr_time_limit_encode(ipa, reg, limit); in ipa_endpoint_init_aggr()
1024 if (rx_config->aggr_close_eof) in ipa_endpoint_init_aggr()
1032 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ in ipa_endpoint_init_aggr()
1038 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
1041 /* The head-of-line blocking timer is defined as a tick count. For
1042 * IPA version 4.5 the tick count is based on the Qtimer, which is
1043 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
1044 * each tick represents 128 cycles of the IPA core clock.
1047 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
1049 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg, in hol_block_timer_encode() argument
1060 return 0; /* Nothing to compute if timer period is 0 */ in hol_block_timer_encode()
1062 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
1067 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in hol_block_timer_encode()
1073 /* Use 64 bit arithmetic to avoid overflow */ in hol_block_timer_encode()
1074 rate = ipa_core_clock_rate(ipa); in hol_block_timer_encode()
1077 /* We still need the result to fit into the field */ in hol_block_timer_encode()
1080 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
1081 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
1084 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
1085 * scale fields within the 32-bit timer register, where: in hol_block_timer_encode()
1094 scale = high > width ? high - width : 0; in hol_block_timer_encode()
1096 /* If we're scaling, round up to get a closer result */ in hol_block_timer_encode()
1097 ticks += 1 << (scale - 1); in hol_block_timer_encode()
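
For IPA v4.2 the comment above says the tick count is stored as base << scale. Here is a minimal model of that split, choosing the smallest scale for which the base fits its field; the 5-bit base width is assumed for the example, and the driver additionally rounds as shown on the line above.

#include <stdio.h>

#define BASE_MAX	0x1fU	/* assumed 5-bit base field */

int main(void)
{
	unsigned int ticks = 1000000;	/* example tick count */
	unsigned int scale = 0;
	unsigned int base;

	while ((ticks >> scale) > BASE_MAX)
		scale++;
	base = ticks >> scale;		/* hardware counts base << scale */

	printf("ticks %u -> base %u, scale %u (counts %u)\n",
	       ticks, base, scale, base << scale);

	return 0;
}
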
1113 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
1114 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
1119 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); in ipa_endpoint_init_hol_block_timer()
1120 val = hol_block_timer_encode(ipa, reg, microseconds); in ipa_endpoint_init_hol_block_timer()
1122 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
1128 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_en()
1129 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en() local
1134 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); in ipa_endpoint_init_hol_block_en()
1138 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1140 /* When enabling, the register must be written twice for IPA v4.5+ */ in ipa_endpoint_init_hol_block_en()
1141 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
1142 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1158 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
1162 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_hol_block_clear_all()
1163 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_hol_block_clear_all()
1165 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
1175 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_deaggr()
1176 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr() local
1180 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
1183 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); in ipa_endpoint_init_deaggr()
1189 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1194 u32 resource_group = endpoint->config.resource_group; in ipa_endpoint_init_rsrc_grp()
1195 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_rsrc_grp()
1196 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp() local
1200 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); in ipa_endpoint_init_rsrc_grp()
1203 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1208 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_seq()
1209 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq() local
1213 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
1216 reg = ipa_reg(ipa, ENDP_INIT_SEQ); in ipa_endpoint_init_seq()
1218 /* Low-order byte configures primary packet processing */ in ipa_endpoint_init_seq()
1219 val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type); in ipa_endpoint_init_seq()
1222 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1224 endpoint->config.tx.seq_rep_type); in ipa_endpoint_init_seq()
1226 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1230 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1232 * @skb: Socket buffer to send
1242 /* Make sure source endpoint's TLV FIFO has enough entries to in ipa_endpoint_skb_tx()
1246 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
1247 if (nr_frags > endpoint->skb_frag_max) { in ipa_endpoint_skb_tx()
1249 return -E2BIG; in ipa_endpoint_skb_tx()
1255 return -EBUSY; in ipa_endpoint_skb_tx()
1260 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
1269 return -ENOMEM; in ipa_endpoint_skb_tx()
1274 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
1275 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
1279 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_status()
1280 if (endpoint->config.status_enable) { in ipa_endpoint_status()
1282 if (endpoint->toward_ipa) { in ipa_endpoint_status()
1286 name = endpoint->config.tx.status_endpoint; in ipa_endpoint_status()
1287 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1291 /* STATUS_LOCATION is 0, meaning IPA packet status in ipa_endpoint_status()
1292 * precedes the packet (not present for IPA v4.5+) in ipa_endpoint_status()
1297 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1309 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_replenish_one()
1312 return -ENOMEM; in ipa_endpoint_replenish_one()
1314 /* Offset the buffer to make space for skb headroom */ in ipa_endpoint_replenish_one()
1316 len = buffer_size - offset; in ipa_endpoint_replenish_one()
1322 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
1328 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1329 * @endpoint: Endpoint to be replenished
1331 * The IPA hardware can hold a fixed number of receive buffers for an RX
1333 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1334 * more receive buffers can be supplied to the hardware. Replenishing for
1335 * an endpoint can be disabled, in which case buffers are not queued to
1342 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1346 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1357 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH); in ipa_endpoint_replenish()
1361 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1367 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1369 /* Whenever a receive buffer transaction completes we'll try to in ipa_endpoint_replenish()
1370 * replenish again. It's unlikely, but if we fail to supply even in ipa_endpoint_replenish()
1372 * If the hardware has no receive buffers queued, schedule work to in ipa_endpoint_replenish()
1375 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1376 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
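
Replenish transactions above ring the channel doorbell only on every IPA_REPLENISH_BATCH-th buffer queued, which is what the modulo test on replenish_count implements. A tiny stand-alone check of that batching rule:

#include <stdbool.h>
#include <stdio.h>

#define IPA_REPLENISH_BATCH	16	/* as defined near the top of this file */

int main(void)
{
	unsigned int replenish_count = 0;

	for (int i = 0; i < 40; i++) {
		bool doorbell = !(++replenish_count % IPA_REPLENISH_BATCH);

		if (doorbell)
			printf("doorbell rung after buffer %u\n",
			       replenish_count);	/* 16 and 32 */
	}

	return 0;
}
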
1382 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_enable()
1385 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1391 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_disable()
1409 if (!endpoint->netdev) in ipa_endpoint_skb_copy()
1416 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
1417 skb->truesize += extra; in ipa_endpoint_skb_copy()
1420 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1426 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_skb_build()
1429 /* Nothing to do if there's no netdev */ in ipa_endpoint_skb_build()
1430 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1433 WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD)); in ipa_endpoint_skb_build()
1442 /* Receive the buffer (or record drop if unable to build it) */ in ipa_endpoint_skb_build()
1443 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1448 /* The format of an IPA packet status structure is the same for several
1467 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_skip() local
1471 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE); in ipa_endpoint_status_skip()
1475 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT); in ipa_endpoint_status_skip()
1476 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1487 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag_valid() local
1490 status_mask = ipa_status_extract(ipa, data, STATUS_MASK); in ipa_endpoint_status_tag_valid()
1494 /* The status contains a valid tag. We know the packet was sent to in ipa_endpoint_status_tag_valid()
1496 * If the packet came from the AP->command TX endpoint we know in ipa_endpoint_status_tag_valid()
1499 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT); in ipa_endpoint_status_tag_valid()
1500 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag_valid()
1501 if (endpoint_id == command_endpoint->endpoint_id) { in ipa_endpoint_status_tag_valid()
1502 complete(&ipa->completion); in ipa_endpoint_status_tag_valid()
1504 dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n", in ipa_endpoint_status_tag_valid()
1516 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_drop() local
1524 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION); in ipa_endpoint_status_drop()
1528 /* Drop the packet if it fails to match a routing rule; otherwise no */ in ipa_endpoint_status_drop()
1529 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX); in ipa_endpoint_status_drop()
1537 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_status_parse()
1539 u32 unused = buffer_size - total_len; in ipa_endpoint_status_parse()
1540 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_parse() local
1541 struct device *dev = ipa->dev; in ipa_endpoint_status_parse()
1557 length = ipa_status_extract(ipa, data, STATUS_LENGTH); in ipa_endpoint_status_parse()
1560 resid -= IPA_STATUS_SIZE; in ipa_endpoint_status_parse()
1565 * including the status. If the hardware is configured to in ipa_endpoint_status_parse()
1566 * pad packet data to an aligned boundary, account for that. in ipa_endpoint_status_parse()
1570 align = endpoint->config.rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1572 if (endpoint->config.checksum) in ipa_endpoint_status_parse()
1593 resid -= len; in ipa_endpoint_status_parse()
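
ipa_endpoint_status_parse() above walks a receive buffer holding a sequence of fixed-size status records, each possibly followed by the packet it describes, with packet data padded to the endpoint's alignment. Below is a simplified model of that walk; the record size, alignment, and packet lengths are invented for the example.

#include <stdio.h>

#define STATUS_SIZE	8U	/* assumed size of one status record */

int main(void)
{
	unsigned int pkt_len[] = { 64, 0, 100 };	/* 0 = packet skipped */
	unsigned int align = 4;				/* assumed rx.pad_align */
	unsigned int resid = 256;			/* bytes left in the buffer */
	unsigned int offset = 0;

	for (unsigned int i = 0; i < 3 && resid >= STATUS_SIZE; i++) {
		unsigned int len;

		offset += STATUS_SIZE;		/* consume the status record */
		resid -= STATUS_SIZE;

		/* Packet data, if any, is padded to the alignment boundary */
		len = (pkt_len[i] + align - 1) & ~(align - 1);
		printf("record %u: %3u packet bytes at offset %u\n",
		       i, len, offset);

		offset += len;
		resid -= len;
	}

	return 0;
}
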
1602 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1605 if (trans->cancelled) in ipa_endpoint_trans_complete()
1609 page = trans->data; in ipa_endpoint_trans_complete()
1610 if (endpoint->config.status_enable) in ipa_endpoint_trans_complete()
1611 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_trans_complete()
1612 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_trans_complete()
1613 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_trans_complete()
1621 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1622 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1624 /* Nothing to do for command transactions */ in ipa_endpoint_trans_release()
1625 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1626 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1632 struct page *page = trans->data; in ipa_endpoint_trans_release()
1639 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1644 reg = ipa_reg(ipa, ROUTE); in ipa_endpoint_default_route_set()
1652 iowrite32(val, ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_default_route_set()
1655 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1657 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1661 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1662 * @endpoint: Endpoint to be reset
1666 * taken to ensure the IPA pipeline is properly cleared.
1672 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1673 struct device *dev = ipa->dev; in ipa_endpoint_reset_rx_aggr()
1674 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1684 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1688 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1697 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1700 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1706 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1710 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1714 /* Wait for aggregation to be closed on the channel */ in ipa_endpoint_reset_rx_aggr()
1720 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1725 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1727 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1729 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1733 /* Finally, reset and reconfigure the channel again (re-enabling in ipa_endpoint_reset_rx_aggr()
1734 * the doorbell engine if appropriate). Sleep for 1 millisecond to in ipa_endpoint_reset_rx_aggr()
1738 gsi_channel_reset(gsi, endpoint->channel_id, true); in ipa_endpoint_reset_rx_aggr()
1745 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1758 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1759 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1763 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1764 * is active, we need to handle things specially to recover. in ipa_endpoint_reset()
1765 * All other cases just need to reset the underlying GSI channel. in ipa_endpoint_reset()
1767 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1768 endpoint->config.aggregation; in ipa_endpoint_reset()
1772 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1775 dev_err(ipa->dev, in ipa_endpoint_reset()
1777 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1782 if (endpoint->toward_ipa) { in ipa_endpoint_program()
1783 /* Newer versions of IPA use GSI channel flow control in ipa_endpoint_program()
1784 * instead of endpoint DELAY mode to prevent sending data. in ipa_endpoint_program()
1785 * Flow control is disabled for newly-allocated channels, in ipa_endpoint_program()
1787 * for AP TX channels. in ipa_endpoint_program()
1789 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1792 /* Ensure suspend mode is off on all AP RX endpoints */ in ipa_endpoint_program()
1802 if (!endpoint->toward_ipa) { in ipa_endpoint_program()
1803 if (endpoint->config.rx.holb_drop) in ipa_endpoint_program()
1816 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_enable_one()
1817 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1818 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1821 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1823 dev_err(ipa->dev, in ipa_endpoint_enable_one()
1825 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1826 endpoint->channel_id, endpoint_id); in ipa_endpoint_enable_one()
1830 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1831 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id); in ipa_endpoint_enable_one()
1835 __set_bit(endpoint_id, ipa->enabled); in ipa_endpoint_enable_one()
1842 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_disable_one()
1843 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1844 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1847 if (!test_bit(endpoint_id, ipa->enabled)) in ipa_endpoint_disable_one()
1850 __clear_bit(endpoint_id, endpoint->ipa->enabled); in ipa_endpoint_disable_one()
1852 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1854 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id); in ipa_endpoint_disable_one()
1857 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1858 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1860 dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n", in ipa_endpoint_disable_one()
1866 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_suspend_one()
1867 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1870 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_suspend_one()
1873 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1878 ret = gsi_channel_suspend(gsi, endpoint->channel_id); in ipa_endpoint_suspend_one()
1881 endpoint->channel_id); in ipa_endpoint_suspend_one()
1886 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_resume_one()
1887 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1890 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_resume_one()
1893 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1896 ret = gsi_channel_resume(gsi, endpoint->channel_id); in ipa_endpoint_resume_one()
1899 endpoint->channel_id); in ipa_endpoint_resume_one()
1900 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1904 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1906 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1909 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1910 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1912 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1913 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1916 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1918 if (!ipa->setup_complete) in ipa_endpoint_resume()
1921 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1922 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1924 if (ipa->modem_netdev) in ipa_endpoint_resume()
1925 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1930 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1931 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1933 /* Only AP endpoints get set up */ in ipa_endpoint_setup_one()
1934 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1937 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; in ipa_endpoint_setup_one()
1938 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1942 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1943 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1944 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1950 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_setup_one()
1955 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_teardown_one()
1957 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1958 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1963 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1967 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_setup()
1968 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1971 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1975 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count) in ipa_endpoint_teardown()
1976 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1979 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1981 ipa->available_count = 0; in ipa_endpoint_deconfig()
1982 bitmap_free(ipa->available); in ipa_endpoint_deconfig()
1983 ipa->available = NULL; in ipa_endpoint_deconfig()
1986 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1988 struct device *dev = ipa->dev; in ipa_endpoint_config()
1998 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported. in ipa_endpoint_config()
2005 * just set the available mask to support any endpoint, and in ipa_endpoint_config()
2008 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
2009 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL); in ipa_endpoint_config()
2010 if (!ipa->available) in ipa_endpoint_config()
2011 return -ENOMEM; in ipa_endpoint_config()
2012 ipa->available_count = IPA_ENDPOINT_MAX; in ipa_endpoint_config()
2014 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX); in ipa_endpoint_config()
2022 reg = ipa_reg(ipa, FLAVOR_0); in ipa_endpoint_config()
2023 val = ioread32(ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_config()
2025 /* Our RX is an IPA producer; our TX is an IPA consumer. */ in ipa_endpoint_config()
2034 return -EINVAL; in ipa_endpoint_config()
2037 /* Until IPA v5.0, the max endpoint ID was 32 */ in ipa_endpoint_config()
2038 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1; in ipa_endpoint_config()
2042 return -EINVAL; in ipa_endpoint_config()
2046 ipa->available = bitmap_zalloc(limit, GFP_KERNEL); in ipa_endpoint_config()
2047 if (!ipa->available) in ipa_endpoint_config()
2048 return -ENOMEM; in ipa_endpoint_config()
2049 ipa->available_count = limit; in ipa_endpoint_config()
2052 bitmap_set(ipa->available, 0, tx_count); in ipa_endpoint_config()
2053 bitmap_set(ipa->available, rx_base, rx_count); in ipa_endpoint_config()
2055 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_config()
2060 endpoint_id, limit - 1); in ipa_endpoint_config()
2064 if (!test_bit(endpoint_id, ipa->available)) { in ipa_endpoint_config()
2071 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
2072 if (endpoint->toward_ipa) { in ipa_endpoint_config()
2086 ipa_endpoint_deconfig(ipa); in ipa_endpoint_config()
2088 return -EINVAL; in ipa_endpoint_config()
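
In ipa_endpoint_config() above, the FLAVOR_0 register supplies the TX endpoint count plus the RX endpoint count and base, and those become two ranges marked in the "available" bitmap: IDs 0..tx_count-1 and rx_base..rx_base+rx_count-1. A sketch of that marking with made-up counts:

#include <stdio.h>

int main(void)
{
	unsigned int tx_count = 11, rx_count = 12, rx_base = 13;	/* examples */
	unsigned long long available = 0;

	for (unsigned int i = 0; i < tx_count; i++)
		available |= 1ULL << i;			/* TX endpoints: 0..10 */
	for (unsigned int i = 0; i < rx_count; i++)
		available |= 1ULL << (rx_base + i);	/* RX endpoints: 13..24 */

	printf("available endpoint mask: %#llx\n", available);

	return 0;	/* prints 0x1ffe7ff */
}
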
2091 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
2096 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
2098 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
2099 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
2100 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
2102 endpoint->ipa = ipa; in ipa_endpoint_init_one()
2103 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
2104 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
2105 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
2106 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
2107 endpoint->config = data->endpoint.config; in ipa_endpoint_init_one()
2109 __set_bit(endpoint->endpoint_id, ipa->defined); in ipa_endpoint_init_one()
2114 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined); in ipa_endpoint_exit_one()
2119 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
2123 ipa->filtered = 0; in ipa_endpoint_exit()
2125 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_exit()
2126 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
2128 bitmap_free(ipa->enabled); in ipa_endpoint_exit()
2129 ipa->enabled = NULL; in ipa_endpoint_exit()
2130 bitmap_free(ipa->set_up); in ipa_endpoint_exit()
2131 ipa->set_up = NULL; in ipa_endpoint_exit()
2132 bitmap_free(ipa->defined); in ipa_endpoint_exit()
2133 ipa->defined = NULL; in ipa_endpoint_exit()
2135 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
2136 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
2140 int ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
2149 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; in ipa_endpoint_init()
2150 if (!ipa->endpoint_count) in ipa_endpoint_init()
2151 return -EINVAL; in ipa_endpoint_init()
2154 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2155 if (!ipa->defined) in ipa_endpoint_init()
2156 return -ENOMEM; in ipa_endpoint_init()
2158 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2159 if (!ipa->set_up) in ipa_endpoint_init()
2162 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2163 if (!ipa->enabled) in ipa_endpoint_init()
2171 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
2173 if (data->endpoint.filter_support) in ipa_endpoint_init()
2174 filtered |= BIT(data->endpoint_id); in ipa_endpoint_init()
2175 if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) in ipa_endpoint_init()
2176 ipa->modem_tx_count++; in ipa_endpoint_init()
2180 if (!ipa_filtered_valid(ipa, filtered)) { in ipa_endpoint_init()
2181 ipa_endpoint_exit(ipa); in ipa_endpoint_init()
2183 return -EINVAL; in ipa_endpoint_init()
2186 ipa->filtered = filtered; in ipa_endpoint_init()
2191 bitmap_free(ipa->set_up); in ipa_endpoint_init()
2192 ipa->set_up = NULL; in ipa_endpoint_init()
2194 bitmap_free(ipa->defined); in ipa_endpoint_init()
2195 ipa->defined = NULL; in ipa_endpoint_init()
2197 return -ENOMEM; in ipa_endpoint_init()