Lines Matching +full:no +full:- +full:tick +full:- +full:in +full:- +full:suspend
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2024 Linaro Ltd.
10 #include <linux/dma-direction.h>
30 #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
33 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
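Expanded, that overhead is simply the skb headroom plus the shared-info footprint; a hedged reading, assuming the usual definitions of SKB_MAX_ORDER() and SKB_WITH_OVERHEAD():

/* Assuming SKB_MAX_ORDER(X, 0) == SKB_WITH_OVERHEAD(PAGE_SIZE - (X)):
 *
 *   IPA_RX_BUFFER_OVERHEAD == NET_SKB_PAD +
 *                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 */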
35 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
40 /** enum ipa_status_opcode - IPA status opcode field hardware values */
51 /** enum ipa_status_exception - IPA status exception field hardware values */
53 /* 0 means no exception */
67 /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
88 #define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
92 /* enum ipa_status_field_id - IPA packet status structure field identifiers */
114 STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
115 STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
124 /* Size in bytes of an IPA packet status structure */
131 enum ipa_version version = ipa->version; in ipa_status_extract()
147 /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */ in ipa_status_extract()
148 /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */ in ipa_status_extract()
153 /* Status word 1, bits 29-31 are reserved */ in ipa_status_extract()
166 /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */ in ipa_status_extract()
208 /* Status word 7, bits 16-30 are reserved */ in ipa_status_extract()
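To make the word/bit comments above concrete, here is a minimal user-space sketch of extracting one field from the little-endian status words; the word index and bit range used for the opcode (word 0, bits 0-7) are assumptions for illustration, not taken from the register layout.

#include <stdint.h>
#include <endian.h>

/* Hypothetical helper: return the status opcode from a raw status
 * structure, assuming it occupies the low byte of status word 0.
 */
static uint32_t example_status_opcode(const uint32_t *le_words)
{
        uint32_t word0 = le32toh(le_words[0]);  /* status words are little-endian */

        return word0 & 0xff;                    /* opcode assumed in bits 0-7 */
}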
223 * has been crossed. In that case the limit must leave enough space in ipa_aggr_size_kb()
227 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
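A worked example of the case with no hard aggregation limit, using assumed numbers (an 8 KiB receive buffer, a 1500-byte MTU, and roughly 384 bytes of skb overhead); the real values come from the endpoint configuration data.

/* 8192 - (1500 + 384) = 6308, and 6308 / 1024 = 6: the byte limit is
 * programmed as 6 KB, leaving room for one more full-MTU packet after
 * the limit is crossed.
 */
static unsigned int example_aggr_size_kb(unsigned int rx_buffer_size)
{
        rx_buffer_size -= 1500 + 384;   /* assumed IPA_MTU + overhead */

        return rx_buffer_size / 1024;   /* limit is encoded in kilobytes */
}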
240 struct device *dev = ipa->dev; in ipa_endpoint_data_valid_one()
245 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
252 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
255 data->endpoint_id); in ipa_endpoint_data_valid_one()
259 /* Nothing more to check for non-AP RX */ in ipa_endpoint_data_valid_one()
260 if (data->ee_id != GSI_EE_AP) in ipa_endpoint_data_valid_one()
263 rx_config = &data->endpoint.config.rx; in ipa_endpoint_data_valid_one()
266 buffer_size = rx_config->buffer_size; in ipa_endpoint_data_valid_one()
270 data->endpoint_id, buffer_size, limit); in ipa_endpoint_data_valid_one()
274 if (!data->endpoint.config.aggregation) { in ipa_endpoint_data_valid_one()
277 /* No aggregation; check for bogus aggregation data */ in ipa_endpoint_data_valid_one()
278 if (rx_config->aggr_time_limit) { in ipa_endpoint_data_valid_one()
280 "time limit with no aggregation for RX endpoint %u\n", in ipa_endpoint_data_valid_one()
281 data->endpoint_id); in ipa_endpoint_data_valid_one()
285 if (rx_config->aggr_hard_limit) { in ipa_endpoint_data_valid_one()
286 dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n", in ipa_endpoint_data_valid_one()
287 data->endpoint_id); in ipa_endpoint_data_valid_one()
291 if (rx_config->aggr_close_eof) { in ipa_endpoint_data_valid_one()
292 dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n", in ipa_endpoint_data_valid_one()
293 data->endpoint_id); in ipa_endpoint_data_valid_one()
302 * check ensures the receive buffer size doesn't result in a in ipa_endpoint_data_valid_one()
303 * limit that exceeds what's representable in the aggregation in ipa_endpoint_data_valid_one()
306 aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_data_valid_one()
307 rx_config->aggr_hard_limit); in ipa_endpoint_data_valid_one()
313 data->endpoint_id, aggr_size, limit); in ipa_endpoint_data_valid_one()
322 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
323 if (data->endpoint.config.tx.seq_rep_type) { in ipa_endpoint_data_valid_one()
324 dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n", in ipa_endpoint_data_valid_one()
325 data->endpoint_id); in ipa_endpoint_data_valid_one()
330 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
331 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
335 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
344 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
349 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
352 data->endpoint_id); in ipa_endpoint_data_valid_one()
357 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
359 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
362 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
368 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
369 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
373 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
381 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
394 struct device *dev = ipa->dev; in ipa_endpoint_max()
414 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_max()
418 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_max()
426 max = max_t(u32, max, dp->endpoint_id); in ipa_endpoint_max()
432 /* Allocate a transaction to use on a non-command endpoint */
436 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
437 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
440 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
445 /* suspend_delay represents suspend for RX, delay for TX endpoints.
446 * Note that suspend is not supported starting with IPA v4.0, and
452 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl()
460 if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
461 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
463 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
466 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
467 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
469 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; in ipa_endpoint_init_ctrl()
474 /* Don't bother if it's already in the requested state */ in ipa_endpoint_init_ctrl()
477 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
488 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
489 WARN_ON(!endpoint->toward_ipa); in ipa_endpoint_program_delay()
496 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_aggr_active()
497 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active()
502 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_aggr_active()
505 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_aggr_active()
512 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_force_close()
514 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close()
518 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_force_close()
521 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_force_close()
525 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
526 * @endpoint: Endpoint on which to emulate a suspend
528 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
530 * issue in IPA version 3.5.1 where the suspend interrupt will not be
535 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr()
537 if (!endpoint->config.aggregation) in ipa_endpoint_suspend_aggr()
547 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
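Putting the helpers above together, the v3.5.1 workaround amounts to the flow sketched below; this is assembled from the calls visible in this listing, not a verbatim copy of the function.

static void example_suspend_aggr(struct ipa_endpoint *endpoint)
{
        if (!endpoint->config.aggregation)
                return;                 /* endpoint doesn't aggregate */

        if (!ipa_endpoint_aggr_active(endpoint))
                return;                 /* nothing buffered, no interrupt lost */

        /* Force aggregation closed, then simulate the missing interrupt */
        ipa_endpoint_force_close(endpoint);
        ipa_interrupt_simulate_suspend(endpoint->ipa->interrupt);
}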
550 /* Returns previous suspend state (true means suspend was enabled) */
556 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
557 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
559 WARN_ON(endpoint->toward_ipa); in ipa_endpoint_program_suspend()
564 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
573 /* Put all modem RX endpoints into suspend mode, and stop transmission
582 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_pause_all()
583 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_pause_all()
585 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
588 if (!endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
590 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
593 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
594 endpoint->channel_id, in ipa_endpoint_modem_pause_all()
609 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
612 dev_err(ipa->dev, in ipa_endpoint_modem_exception_reset_all()
613 "no transaction to reset modem exception endpoints\n"); in ipa_endpoint_modem_exception_reset_all()
614 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
617 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_modem_exception_reset_all()
623 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
624 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
632 * result all other fields in the register are ignored. in ipa_endpoint_modem_exception_reset_all()
648 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_cfg()
649 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg()
656 if (endpoint->config.checksum) { in ipa_endpoint_init_cfg()
657 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
659 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
662 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
680 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
685 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_nat()
686 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat()
690 if (!endpoint->toward_ipa) in ipa_endpoint_init_nat()
696 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
705 if (!endpoint->config.checksum) in ipa_qmap_header_size()
710 if (endpoint->toward_ipa) in ipa_qmap_header_size()
713 /* Checksum header is used in both directions */ in ipa_qmap_header_size()
727 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_header_size_encode()
734 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
749 /* We know field_max can be used as a mask (2^n - 1) */ in ipa_metadata_offset_encode()
756 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
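Both encode helpers rely on the field maximum being a power of two minus one; a minimal sketch of that split, with the leftover high bits destined for the separate *_MSB field added in IPA v4.5 (the function and parameter names here are illustrative).

#include <stdint.h>

static void example_split_field(uint32_t value, uint32_t field_max,
                                uint32_t *field, uint32_t *field_msb)
{
        /* field_max is 2^n - 1, so it doubles as the low-bits mask */
        *field = value & field_max;

        /* Whatever doesn't fit lands in the most-significant-bits field */
        *field_msb = value >> __builtin_popcount(field_max);
}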
765 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
769 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
771 * received packet. The header is configured (in the HDR_EXT register)
777 * The mux_id comes from a 4-byte metadata value supplied with each packet
779 * value that we want, in its low-order byte. A bitmask defined in the
787 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr()
788 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr()
793 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr()
794 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
800 /* Define how to fill fields in a received QMAP header */ in ipa_endpoint_init_hdr()
801 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
810 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
826 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
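For reference, the 4-byte QMAP header described in the comment above looks roughly like this; the field names are illustrative (the flags byte is an assumption, while the mux_id byte and 2-byte packet length are the parts the comment refers to).

#include <stdint.h>

struct example_qmap_header {
        uint8_t  flags;         /* pad length / command-vs-data indication */
        uint8_t  mux_id;        /* identifies the logical channel */
        uint16_t pkt_len;       /* packet length, big-endian on the wire */
};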
831 u32 pad_align = endpoint->config.rx.pad_align; in ipa_endpoint_init_hdr_ext()
832 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_ext()
833 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext()
838 if (endpoint->config.qmap) { in ipa_endpoint_init_hdr_ext()
843 * The RMNet driver assumes this field is meaningful in in ipa_endpoint_init_hdr_ext()
849 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
858 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
861 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
862 * two of which are defined in the HDR (not HDR_EXT) register. in ipa_endpoint_init_hdr_ext()
864 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
866 if (endpoint->config.qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
871 /* Low bits are in the ENDP_INIT_HDR register */ in ipa_endpoint_init_hdr_ext()
878 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
883 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
884 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask()
889 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
896 if (endpoint->config.qmap) in ipa_endpoint_init_hdr_metadata_mask()
899 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
904 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode()
909 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
913 if (endpoint->config.dma_mode) { in ipa_endpoint_init_mode()
914 enum ipa_endpoint_name name = endpoint->config.dma_endpoint; in ipa_endpoint_init_mode()
915 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
924 offset = reg_n_offset(reg, endpoint->endpoint_id); in ipa_endpoint_init_mode()
925 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
929 * at one of several available granularities, which are configured in
931 * generators are set up with different "tick" periods. A Qtime value
932 * encodes a tick count along with an indication of a pulse generator
933 * (which has a fixed tick period). Two pulse generators are always
936 * represents the time period provided, and returns the tick count to
956 if (ipa->version >= IPA_VERSION_5_0) { in ipa_qtime_val()
979 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
988 /* We program aggregation granularity in ipa_hardware_config() */ in aggr_time_limit_encode()
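A sketch of the selection the Qtime comment above describes, under assumed granularities (100 microseconds and 1 millisecond for the two always-available pulse generators); the real tick periods and field widths come from the hardware configuration.

#include <stdint.h>

/* Return a tick count representing @microseconds and select a pulse
 * generator, preferring the finest granularity whose count fits @max.
 */
static uint32_t example_qtime_val(uint32_t microseconds, uint32_t max,
                                  uint32_t *pulse_gen)
{
        static const uint32_t tick_us[] = { 100, 1000 };        /* assumed */
        uint32_t i;

        for (i = 0; i < 2; i++) {
                uint32_t ticks = microseconds / tick_us[i];

                if (ticks <= max) {
                        *pulse_gen = i;
                        return ticks;
                }
        }

        *pulse_gen = 1;         /* period too long; cap at the maximum */
        return max;
}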
998 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_aggr()
999 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr()
1004 if (endpoint->config.aggregation) { in ipa_endpoint_init_aggr()
1005 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
1010 rx_config = &endpoint->config.rx; in ipa_endpoint_init_aggr()
1014 buffer_size = rx_config->buffer_size; in ipa_endpoint_init_aggr()
1015 limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, in ipa_endpoint_init_aggr()
1016 rx_config->aggr_hard_limit); in ipa_endpoint_init_aggr()
1019 limit = rx_config->aggr_time_limit; in ipa_endpoint_init_aggr()
1024 if (rx_config->aggr_close_eof) in ipa_endpoint_init_aggr()
1038 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
1041 /* The head-of-line blocking timer is defined as a tick count. For
1042 * IPA version 4.5 the tick count is based on the Qtimer, which is
1044 * each tick represents 128 cycles of the IPA core clock.
1062 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
1080 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
1081 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
1084 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
1085 * scale fields within the 32-bit timer register, where: in hol_block_timer_encode()
1088 * large as possible. Find the highest set bit in the tick in hol_block_timer_encode()
1089 * count, and extract the number of bits in the base field in hol_block_timer_encode()
1094 scale = high > width ? high - width : 0; in hol_block_timer_encode()
1097 ticks += 1 << (scale - 1); in hol_block_timer_encode()
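A standalone sketch of the IPA v4.2 base/scale encoding described above, assuming a 5-bit base field (the real width comes from the timer register definition); fls() is open-coded with a compiler builtin so the example compiles outside the kernel.

#include <stdint.h>

static void example_base_scale(uint32_t ticks, uint32_t *base, uint32_t *scale)
{
        const uint32_t width = 5;       /* assumed base-field width */
        uint32_t high = ticks ? 32 - __builtin_clz(ticks) : 0;  /* fls(ticks) */

        *scale = high > width ? high - width : 0;
        if (*scale) {
                /* Round to the nearest representable value ... */
                ticks += 1 << (*scale - 1);
                /* ... bumping the scale if rounding carried into a new bit */
                if ((uint32_t)(32 - __builtin_clz(ticks)) != high)
                        (*scale)++;
        }

        *base = ticks >> *scale;        /* ticks is approximately base << scale */
}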
1113 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
1114 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer()
1122 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
1128 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_en()
1129 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en()
1138 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1141 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
1142 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1145 /* Assumes HOL_BLOCK is in disabled state */
1162 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_hol_block_clear_all()
1163 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_hol_block_clear_all()
1165 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
1175 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_deaggr()
1176 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr()
1180 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
1189 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1194 u32 resource_group = endpoint->config.resource_group; in ipa_endpoint_init_rsrc_grp()
1195 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_rsrc_grp()
1196 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp()
1203 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1208 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_seq()
1209 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq()
1213 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
1218 /* Low-order byte configures primary packet processing */ in ipa_endpoint_init_seq()
1219 val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type); in ipa_endpoint_init_seq()
1222 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1224 endpoint->config.tx.seq_rep_type); in ipa_endpoint_init_seq()
1226 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1230 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1246 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
1247 if (nr_frags > endpoint->skb_frag_max) { in ipa_endpoint_skb_tx()
1249 return -E2BIG; in ipa_endpoint_skb_tx()
1255 return -EBUSY; in ipa_endpoint_skb_tx()
1260 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
1269 return -ENOMEM; in ipa_endpoint_skb_tx()
1274 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
1275 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status()
1280 if (endpoint->config.status_enable) { in ipa_endpoint_status()
1282 if (endpoint->toward_ipa) { in ipa_endpoint_status()
1286 name = endpoint->config.tx.status_endpoint; in ipa_endpoint_status()
1287 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1297 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1309 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_replenish_one()
1312 return -ENOMEM; in ipa_endpoint_replenish_one()
1316 len = buffer_size - offset; in ipa_endpoint_replenish_one()
1322 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
1328 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1332 * endpoint, based on the number of entries in the underlying channel ring
1333 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1335 * an endpoint can be disabled, in which case buffers are not queued to
1342 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1346 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) in ipa_endpoint_replenish()
1357 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH); in ipa_endpoint_replenish()
1361 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1367 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
1372 * If the hardware has no receive buffers queued, schedule work to in ipa_endpoint_replenish()
1375 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1376 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
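The replenish path above is guarded by two flag bits; a minimal sketch of that pattern follows (the flag names match the driver's replenish_flags bits, and the actual buffer queuing is elided).

static void example_replenish(struct ipa_endpoint *endpoint)
{
        if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
                return;                 /* replenishing currently disabled */

        if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
                return;                 /* another caller is already at it */

        /* ... queue receive buffers until the channel ring is full ... */

        clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
}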
1382 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_enable()
1384 /* Start replenishing if hardware currently has no buffers */ in ipa_endpoint_replenish_enable()
1385 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1391 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_disable()
1409 if (!endpoint->netdev) in ipa_endpoint_skb_copy()
1416 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
1417 skb->truesize += extra; in ipa_endpoint_skb_copy()
1420 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1426 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_skb_build()
1429 /* Nothing to do if there's no netdev */ in ipa_endpoint_skb_build()
1430 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1433 WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD)); in ipa_endpoint_skb_build()
1443 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1467 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_skip()
1476 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1487 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag_valid()
1492 return false; /* No valid tag */ in ipa_endpoint_status_tag_valid()
1496 * If the packet came from the AP->command TX endpoint we know in ipa_endpoint_status_tag_valid()
1500 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag_valid()
1501 if (endpoint_id == command_endpoint->endpoint_id) { in ipa_endpoint_status_tag_valid()
1502 complete(&ipa->completion); in ipa_endpoint_status_tag_valid()
1504 dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n", in ipa_endpoint_status_tag_valid()
1516 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_drop()
1528 /* Drop the packet if it fails to match a routing rule; otherwise no */ in ipa_endpoint_status_drop()
1537 u32 buffer_size = endpoint->config.rx.buffer_size; in ipa_endpoint_status_parse()
1539 u32 unused = buffer_size - total_len; in ipa_endpoint_status_parse()
1540 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_parse()
1541 struct device *dev = ipa->dev; in ipa_endpoint_status_parse()
1560 resid -= IPA_STATUS_SIZE; in ipa_endpoint_status_parse()
1570 align = endpoint->config.rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1572 if (endpoint->config.checksum) in ipa_endpoint_status_parse()
1579 /* Client receives only packet data (no status) */ in ipa_endpoint_status_parse()
1582 /* Have the true size reflect the extra unused space in in ipa_endpoint_status_parse()
1584 * proportionately across all aggregated packets in the in ipa_endpoint_status_parse()
1593 resid -= len; in ipa_endpoint_status_parse()
1602 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1605 if (trans->cancelled) in ipa_endpoint_trans_complete()
1609 page = trans->data; in ipa_endpoint_trans_complete()
1610 if (endpoint->config.status_enable) in ipa_endpoint_trans_complete()
1611 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_trans_complete()
1612 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_trans_complete()
1613 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_trans_complete()
1621 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1622 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release()
1625 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1626 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1632 struct page *page = trans->data; in ipa_endpoint_trans_release()
1652 iowrite32(val, ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_default_route_set()
1661 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1672 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr()
1673 struct device *dev = ipa->dev; in ipa_endpoint_reset_rx_aggr()
1674 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1684 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1688 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1696 * disabled. Then poll until we know aggregation is no longer in ipa_endpoint_reset_rx_aggr()
1697 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1700 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1706 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1710 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1720 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1725 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1727 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1729 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1733 /* Finally, reset and reconfigure the channel again (re-enabling in ipa_endpoint_reset_rx_aggr()
1738 gsi_channel_reset(gsi, endpoint->channel_id, true); in ipa_endpoint_reset_rx_aggr()
1745 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1758 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1759 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset()
1767 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1768 endpoint->config.aggregation; in ipa_endpoint_reset()
1772 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1775 dev_err(ipa->dev, in ipa_endpoint_reset()
1777 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1782 if (endpoint->toward_ipa) { in ipa_endpoint_program()
1785 * Flow control is disabled for newly-allocated channels, in ipa_endpoint_program()
1789 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1792 /* Ensure suspend mode is off on all AP RX endpoints */ in ipa_endpoint_program()
1802 if (!endpoint->toward_ipa) { in ipa_endpoint_program()
1803 if (endpoint->config.rx.holb_drop) in ipa_endpoint_program()
1816 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_enable_one()
1817 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one()
1818 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1821 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1823 dev_err(ipa->dev, in ipa_endpoint_enable_one()
1825 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1826 endpoint->channel_id, endpoint_id); in ipa_endpoint_enable_one()
1830 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1831 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id); in ipa_endpoint_enable_one()
1835 __set_bit(endpoint_id, ipa->enabled); in ipa_endpoint_enable_one()
1842 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_disable_one()
1843 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one()
1844 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1847 if (!test_bit(endpoint_id, ipa->enabled)) in ipa_endpoint_disable_one()
1850 __clear_bit(endpoint_id, endpoint->ipa->enabled); in ipa_endpoint_disable_one()
1852 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1854 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id); in ipa_endpoint_disable_one()
1857 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1858 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1860 dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n", in ipa_endpoint_disable_one()
1866 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_suspend_one()
1867 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1870 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_suspend_one()
1873 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1878 ret = gsi_channel_suspend(gsi, endpoint->channel_id); in ipa_endpoint_suspend_one()
1881 endpoint->channel_id); in ipa_endpoint_suspend_one()
1886 struct device *dev = endpoint->ipa->dev; in ipa_endpoint_resume_one()
1887 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1890 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_resume_one()
1893 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1896 ret = gsi_channel_resume(gsi, endpoint->channel_id); in ipa_endpoint_resume_one()
1899 endpoint->channel_id); in ipa_endpoint_resume_one()
1900 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1906 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1909 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1910 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1912 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1913 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1918 if (!ipa->setup_complete) in ipa_endpoint_resume()
1921 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1922 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1924 if (ipa->modem_netdev) in ipa_endpoint_resume()
1925 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1930 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1931 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1934 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1937 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; in ipa_endpoint_setup_one()
1938 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1942 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1943 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1944 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1950 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_setup_one()
1955 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_teardown_one()
1957 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1958 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1967 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_setup()
1968 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1975 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count) in ipa_endpoint_teardown()
1976 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1981 ipa->available_count = 0; in ipa_endpoint_deconfig()
1982 bitmap_free(ipa->available); in ipa_endpoint_deconfig()
1983 ipa->available = NULL; in ipa_endpoint_deconfig()
1988 struct device *dev = ipa->dev; in ipa_endpoint_config()
2008 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
2009 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL); in ipa_endpoint_config()
2010 if (!ipa->available) in ipa_endpoint_config()
2011 return -ENOMEM; in ipa_endpoint_config()
2012 ipa->available_count = IPA_ENDPOINT_MAX; in ipa_endpoint_config()
2014 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX); in ipa_endpoint_config()
2023 val = ioread32(ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_config()
2034 return -EINVAL; in ipa_endpoint_config()
2038 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1; in ipa_endpoint_config()
2042 return -EINVAL; in ipa_endpoint_config()
2046 ipa->available = bitmap_zalloc(limit, GFP_KERNEL); in ipa_endpoint_config()
2047 if (!ipa->available) in ipa_endpoint_config()
2048 return -ENOMEM; in ipa_endpoint_config()
2049 ipa->available_count = limit; in ipa_endpoint_config()
2052 bitmap_set(ipa->available, 0, tx_count); in ipa_endpoint_config()
2053 bitmap_set(ipa->available, rx_base, rx_count); in ipa_endpoint_config()
2055 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_config()
2060 endpoint_id, limit - 1); in ipa_endpoint_config()
2064 if (!test_bit(endpoint_id, ipa->available)) { in ipa_endpoint_config()
2070 /* Make sure it's pointing in the right direction */ in ipa_endpoint_config()
2071 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
2072 if (endpoint->toward_ipa) { in ipa_endpoint_config()
2088 return -EINVAL; in ipa_endpoint_config()
2096 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
2098 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
2099 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
2100 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
2102 endpoint->ipa = ipa; in ipa_endpoint_init_one()
2103 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
2104 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
2105 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
2106 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
2107 endpoint->config = data->endpoint.config; in ipa_endpoint_init_one()
2109 __set_bit(endpoint->endpoint_id, ipa->defined); in ipa_endpoint_init_one()
2114 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined); in ipa_endpoint_exit_one()
2123 ipa->filtered = 0; in ipa_endpoint_exit()
2125 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_exit()
2126 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
2128 bitmap_free(ipa->enabled); in ipa_endpoint_exit()
2129 ipa->enabled = NULL; in ipa_endpoint_exit()
2130 bitmap_free(ipa->set_up); in ipa_endpoint_exit()
2131 ipa->set_up = NULL; in ipa_endpoint_exit()
2132 bitmap_free(ipa->defined); in ipa_endpoint_exit()
2133 ipa->defined = NULL; in ipa_endpoint_exit()
2135 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
2136 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
2149 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; in ipa_endpoint_init()
2150 if (!ipa->endpoint_count) in ipa_endpoint_init()
2151 return -EINVAL; in ipa_endpoint_init()
2154 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2155 if (!ipa->defined) in ipa_endpoint_init()
2156 return -ENOMEM; in ipa_endpoint_init()
2158 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2159 if (!ipa->set_up) in ipa_endpoint_init()
2162 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2163 if (!ipa->enabled) in ipa_endpoint_init()
2173 if (data->endpoint.filter_support) in ipa_endpoint_init()
2174 filtered |= BIT(data->endpoint_id); in ipa_endpoint_init()
2175 if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) in ipa_endpoint_init()
2176 ipa->modem_tx_count++; in ipa_endpoint_init()
2183 return -EINVAL; in ipa_endpoint_init()
2186 ipa->filtered = filtered; in ipa_endpoint_init()
2191 bitmap_free(ipa->set_up); in ipa_endpoint_init()
2192 ipa->set_up = NULL; in ipa_endpoint_init()
2194 bitmap_free(ipa->defined); in ipa_endpoint_init()
2195 ipa->defined = NULL; in ipa_endpoint_init()
2197 return -ENOMEM; in ipa_endpoint_init()