// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

#define IPA_ENDPOINT_STOP_RX_RETRIES		10
#define IPA_ENDPOINT_STOP_RX_SIZE		1	/* bytes */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */

#define ENDPOINT_STOP_DMA_TIMEOUT		15	/* milliseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};
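
/* For reference: the status element defined below works out to 32
 * bytes (1 + 1 + 2 + 2 + 1 + 1 + 4 + 4 + 8 + 4 + 4, with no implicit
 * padding), which satisfies the multiple-of-4 size check in
 * ipa_endpoint_validate_build().
 */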

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)
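
/* A usage sketch for these masks: the flags words are little-endian,
 * so they are decoded with the __le32/__le64 bitfield helpers, e.g.
 *
 *	u32 rule_id = le32_get_bits(status->flags1,
 *				    IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
 *
 * which is how ipa_status_drop_packet() below extracts the routing
 * rule ID.
 */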

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static int
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 mask;
	u32 val;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	if (suspend_delay == !!(val & mask))
		return -EALREADY;	/* Already set to desired state */

	val ^= mask;
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	bool support_suspend;
	u32 endpoint_id;

	/* DELAY mode doesn't work right on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	/* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
	support_suspend = ipa->version == IPA_VERSION_3_5_1;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
		if (endpoint->toward_ipa || support_suspend)
			(void)ipa_endpoint_init_ctrl(endpoint, enable);
	}
}
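
/* A note on ipa_endpoint_init_ctrl() above: it returns -EALREADY when
 * the endpoint is already in the requested state.  Callers like
 * ipa_endpoint_modem_pause_all() deliberately ignore the return value,
 * since either way the endpoint ends up in the state asked for.
 */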

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);

		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
		/* metadata is the 4 byte rmnet_map header itself */
		val |= HDR_OFST_METADATA_VALID_FMASK;
		val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
		/* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
		if (!endpoint->toward_ipa) {
			u32 size_offset = offsetof(struct rmnet_map_header,
						   pkt_len);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(size_offset,
					       HDR_OFST_PKT_SIZE_FMASK);
		}
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
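
/* A worked example of the header programming above (a sketch): struct
 * rmnet_map_header is 4 bytes (a flags byte, a mux_id byte, then a
 * __be16 pkt_len), so for a QMAP endpoint HDR_LEN is 4 (8 for TX with
 * checksum offload, which appends a 4-byte rmnet_map_ul_csum_header),
 * the metadata offset is 0 (the header itself is the metadata), and
 * for RX, HDR_OFST_PKT_SIZE is
 * offsetof(struct rmnet_map_header, pkt_len), i.e. 2.
 */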

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
	val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
	/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_rmnet_mux_id_metadata_mask() - Generate the mux_id metadata mask
 *
 * Generate a metadata mask value that will select only the mux_id
 * field in an rmnet_map header structure.  The mux_id is at offset
 * 1 byte from the beginning of the structure, but the metadata
 * value is treated as a 4-byte unit.  So this mask must be computed
 * with endianness in mind.  Note that ipa_endpoint_init_hdr_metadata_mask()
 * will convert this value to the proper byte order.
 *
 * Marked __always_inline because this is really computing a
 * constant value.
 */
static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
{
	size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
	u32 mux_id_mask = 0;
	u8 *bytes;

	bytes = (u8 *)&mux_id_mask;
	bytes[mux_id_offset] = 0xff;	/* mux_id is 1 byte */

	return cpu_to_be32(mux_id_mask);
}
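
/* Worked example for the function above: mux_id is at byte offset 1,
 * so bytes[1] = 0xff.  On a little-endian CPU mux_id_mask is then
 * 0x0000ff00, and cpu_to_be32() turns that into 0x00ff0000; on a
 * big-endian CPU the value is 0x00ff0000 to begin with.  Either way
 * the mask covers bits 23-16, which is where the mux_id byte lands
 * when the 4-byte metadata is treated as big-endian.
 */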

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	if (!endpoint->toward_ipa && endpoint->data->qmap)
		val = ipa_rmnet_mux_id_metadata_mask();

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (endpoint->toward_ipa && endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* Other bitfields unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
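
/* A worked example (a sketch; the exact numbers depend on IPA_MTU and
 * the skb overhead for the kernel configuration): with
 * IPA_RX_BUFFER_SIZE of 8192, an MTU of 1500, and an overhead of 384
 * bytes, this returns (8192 - 1500 - 384) / 1024 = 6, so the
 * aggregation window closes once 6 KB accumulate, leaving room for
 * one more full MTU (plus overhead) in the buffer.
 */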

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
			val |= u32_encode_bits(aggr_size,
					       AGGR_BYTE_LIMIT_FMASK);
			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
					       AGGR_TIME_LIMIT_FMASK);
			val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 scale;
	u32 base;
	u32 val;

	if (!microseconds)
		return 0;	/* invalid delay */

	/* Timer is represented in units of clock ticks. */
	if (ipa->version < IPA_VERSION_4_2)
		return microseconds;	/* XXX Needs to be computed */

	/* IPA v4.2 represents the tick count as base * scale */
	scale = 1;			/* XXX Needs to be computed */
	if (scale > field_max(SCALE_FMASK))
		return 0;		/* scale too big */

	base = DIV_ROUND_CLOSEST(microseconds, scale);
	if (base > field_max(BASE_VALUE_FMASK))
		return 0;		/* microseconds too big */

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(base, BASE_VALUE_FMASK);

	return val;
}
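
/* An illustrative encoding for the IPA v4.2 case above (a sketch,
 * since the microsecond-to-tick conversion is still marked XXX): a
 * request for 1000 with scale 1 gives base = 1000, so the returned
 * value is u32_encode_bits(1, SCALE_FMASK) |
 * u32_encode_bits(1000, BASE_VALUE_FMASK).
 */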

static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					     u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* XXX We'll fix this when the register definition is clear */
	if (microseconds) {
		struct device *dev = &ipa->pdev->dev;

		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
			endpoint_id);
		microseconds = 0;
	}

	if (microseconds) {
		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
		if (!val)
			return -EINVAL;
	} else {
		val = 0;	/* timeout is immediate */
	}
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* HPS_REP_SEQ_TYPE is 0 */
	/* DPS_REP_SEQ_TYPE is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
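
/* Example of the encoding above: seq_type packs the HPS (header
 * processing) sequencer type in its low nibble and the DPS (data
 * processing) type in the next one, so a seq_type of 0x21 programs
 * HPS type 1 and DPS type 2.
 */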

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
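
/* Each receive buffer transaction below uses exactly one TRE, and the
 * channel doorbell is rung only once every IPA_REPLENISH_BATCH (16)
 * buffers, batching up the comparatively expensive doorbell writes.
 */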

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}
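
/* A note on the replenish bookkeeping used below: replenish_backlog
 * counts buffers the hardware could accept but does not currently
 * hold.  While replenishing is disabled that count accumulates in
 * replenish_saved instead, and ipa_endpoint_replenish_enable() moves
 * it back to the backlog before kicking off a first replenish.
 */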

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the backlog (zero means none)
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}
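
/* Two delivery paths for received data: ipa_endpoint_skb_copy() above
 * copies packet bytes into a newly allocated skb (used when status
 * parsing carves several packets out of a single buffer), while
 * ipa_endpoint_skb_build() below wraps the whole page in an skb with
 * build_skb(), avoiding a copy.
 */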

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
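
/* Note on the final test above: IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK is
 * a 10-bit field, so field_max() yields 0x3ff; a routing rule ID of
 * all ones is how the hardware marks a packet that matched no rule.
 */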

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}
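
/* The empty body above is intentional: the skb for a completed TX
 * transaction is freed later, in ipa_endpoint_trans_release() below
 * (and command transactions need no cleanup at all).
 */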

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}
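
/* The helper below writes the endpoint's bit to the aggregation
 * force-close register, which evidently directs the hardware to close
 * whatever aggregation frame is currently open on the endpoint; it is
 * used by ipa_endpoint_reset_rx_aggr() before resetting the channel.
 */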

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	bool endpoint_suspended = false;
	struct gsi *gsi = &ipa->gsi;
	dma_addr_t addr;
	bool db_enable;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}
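
	/* The single byte mapped above is the landing area for the
	 * one-byte read (gsi_trans_read_byte() below) used to flush
	 * the aggregation pipeline.
	 */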
116284f9bd12SAlex Elder 	 */
116384f9bd12SAlex Elder 	gsi_channel_reset(gsi, endpoint->channel_id, false);
116484f9bd12SAlex Elder 
116584f9bd12SAlex Elder 	/* Make sure the channel isn't suspended */
116684f9bd12SAlex Elder 	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
116784f9bd12SAlex Elder 		if (!ipa_endpoint_init_ctrl(endpoint, false))
116884f9bd12SAlex Elder 			endpoint_suspended = true;
116984f9bd12SAlex Elder 
117084f9bd12SAlex Elder 	/* Start channel and do a 1 byte read */
117184f9bd12SAlex Elder 	ret = gsi_channel_start(gsi, endpoint->channel_id);
117284f9bd12SAlex Elder 	if (ret)
117384f9bd12SAlex Elder 		goto out_suspend_again;
117484f9bd12SAlex Elder 
117584f9bd12SAlex Elder 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
117684f9bd12SAlex Elder 	if (ret)
117784f9bd12SAlex Elder 		goto err_endpoint_stop;
117884f9bd12SAlex Elder 
117984f9bd12SAlex Elder 	/* Wait for aggregation to be closed on the channel */
118084f9bd12SAlex Elder 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
118184f9bd12SAlex Elder 	do {
118284f9bd12SAlex Elder 		if (!ipa_endpoint_aggr_active(endpoint))
118384f9bd12SAlex Elder 			break;
118484f9bd12SAlex Elder 		msleep(1);
118584f9bd12SAlex Elder 	} while (retries--);
118684f9bd12SAlex Elder 
118784f9bd12SAlex Elder 	/* Check one last time */
118884f9bd12SAlex Elder 	if (ipa_endpoint_aggr_active(endpoint))
118984f9bd12SAlex Elder 		dev_err(dev, "endpoint %u still active during reset\n",
119084f9bd12SAlex Elder 			endpoint->endpoint_id);
119184f9bd12SAlex Elder 
119284f9bd12SAlex Elder 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
119384f9bd12SAlex Elder 
119484f9bd12SAlex Elder 	ret = ipa_endpoint_stop(endpoint);
119584f9bd12SAlex Elder 	if (ret)
119684f9bd12SAlex Elder 		goto out_suspend_again;
119784f9bd12SAlex Elder 
119884f9bd12SAlex Elder 	/* Finally, reset and reconfigure the channel again (re-enabling
119984f9bd12SAlex Elder 	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
120084f9bd12SAlex Elder 	 * complete the channel reset sequence.  Finish by suspending the
120184f9bd12SAlex Elder 	 * channel again (if necessary).
120284f9bd12SAlex Elder 	 */
120384f9bd12SAlex Elder 	db_enable = ipa->version == IPA_VERSION_3_5_1;
120484f9bd12SAlex Elder 	gsi_channel_reset(gsi, endpoint->channel_id, db_enable);
120584f9bd12SAlex Elder 
120684f9bd12SAlex Elder 	msleep(1);
120784f9bd12SAlex Elder 
120884f9bd12SAlex Elder 	goto out_suspend_again;
120984f9bd12SAlex Elder 
121084f9bd12SAlex Elder err_endpoint_stop:
121184f9bd12SAlex Elder 	ipa_endpoint_stop(endpoint);
121284f9bd12SAlex Elder out_suspend_again:
121384f9bd12SAlex Elder 	if (endpoint_suspended)
121484f9bd12SAlex Elder 		(void)ipa_endpoint_init_ctrl(endpoint, true);
121584f9bd12SAlex Elder 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
121684f9bd12SAlex Elder out_kfree:
121784f9bd12SAlex Elder 	kfree(virt);
121884f9bd12SAlex Elder 
121984f9bd12SAlex Elder 	return ret;
122084f9bd12SAlex Elder }
122184f9bd12SAlex Elder 
122284f9bd12SAlex Elder static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
122384f9bd12SAlex Elder {
122484f9bd12SAlex Elder 	u32 channel_id = endpoint->channel_id;
122584f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
122684f9bd12SAlex Elder 	bool db_enable;
122784f9bd12SAlex Elder 	bool special;
122884f9bd12SAlex Elder 	int ret = 0;
122984f9bd12SAlex Elder 
123084f9bd12SAlex Elder 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
123184f9bd12SAlex Elder 	 * is active, we need to handle things specially to recover.
123284f9bd12SAlex Elder 	 * All other cases just need to reset the underlying GSI channel.
123384f9bd12SAlex Elder 	 *
123484f9bd12SAlex Elder 	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
123584f9bd12SAlex Elder 	 */
123684f9bd12SAlex Elder 	db_enable = ipa->version == IPA_VERSION_3_5_1;
123784f9bd12SAlex Elder 	special = !endpoint->toward_ipa && endpoint->data->aggregation;
123884f9bd12SAlex Elder 	if (special && ipa_endpoint_aggr_active(endpoint))
123984f9bd12SAlex Elder 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
124084f9bd12SAlex Elder 	else
124184f9bd12SAlex Elder 		gsi_channel_reset(&ipa->gsi, channel_id, db_enable);
124284f9bd12SAlex Elder 
124384f9bd12SAlex Elder 	if (ret)
124484f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
124584f9bd12SAlex Elder 			"error %d resetting channel %u for endpoint %u\n",
124684f9bd12SAlex Elder 			ret, endpoint->channel_id, endpoint->endpoint_id);
124784f9bd12SAlex Elder }
124884f9bd12SAlex Elder 
124984f9bd12SAlex Elder static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
125084f9bd12SAlex Elder {
125184f9bd12SAlex Elder 	u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
125284f9bd12SAlex Elder 	struct gsi_trans *trans;
125384f9bd12SAlex Elder 	dma_addr_t addr;
125484f9bd12SAlex Elder 	int ret;
125584f9bd12SAlex Elder 
125684f9bd12SAlex Elder 	trans = ipa_cmd_trans_alloc(ipa, 1);
125784f9bd12SAlex Elder 	if (!trans) {
125884f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
125984f9bd12SAlex Elder 			"no transaction for RX endpoint STOP workaround\n");
126084f9bd12SAlex Elder 		return -EBUSY;
126184f9bd12SAlex Elder 	}
126284f9bd12SAlex Elder 
126384f9bd12SAlex Elder 	/* Read into the highest part of the zero memory area */
126484f9bd12SAlex Elder 	addr = ipa->zero_addr + ipa->zero_size - size;
126584f9bd12SAlex Elder 
126684f9bd12SAlex Elder 	ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);
126784f9bd12SAlex Elder 
126884f9bd12SAlex Elder 	ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
126984f9bd12SAlex Elder 	if (ret)
127084f9bd12SAlex Elder 		gsi_trans_free(trans);
127184f9bd12SAlex Elder 
127284f9bd12SAlex Elder 	return ret;
127384f9bd12SAlex Elder }
127484f9bd12SAlex Elder 
127584f9bd12SAlex Elder /**
127684f9bd12SAlex Elder  * ipa_endpoint_stop() - Stops a GSI channel in IPA
127784f9bd12SAlex Elder  * @endpoint: Endpoint whose GSI channel is to be stopped
127884f9bd12SAlex Elder  *
127984f9bd12SAlex Elder  * This function implements the sequence to stop a GSI channel
128084f9bd12SAlex Elder  * in IPA.  It returns once the channel is in the STOPPED state.
128184f9bd12SAlex Elder  *
128284f9bd12SAlex Elder  * Return: 0 on success, or a negative error code
128384f9bd12SAlex Elder  */
128484f9bd12SAlex Elder int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
128584f9bd12SAlex Elder {
128684f9bd12SAlex Elder 	u32 retries = endpoint->toward_ipa ?
0 : IPA_ENDPOINT_STOP_RX_RETRIES; 128784f9bd12SAlex Elder int ret; 128884f9bd12SAlex Elder 128984f9bd12SAlex Elder do { 129084f9bd12SAlex Elder struct ipa *ipa = endpoint->ipa; 129184f9bd12SAlex Elder struct gsi *gsi = &ipa->gsi; 129284f9bd12SAlex Elder 129384f9bd12SAlex Elder ret = gsi_channel_stop(gsi, endpoint->channel_id); 129484f9bd12SAlex Elder if (ret != -EAGAIN) 129584f9bd12SAlex Elder break; 129684f9bd12SAlex Elder 129784f9bd12SAlex Elder if (endpoint->toward_ipa) 129884f9bd12SAlex Elder continue; 129984f9bd12SAlex Elder 130084f9bd12SAlex Elder /* For IPA v3.5.1, send a DMA read task and check again */ 130184f9bd12SAlex Elder if (ipa->version == IPA_VERSION_3_5_1) { 130284f9bd12SAlex Elder ret = ipa_endpoint_stop_rx_dma(ipa); 130384f9bd12SAlex Elder if (ret) 130484f9bd12SAlex Elder break; 130584f9bd12SAlex Elder } 130684f9bd12SAlex Elder 130784f9bd12SAlex Elder msleep(1); 130884f9bd12SAlex Elder } while (retries--); 130984f9bd12SAlex Elder 131084f9bd12SAlex Elder return retries ? ret : -EIO; 131184f9bd12SAlex Elder } 131284f9bd12SAlex Elder 131384f9bd12SAlex Elder static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 131484f9bd12SAlex Elder { 131584f9bd12SAlex Elder struct device *dev = &endpoint->ipa->pdev->dev; 131684f9bd12SAlex Elder int ret; 131784f9bd12SAlex Elder 131884f9bd12SAlex Elder if (endpoint->toward_ipa) { 131984f9bd12SAlex Elder bool delay_mode = endpoint->data->tx.delay; 132084f9bd12SAlex Elder 132184f9bd12SAlex Elder ret = ipa_endpoint_init_ctrl(endpoint, delay_mode); 132284f9bd12SAlex Elder /* Endpoint is expected to not be in delay mode */ 132384f9bd12SAlex Elder if (!ret != delay_mode) { 132484f9bd12SAlex Elder dev_warn(dev, 132584f9bd12SAlex Elder "TX endpoint %u was %sin delay mode\n", 132684f9bd12SAlex Elder endpoint->endpoint_id, 132784f9bd12SAlex Elder delay_mode ? "already " : ""); 132884f9bd12SAlex Elder } 132984f9bd12SAlex Elder ipa_endpoint_init_hdr_ext(endpoint); 133084f9bd12SAlex Elder ipa_endpoint_init_aggr(endpoint); 133184f9bd12SAlex Elder ipa_endpoint_init_deaggr(endpoint); 133284f9bd12SAlex Elder ipa_endpoint_init_seq(endpoint); 133384f9bd12SAlex Elder } else { 133484f9bd12SAlex Elder if (endpoint->ipa->version == IPA_VERSION_3_5_1) { 133584f9bd12SAlex Elder if (!ipa_endpoint_init_ctrl(endpoint, false)) 133684f9bd12SAlex Elder dev_warn(dev, 133784f9bd12SAlex Elder "RX endpoint %u was suspended\n", 133884f9bd12SAlex Elder endpoint->endpoint_id); 133984f9bd12SAlex Elder } 134084f9bd12SAlex Elder ipa_endpoint_init_hdr_ext(endpoint); 134184f9bd12SAlex Elder ipa_endpoint_init_aggr(endpoint); 134284f9bd12SAlex Elder } 134384f9bd12SAlex Elder ipa_endpoint_init_cfg(endpoint); 134484f9bd12SAlex Elder ipa_endpoint_init_hdr(endpoint); 134584f9bd12SAlex Elder ipa_endpoint_init_hdr_metadata_mask(endpoint); 134684f9bd12SAlex Elder ipa_endpoint_init_mode(endpoint); 134784f9bd12SAlex Elder ipa_endpoint_status(endpoint); 134884f9bd12SAlex Elder } 134984f9bd12SAlex Elder 135084f9bd12SAlex Elder int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) 135184f9bd12SAlex Elder { 135284f9bd12SAlex Elder struct ipa *ipa = endpoint->ipa; 135384f9bd12SAlex Elder struct gsi *gsi = &ipa->gsi; 135484f9bd12SAlex Elder int ret; 135584f9bd12SAlex Elder 135684f9bd12SAlex Elder ret = gsi_channel_start(gsi, endpoint->channel_id); 135784f9bd12SAlex Elder if (ret) { 135884f9bd12SAlex Elder dev_err(&ipa->pdev->dev, 135984f9bd12SAlex Elder "error %d starting %cX channel %u for endpoint %u\n", 136084f9bd12SAlex Elder ret, endpoint->toward_ipa ? 
'T' : 'R',
136184f9bd12SAlex Elder 			endpoint->channel_id, endpoint->endpoint_id);
136284f9bd12SAlex Elder 		return ret;
136384f9bd12SAlex Elder 	}
136484f9bd12SAlex Elder 
136584f9bd12SAlex Elder 	if (!endpoint->toward_ipa) {
136684f9bd12SAlex Elder 		ipa_interrupt_suspend_enable(ipa->interrupt,
136784f9bd12SAlex Elder 					     endpoint->endpoint_id);
136884f9bd12SAlex Elder 		ipa_endpoint_replenish_enable(endpoint);
136984f9bd12SAlex Elder 	}
137084f9bd12SAlex Elder 
137184f9bd12SAlex Elder 	ipa->enabled |= BIT(endpoint->endpoint_id);
137284f9bd12SAlex Elder 
137384f9bd12SAlex Elder 	return 0;
137484f9bd12SAlex Elder }
137584f9bd12SAlex Elder 
137684f9bd12SAlex Elder void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
137784f9bd12SAlex Elder {
137884f9bd12SAlex Elder 	u32 mask = BIT(endpoint->endpoint_id);
137984f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
138084f9bd12SAlex Elder 	int ret;
138184f9bd12SAlex Elder 
138284f9bd12SAlex Elder 	if (!(endpoint->ipa->enabled & mask))
138384f9bd12SAlex Elder 		return;
138484f9bd12SAlex Elder 
138584f9bd12SAlex Elder 	endpoint->ipa->enabled ^= mask;
138684f9bd12SAlex Elder 
138784f9bd12SAlex Elder 	if (!endpoint->toward_ipa) {
138884f9bd12SAlex Elder 		ipa_endpoint_replenish_disable(endpoint);
138984f9bd12SAlex Elder 		ipa_interrupt_suspend_disable(ipa->interrupt,
139084f9bd12SAlex Elder 					      endpoint->endpoint_id);
139184f9bd12SAlex Elder 	}
139284f9bd12SAlex Elder 
139384f9bd12SAlex Elder 	/* Note that if stop fails, the channel's state is not well-defined */
139484f9bd12SAlex Elder 	ret = ipa_endpoint_stop(endpoint);
139584f9bd12SAlex Elder 	if (ret)
139684f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
139784f9bd12SAlex Elder 			"error %d attempting to stop endpoint %u\n", ret,
139884f9bd12SAlex Elder 			endpoint->endpoint_id);
139984f9bd12SAlex Elder }
140084f9bd12SAlex Elder 
140184f9bd12SAlex Elder /**
140284f9bd12SAlex Elder  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
140384f9bd12SAlex Elder  * @endpoint: Endpoint on which to emulate a suspend interrupt
140484f9bd12SAlex Elder  *
140584f9bd12SAlex Elder  * Emulate a suspend IPA interrupt to unsuspend an endpoint suspended
140684f9bd12SAlex Elder  * with an open aggregation frame.  This is to work around a hardware
140784f9bd12SAlex Elder  * issue in IPA version 3.5.1 where the suspend interrupt will not be
140884f9bd12SAlex Elder  * generated when it should be.
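 * Instead, the aggregation frame is force-closed here, and the
 * arrival of the SUSPEND interrupt is then simulated in software.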
140984f9bd12SAlex Elder */ 141084f9bd12SAlex Elder static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) 141184f9bd12SAlex Elder { 141284f9bd12SAlex Elder struct ipa *ipa = endpoint->ipa; 141384f9bd12SAlex Elder 141484f9bd12SAlex Elder /* assert(ipa->version == IPA_VERSION_3_5_1); */ 141584f9bd12SAlex Elder 141684f9bd12SAlex Elder if (!endpoint->data->aggregation) 141784f9bd12SAlex Elder return; 141884f9bd12SAlex Elder 141984f9bd12SAlex Elder /* Nothing to do if the endpoint doesn't have aggregation open */ 142084f9bd12SAlex Elder if (!ipa_endpoint_aggr_active(endpoint)) 142184f9bd12SAlex Elder return; 142284f9bd12SAlex Elder 142384f9bd12SAlex Elder /* Force close aggregation */ 142484f9bd12SAlex Elder ipa_endpoint_force_close(endpoint); 142584f9bd12SAlex Elder 142684f9bd12SAlex Elder ipa_interrupt_simulate_suspend(ipa->interrupt); 142784f9bd12SAlex Elder } 142884f9bd12SAlex Elder 142984f9bd12SAlex Elder void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) 143084f9bd12SAlex Elder { 143184f9bd12SAlex Elder struct device *dev = &endpoint->ipa->pdev->dev; 143284f9bd12SAlex Elder struct gsi *gsi = &endpoint->ipa->gsi; 143384f9bd12SAlex Elder bool stop_channel; 143484f9bd12SAlex Elder int ret; 143584f9bd12SAlex Elder 143684f9bd12SAlex Elder if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 143784f9bd12SAlex Elder return; 143884f9bd12SAlex Elder 143984f9bd12SAlex Elder if (!endpoint->toward_ipa) 144084f9bd12SAlex Elder ipa_endpoint_replenish_disable(endpoint); 144184f9bd12SAlex Elder 144284f9bd12SAlex Elder /* IPA v3.5.1 doesn't use channel stop for suspend */ 144384f9bd12SAlex Elder stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; 144484f9bd12SAlex Elder if (!endpoint->toward_ipa && !stop_channel) { 144584f9bd12SAlex Elder /* Due to a hardware bug, a client suspended with an open 144684f9bd12SAlex Elder * aggregation frame will not generate a SUSPEND IPA 144784f9bd12SAlex Elder * interrupt. We work around this by force-closing the 144884f9bd12SAlex Elder * aggregation frame, then simulating the arrival of such 144984f9bd12SAlex Elder * an interrupt. 
145084f9bd12SAlex Elder */ 145184f9bd12SAlex Elder WARN_ON(ipa_endpoint_init_ctrl(endpoint, true)); 145284f9bd12SAlex Elder ipa_endpoint_suspend_aggr(endpoint); 145384f9bd12SAlex Elder } 145484f9bd12SAlex Elder 145584f9bd12SAlex Elder ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); 145684f9bd12SAlex Elder if (ret) 145784f9bd12SAlex Elder dev_err(dev, "error %d suspending channel %u\n", ret, 145884f9bd12SAlex Elder endpoint->channel_id); 145984f9bd12SAlex Elder } 146084f9bd12SAlex Elder 146184f9bd12SAlex Elder void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) 146284f9bd12SAlex Elder { 146384f9bd12SAlex Elder struct device *dev = &endpoint->ipa->pdev->dev; 146484f9bd12SAlex Elder struct gsi *gsi = &endpoint->ipa->gsi; 146584f9bd12SAlex Elder bool start_channel; 146684f9bd12SAlex Elder int ret; 146784f9bd12SAlex Elder 146884f9bd12SAlex Elder if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) 146984f9bd12SAlex Elder return; 147084f9bd12SAlex Elder 147184f9bd12SAlex Elder /* IPA v3.5.1 doesn't use channel start for resume */ 147284f9bd12SAlex Elder start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; 147384f9bd12SAlex Elder if (!endpoint->toward_ipa && !start_channel) 147484f9bd12SAlex Elder WARN_ON(ipa_endpoint_init_ctrl(endpoint, false)); 147584f9bd12SAlex Elder 147684f9bd12SAlex Elder ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); 147784f9bd12SAlex Elder if (ret) 147884f9bd12SAlex Elder dev_err(dev, "error %d resuming channel %u\n", ret, 147984f9bd12SAlex Elder endpoint->channel_id); 148084f9bd12SAlex Elder else if (!endpoint->toward_ipa) 148184f9bd12SAlex Elder ipa_endpoint_replenish_enable(endpoint); 148284f9bd12SAlex Elder } 148384f9bd12SAlex Elder 148484f9bd12SAlex Elder void ipa_endpoint_suspend(struct ipa *ipa) 148584f9bd12SAlex Elder { 148684f9bd12SAlex Elder if (ipa->modem_netdev) 148784f9bd12SAlex Elder ipa_modem_suspend(ipa->modem_netdev); 148884f9bd12SAlex Elder 148984f9bd12SAlex Elder ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 149084f9bd12SAlex Elder ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 149184f9bd12SAlex Elder } 149284f9bd12SAlex Elder 149384f9bd12SAlex Elder void ipa_endpoint_resume(struct ipa *ipa) 149484f9bd12SAlex Elder { 149584f9bd12SAlex Elder ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); 149684f9bd12SAlex Elder ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); 149784f9bd12SAlex Elder 149884f9bd12SAlex Elder if (ipa->modem_netdev) 149984f9bd12SAlex Elder ipa_modem_resume(ipa->modem_netdev); 150084f9bd12SAlex Elder } 150184f9bd12SAlex Elder 150284f9bd12SAlex Elder static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) 150384f9bd12SAlex Elder { 150484f9bd12SAlex Elder struct gsi *gsi = &endpoint->ipa->gsi; 150584f9bd12SAlex Elder u32 channel_id = endpoint->channel_id; 150684f9bd12SAlex Elder 150784f9bd12SAlex Elder /* Only AP endpoints get set up */ 150884f9bd12SAlex Elder if (endpoint->ee_id != GSI_EE_AP) 150984f9bd12SAlex Elder return; 151084f9bd12SAlex Elder 151184f9bd12SAlex Elder endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); 151284f9bd12SAlex Elder if (!endpoint->toward_ipa) { 151384f9bd12SAlex Elder /* RX transactions require a single TRE, so the maximum 151484f9bd12SAlex Elder * backlog is the same as the maximum outstanding TREs. 
151584f9bd12SAlex Elder */ 151684f9bd12SAlex Elder endpoint->replenish_enabled = false; 151784f9bd12SAlex Elder atomic_set(&endpoint->replenish_saved, 151884f9bd12SAlex Elder gsi_channel_tre_max(gsi, endpoint->channel_id)); 151984f9bd12SAlex Elder atomic_set(&endpoint->replenish_backlog, 0); 152084f9bd12SAlex Elder INIT_DELAYED_WORK(&endpoint->replenish_work, 152184f9bd12SAlex Elder ipa_endpoint_replenish_work); 152284f9bd12SAlex Elder } 152384f9bd12SAlex Elder 152484f9bd12SAlex Elder ipa_endpoint_program(endpoint); 152584f9bd12SAlex Elder 152684f9bd12SAlex Elder endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); 152784f9bd12SAlex Elder } 152884f9bd12SAlex Elder 152984f9bd12SAlex Elder static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) 153084f9bd12SAlex Elder { 153184f9bd12SAlex Elder endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); 153284f9bd12SAlex Elder 153384f9bd12SAlex Elder if (!endpoint->toward_ipa) 153484f9bd12SAlex Elder cancel_delayed_work_sync(&endpoint->replenish_work); 153584f9bd12SAlex Elder 153684f9bd12SAlex Elder ipa_endpoint_reset(endpoint); 153784f9bd12SAlex Elder } 153884f9bd12SAlex Elder 153984f9bd12SAlex Elder void ipa_endpoint_setup(struct ipa *ipa) 154084f9bd12SAlex Elder { 154184f9bd12SAlex Elder u32 initialized = ipa->initialized; 154284f9bd12SAlex Elder 154384f9bd12SAlex Elder ipa->set_up = 0; 154484f9bd12SAlex Elder while (initialized) { 154584f9bd12SAlex Elder u32 endpoint_id = __ffs(initialized); 154684f9bd12SAlex Elder 154784f9bd12SAlex Elder initialized ^= BIT(endpoint_id); 154884f9bd12SAlex Elder 154984f9bd12SAlex Elder ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); 155084f9bd12SAlex Elder } 155184f9bd12SAlex Elder } 155284f9bd12SAlex Elder 155384f9bd12SAlex Elder void ipa_endpoint_teardown(struct ipa *ipa) 155484f9bd12SAlex Elder { 155584f9bd12SAlex Elder u32 set_up = ipa->set_up; 155684f9bd12SAlex Elder 155784f9bd12SAlex Elder while (set_up) { 155884f9bd12SAlex Elder u32 endpoint_id = __fls(set_up); 155984f9bd12SAlex Elder 156084f9bd12SAlex Elder set_up ^= BIT(endpoint_id); 156184f9bd12SAlex Elder 156284f9bd12SAlex Elder ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); 156384f9bd12SAlex Elder } 156484f9bd12SAlex Elder ipa->set_up = 0; 156584f9bd12SAlex Elder } 156684f9bd12SAlex Elder 156784f9bd12SAlex Elder int ipa_endpoint_config(struct ipa *ipa) 156884f9bd12SAlex Elder { 156984f9bd12SAlex Elder struct device *dev = &ipa->pdev->dev; 157084f9bd12SAlex Elder u32 initialized; 157184f9bd12SAlex Elder u32 rx_base; 157284f9bd12SAlex Elder u32 rx_mask; 157384f9bd12SAlex Elder u32 tx_mask; 157484f9bd12SAlex Elder int ret = 0; 157584f9bd12SAlex Elder u32 max; 157684f9bd12SAlex Elder u32 val; 157784f9bd12SAlex Elder 157884f9bd12SAlex Elder /* Find out about the endpoints supplied by the hardware, and ensure 157984f9bd12SAlex Elder * the highest one doesn't exceed the number we support. 
158084f9bd12SAlex Elder */ 158184f9bd12SAlex Elder val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); 158284f9bd12SAlex Elder 158384f9bd12SAlex Elder /* Our RX is an IPA producer */ 158484f9bd12SAlex Elder rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK); 158584f9bd12SAlex Elder max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK); 158684f9bd12SAlex Elder if (max > IPA_ENDPOINT_MAX) { 158784f9bd12SAlex Elder dev_err(dev, "too many endpoints (%u > %u)\n", 158884f9bd12SAlex Elder max, IPA_ENDPOINT_MAX); 158984f9bd12SAlex Elder return -EINVAL; 159084f9bd12SAlex Elder } 159184f9bd12SAlex Elder rx_mask = GENMASK(max - 1, rx_base); 159284f9bd12SAlex Elder 159384f9bd12SAlex Elder /* Our TX is an IPA consumer */ 159484f9bd12SAlex Elder max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK); 159584f9bd12SAlex Elder tx_mask = GENMASK(max - 1, 0); 159684f9bd12SAlex Elder 159784f9bd12SAlex Elder ipa->available = rx_mask | tx_mask; 159884f9bd12SAlex Elder 159984f9bd12SAlex Elder /* Check for initialized endpoints not supported by the hardware */ 160084f9bd12SAlex Elder if (ipa->initialized & ~ipa->available) { 160184f9bd12SAlex Elder dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", 160284f9bd12SAlex Elder ipa->initialized & ~ipa->available); 160384f9bd12SAlex Elder ret = -EINVAL; /* Report other errors too */ 160484f9bd12SAlex Elder } 160584f9bd12SAlex Elder 160684f9bd12SAlex Elder initialized = ipa->initialized; 160784f9bd12SAlex Elder while (initialized) { 160884f9bd12SAlex Elder u32 endpoint_id = __ffs(initialized); 160984f9bd12SAlex Elder struct ipa_endpoint *endpoint; 161084f9bd12SAlex Elder 161184f9bd12SAlex Elder initialized ^= BIT(endpoint_id); 161284f9bd12SAlex Elder 161384f9bd12SAlex Elder /* Make sure it's pointing in the right direction */ 161484f9bd12SAlex Elder endpoint = &ipa->endpoint[endpoint_id]; 161584f9bd12SAlex Elder if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { 161684f9bd12SAlex Elder dev_err(dev, "endpoint id %u wrong direction\n", 161784f9bd12SAlex Elder endpoint_id); 161884f9bd12SAlex Elder ret = -EINVAL; 161984f9bd12SAlex Elder } 162084f9bd12SAlex Elder } 162184f9bd12SAlex Elder 162284f9bd12SAlex Elder return ret; 162384f9bd12SAlex Elder } 162484f9bd12SAlex Elder 162584f9bd12SAlex Elder void ipa_endpoint_deconfig(struct ipa *ipa) 162684f9bd12SAlex Elder { 162784f9bd12SAlex Elder ipa->available = 0; /* Nothing more to do */ 162884f9bd12SAlex Elder } 162984f9bd12SAlex Elder 163084f9bd12SAlex Elder static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, 163184f9bd12SAlex Elder const struct ipa_gsi_endpoint_data *data) 163284f9bd12SAlex Elder { 163384f9bd12SAlex Elder struct ipa_endpoint *endpoint; 163484f9bd12SAlex Elder 163584f9bd12SAlex Elder endpoint = &ipa->endpoint[data->endpoint_id]; 163684f9bd12SAlex Elder 163784f9bd12SAlex Elder if (data->ee_id == GSI_EE_AP) 163884f9bd12SAlex Elder ipa->channel_map[data->channel_id] = endpoint; 163984f9bd12SAlex Elder ipa->name_map[name] = endpoint; 164084f9bd12SAlex Elder 164184f9bd12SAlex Elder endpoint->ipa = ipa; 164284f9bd12SAlex Elder endpoint->ee_id = data->ee_id; 164384f9bd12SAlex Elder endpoint->seq_type = data->endpoint.seq_type; 164484f9bd12SAlex Elder endpoint->channel_id = data->channel_id; 164584f9bd12SAlex Elder endpoint->endpoint_id = data->endpoint_id; 164684f9bd12SAlex Elder endpoint->toward_ipa = data->toward_ipa; 164784f9bd12SAlex Elder endpoint->data = &data->endpoint.config; 164884f9bd12SAlex Elder 164984f9bd12SAlex Elder ipa->initialized |= BIT(endpoint->endpoint_id); 
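	/* This endpoint is now recorded in the ipa->initialized bitmap;
	 * ipa_endpoint_exit_one() clears the bit again on teardown.
	 */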
165084f9bd12SAlex Elder } 165184f9bd12SAlex Elder 165284f9bd12SAlex Elder void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) 165384f9bd12SAlex Elder { 165484f9bd12SAlex Elder endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); 165584f9bd12SAlex Elder 165684f9bd12SAlex Elder memset(endpoint, 0, sizeof(*endpoint)); 165784f9bd12SAlex Elder } 165884f9bd12SAlex Elder 165984f9bd12SAlex Elder void ipa_endpoint_exit(struct ipa *ipa) 166084f9bd12SAlex Elder { 166184f9bd12SAlex Elder u32 initialized = ipa->initialized; 166284f9bd12SAlex Elder 166384f9bd12SAlex Elder while (initialized) { 166484f9bd12SAlex Elder u32 endpoint_id = __fls(initialized); 166584f9bd12SAlex Elder 166684f9bd12SAlex Elder initialized ^= BIT(endpoint_id); 166784f9bd12SAlex Elder 166884f9bd12SAlex Elder ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); 166984f9bd12SAlex Elder } 167084f9bd12SAlex Elder memset(ipa->name_map, 0, sizeof(ipa->name_map)); 167184f9bd12SAlex Elder memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); 167284f9bd12SAlex Elder } 167384f9bd12SAlex Elder 167484f9bd12SAlex Elder /* Returns a bitmask of endpoints that support filtering, or 0 on error */ 167584f9bd12SAlex Elder u32 ipa_endpoint_init(struct ipa *ipa, u32 count, 167684f9bd12SAlex Elder const struct ipa_gsi_endpoint_data *data) 167784f9bd12SAlex Elder { 167884f9bd12SAlex Elder enum ipa_endpoint_name name; 167984f9bd12SAlex Elder u32 filter_map; 168084f9bd12SAlex Elder 168184f9bd12SAlex Elder if (!ipa_endpoint_data_valid(ipa, count, data)) 168284f9bd12SAlex Elder return 0; /* Error */ 168384f9bd12SAlex Elder 168484f9bd12SAlex Elder ipa->initialized = 0; 168584f9bd12SAlex Elder 168684f9bd12SAlex Elder filter_map = 0; 168784f9bd12SAlex Elder for (name = 0; name < count; name++, data++) { 168884f9bd12SAlex Elder if (ipa_gsi_endpoint_data_empty(data)) 168984f9bd12SAlex Elder continue; /* Skip over empty slots */ 169084f9bd12SAlex Elder 169184f9bd12SAlex Elder ipa_endpoint_init_one(ipa, name, data); 169284f9bd12SAlex Elder 169384f9bd12SAlex Elder if (data->endpoint.filter_support) 169484f9bd12SAlex Elder filter_map |= BIT(data->endpoint_id); 169584f9bd12SAlex Elder } 169684f9bd12SAlex Elder 169784f9bd12SAlex Elder if (!ipa_filter_map_valid(ipa, filter_map)) 169884f9bd12SAlex Elder goto err_endpoint_exit; 169984f9bd12SAlex Elder 170084f9bd12SAlex Elder return filter_map; /* Non-zero bitmask */ 170184f9bd12SAlex Elder 170284f9bd12SAlex Elder err_endpoint_exit: 170384f9bd12SAlex Elder ipa_endpoint_exit(ipa); 170484f9bd12SAlex Elder 170584f9bd12SAlex Elder return 0; /* Error */ 170684f9bd12SAlex Elder } 1707
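
/* What follows is a minimal sketch of the endpoint bring-up sequence
 * suggested by the functions in this file.  It is illustrative only:
 * ipa_endpoint_example_bringup() is a hypothetical helper, and the
 * driver's real callers (outside this file) may sequence things
 * differently.
 */
#if 0	/* illustrative sketch, not part of the driver */
static int ipa_endpoint_example_bringup(struct ipa *ipa, u32 count,
					const struct ipa_gsi_endpoint_data *data)
{
	u32 filter_map;
	int ret;

	/* Record the endpoints; returns a filter bitmap, or 0 on error */
	filter_map = ipa_endpoint_init(ipa, count, data);
	if (!filter_map)
		return -EINVAL;

	/* Validate the recorded endpoints against the hardware */
	ret = ipa_endpoint_config(ipa);
	if (ret)
		goto err_endpoint_exit;

	/* Program all AP endpoints; undone by ipa_endpoint_teardown() */
	ipa_endpoint_setup(ipa);

	/* Individual endpoints are then started and stopped with
	 * ipa_endpoint_enable_one() and ipa_endpoint_disable_one();
	 * system suspend and resume use ipa_endpoint_suspend() and
	 * ipa_endpoint_resume().
	 */
	return 0;

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return ret;
}
#endif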