xref: /linux/drivers/net/ipa/ipa_endpoint.c (revision fb57c3ea98519f811b37f299e0ac4988a021fe2a)
184f9bd12SAlex Elder // SPDX-License-Identifier: GPL-2.0
284f9bd12SAlex Elder 
384f9bd12SAlex Elder /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
484f9bd12SAlex Elder  * Copyright (C) 2019-2020 Linaro Ltd.
584f9bd12SAlex Elder  */
684f9bd12SAlex Elder 
784f9bd12SAlex Elder #include <linux/types.h>
884f9bd12SAlex Elder #include <linux/device.h>
984f9bd12SAlex Elder #include <linux/slab.h>
1084f9bd12SAlex Elder #include <linux/bitfield.h>
1184f9bd12SAlex Elder #include <linux/if_rmnet.h>
1284f9bd12SAlex Elder #include <linux/dma-direction.h>
1384f9bd12SAlex Elder 
1484f9bd12SAlex Elder #include "gsi.h"
1584f9bd12SAlex Elder #include "gsi_trans.h"
1684f9bd12SAlex Elder #include "ipa.h"
1784f9bd12SAlex Elder #include "ipa_data.h"
1884f9bd12SAlex Elder #include "ipa_endpoint.h"
1984f9bd12SAlex Elder #include "ipa_cmd.h"
2084f9bd12SAlex Elder #include "ipa_mem.h"
2184f9bd12SAlex Elder #include "ipa_modem.h"
2284f9bd12SAlex Elder #include "ipa_table.h"
2384f9bd12SAlex Elder #include "ipa_gsi.h"
2484f9bd12SAlex Elder 
2584f9bd12SAlex Elder #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
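
/* Illustrative behavior of the helper above (not driver logic): the
 * decrement happens only if the value is currently non-zero, and the
 * macro evaluates true only if it actually decremented:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *
 *	atomic_dec_not_zero(&v);	// true; v is now 0
 *	atomic_dec_not_zero(&v);	// false; v stays 0
 *
 * This is what lets ipa_endpoint_replenish() below drain a backlog
 * counter without ever driving it negative.
 */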
2684f9bd12SAlex Elder 
2784f9bd12SAlex Elder #define IPA_REPLENISH_BATCH	16
2884f9bd12SAlex Elder 
296fcd4224SAlex Elder /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
306fcd4224SAlex Elder #define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
3184f9bd12SAlex Elder 
3284f9bd12SAlex Elder /* The amount of RX buffer space consumed by standard skb overhead */
3384f9bd12SAlex Elder #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
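
/* Illustrative expansion of the overhead definition above, assuming the
 * SKB_MAX_ORDER() and SKB_WITH_OVERHEAD() definitions in <linux/skbuff.h>:
 *
 *	SKB_MAX_ORDER(X, 0) = PAGE_SIZE - X
 *				- SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * so IPA_RX_BUFFER_OVERHEAD reduces to NET_SKB_PAD plus the (aligned)
 * size of struct skb_shared_info.  The exact byte count depends on the
 * kernel configuration, which is why it is expressed symbolically.
 */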
3484f9bd12SAlex Elder 
358730f45dSAlex Elder /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
368730f45dSAlex Elder #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
378730f45dSAlex Elder 
3884f9bd12SAlex Elder #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
391d86652bSAlex Elder #define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */
4084f9bd12SAlex Elder 
4184f9bd12SAlex Elder /** enum ipa_status_opcode - status element opcode hardware values */
4284f9bd12SAlex Elder enum ipa_status_opcode {
4384f9bd12SAlex Elder 	IPA_STATUS_OPCODE_PACKET		= 0x01,
4484f9bd12SAlex Elder 	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
4584f9bd12SAlex Elder 	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
4684f9bd12SAlex Elder 	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
4784f9bd12SAlex Elder 	IPA_STATUS_OPCODE_LOG			= 0x10,
4884f9bd12SAlex Elder 	IPA_STATUS_OPCODE_DCMP			= 0x20,
4984f9bd12SAlex Elder 	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
5084f9bd12SAlex Elder };
5184f9bd12SAlex Elder 
5284f9bd12SAlex Elder /** enum ipa_status_exception - status element exception type */
5384f9bd12SAlex Elder enum ipa_status_exception {
5484f9bd12SAlex Elder 	/* 0 means no exception */
5584f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
5684f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
5784f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
5884f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
5984f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
6084f9bd12SAlex Elder 	/* The meaning of the next value depends on the IP version */
6184f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_NAT		= 0x40,		/* IPv4 */
6284f9bd12SAlex Elder 	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
6384f9bd12SAlex Elder };
6484f9bd12SAlex Elder 
6584f9bd12SAlex Elder /* Status element provided by hardware */
6684f9bd12SAlex Elder struct ipa_status {
6784f9bd12SAlex Elder 	u8 opcode;		/* enum ipa_status_opcode */
6884f9bd12SAlex Elder 	u8 exception;		/* enum ipa_status_exception */
6984f9bd12SAlex Elder 	__le16 mask;
7084f9bd12SAlex Elder 	__le16 pkt_len;
7184f9bd12SAlex Elder 	u8 endp_src_idx;
7284f9bd12SAlex Elder 	u8 endp_dst_idx;
7384f9bd12SAlex Elder 	__le32 metadata;
7484f9bd12SAlex Elder 	__le32 flags1;
7584f9bd12SAlex Elder 	__le64 flags2;
7684f9bd12SAlex Elder 	__le32 flags3;
7784f9bd12SAlex Elder 	__le32 flags4;
7884f9bd12SAlex Elder };
7984f9bd12SAlex Elder 
8084f9bd12SAlex Elder /* Field masks for struct ipa_status structure fields */
8184f9bd12SAlex Elder 
8284f9bd12SAlex Elder #define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
8384f9bd12SAlex Elder 
8484f9bd12SAlex Elder #define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
8584f9bd12SAlex Elder 
8684f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
8784f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
8884f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
8984f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
9084f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
9184f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
9284f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
9384f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
9484f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
9584f9bd12SAlex Elder #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
9684f9bd12SAlex Elder 
9784f9bd12SAlex Elder #define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
9884f9bd12SAlex Elder #define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
9984f9bd12SAlex Elder #define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
10084f9bd12SAlex Elder #define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)
10184f9bd12SAlex Elder 
10284f9bd12SAlex Elder #define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
10384f9bd12SAlex Elder #define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)
10484f9bd12SAlex Elder 
10584f9bd12SAlex Elder #define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
10684f9bd12SAlex Elder #define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
10784f9bd12SAlex Elder #define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
10884f9bd12SAlex Elder #define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
10984f9bd12SAlex Elder #define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)
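
/* Example (illustrative only) of extracting fields from a received
 * status element using the masks above and the helpers declared in
 * <linux/bitfield.h>:
 *
 *	u32 rule_id = le32_get_bits(status->flags1,
 *				    IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
 *	u32 src_idx = u32_get_bits(status->endp_src_idx,
 *				   IPA_STATUS_SRC_IDX_FMASK);
 *
 * The flags1/flags2 fields are little-endian, so the le32/le64 helper
 * variants must be used for them; the single-byte index fields can be
 * extracted with the plain u32 variant (as ipa_endpoint_status_skip()
 * does below).
 */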
11084f9bd12SAlex Elder 
11184f9bd12SAlex Elder #ifdef IPA_VALIDATE
11284f9bd12SAlex Elder 
11384f9bd12SAlex Elder static void ipa_endpoint_validate_build(void)
11484f9bd12SAlex Elder {
11584f9bd12SAlex Elder 	/* The aggregation byte limit defines the point at which an
11684f9bd12SAlex Elder 	 * aggregation window will close.  It is programmed into the
11784f9bd12SAlex Elder 	 * IPA hardware as a number of KB.  We don't use "hard byte
11884f9bd12SAlex Elder 	 * limit" aggregation, which means that we need to supply
11984f9bd12SAlex Elder 	 * enough space in a receive buffer to hold a complete MTU
12084f9bd12SAlex Elder 	 * plus normal skb overhead *after* that aggregation byte
12184f9bd12SAlex Elder 	 * limit has been crossed.
12284f9bd12SAlex Elder 	 *
12384f9bd12SAlex Elder 	 * This check just ensures we don't define a receive buffer
12484f9bd12SAlex Elder 	 * size that would exceed what we can represent in the field
12584f9bd12SAlex Elder 	 * that is used to program its size.
12684f9bd12SAlex Elder 	 */
12784f9bd12SAlex Elder 	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
12884f9bd12SAlex Elder 		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
12984f9bd12SAlex Elder 		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
13084f9bd12SAlex Elder 
13184f9bd12SAlex Elder 	/* I honestly don't know where this requirement comes from.  But
13284f9bd12SAlex Elder 	 * it holds, and if we someday need to loosen the constraint we
13384f9bd12SAlex Elder 	 * can try to track it down.
13484f9bd12SAlex Elder 	 */
13584f9bd12SAlex Elder 	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
13684f9bd12SAlex Elder }
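
/* Worked example for the first check above (the field width here is
 * illustrative; the real one comes from AGGR_BYTE_LIMIT_FMASK).  If the
 * byte limit field were 5 bits wide, field_max() would be 31 and the
 * hardware could be programmed with at most a 31 KB limit; the receive
 * buffer would then be allowed to be as large as 31 * SZ_1K + IPA_MTU +
 * IPA_RX_BUFFER_OVERHEAD bytes.  The 8192-byte buffer defined above
 * fits well within that bound.
 */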
13784f9bd12SAlex Elder 
13884f9bd12SAlex Elder static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
13984f9bd12SAlex Elder 			    const struct ipa_gsi_endpoint_data *all_data,
14084f9bd12SAlex Elder 			    const struct ipa_gsi_endpoint_data *data)
14184f9bd12SAlex Elder {
14284f9bd12SAlex Elder 	const struct ipa_gsi_endpoint_data *other_data;
14384f9bd12SAlex Elder 	struct device *dev = &ipa->pdev->dev;
14484f9bd12SAlex Elder 	enum ipa_endpoint_name other_name;
14584f9bd12SAlex Elder 
14684f9bd12SAlex Elder 	if (ipa_gsi_endpoint_data_empty(data))
14784f9bd12SAlex Elder 		return true;
14884f9bd12SAlex Elder 
14984f9bd12SAlex Elder 	if (!data->toward_ipa) {
15084f9bd12SAlex Elder 		if (data->endpoint.filter_support) {
15184f9bd12SAlex Elder 			dev_err(dev, "filtering not supported for RX endpoint %u\n",
15284f9bd12SAlex Elder 				data->endpoint_id);
15484f9bd12SAlex Elder 			return false;
15584f9bd12SAlex Elder 		}
15684f9bd12SAlex Elder 
15784f9bd12SAlex Elder 		return true;	/* Nothing more to check for RX */
15884f9bd12SAlex Elder 	}
15984f9bd12SAlex Elder 
16084f9bd12SAlex Elder 	if (data->endpoint.config.status_enable) {
16184f9bd12SAlex Elder 		other_name = data->endpoint.config.tx.status_endpoint;
16284f9bd12SAlex Elder 		if (other_name >= count) {
16384f9bd12SAlex Elder 			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
16484f9bd12SAlex Elder 				other_name, data->endpoint_id);
16684f9bd12SAlex Elder 			return false;
16784f9bd12SAlex Elder 		}
16884f9bd12SAlex Elder 
16984f9bd12SAlex Elder 		/* Status endpoint must be defined... */
17084f9bd12SAlex Elder 		other_data = &all_data[other_name];
17184f9bd12SAlex Elder 		if (ipa_gsi_endpoint_data_empty(other_data)) {
17284f9bd12SAlex Elder 			dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
17384f9bd12SAlex Elder 				other_name, data->endpoint_id);
17584f9bd12SAlex Elder 			return false;
17684f9bd12SAlex Elder 		}
17784f9bd12SAlex Elder 
17884f9bd12SAlex Elder 		/* ...and has to be an RX endpoint... */
17984f9bd12SAlex Elder 		if (other_data->toward_ipa) {
18084f9bd12SAlex Elder 			dev_err(dev,
18184f9bd12SAlex Elder 				"status endpoint for endpoint %u not RX\n",
18284f9bd12SAlex Elder 				data->endpoint_id);
18384f9bd12SAlex Elder 			return false;
18484f9bd12SAlex Elder 		}
18584f9bd12SAlex Elder 
18684f9bd12SAlex Elder 		/* ...and if it's to be an AP endpoint... */
18784f9bd12SAlex Elder 		if (other_data->ee_id == GSI_EE_AP) {
18884f9bd12SAlex Elder 			/* ...make sure it has status enabled. */
18984f9bd12SAlex Elder 			if (!other_data->endpoint.config.status_enable) {
19084f9bd12SAlex Elder 				dev_err(dev,
19184f9bd12SAlex Elder 					"status not enabled for endpoint %u\n",
19284f9bd12SAlex Elder 					other_data->endpoint_id);
19384f9bd12SAlex Elder 				return false;
19484f9bd12SAlex Elder 			}
19584f9bd12SAlex Elder 		}
19684f9bd12SAlex Elder 	}
19784f9bd12SAlex Elder 
19884f9bd12SAlex Elder 	if (data->endpoint.config.dma_mode) {
19984f9bd12SAlex Elder 		other_name = data->endpoint.config.dma_endpoint;
20084f9bd12SAlex Elder 		if (other_name >= count) {
20184f9bd12SAlex Elder 			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
20284f9bd12SAlex Elder 				other_name, data->endpoint_id);
20484f9bd12SAlex Elder 			return false;
20584f9bd12SAlex Elder 		}
20684f9bd12SAlex Elder 
20784f9bd12SAlex Elder 		other_data = &all_data[other_name];
20884f9bd12SAlex Elder 		if (ipa_gsi_endpoint_data_empty(other_data)) {
20984f9bd12SAlex Elder 			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
21084f9bd12SAlex Elder 				other_name, data->endpoint_id);
21284f9bd12SAlex Elder 			return false;
21384f9bd12SAlex Elder 		}
21484f9bd12SAlex Elder 	}
21584f9bd12SAlex Elder 
21684f9bd12SAlex Elder 	return true;
21784f9bd12SAlex Elder }
21884f9bd12SAlex Elder 
21984f9bd12SAlex Elder static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
22084f9bd12SAlex Elder 				    const struct ipa_gsi_endpoint_data *data)
22184f9bd12SAlex Elder {
22284f9bd12SAlex Elder 	const struct ipa_gsi_endpoint_data *dp = data;
22384f9bd12SAlex Elder 	struct device *dev = &ipa->pdev->dev;
22484f9bd12SAlex Elder 	enum ipa_endpoint_name name;
22584f9bd12SAlex Elder 
22684f9bd12SAlex Elder 	ipa_endpoint_validate_build();
22784f9bd12SAlex Elder 
22884f9bd12SAlex Elder 	if (count > IPA_ENDPOINT_COUNT) {
22984f9bd12SAlex Elder 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
23084f9bd12SAlex Elder 			count, IPA_ENDPOINT_COUNT);
23184f9bd12SAlex Elder 		return false;
23284f9bd12SAlex Elder 	}
23384f9bd12SAlex Elder 
23484f9bd12SAlex Elder 	/* Make sure needed endpoints have defined data */
23584f9bd12SAlex Elder 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
23684f9bd12SAlex Elder 		dev_err(dev, "command TX endpoint not defined\n");
23784f9bd12SAlex Elder 		return false;
23884f9bd12SAlex Elder 	}
23984f9bd12SAlex Elder 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
24084f9bd12SAlex Elder 		dev_err(dev, "LAN RX endpoint not defined\n");
24184f9bd12SAlex Elder 		return false;
24284f9bd12SAlex Elder 	}
24384f9bd12SAlex Elder 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
24484f9bd12SAlex Elder 		dev_err(dev, "AP->modem TX endpoint not defined\n");
24584f9bd12SAlex Elder 		return false;
24684f9bd12SAlex Elder 	}
24784f9bd12SAlex Elder 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
24884f9bd12SAlex Elder 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
24984f9bd12SAlex Elder 		return false;
25084f9bd12SAlex Elder 	}
25184f9bd12SAlex Elder 
25284f9bd12SAlex Elder 	for (name = 0; name < count; name++, dp++)
25384f9bd12SAlex Elder 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
25484f9bd12SAlex Elder 			return false;
25584f9bd12SAlex Elder 
25684f9bd12SAlex Elder 	return true;
25784f9bd12SAlex Elder }
25884f9bd12SAlex Elder 
25984f9bd12SAlex Elder #else /* !IPA_VALIDATE */
26084f9bd12SAlex Elder 
26184f9bd12SAlex Elder static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
26284f9bd12SAlex Elder 				    const struct ipa_gsi_endpoint_data *data)
26384f9bd12SAlex Elder {
26484f9bd12SAlex Elder 	return true;
26584f9bd12SAlex Elder }
26684f9bd12SAlex Elder 
26784f9bd12SAlex Elder #endif /* !IPA_VALIDATE */
26884f9bd12SAlex Elder 
26984f9bd12SAlex Elder /* Allocate a transaction to use on a non-command endpoint */
27084f9bd12SAlex Elder static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
27184f9bd12SAlex Elder 						  u32 tre_count)
27284f9bd12SAlex Elder {
27384f9bd12SAlex Elder 	struct gsi *gsi = &endpoint->ipa->gsi;
27484f9bd12SAlex Elder 	u32 channel_id = endpoint->channel_id;
27584f9bd12SAlex Elder 	enum dma_data_direction direction;
27684f9bd12SAlex Elder 
27784f9bd12SAlex Elder 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
27884f9bd12SAlex Elder 
27984f9bd12SAlex Elder 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
28084f9bd12SAlex Elder }
28184f9bd12SAlex Elder 
28284f9bd12SAlex Elder /* suspend_delay represents suspend for RX, delay for TX endpoints.
28384f9bd12SAlex Elder  * Note that suspend is not supported starting with IPA v4.0.
28484f9bd12SAlex Elder  */
2854900bf34SAlex Elder static bool
28684f9bd12SAlex Elder ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
28784f9bd12SAlex Elder {
28884f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
28984f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
2904900bf34SAlex Elder 	bool state;
29184f9bd12SAlex Elder 	u32 mask;
29284f9bd12SAlex Elder 	u32 val;
29384f9bd12SAlex Elder 
2944fa95248SAlex Elder 	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
2954fa95248SAlex Elder 	 * correctly on IPA v4.2.
2964fa95248SAlex Elder 	 *
2974fa95248SAlex Elder 	 * if (endpoint->toward_ipa)
2984fa95248SAlex Elder 	 * 	assert(ipa->version != IPA_VERSION_4_2);
2994fa95248SAlex Elder 	 * else
3004fa95248SAlex Elder 	 * 	assert(ipa->version == IPA_VERSION_3_5_1);
3014fa95248SAlex Elder 	 */
30284f9bd12SAlex Elder 	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
30384f9bd12SAlex Elder 
30484f9bd12SAlex Elder 	val = ioread32(ipa->reg_virt + offset);
3054900bf34SAlex Elder 	/* Don't bother if it's already in the requested state */
3064900bf34SAlex Elder 	state = !!(val & mask);
3074900bf34SAlex Elder 	if (suspend_delay != state) {
30884f9bd12SAlex Elder 		val ^= mask;
30984f9bd12SAlex Elder 		iowrite32(val, ipa->reg_virt + offset);
3104900bf34SAlex Elder 	}
31184f9bd12SAlex Elder 
3124900bf34SAlex Elder 	return state;
31384f9bd12SAlex Elder }
31484f9bd12SAlex Elder 
3154fa95248SAlex Elder /* We currently don't care what the previous state was for delay mode */
3164fa95248SAlex Elder static void
3174fa95248SAlex Elder ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
3184fa95248SAlex Elder {
3194fa95248SAlex Elder 	/* assert(endpoint->toward_ipa); */
3204fa95248SAlex Elder 
32166eba767SAlex Elder 	/* Delay mode doesn't work properly for IPA v4.2 */
32266eba767SAlex Elder 	if (endpoint->ipa->version != IPA_VERSION_4_2)
3234fa95248SAlex Elder 		(void)ipa_endpoint_init_ctrl(endpoint, enable);
3244fa95248SAlex Elder }
3254fa95248SAlex Elder 
326fff89971SAlex Elder static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
327fff89971SAlex Elder {
328fff89971SAlex Elder 	u32 mask = BIT(endpoint->endpoint_id);
329fff89971SAlex Elder 	struct ipa *ipa = endpoint->ipa;
330fff89971SAlex Elder 	u32 offset;
331fff89971SAlex Elder 	u32 val;
332fff89971SAlex Elder 
333fff89971SAlex Elder 	/* assert(mask & ipa->available); */
334fff89971SAlex Elder 	offset = ipa_reg_state_aggr_active_offset(ipa->version);
335fff89971SAlex Elder 	val = ioread32(ipa->reg_virt + offset);
336fff89971SAlex Elder 
337fff89971SAlex Elder 	return !!(val & mask);
338fff89971SAlex Elder }
339fff89971SAlex Elder 
340fff89971SAlex Elder static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
341fff89971SAlex Elder {
342fff89971SAlex Elder 	u32 mask = BIT(endpoint->endpoint_id);
343fff89971SAlex Elder 	struct ipa *ipa = endpoint->ipa;
344fff89971SAlex Elder 
345fff89971SAlex Elder 	/* assert(mask & ipa->available); */
346fff89971SAlex Elder 	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
347fff89971SAlex Elder }
348fff89971SAlex Elder 
349fff89971SAlex Elder /**
350fff89971SAlex Elder  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
351fff89971SAlex Elder  * @endpoint:	Endpoint on which to emulate a suspend
352fff89971SAlex Elder  *
353fff89971SAlex Elder  *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
354fff89971SAlex Elder  *  with an open aggregation frame.  This is to work around a hardware
355fff89971SAlex Elder  *  issue in IPA version 3.5.1 where the suspend interrupt will not be
356fff89971SAlex Elder  *  generated when it should be.
357fff89971SAlex Elder  */
358fff89971SAlex Elder static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
359fff89971SAlex Elder {
360fff89971SAlex Elder 	struct ipa *ipa = endpoint->ipa;
361fff89971SAlex Elder 
362fff89971SAlex Elder 	if (!endpoint->data->aggregation)
363fff89971SAlex Elder 		return;
364fff89971SAlex Elder 
365fff89971SAlex Elder 	/* Nothing to do if the endpoint doesn't have aggregation open */
366fff89971SAlex Elder 	if (!ipa_endpoint_aggr_active(endpoint))
367fff89971SAlex Elder 		return;
368fff89971SAlex Elder 
369fff89971SAlex Elder 	/* Force close aggregation */
370fff89971SAlex Elder 	ipa_endpoint_force_close(endpoint);
371fff89971SAlex Elder 
372fff89971SAlex Elder 	ipa_interrupt_simulate_suspend(ipa->interrupt);
373fff89971SAlex Elder }
374fff89971SAlex Elder 
375fff89971SAlex Elder /* Returns previous suspend state (true means suspend was enabled) */
3764fa95248SAlex Elder static bool
3774fa95248SAlex Elder ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
3784fa95248SAlex Elder {
379fff89971SAlex Elder 	bool suspended;
380fff89971SAlex Elder 
381b07f283eSAlex Elder 	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
382b07f283eSAlex Elder 		return enable;	/* For IPA v4.0+, no change made */
383b07f283eSAlex Elder 
3844fa95248SAlex Elder 	/* assert(!endpoint->toward_ipa); */
3854fa95248SAlex Elder 
386fff89971SAlex Elder 	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
387fff89971SAlex Elder 
388fff89971SAlex Elder 	/* A client suspended with an open aggregation frame will not
389fff89971SAlex Elder 	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
390fff89971SAlex Elder 	 * ipa_endpoint_suspend_aggr() handle this.
391fff89971SAlex Elder 	 */
392fff89971SAlex Elder 	if (enable && !suspended)
393fff89971SAlex Elder 		ipa_endpoint_suspend_aggr(endpoint);
394fff89971SAlex Elder 
395fff89971SAlex Elder 	return suspended;
3964fa95248SAlex Elder }
3974fa95248SAlex Elder 
39884f9bd12SAlex Elder /* Enable or disable delay or suspend mode on all modem endpoints */
39984f9bd12SAlex Elder void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
40084f9bd12SAlex Elder {
40184f9bd12SAlex Elder 	u32 endpoint_id;
40284f9bd12SAlex Elder 
4034fa95248SAlex Elder 	/* DELAY mode doesn't work correctly on IPA v4.2 */
40484f9bd12SAlex Elder 	if (ipa->version == IPA_VERSION_4_2)
40584f9bd12SAlex Elder 		return;
40684f9bd12SAlex Elder 
40784f9bd12SAlex Elder 	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
40884f9bd12SAlex Elder 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
40984f9bd12SAlex Elder 
41084f9bd12SAlex Elder 		if (endpoint->ee_id != GSI_EE_MODEM)
41184f9bd12SAlex Elder 			continue;
41284f9bd12SAlex Elder 
413b07f283eSAlex Elder 		/* Set TX delay mode or RX suspend mode */
4144fa95248SAlex Elder 		if (endpoint->toward_ipa)
4154fa95248SAlex Elder 			ipa_endpoint_program_delay(endpoint, enable);
416b07f283eSAlex Elder 		else
4174fa95248SAlex Elder 			(void)ipa_endpoint_program_suspend(endpoint, enable);
41884f9bd12SAlex Elder 	}
41984f9bd12SAlex Elder }
42084f9bd12SAlex Elder 
42184f9bd12SAlex Elder /* Reset all modem endpoints to use the default exception endpoint */
42284f9bd12SAlex Elder int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
42384f9bd12SAlex Elder {
42484f9bd12SAlex Elder 	u32 initialized = ipa->initialized;
42584f9bd12SAlex Elder 	struct gsi_trans *trans;
42684f9bd12SAlex Elder 	u32 count;
42784f9bd12SAlex Elder 
42884f9bd12SAlex Elder 	/* We need one command per modem TX endpoint.  We can get an upper
42984f9bd12SAlex Elder 	 * bound on that by assuming all initialized endpoints are modem->IPA.
43084f9bd12SAlex Elder 	 * That won't happen, and we could be more precise, but this is fine
4318fa54b11SWang Wenhu 	 * for now.  We need to end the transaction with a "tag process."
43284f9bd12SAlex Elder 	 */
43384f9bd12SAlex Elder 	count = hweight32(initialized) + ipa_cmd_tag_process_count();
43484f9bd12SAlex Elder 	trans = ipa_cmd_trans_alloc(ipa, count);
43584f9bd12SAlex Elder 	if (!trans) {
43684f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
43784f9bd12SAlex Elder 			"no transaction to reset modem exception endpoints\n");
43884f9bd12SAlex Elder 		return -EBUSY;
43984f9bd12SAlex Elder 	}
44084f9bd12SAlex Elder 
44184f9bd12SAlex Elder 	while (initialized) {
44284f9bd12SAlex Elder 		u32 endpoint_id = __ffs(initialized);
44384f9bd12SAlex Elder 		struct ipa_endpoint *endpoint;
44484f9bd12SAlex Elder 		u32 offset;
44584f9bd12SAlex Elder 
44684f9bd12SAlex Elder 		initialized ^= BIT(endpoint_id);
44784f9bd12SAlex Elder 
44884f9bd12SAlex Elder 		/* We only reset modem TX endpoints */
44984f9bd12SAlex Elder 		endpoint = &ipa->endpoint[endpoint_id];
45084f9bd12SAlex Elder 		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
45184f9bd12SAlex Elder 			continue;
45284f9bd12SAlex Elder 
45384f9bd12SAlex Elder 		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
45484f9bd12SAlex Elder 
45584f9bd12SAlex Elder 		/* Value written is 0, and all bits are updated.  That
45684f9bd12SAlex Elder 		 * means status is disabled on the endpoint, and as a
45784f9bd12SAlex Elder 		 * result all other fields in the register are ignored.
45884f9bd12SAlex Elder 		 */
45984f9bd12SAlex Elder 		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
46084f9bd12SAlex Elder 	}
46184f9bd12SAlex Elder 
46284f9bd12SAlex Elder 	ipa_cmd_tag_process_add(trans);
46384f9bd12SAlex Elder 
46484f9bd12SAlex Elder 	/* XXX This should have a 1 second timeout */
46584f9bd12SAlex Elder 	gsi_trans_commit_wait(trans);
46684f9bd12SAlex Elder 
46784f9bd12SAlex Elder 	return 0;
46884f9bd12SAlex Elder }
46984f9bd12SAlex Elder 
47084f9bd12SAlex Elder static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
47184f9bd12SAlex Elder {
47284f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
47384f9bd12SAlex Elder 	u32 val = 0;
47484f9bd12SAlex Elder 
47584f9bd12SAlex Elder 	/* FRAG_OFFLOAD_EN is 0 */
47684f9bd12SAlex Elder 	if (endpoint->data->checksum) {
47784f9bd12SAlex Elder 		if (endpoint->toward_ipa) {
47884f9bd12SAlex Elder 			u32 checksum_offset;
47984f9bd12SAlex Elder 
48084f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
48184f9bd12SAlex Elder 					       CS_OFFLOAD_EN_FMASK);
48284f9bd12SAlex Elder 			/* Checksum header offset is in 4-byte units */
48384f9bd12SAlex Elder 			checksum_offset = sizeof(struct rmnet_map_header);
48484f9bd12SAlex Elder 			checksum_offset /= sizeof(u32);
48584f9bd12SAlex Elder 			val |= u32_encode_bits(checksum_offset,
48684f9bd12SAlex Elder 					       CS_METADATA_HDR_OFFSET_FMASK);
48784f9bd12SAlex Elder 		} else {
48884f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
48984f9bd12SAlex Elder 					       CS_OFFLOAD_EN_FMASK);
49084f9bd12SAlex Elder 		}
49184f9bd12SAlex Elder 	} else {
49284f9bd12SAlex Elder 		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
49384f9bd12SAlex Elder 				       CS_OFFLOAD_EN_FMASK);
49484f9bd12SAlex Elder 	}
49584f9bd12SAlex Elder 	/* CS_GEN_QMB_MASTER_SEL is 0 */
49684f9bd12SAlex Elder 
49784f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
49884f9bd12SAlex Elder }
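
/* Illustrative arithmetic for the TX (UL) case above: struct
 * rmnet_map_header is 4 bytes, so the value programmed into
 * CS_METADATA_HDR_OFFSET is 4 / 4 = 1, meaning the upload checksum
 * header starts one 32-bit word into the packet, immediately after
 * the QMAP header.
 */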
49984f9bd12SAlex Elder 
5008730f45dSAlex Elder /*
5018730f45dSAlex Elder  * We program QMAP endpoints so each packet received is preceded by a QMAP
5028730f45dSAlex Elder  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
5038730f45dSAlex Elder  * packet size field, and we have the IPA hardware populate both for each
5048730f45dSAlex Elder  * received packet.  The header is configured (in the HDR_EXT register)
5058730f45dSAlex Elder  * to use big endian format.
5068730f45dSAlex Elder  *
5078730f45dSAlex Elder  * The packet size is written into the QMAP header's pkt_len field.  That
5088730f45dSAlex Elder  * location is defined here using the HDR_OFST_PKT_SIZE field.
5098730f45dSAlex Elder  *
5108730f45dSAlex Elder  * The mux_id comes from a 4-byte metadata value supplied with each packet
5118730f45dSAlex Elder  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
5128730f45dSAlex Elder  * value that we want, in its low-order byte.  A bitmask defined in the
5138730f45dSAlex Elder  * endpoint's METADATA_MASK register defines which byte within the modem
5148730f45dSAlex Elder  * metadata contains the mux_id.  And the OFST_METADATA field programmed
5158730f45dSAlex Elder  * here indicates where the extracted byte should be placed within the QMAP
5168730f45dSAlex Elder  * header.
5178730f45dSAlex Elder  */
51884f9bd12SAlex Elder static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
51984f9bd12SAlex Elder {
52084f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
52184f9bd12SAlex Elder 	u32 val = 0;
52284f9bd12SAlex Elder 
52384f9bd12SAlex Elder 	if (endpoint->data->qmap) {
52484f9bd12SAlex Elder 		size_t header_size = sizeof(struct rmnet_map_header);
52584f9bd12SAlex Elder 
5268730f45dSAlex Elder 		/* We might supply a checksum header after the QMAP header */
52784f9bd12SAlex Elder 		if (endpoint->toward_ipa && endpoint->data->checksum)
52884f9bd12SAlex Elder 			header_size += sizeof(struct rmnet_map_ul_csum_header);
52984f9bd12SAlex Elder 		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
53084f9bd12SAlex Elder 
531f330fda3SAlex Elder 		/* Define how to fill fields in a received QMAP header */
5328730f45dSAlex Elder 		if (!endpoint->toward_ipa) {
5338730f45dSAlex Elder 			u32 off;	/* Field offset within header */
5348730f45dSAlex Elder 
5358730f45dSAlex Elder 			/* Where IPA will write the metadata value */
5368730f45dSAlex Elder 			off = offsetof(struct rmnet_map_header, mux_id);
5378730f45dSAlex Elder 			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
5388730f45dSAlex Elder 
5398730f45dSAlex Elder 			/* Where IPA will write the length */
5408730f45dSAlex Elder 			off = offsetof(struct rmnet_map_header, pkt_len);
54184f9bd12SAlex Elder 			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
5428730f45dSAlex Elder 			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
54384f9bd12SAlex Elder 		}
5448730f45dSAlex Elder 		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
5458730f45dSAlex Elder 		val |= HDR_OFST_METADATA_VALID_FMASK;
5468730f45dSAlex Elder 
5478730f45dSAlex Elder 		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
54884f9bd12SAlex Elder 		/* HDR_A5_MUX is 0 */
54984f9bd12SAlex Elder 		/* HDR_LEN_INC_DEAGG_HDR is 0 */
5508730f45dSAlex Elder 		/* HDR_METADATA_REG_VALID is 0 (TX only) */
55184f9bd12SAlex Elder 	}
55284f9bd12SAlex Elder 
55384f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
55484f9bd12SAlex Elder }
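
/* Sketch of the QMAP header programmed above for RX, assuming the
 * struct rmnet_map_header layout in <linux/if_rmnet.h>:
 *
 *	byte 0:    pad_len (6 bits), reserved (1 bit), cd_bit (1 bit)
 *	byte 1:    mux_id   <- metadata byte placed here (HDR_OFST_METADATA)
 *	bytes 2-3: pkt_len  <- big endian length (HDR_OFST_PKT_SIZE)
 *
 * The hardware inserts one such 4-byte header ahead of each received
 * packet, filling in the mux_id and pkt_len fields itself.
 */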
55584f9bd12SAlex Elder 
55684f9bd12SAlex Elder static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
55784f9bd12SAlex Elder {
55884f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
55984f9bd12SAlex Elder 	u32 pad_align = endpoint->data->rx.pad_align;
56084f9bd12SAlex Elder 	u32 val = 0;
56184f9bd12SAlex Elder 
56284f9bd12SAlex Elder 	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
563f330fda3SAlex Elder 
564f330fda3SAlex Elder 	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
565f330fda3SAlex Elder 	 * driver assumes this field is meaningful in packets it receives,
566f330fda3SAlex Elder 	 * and assumes the header's payload length includes that padding.
567f330fda3SAlex Elder 	 * The RMNet driver does *not* pad packets it sends, however, so
568f330fda3SAlex Elder 	 * the pad field (although 0) should be ignored.
569f330fda3SAlex Elder 	 */
570f330fda3SAlex Elder 	if (endpoint->data->qmap && !endpoint->toward_ipa) {
57184f9bd12SAlex Elder 		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
57284f9bd12SAlex Elder 		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
573f330fda3SAlex Elder 		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
57484f9bd12SAlex Elder 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
575f330fda3SAlex Elder 	}
576f330fda3SAlex Elder 
577f330fda3SAlex Elder 	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
57884f9bd12SAlex Elder 	if (!endpoint->toward_ipa)
57984f9bd12SAlex Elder 		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
58084f9bd12SAlex Elder 
58184f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
58284f9bd12SAlex Elder }
58384f9bd12SAlex Elder 
58584f9bd12SAlex Elder static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
58684f9bd12SAlex Elder {
58784f9bd12SAlex Elder 	u32 endpoint_id = endpoint->endpoint_id;
58884f9bd12SAlex Elder 	u32 val = 0;
58984f9bd12SAlex Elder 	u32 offset;
59084f9bd12SAlex Elder 
591*fb57c3eaSAlex Elder 	if (endpoint->toward_ipa)
592*fb57c3eaSAlex Elder 		return;		/* Register not valid for TX endpoints */
593*fb57c3eaSAlex Elder 
59484f9bd12SAlex Elder 	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
59584f9bd12SAlex Elder 
5968730f45dSAlex Elder 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
5979b63f093SAlex Elder 	if (endpoint->data->qmap)
5988730f45dSAlex Elder 		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
59984f9bd12SAlex Elder 
60084f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
60184f9bd12SAlex Elder }
60284f9bd12SAlex Elder 
60384f9bd12SAlex Elder static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
60484f9bd12SAlex Elder {
60584f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
60684f9bd12SAlex Elder 	u32 val;
60784f9bd12SAlex Elder 
608*fb57c3eaSAlex Elder 	if (!endpoint->toward_ipa)
609*fb57c3eaSAlex Elder 		return;		/* Register not valid for RX endpoints */
610*fb57c3eaSAlex Elder 
61100b9102aSAlex Elder 	if (endpoint->data->dma_mode) {
61284f9bd12SAlex Elder 		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
61384f9bd12SAlex Elder 		u32 dma_endpoint_id;
61484f9bd12SAlex Elder 
61584f9bd12SAlex Elder 		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
61684f9bd12SAlex Elder 
61784f9bd12SAlex Elder 		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
61884f9bd12SAlex Elder 		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
61984f9bd12SAlex Elder 	} else {
62084f9bd12SAlex Elder 		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
62184f9bd12SAlex Elder 	}
62200b9102aSAlex Elder 	/* All other bits unspecified (and 0) */
62384f9bd12SAlex Elder 
62484f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
62584f9bd12SAlex Elder }
62684f9bd12SAlex Elder 
62784f9bd12SAlex Elder /* Compute the aggregation size value to use for a given buffer size */
62884f9bd12SAlex Elder static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
62984f9bd12SAlex Elder {
63084f9bd12SAlex Elder 	/* We don't use "hard byte limit" aggregation, so we define the
63184f9bd12SAlex Elder 	 * aggregation limit such that our buffer has enough space *after*
63284f9bd12SAlex Elder 	 * that limit to receive a full MTU of data, plus overhead.
63384f9bd12SAlex Elder 	 */
63484f9bd12SAlex Elder 	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
63584f9bd12SAlex Elder 
63684f9bd12SAlex Elder 	return rx_buffer_size / SZ_1K;
63784f9bd12SAlex Elder }
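
/* Illustrative numbers for the computation above: with the 8192-byte
 * receive buffer, and assuming (for example) a 1500-byte IPA_MTU and a
 * few hundred bytes of IPA_RX_BUFFER_OVERHEAD, about 6300 bytes remain,
 * so the value returned (and programmed, in KB) would be 6.  Aggregation
 * then closes once 6 KB is crossed, leaving room in the buffer for one
 * more full-MTU packet plus skb overhead.
 */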
63884f9bd12SAlex Elder 
63984f9bd12SAlex Elder static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
64084f9bd12SAlex Elder {
64184f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
64284f9bd12SAlex Elder 	u32 val = 0;
64384f9bd12SAlex Elder 
64484f9bd12SAlex Elder 	if (endpoint->data->aggregation) {
64584f9bd12SAlex Elder 		if (!endpoint->toward_ipa) {
64684f9bd12SAlex Elder 			u32 limit;
64784f9bd12SAlex Elder 
64884f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
64984f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
6509e88cb5fSAlex Elder 
6519e88cb5fSAlex Elder 			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
6529e88cb5fSAlex Elder 			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
6531d86652bSAlex Elder 
65484f9bd12SAlex Elder 			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
6551d86652bSAlex Elder 			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
6561d86652bSAlex Elder 			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
6571d86652bSAlex Elder 
6589e88cb5fSAlex Elder 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
6599e88cb5fSAlex Elder 
66084f9bd12SAlex Elder 			if (endpoint->data->rx.aggr_close_eof)
66184f9bd12SAlex Elder 				val |= AGGR_SW_EOF_ACTIVE_FMASK;
66284f9bd12SAlex Elder 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
66384f9bd12SAlex Elder 		} else {
66484f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
66584f9bd12SAlex Elder 					       AGGR_EN_FMASK);
66684f9bd12SAlex Elder 			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
66784f9bd12SAlex Elder 			/* other fields ignored */
66884f9bd12SAlex Elder 		}
66984f9bd12SAlex Elder 		/* AGGR_FORCE_CLOSE is 0 */
67084f9bd12SAlex Elder 	} else {
67184f9bd12SAlex Elder 		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
67284f9bd12SAlex Elder 		/* other fields ignored */
67384f9bd12SAlex Elder 	}
67484f9bd12SAlex Elder 
67584f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
67684f9bd12SAlex Elder }
67784f9bd12SAlex Elder 
67884f9bd12SAlex Elder /* A return value of 0 indicates an error */
67984f9bd12SAlex Elder static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
68084f9bd12SAlex Elder {
68184f9bd12SAlex Elder 	u32 scale;
68284f9bd12SAlex Elder 	u32 base;
68384f9bd12SAlex Elder 	u32 val;
68484f9bd12SAlex Elder 
68584f9bd12SAlex Elder 	if (!microseconds)
68684f9bd12SAlex Elder 		return 0;	/* invalid delay */
68784f9bd12SAlex Elder 
68884f9bd12SAlex Elder 	/* Timer is represented in units of clock ticks. */
68984f9bd12SAlex Elder 	if (ipa->version < IPA_VERSION_4_2)
69084f9bd12SAlex Elder 		return microseconds;	/* XXX Needs to be computed */
69184f9bd12SAlex Elder 
69284f9bd12SAlex Elder 	/* IPA v4.2 represents the tick count as base * scale */
69384f9bd12SAlex Elder 	scale = 1;			/* XXX Needs to be computed */
69484f9bd12SAlex Elder 	if (scale > field_max(SCALE_FMASK))
69584f9bd12SAlex Elder 		return 0;		/* scale too big */
69684f9bd12SAlex Elder 
69784f9bd12SAlex Elder 	base = DIV_ROUND_CLOSEST(microseconds, scale);
69884f9bd12SAlex Elder 	if (base > field_max(BASE_VALUE_FMASK))
69984f9bd12SAlex Elder 		return 0;		/* microseconds too big */
70084f9bd12SAlex Elder 
70184f9bd12SAlex Elder 	val = u32_encode_bits(scale, SCALE_FMASK);
70284f9bd12SAlex Elder 	val |= u32_encode_bits(base, BASE_VALUE_FMASK);
70384f9bd12SAlex Elder 
70484f9bd12SAlex Elder 	return val;
70584f9bd12SAlex Elder }
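
/* Hypothetical IPA v4.2 encoding example for the function above: a
 * request for 500 microseconds with scale = 1 yields
 * base = DIV_ROUND_CLOSEST(500, 1) = 500, so (provided 500 fits in
 * BASE_VALUE_FMASK) the returned value is
 *
 *	u32_encode_bits(1, SCALE_FMASK) |
 *		u32_encode_bits(500, BASE_VALUE_FMASK)
 *
 * Note this assumes one tick per microsecond; the real conversion from
 * microseconds to ticks is still marked XXX above.
 */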
70684f9bd12SAlex Elder 
70784f9bd12SAlex Elder static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
70884f9bd12SAlex Elder 					     u32 microseconds)
70984f9bd12SAlex Elder {
71084f9bd12SAlex Elder 	u32 endpoint_id = endpoint->endpoint_id;
71184f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
71284f9bd12SAlex Elder 	u32 offset;
71384f9bd12SAlex Elder 	u32 val;
71484f9bd12SAlex Elder 
71584f9bd12SAlex Elder 	/* XXX We'll fix this when the register definition is clear */
71684f9bd12SAlex Elder 	if (microseconds) {
71784f9bd12SAlex Elder 		struct device *dev = &ipa->pdev->dev;
71884f9bd12SAlex Elder 
71984f9bd12SAlex Elder 		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
72084f9bd12SAlex Elder 			endpoint_id);
72184f9bd12SAlex Elder 		microseconds = 0;
72284f9bd12SAlex Elder 	}
72384f9bd12SAlex Elder 
72484f9bd12SAlex Elder 	if (microseconds) {
72584f9bd12SAlex Elder 		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
72684f9bd12SAlex Elder 		if (!val)
72784f9bd12SAlex Elder 			return -EINVAL;
72884f9bd12SAlex Elder 	} else {
72984f9bd12SAlex Elder 		val = 0;	/* timeout is immediate */
73084f9bd12SAlex Elder 	}
73184f9bd12SAlex Elder 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
73284f9bd12SAlex Elder 	iowrite32(val, ipa->reg_virt + offset);
73384f9bd12SAlex Elder 
73484f9bd12SAlex Elder 	return 0;
73584f9bd12SAlex Elder }
73684f9bd12SAlex Elder 
73784f9bd12SAlex Elder static void
73884f9bd12SAlex Elder ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
73984f9bd12SAlex Elder {
74084f9bd12SAlex Elder 	u32 endpoint_id = endpoint->endpoint_id;
74184f9bd12SAlex Elder 	u32 offset;
74284f9bd12SAlex Elder 	u32 val;
74384f9bd12SAlex Elder 
744547c8788SAlex Elder 	val = enable ? HOL_BLOCK_EN_FMASK : 0;
74584f9bd12SAlex Elder 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
74684f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
74784f9bd12SAlex Elder }
74884f9bd12SAlex Elder 
74984f9bd12SAlex Elder void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
75084f9bd12SAlex Elder {
75184f9bd12SAlex Elder 	u32 i;
75284f9bd12SAlex Elder 
75384f9bd12SAlex Elder 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
75484f9bd12SAlex Elder 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
75584f9bd12SAlex Elder 
756f8d34dfdSAlex Elder 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
75784f9bd12SAlex Elder 			continue;
75884f9bd12SAlex Elder 
75984f9bd12SAlex Elder 		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
76084f9bd12SAlex Elder 		ipa_endpoint_init_hol_block_enable(endpoint, true);
76184f9bd12SAlex Elder 	}
76284f9bd12SAlex Elder }
76384f9bd12SAlex Elder 
76484f9bd12SAlex Elder static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
76584f9bd12SAlex Elder {
76684f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
76784f9bd12SAlex Elder 	u32 val = 0;
76884f9bd12SAlex Elder 
769*fb57c3eaSAlex Elder 	if (!endpoint->toward_ipa)
770*fb57c3eaSAlex Elder 		return;		/* Register not valid for RX endpoints */
771*fb57c3eaSAlex Elder 
77284f9bd12SAlex Elder 	/* DEAGGR_HDR_LEN is 0 */
77384f9bd12SAlex Elder 	/* PACKET_OFFSET_VALID is 0 */
77484f9bd12SAlex Elder 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
77584f9bd12SAlex Elder 	/* MAX_PACKET_LEN is 0 (not enforced) */
77684f9bd12SAlex Elder 
77784f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
77884f9bd12SAlex Elder }
77984f9bd12SAlex Elder 
78084f9bd12SAlex Elder static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
78184f9bd12SAlex Elder {
78284f9bd12SAlex Elder 	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
78384f9bd12SAlex Elder 	u32 seq_type = endpoint->seq_type;
78484f9bd12SAlex Elder 	u32 val = 0;
78584f9bd12SAlex Elder 
786*fb57c3eaSAlex Elder 	if (!endpoint->toward_ipa)
787*fb57c3eaSAlex Elder 		return;		/* Register not valid for RX endpoints */
788*fb57c3eaSAlex Elder 
789636edeaaSAlex Elder 	/* Sequencer type is made up of four nibbles */
79084f9bd12SAlex Elder 	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
79184f9bd12SAlex Elder 	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
792636edeaaSAlex Elder 	/* The second two apply to replicated packets */
793636edeaaSAlex Elder 	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
794636edeaaSAlex Elder 	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
79584f9bd12SAlex Elder 
79684f9bd12SAlex Elder 	iowrite32(val, endpoint->ipa->reg_virt + offset);
79784f9bd12SAlex Elder }
79884f9bd12SAlex Elder 
79984f9bd12SAlex Elder /**
80084f9bd12SAlex Elder  * ipa_endpoint_skb_tx() - Transmit a socket buffer
80184f9bd12SAlex Elder  * @endpoint:	Endpoint pointer
80284f9bd12SAlex Elder  * @skb:	Socket buffer to send
80384f9bd12SAlex Elder  *
80484f9bd12SAlex Elder  * Returns:	0 if successful, or a negative error code
80584f9bd12SAlex Elder  */
80684f9bd12SAlex Elder int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
80784f9bd12SAlex Elder {
80884f9bd12SAlex Elder 	struct gsi_trans *trans;
80984f9bd12SAlex Elder 	u32 nr_frags;
81084f9bd12SAlex Elder 	int ret;
81184f9bd12SAlex Elder 
81284f9bd12SAlex Elder 	/* Make sure source endpoint's TLV FIFO has enough entries to
81384f9bd12SAlex Elder 	 * hold the linear portion of the skb and all its fragments.
81484f9bd12SAlex Elder 	 * If not, see if we can linearize it before giving up.
81584f9bd12SAlex Elder 	 */
81684f9bd12SAlex Elder 	nr_frags = skb_shinfo(skb)->nr_frags;
81784f9bd12SAlex Elder 	if (1 + nr_frags > endpoint->trans_tre_max) {
81884f9bd12SAlex Elder 		if (skb_linearize(skb))
81984f9bd12SAlex Elder 			return -E2BIG;
82084f9bd12SAlex Elder 		nr_frags = 0;
82184f9bd12SAlex Elder 	}
82284f9bd12SAlex Elder 
82384f9bd12SAlex Elder 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
82484f9bd12SAlex Elder 	if (!trans)
82584f9bd12SAlex Elder 		return -EBUSY;
82684f9bd12SAlex Elder 
82784f9bd12SAlex Elder 	ret = gsi_trans_skb_add(trans, skb);
82884f9bd12SAlex Elder 	if (ret)
82984f9bd12SAlex Elder 		goto err_trans_free;
83084f9bd12SAlex Elder 	trans->data = skb;	/* transaction owns skb now */
83184f9bd12SAlex Elder 
83284f9bd12SAlex Elder 	gsi_trans_commit(trans, !netdev_xmit_more());
83384f9bd12SAlex Elder 
83484f9bd12SAlex Elder 	return 0;
83584f9bd12SAlex Elder 
83684f9bd12SAlex Elder err_trans_free:
83784f9bd12SAlex Elder 	gsi_trans_free(trans);
83884f9bd12SAlex Elder 
83984f9bd12SAlex Elder 	return -ENOMEM;
84084f9bd12SAlex Elder }
84184f9bd12SAlex Elder 
84284f9bd12SAlex Elder static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
84384f9bd12SAlex Elder {
84484f9bd12SAlex Elder 	u32 endpoint_id = endpoint->endpoint_id;
84584f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
84684f9bd12SAlex Elder 	u32 val = 0;
84784f9bd12SAlex Elder 	u32 offset;
84884f9bd12SAlex Elder 
84984f9bd12SAlex Elder 	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
85084f9bd12SAlex Elder 
85184f9bd12SAlex Elder 	if (endpoint->data->status_enable) {
85284f9bd12SAlex Elder 		val |= STATUS_EN_FMASK;
85384f9bd12SAlex Elder 		if (endpoint->toward_ipa) {
85484f9bd12SAlex Elder 			enum ipa_endpoint_name name;
85584f9bd12SAlex Elder 			u32 status_endpoint_id;
85684f9bd12SAlex Elder 
85784f9bd12SAlex Elder 			name = endpoint->data->tx.status_endpoint;
85884f9bd12SAlex Elder 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
85984f9bd12SAlex Elder 
86084f9bd12SAlex Elder 			val |= u32_encode_bits(status_endpoint_id,
86184f9bd12SAlex Elder 					       STATUS_ENDP_FMASK);
86284f9bd12SAlex Elder 		}
86384f9bd12SAlex Elder 		/* STATUS_LOCATION is 0 (status element precedes packet) */
86484f9bd12SAlex Elder 		/* The next field is present for IPA v4.0 and above */
86584f9bd12SAlex Elder 		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
86684f9bd12SAlex Elder 	}
86784f9bd12SAlex Elder 
86884f9bd12SAlex Elder 	iowrite32(val, ipa->reg_virt + offset);
86984f9bd12SAlex Elder }
87084f9bd12SAlex Elder 
87184f9bd12SAlex Elder static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
87284f9bd12SAlex Elder {
87384f9bd12SAlex Elder 	struct gsi_trans *trans;
87484f9bd12SAlex Elder 	bool doorbell = false;
87584f9bd12SAlex Elder 	struct page *page;
87684f9bd12SAlex Elder 	u32 offset;
87784f9bd12SAlex Elder 	u32 len;
87884f9bd12SAlex Elder 	int ret;
87984f9bd12SAlex Elder 
8806fcd4224SAlex Elder 	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
88184f9bd12SAlex Elder 	if (!page)
88284f9bd12SAlex Elder 		return -ENOMEM;
88384f9bd12SAlex Elder 
88484f9bd12SAlex Elder 	trans = ipa_endpoint_trans_alloc(endpoint, 1);
88584f9bd12SAlex Elder 	if (!trans)
88684f9bd12SAlex Elder 		goto err_free_pages;
88784f9bd12SAlex Elder 
88884f9bd12SAlex Elder 	/* Offset the buffer to make space for skb headroom */
88984f9bd12SAlex Elder 	offset = NET_SKB_PAD;
89084f9bd12SAlex Elder 	len = IPA_RX_BUFFER_SIZE - offset;
89184f9bd12SAlex Elder 
89284f9bd12SAlex Elder 	ret = gsi_trans_page_add(trans, page, len, offset);
89384f9bd12SAlex Elder 	if (ret)
89484f9bd12SAlex Elder 		goto err_trans_free;
89584f9bd12SAlex Elder 	trans->data = page;	/* transaction owns page now */
89684f9bd12SAlex Elder 
89784f9bd12SAlex Elder 	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
89884f9bd12SAlex Elder 		doorbell = true;
89984f9bd12SAlex Elder 		endpoint->replenish_ready = 0;
90084f9bd12SAlex Elder 	}
90184f9bd12SAlex Elder 
90284f9bd12SAlex Elder 	gsi_trans_commit(trans, doorbell);
90384f9bd12SAlex Elder 
90484f9bd12SAlex Elder 	return 0;
90584f9bd12SAlex Elder 
90684f9bd12SAlex Elder err_trans_free:
90784f9bd12SAlex Elder 	gsi_trans_free(trans);
90884f9bd12SAlex Elder err_free_pages:
9096fcd4224SAlex Elder 	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
91084f9bd12SAlex Elder 
91184f9bd12SAlex Elder 	return -ENOMEM;
91284f9bd12SAlex Elder }
91384f9bd12SAlex Elder 
91484f9bd12SAlex Elder /**
91584f9bd12SAlex Elder  * ipa_endpoint_replenish() - Replenish the Rx packets cache.
91584f9bd12SAlex Elder  * @endpoint:	Endpoint to be replenished
91584f9bd12SAlex Elder  * @count:	Number of buffers to add to the backlog
91684f9bd12SAlex Elder  *
91784f9bd12SAlex Elder  * Allocate RX packet wrapper structures with maximal socket buffers
91884f9bd12SAlex Elder  * for an endpoint.  These are supplied to the hardware, which fills
91984f9bd12SAlex Elder  * them with incoming data.
92084f9bd12SAlex Elder  */
92184f9bd12SAlex Elder static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
92284f9bd12SAlex Elder {
92384f9bd12SAlex Elder 	struct gsi *gsi;
92484f9bd12SAlex Elder 	u32 backlog;
92584f9bd12SAlex Elder 
92684f9bd12SAlex Elder 	if (!endpoint->replenish_enabled) {
92784f9bd12SAlex Elder 		if (count)
92884f9bd12SAlex Elder 			atomic_add(count, &endpoint->replenish_saved);
92984f9bd12SAlex Elder 		return;
93084f9bd12SAlex Elder 	}
93184f9bd12SAlex Elder 
93384f9bd12SAlex Elder 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
93484f9bd12SAlex Elder 		if (ipa_endpoint_replenish_one(endpoint))
93584f9bd12SAlex Elder 			goto try_again_later;
93684f9bd12SAlex Elder 	if (count)
93784f9bd12SAlex Elder 		atomic_add(count, &endpoint->replenish_backlog);
93884f9bd12SAlex Elder 
93984f9bd12SAlex Elder 	return;
94084f9bd12SAlex Elder 
94184f9bd12SAlex Elder try_again_later:
94284f9bd12SAlex Elder 	/* The last one didn't succeed, so fix the backlog */
94384f9bd12SAlex Elder 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
94484f9bd12SAlex Elder 
94584f9bd12SAlex Elder 	if (count)
94684f9bd12SAlex Elder 		atomic_add(count, &endpoint->replenish_backlog);
94784f9bd12SAlex Elder 
94884f9bd12SAlex Elder 	/* Whenever a receive buffer transaction completes we'll try to
94984f9bd12SAlex Elder 	 * replenish again.  It's unlikely, but if we fail to supply even
95084f9bd12SAlex Elder 	 * one buffer, nothing will trigger another replenish attempt.
95184f9bd12SAlex Elder 	 * Receive buffer transactions use one TRE, so schedule work to
95284f9bd12SAlex Elder 	 * try replenishing again if our backlog is *all* available TREs.
95384f9bd12SAlex Elder 	 */
95484f9bd12SAlex Elder 	gsi = &endpoint->ipa->gsi;
95584f9bd12SAlex Elder 	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
95684f9bd12SAlex Elder 		schedule_delayed_work(&endpoint->replenish_work,
95784f9bd12SAlex Elder 				      msecs_to_jiffies(1));
95884f9bd12SAlex Elder }
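
/* Example of the accounting above (scenario is illustrative): with a
 * backlog of 3 and count == 1, the loop supplies three receive buffers
 * (backlog 3 -> 2 -> 1 -> 0), then count is added back, leaving a
 * backlog of 1 for the next replenish call.  If an allocation fails
 * mid-loop, the failed decrement is undone and count is still added,
 * so credit for outstanding buffers is never lost.
 */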
95984f9bd12SAlex Elder 
96084f9bd12SAlex Elder static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
96184f9bd12SAlex Elder {
96284f9bd12SAlex Elder 	struct gsi *gsi = &endpoint->ipa->gsi;
96384f9bd12SAlex Elder 	u32 max_backlog;
96484f9bd12SAlex Elder 	u32 saved;
96584f9bd12SAlex Elder 
96684f9bd12SAlex Elder 	endpoint->replenish_enabled = true;
96784f9bd12SAlex Elder 	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
96884f9bd12SAlex Elder 		atomic_add(saved, &endpoint->replenish_backlog);
96984f9bd12SAlex Elder 
97084f9bd12SAlex Elder 	/* Start replenishing if hardware currently has no buffers */
97184f9bd12SAlex Elder 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
97284f9bd12SAlex Elder 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
97384f9bd12SAlex Elder 		ipa_endpoint_replenish(endpoint, 0);
97484f9bd12SAlex Elder }
97584f9bd12SAlex Elder 
97684f9bd12SAlex Elder static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
97784f9bd12SAlex Elder {
97884f9bd12SAlex Elder 	u32 backlog;
97984f9bd12SAlex Elder 
98084f9bd12SAlex Elder 	endpoint->replenish_enabled = false;
98184f9bd12SAlex Elder 	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
98284f9bd12SAlex Elder 		atomic_add(backlog, &endpoint->replenish_saved);
98384f9bd12SAlex Elder }
98484f9bd12SAlex Elder 
98584f9bd12SAlex Elder static void ipa_endpoint_replenish_work(struct work_struct *work)
98684f9bd12SAlex Elder {
98784f9bd12SAlex Elder 	struct delayed_work *dwork = to_delayed_work(work);
98884f9bd12SAlex Elder 	struct ipa_endpoint *endpoint;
98984f9bd12SAlex Elder 
99084f9bd12SAlex Elder 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
99184f9bd12SAlex Elder 
99284f9bd12SAlex Elder 	ipa_endpoint_replenish(endpoint, 0);
99384f9bd12SAlex Elder }
99484f9bd12SAlex Elder 
99584f9bd12SAlex Elder static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
99684f9bd12SAlex Elder 				  void *data, u32 len, u32 extra)
99784f9bd12SAlex Elder {
99884f9bd12SAlex Elder 	struct sk_buff *skb;
99984f9bd12SAlex Elder 
100084f9bd12SAlex Elder 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
100184f9bd12SAlex Elder 	if (skb) {
100284f9bd12SAlex Elder 		skb_put(skb, len);
100384f9bd12SAlex Elder 		memcpy(skb->data, data, len);
100484f9bd12SAlex Elder 		skb->truesize += extra;
100584f9bd12SAlex Elder 	}
100684f9bd12SAlex Elder 
100784f9bd12SAlex Elder 	/* Now receive it, or drop it if there's no netdev */
100884f9bd12SAlex Elder 	if (endpoint->netdev)
100984f9bd12SAlex Elder 		ipa_modem_skb_rx(endpoint->netdev, skb);
101084f9bd12SAlex Elder 	else if (skb)
101184f9bd12SAlex Elder 		dev_kfree_skb_any(skb);
101284f9bd12SAlex Elder }
101384f9bd12SAlex Elder 
101484f9bd12SAlex Elder static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
101584f9bd12SAlex Elder 				   struct page *page, u32 len)
101684f9bd12SAlex Elder {
101784f9bd12SAlex Elder 	struct sk_buff *skb;
101884f9bd12SAlex Elder 
101984f9bd12SAlex Elder 	/* Nothing to do if there's no netdev */
102084f9bd12SAlex Elder 	if (!endpoint->netdev)
102184f9bd12SAlex Elder 		return false;
102284f9bd12SAlex Elder 
102384f9bd12SAlex Elder 	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
102484f9bd12SAlex Elder 	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
102584f9bd12SAlex Elder 	if (skb) {
102684f9bd12SAlex Elder 		/* Reserve the headroom and account for the data */
102784f9bd12SAlex Elder 		skb_reserve(skb, NET_SKB_PAD);
102884f9bd12SAlex Elder 		skb_put(skb, len);
102984f9bd12SAlex Elder 	}
103084f9bd12SAlex Elder 
103184f9bd12SAlex Elder 	/* Receive the buffer (or record drop if unable to build it) */
103284f9bd12SAlex Elder 	ipa_modem_skb_rx(endpoint->netdev, skb);
103384f9bd12SAlex Elder 
103484f9bd12SAlex Elder 	return skb != NULL;
103584f9bd12SAlex Elder }
103684f9bd12SAlex Elder 
103784f9bd12SAlex Elder /* The format of a packet status element is the same for several status
103884f9bd12SAlex Elder  * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
103984f9bd12SAlex Elder  * aren't currently supported.
104084f9bd12SAlex Elder  */
104184f9bd12SAlex Elder static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
104284f9bd12SAlex Elder {
104384f9bd12SAlex Elder 	switch (opcode) {
104484f9bd12SAlex Elder 	case IPA_STATUS_OPCODE_PACKET:
104584f9bd12SAlex Elder 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
104684f9bd12SAlex Elder 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
104784f9bd12SAlex Elder 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
104884f9bd12SAlex Elder 		return true;
104984f9bd12SAlex Elder 	default:
105084f9bd12SAlex Elder 		return false;
105184f9bd12SAlex Elder 	}
105284f9bd12SAlex Elder }
105384f9bd12SAlex Elder 
105484f9bd12SAlex Elder static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
105584f9bd12SAlex Elder 				     const struct ipa_status *status)
105684f9bd12SAlex Elder {
105784f9bd12SAlex Elder 	u32 endpoint_id;
105884f9bd12SAlex Elder 
105984f9bd12SAlex Elder 	if (!ipa_status_format_packet(status->opcode))
106084f9bd12SAlex Elder 		return true;
106184f9bd12SAlex Elder 	if (!status->pkt_len)
106284f9bd12SAlex Elder 		return true;
106384f9bd12SAlex Elder 	endpoint_id = u32_get_bits(status->endp_dst_idx,
106484f9bd12SAlex Elder 				   IPA_STATUS_DST_IDX_FMASK);
106584f9bd12SAlex Elder 	if (endpoint_id != endpoint->endpoint_id)
106684f9bd12SAlex Elder 		return true;
106784f9bd12SAlex Elder 
106884f9bd12SAlex Elder 	return false;	/* Don't skip this packet, process it */
106984f9bd12SAlex Elder }
107084f9bd12SAlex Elder 
107184f9bd12SAlex Elder /* Return whether the status indicates the packet should be dropped */
107284f9bd12SAlex Elder static bool ipa_status_drop_packet(const struct ipa_status *status)
107384f9bd12SAlex Elder {
107484f9bd12SAlex Elder 	u32 val;
107584f9bd12SAlex Elder 
107684f9bd12SAlex Elder 	/* Deaggregation exceptions we drop; others we consume */
107784f9bd12SAlex Elder 	if (status->exception)
107884f9bd12SAlex Elder 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
107984f9bd12SAlex Elder 
108084f9bd12SAlex Elder 	/* Drop the packet if it fails to match a routing rule; otherwise no */
108184f9bd12SAlex Elder 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
108284f9bd12SAlex Elder 
108384f9bd12SAlex Elder 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
108484f9bd12SAlex Elder }
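
/* Illustrative detail for the check above: IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK
 * is a 10-bit field, so field_max() yields 0x3ff; the hardware reports
 * a routing rule miss by storing that all-ones value in the field.
 */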
108584f9bd12SAlex Elder 
108684f9bd12SAlex Elder static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
108784f9bd12SAlex Elder 				      struct page *page, u32 total_len)
108884f9bd12SAlex Elder {
108984f9bd12SAlex Elder 	void *data = page_address(page) + NET_SKB_PAD;
109084f9bd12SAlex Elder 	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
109184f9bd12SAlex Elder 	u32 resid = total_len;
109284f9bd12SAlex Elder 
109384f9bd12SAlex Elder 	while (resid) {
109484f9bd12SAlex Elder 		const struct ipa_status *status = data;
109584f9bd12SAlex Elder 		u32 align;
109684f9bd12SAlex Elder 		u32 len;
109784f9bd12SAlex Elder 
109884f9bd12SAlex Elder 		if (resid < sizeof(*status)) {
109984f9bd12SAlex Elder 			dev_err(&endpoint->ipa->pdev->dev,
110084f9bd12SAlex Elder 				"short message (%u bytes < %zu byte status)\n",
110184f9bd12SAlex Elder 				resid, sizeof(*status));
110284f9bd12SAlex Elder 			break;
110384f9bd12SAlex Elder 		}
110484f9bd12SAlex Elder 
110584f9bd12SAlex Elder 		/* Skip status elements that carry no packet data for this endpoint */
110684f9bd12SAlex Elder 		if (ipa_endpoint_status_skip(endpoint, status)) {
110784f9bd12SAlex Elder 			data += sizeof(*status);
110884f9bd12SAlex Elder 			resid -= sizeof(*status);
110984f9bd12SAlex Elder 			continue;
111084f9bd12SAlex Elder 		}
111184f9bd12SAlex Elder 
111284f9bd12SAlex Elder 		/* Compute the amount of buffer space consumed by the
111384f9bd12SAlex Elder 		 * packet, including the status element.  If the hardware
111484f9bd12SAlex Elder 		 * is configured to pad packet data to an aligned boundary,
111584f9bd12SAlex Elder 		 * account for that.  And if checksum offload is enabled,
111684f9bd12SAlex Elder 		 * a trailer containing computed checksum information will
111784f9bd12SAlex Elder 		 * be appended.
111884f9bd12SAlex Elder 		 */
111984f9bd12SAlex Elder 		align = endpoint->data->rx.pad_align ? : 1;
112084f9bd12SAlex Elder 		len = le16_to_cpu(status->pkt_len);
112184f9bd12SAlex Elder 		len = sizeof(*status) + ALIGN(len, align);
112284f9bd12SAlex Elder 		if (endpoint->data->checksum)
112384f9bd12SAlex Elder 			len += sizeof(struct rmnet_map_dl_csum_trailer);
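		/* Worked example with hypothetical values: pkt_len 1029 and
		 * pad_align 4 give ALIGN(1029, 4) = 1032, so len is
		 * sizeof(*status) + 1032, plus the trailer size when
		 * checksum offload is enabled.
		 */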
112484f9bd12SAlex Elder 
112584f9bd12SAlex Elder 		/* Charge the new packet with a proportional fraction of
112684f9bd12SAlex Elder 		 * the unused space in the original receive buffer.
112784f9bd12SAlex Elder 		 * XXX Charge a proportion of the *whole* receive buffer?
112884f9bd12SAlex Elder 		 */
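		/* Illustrative numbers: with the 8192-byte buffer, total_len
		 * 8000 and len 2000 give unused = 192, so this packet is
		 * charged extra = 192 * 2000 / 8000 = 48 bytes.
		 */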
112984f9bd12SAlex Elder 		if (!ipa_status_drop_packet(status)) {
113084f9bd12SAlex Elder 			u32 extra = unused * len / total_len;
113184f9bd12SAlex Elder 			void *data2 = data + sizeof(*status);
113284f9bd12SAlex Elder 			u32 len2 = le16_to_cpu(status->pkt_len);
113384f9bd12SAlex Elder 
113484f9bd12SAlex Elder 			/* Client receives only packet data (no status) */
113584f9bd12SAlex Elder 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
113684f9bd12SAlex Elder 		}
113784f9bd12SAlex Elder 
113884f9bd12SAlex Elder 		/* Consume status and the full packet it describes */
113984f9bd12SAlex Elder 		data += len;
114084f9bd12SAlex Elder 		resid -= len;
114184f9bd12SAlex Elder 	}
114284f9bd12SAlex Elder }
114384f9bd12SAlex Elder 
114484f9bd12SAlex Elder /* Complete a TX transaction, either a command or one from ipa_endpoint_skb_tx() */
114584f9bd12SAlex Elder static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
114684f9bd12SAlex Elder 				     struct gsi_trans *trans)
114784f9bd12SAlex Elder {
114884f9bd12SAlex Elder }
114984f9bd12SAlex Elder 
115084f9bd12SAlex Elder /* Complete transaction initiated in ipa_endpoint_replenish_one() */
115184f9bd12SAlex Elder static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
115284f9bd12SAlex Elder 				     struct gsi_trans *trans)
115384f9bd12SAlex Elder {
115484f9bd12SAlex Elder 	struct page *page;
115584f9bd12SAlex Elder 
115684f9bd12SAlex Elder 	ipa_endpoint_replenish(endpoint, 1);
115784f9bd12SAlex Elder 
115884f9bd12SAlex Elder 	if (trans->cancelled)
115984f9bd12SAlex Elder 		return;
116084f9bd12SAlex Elder 
116184f9bd12SAlex Elder 	/* Parse or build a socket buffer using the actual received length */
116284f9bd12SAlex Elder 	page = trans->data;
116384f9bd12SAlex Elder 	if (endpoint->data->status_enable)
116484f9bd12SAlex Elder 		ipa_endpoint_status_parse(endpoint, page, trans->len);
116584f9bd12SAlex Elder 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
116684f9bd12SAlex Elder 		trans->data = NULL;	/* Pages have been consumed */
116784f9bd12SAlex Elder }
116884f9bd12SAlex Elder 
116984f9bd12SAlex Elder void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
117084f9bd12SAlex Elder 				 struct gsi_trans *trans)
117184f9bd12SAlex Elder {
117284f9bd12SAlex Elder 	if (endpoint->toward_ipa)
117384f9bd12SAlex Elder 		ipa_endpoint_tx_complete(endpoint, trans);
117484f9bd12SAlex Elder 	else
117584f9bd12SAlex Elder 		ipa_endpoint_rx_complete(endpoint, trans);
117684f9bd12SAlex Elder }
117784f9bd12SAlex Elder 
117884f9bd12SAlex Elder void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
117984f9bd12SAlex Elder 				struct gsi_trans *trans)
118084f9bd12SAlex Elder {
118184f9bd12SAlex Elder 	if (endpoint->toward_ipa) {
118284f9bd12SAlex Elder 		struct ipa *ipa = endpoint->ipa;
118384f9bd12SAlex Elder 
118484f9bd12SAlex Elder 		/* Nothing to do for command transactions */
118584f9bd12SAlex Elder 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
118684f9bd12SAlex Elder 			struct sk_buff *skb = trans->data;
118784f9bd12SAlex Elder 
118884f9bd12SAlex Elder 			if (skb)
118984f9bd12SAlex Elder 				dev_kfree_skb_any(skb);
119084f9bd12SAlex Elder 		}
119184f9bd12SAlex Elder 	} else {
119284f9bd12SAlex Elder 		struct page *page = trans->data;
119384f9bd12SAlex Elder 
119484f9bd12SAlex Elder 		if (page)
11956fcd4224SAlex Elder 			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
119684f9bd12SAlex Elder 	}
119784f9bd12SAlex Elder }
119884f9bd12SAlex Elder 
119984f9bd12SAlex Elder void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
120084f9bd12SAlex Elder {
120184f9bd12SAlex Elder 	u32 val;
120284f9bd12SAlex Elder 
120384f9bd12SAlex Elder 	/* ROUTE_DIS is 0 */
120484f9bd12SAlex Elder 	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
120584f9bd12SAlex Elder 	val |= ROUTE_DEF_HDR_TABLE_FMASK;
120684f9bd12SAlex Elder 	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
120784f9bd12SAlex Elder 	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
120884f9bd12SAlex Elder 	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
120984f9bd12SAlex Elder 
121084f9bd12SAlex Elder 	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
121184f9bd12SAlex Elder }
121284f9bd12SAlex Elder 
121384f9bd12SAlex Elder void ipa_endpoint_default_route_clear(struct ipa *ipa)
121484f9bd12SAlex Elder {
121584f9bd12SAlex Elder 	ipa_endpoint_default_route_set(ipa, 0);
121684f9bd12SAlex Elder }
121784f9bd12SAlex Elder 
121884f9bd12SAlex Elder /**
121984f9bd12SAlex Elder  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
122084f9bd12SAlex Elder  * @endpoint:	Endpoint to be reset
122184f9bd12SAlex Elder  *
122284f9bd12SAlex Elder  * If aggregation is active on an RX endpoint when a reset is performed
122384f9bd12SAlex Elder  * on its underlying GSI channel, a special sequence of actions must be
122484f9bd12SAlex Elder  * taken to ensure the IPA pipeline is properly cleared.
122584f9bd12SAlex Elder  *
122684f9bd12SAlex Elder  * Return:	0 if successful, or a negative error code
122784f9bd12SAlex Elder  */
122884f9bd12SAlex Elder static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
122984f9bd12SAlex Elder {
123084f9bd12SAlex Elder 	struct device *dev = &endpoint->ipa->pdev->dev;
123184f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
123284f9bd12SAlex Elder 	struct gsi *gsi = &ipa->gsi;
12334fa95248SAlex Elder 	bool suspended = false;
123484f9bd12SAlex Elder 	dma_addr_t addr;
1235f86a1909SAlex Elder 	bool legacy;
123684f9bd12SAlex Elder 	u32 retries;
123784f9bd12SAlex Elder 	u32 len = 1;
123884f9bd12SAlex Elder 	void *virt;
123984f9bd12SAlex Elder 	int ret;
124084f9bd12SAlex Elder 
124184f9bd12SAlex Elder 	virt = kzalloc(len, GFP_KERNEL);
124284f9bd12SAlex Elder 	if (!virt)
124384f9bd12SAlex Elder 		return -ENOMEM;
124484f9bd12SAlex Elder 
124584f9bd12SAlex Elder 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
124684f9bd12SAlex Elder 	if (dma_mapping_error(dev, addr)) {
124784f9bd12SAlex Elder 		ret = -ENOMEM;
124884f9bd12SAlex Elder 		goto out_kfree;
124984f9bd12SAlex Elder 	}
125084f9bd12SAlex Elder 
125184f9bd12SAlex Elder 	/* Force close aggregation before issuing the reset */
125284f9bd12SAlex Elder 	ipa_endpoint_force_close(endpoint);
125384f9bd12SAlex Elder 
125484f9bd12SAlex Elder 	/* Reset and reconfigure the channel with the doorbell engine
125584f9bd12SAlex Elder 	 * disabled.  Then poll until we know aggregation is no longer
125684f9bd12SAlex Elder 	 * active.  We'll re-enable the doorbell (if appropriate) when
125784f9bd12SAlex Elder 	 * we reset again below.
125884f9bd12SAlex Elder 	 */
125984f9bd12SAlex Elder 	gsi_channel_reset(gsi, endpoint->channel_id, false);
126084f9bd12SAlex Elder 
126184f9bd12SAlex Elder 	/* Make sure the channel isn't suspended */
12624fa95248SAlex Elder 	suspended = ipa_endpoint_program_suspend(endpoint, false);
126384f9bd12SAlex Elder 
126484f9bd12SAlex Elder 	/* Start channel and do a 1-byte read */
126584f9bd12SAlex Elder 	ret = gsi_channel_start(gsi, endpoint->channel_id);
126684f9bd12SAlex Elder 	if (ret)
126784f9bd12SAlex Elder 		goto out_suspend_again;
126884f9bd12SAlex Elder 
126984f9bd12SAlex Elder 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
127084f9bd12SAlex Elder 	if (ret)
127184f9bd12SAlex Elder 		goto err_endpoint_stop;
127284f9bd12SAlex Elder 
127384f9bd12SAlex Elder 	/* Wait for aggregation to be closed on the channel */
127484f9bd12SAlex Elder 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
127584f9bd12SAlex Elder 	do {
127684f9bd12SAlex Elder 		if (!ipa_endpoint_aggr_active(endpoint))
127784f9bd12SAlex Elder 			break;
127884f9bd12SAlex Elder 		msleep(1);
127984f9bd12SAlex Elder 	} while (retries--);
128084f9bd12SAlex Elder 
128184f9bd12SAlex Elder 	/* Check one last time */
128284f9bd12SAlex Elder 	if (ipa_endpoint_aggr_active(endpoint))
128384f9bd12SAlex Elder 		dev_err(dev, "endpoint %u still active during reset\n",
128484f9bd12SAlex Elder 			endpoint->endpoint_id);
128584f9bd12SAlex Elder 
128684f9bd12SAlex Elder 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
128784f9bd12SAlex Elder 
1288f30dcb7dSAlex Elder 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
128984f9bd12SAlex Elder 	if (ret)
129084f9bd12SAlex Elder 		goto out_suspend_again;
129184f9bd12SAlex Elder 
129284f9bd12SAlex Elder 	/* Finally, reset and reconfigure the channel again (re-enabling the
129384f9bd12SAlex Elder 	 * doorbell engine if appropriate).  Sleep for 1 millisecond to
129484f9bd12SAlex Elder 	 * complete the channel reset sequence.  Finish by suspending the
129584f9bd12SAlex Elder 	 * channel again (if necessary).
129684f9bd12SAlex Elder 	 */
1297f86a1909SAlex Elder 	legacy = ipa->version == IPA_VERSION_3_5_1;
1298f86a1909SAlex Elder 	gsi_channel_reset(gsi, endpoint->channel_id, legacy);
129984f9bd12SAlex Elder 
130084f9bd12SAlex Elder 	msleep(1);
130184f9bd12SAlex Elder 
130284f9bd12SAlex Elder 	goto out_suspend_again;
130384f9bd12SAlex Elder 
130484f9bd12SAlex Elder err_endpoint_stop:
1305f30dcb7dSAlex Elder 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
130684f9bd12SAlex Elder out_suspend_again:
13074fa95248SAlex Elder 	if (suspended)
13084fa95248SAlex Elder 		(void)ipa_endpoint_program_suspend(endpoint, true);
130984f9bd12SAlex Elder 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
131084f9bd12SAlex Elder out_kfree:
131184f9bd12SAlex Elder 	kfree(virt);
131284f9bd12SAlex Elder 
131384f9bd12SAlex Elder 	return ret;
131484f9bd12SAlex Elder }
131584f9bd12SAlex Elder 
131684f9bd12SAlex Elder static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
131784f9bd12SAlex Elder {
131884f9bd12SAlex Elder 	u32 channel_id = endpoint->channel_id;
131984f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
132084f9bd12SAlex Elder 	bool special;
1321f86a1909SAlex Elder 	bool legacy;
132284f9bd12SAlex Elder 	int ret = 0;
132384f9bd12SAlex Elder 
132484f9bd12SAlex Elder 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
132584f9bd12SAlex Elder 	 * is active, we need to handle things specially to recover.
132684f9bd12SAlex Elder 	 * All other cases just need to reset the underlying GSI channel.
132784f9bd12SAlex Elder 	 *
132884f9bd12SAlex Elder 	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
132984f9bd12SAlex Elder 	 */
1330f86a1909SAlex Elder 	legacy = ipa->version == IPA_VERSION_3_5_1;
133184f9bd12SAlex Elder 	special = !endpoint->toward_ipa && endpoint->data->aggregation;
133284f9bd12SAlex Elder 	if (special && ipa_endpoint_aggr_active(endpoint))
133384f9bd12SAlex Elder 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
133484f9bd12SAlex Elder 	else
1335f86a1909SAlex Elder 		gsi_channel_reset(&ipa->gsi, channel_id, legacy);
133684f9bd12SAlex Elder 
133784f9bd12SAlex Elder 	if (ret)
133884f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
133984f9bd12SAlex Elder 			"error %d resetting channel %u for endpoint %u\n",
134084f9bd12SAlex Elder 			ret, endpoint->channel_id, endpoint->endpoint_id);
134184f9bd12SAlex Elder }
134284f9bd12SAlex Elder 
134384f9bd12SAlex Elder static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
134484f9bd12SAlex Elder {
1345*fb57c3eaSAlex Elder 	if (endpoint->toward_ipa)
1346a4dcad34SAlex Elder 		ipa_endpoint_program_delay(endpoint, false);
1347*fb57c3eaSAlex Elder 	else
1348*fb57c3eaSAlex Elder 		(void)ipa_endpoint_program_suspend(endpoint, false);
1349*fb57c3eaSAlex Elder 	ipa_endpoint_init_cfg(endpoint);
1350*fb57c3eaSAlex Elder 	ipa_endpoint_init_hdr(endpoint);
135184f9bd12SAlex Elder 	ipa_endpoint_init_hdr_ext(endpoint);
1352*fb57c3eaSAlex Elder 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1353*fb57c3eaSAlex Elder 	ipa_endpoint_init_mode(endpoint);
135484f9bd12SAlex Elder 	ipa_endpoint_init_aggr(endpoint);
135584f9bd12SAlex Elder 	ipa_endpoint_init_deaggr(endpoint);
135684f9bd12SAlex Elder 	ipa_endpoint_init_seq(endpoint);
135784f9bd12SAlex Elder 	ipa_endpoint_status(endpoint);
135884f9bd12SAlex Elder }
135984f9bd12SAlex Elder 
136084f9bd12SAlex Elder int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
136184f9bd12SAlex Elder {
136284f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
136384f9bd12SAlex Elder 	struct gsi *gsi = &ipa->gsi;
136484f9bd12SAlex Elder 	int ret;
136584f9bd12SAlex Elder 
136684f9bd12SAlex Elder 	ret = gsi_channel_start(gsi, endpoint->channel_id);
136784f9bd12SAlex Elder 	if (ret) {
136884f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
136984f9bd12SAlex Elder 			"error %d starting %cX channel %u for endpoint %u\n",
137084f9bd12SAlex Elder 			ret, endpoint->toward_ipa ? 'T' : 'R',
137184f9bd12SAlex Elder 			endpoint->channel_id, endpoint->endpoint_id);
137284f9bd12SAlex Elder 		return ret;
137384f9bd12SAlex Elder 	}
137484f9bd12SAlex Elder 
137584f9bd12SAlex Elder 	if (!endpoint->toward_ipa) {
137684f9bd12SAlex Elder 		ipa_interrupt_suspend_enable(ipa->interrupt,
137784f9bd12SAlex Elder 					     endpoint->endpoint_id);
137884f9bd12SAlex Elder 		ipa_endpoint_replenish_enable(endpoint);
137984f9bd12SAlex Elder 	}
138084f9bd12SAlex Elder 
138184f9bd12SAlex Elder 	ipa->enabled |= BIT(endpoint->endpoint_id);
138284f9bd12SAlex Elder 
138384f9bd12SAlex Elder 	return 0;
138484f9bd12SAlex Elder }
138584f9bd12SAlex Elder 
138684f9bd12SAlex Elder void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
138784f9bd12SAlex Elder {
138884f9bd12SAlex Elder 	u32 mask = BIT(endpoint->endpoint_id);
138984f9bd12SAlex Elder 	struct ipa *ipa = endpoint->ipa;
1390f30dcb7dSAlex Elder 	struct gsi *gsi = &ipa->gsi;
139184f9bd12SAlex Elder 	int ret;
139284f9bd12SAlex Elder 
1393f30dcb7dSAlex Elder 	if (!(ipa->enabled & mask))
139484f9bd12SAlex Elder 		return;
139584f9bd12SAlex Elder 
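	/* The check above guarantees the bit is set, so XOR clears it */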
1396f30dcb7dSAlex Elder 	ipa->enabled ^= mask;
139784f9bd12SAlex Elder 
139884f9bd12SAlex Elder 	if (!endpoint->toward_ipa) {
139984f9bd12SAlex Elder 		ipa_endpoint_replenish_disable(endpoint);
140084f9bd12SAlex Elder 		ipa_interrupt_suspend_disable(ipa->interrupt,
140184f9bd12SAlex Elder 					      endpoint->endpoint_id);
140284f9bd12SAlex Elder 	}
140384f9bd12SAlex Elder 
140484f9bd12SAlex Elder 	/* Note that if stop fails, the channel's state is not well-defined */
1405f30dcb7dSAlex Elder 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
140684f9bd12SAlex Elder 	if (ret)
140784f9bd12SAlex Elder 		dev_err(&ipa->pdev->dev,
140884f9bd12SAlex Elder 			"error %d attempting to stop endpoint %u\n", ret,
140984f9bd12SAlex Elder 			endpoint->endpoint_id);
141084f9bd12SAlex Elder }
141184f9bd12SAlex Elder 
141284f9bd12SAlex Elder void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
141384f9bd12SAlex Elder {
141484f9bd12SAlex Elder 	struct device *dev = &endpoint->ipa->pdev->dev;
141584f9bd12SAlex Elder 	struct gsi *gsi = &endpoint->ipa->gsi;
141684f9bd12SAlex Elder 	bool stop_channel;
141784f9bd12SAlex Elder 	int ret;
141884f9bd12SAlex Elder 
141984f9bd12SAlex Elder 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
142084f9bd12SAlex Elder 		return;
142184f9bd12SAlex Elder 
142284f9bd12SAlex Elder 	if (!endpoint->toward_ipa)
142384f9bd12SAlex Elder 		ipa_endpoint_replenish_disable(endpoint);
142484f9bd12SAlex Elder 
1425b07f283eSAlex Elder 	if (!endpoint->toward_ipa)
14264fa95248SAlex Elder 		(void)ipa_endpoint_program_suspend(endpoint, true);
142784f9bd12SAlex Elder 
1428b07f283eSAlex Elder 	/* IPA v3.5.1 doesn't use channel stop for suspend */
1429b07f283eSAlex Elder 	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
143084f9bd12SAlex Elder 	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
143184f9bd12SAlex Elder 	if (ret)
143284f9bd12SAlex Elder 		dev_err(dev, "error %d suspending channel %u\n", ret,
143384f9bd12SAlex Elder 			endpoint->channel_id);
143484f9bd12SAlex Elder }
143584f9bd12SAlex Elder 
143684f9bd12SAlex Elder void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
143784f9bd12SAlex Elder {
143884f9bd12SAlex Elder 	struct device *dev = &endpoint->ipa->pdev->dev;
143984f9bd12SAlex Elder 	struct gsi *gsi = &endpoint->ipa->gsi;
144084f9bd12SAlex Elder 	bool start_channel;
144184f9bd12SAlex Elder 	int ret;
144284f9bd12SAlex Elder 
144384f9bd12SAlex Elder 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
144484f9bd12SAlex Elder 		return;
144584f9bd12SAlex Elder 
1446b07f283eSAlex Elder 	if (!endpoint->toward_ipa)
14474fa95248SAlex Elder 		(void)ipa_endpoint_program_suspend(endpoint, false);
144884f9bd12SAlex Elder 
1449b07f283eSAlex Elder 	/* IPA v3.5.1 doesn't use channel start for resume */
1450b07f283eSAlex Elder 	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
145184f9bd12SAlex Elder 	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
145284f9bd12SAlex Elder 	if (ret)
145384f9bd12SAlex Elder 		dev_err(dev, "error %d resuming channel %u\n", ret,
145484f9bd12SAlex Elder 			endpoint->channel_id);
145584f9bd12SAlex Elder 	else if (!endpoint->toward_ipa)
145684f9bd12SAlex Elder 		ipa_endpoint_replenish_enable(endpoint);
145784f9bd12SAlex Elder }
145884f9bd12SAlex Elder 
145984f9bd12SAlex Elder void ipa_endpoint_suspend(struct ipa *ipa)
146084f9bd12SAlex Elder {
146184f9bd12SAlex Elder 	if (ipa->modem_netdev)
146284f9bd12SAlex Elder 		ipa_modem_suspend(ipa->modem_netdev);
146384f9bd12SAlex Elder 
146484f9bd12SAlex Elder 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
146584f9bd12SAlex Elder 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
146684f9bd12SAlex Elder }
146784f9bd12SAlex Elder 
146884f9bd12SAlex Elder void ipa_endpoint_resume(struct ipa *ipa)
146984f9bd12SAlex Elder {
147084f9bd12SAlex Elder 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
147184f9bd12SAlex Elder 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
147284f9bd12SAlex Elder 
147384f9bd12SAlex Elder 	if (ipa->modem_netdev)
147484f9bd12SAlex Elder 		ipa_modem_resume(ipa->modem_netdev);
147584f9bd12SAlex Elder }
147684f9bd12SAlex Elder 
147784f9bd12SAlex Elder static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
147884f9bd12SAlex Elder {
147984f9bd12SAlex Elder 	struct gsi *gsi = &endpoint->ipa->gsi;
148084f9bd12SAlex Elder 	u32 channel_id = endpoint->channel_id;
148184f9bd12SAlex Elder 
148284f9bd12SAlex Elder 	/* Only AP endpoints get set up */
148384f9bd12SAlex Elder 	if (endpoint->ee_id != GSI_EE_AP)
148484f9bd12SAlex Elder 		return;
148584f9bd12SAlex Elder 
148684f9bd12SAlex Elder 	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
148784f9bd12SAlex Elder 	if (!endpoint->toward_ipa) {
148884f9bd12SAlex Elder 		/* RX transactions require a single TRE, so the maximum
148984f9bd12SAlex Elder 		 * backlog is the same as the maximum outstanding TREs.
149084f9bd12SAlex Elder 		 */
149184f9bd12SAlex Elder 		endpoint->replenish_enabled = false;
149284f9bd12SAlex Elder 		atomic_set(&endpoint->replenish_saved,
149384f9bd12SAlex Elder 			   gsi_channel_tre_max(gsi, endpoint->channel_id));
149484f9bd12SAlex Elder 		atomic_set(&endpoint->replenish_backlog, 0);
149584f9bd12SAlex Elder 		INIT_DELAYED_WORK(&endpoint->replenish_work,
149684f9bd12SAlex Elder 				  ipa_endpoint_replenish_work);
149784f9bd12SAlex Elder 	}
149884f9bd12SAlex Elder 
149984f9bd12SAlex Elder 	ipa_endpoint_program(endpoint);
150084f9bd12SAlex Elder 
150184f9bd12SAlex Elder 	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
150284f9bd12SAlex Elder }
150384f9bd12SAlex Elder 
150484f9bd12SAlex Elder static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
150584f9bd12SAlex Elder {
150684f9bd12SAlex Elder 	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
150784f9bd12SAlex Elder 
150884f9bd12SAlex Elder 	if (!endpoint->toward_ipa)
150984f9bd12SAlex Elder 		cancel_delayed_work_sync(&endpoint->replenish_work);
151084f9bd12SAlex Elder 
151184f9bd12SAlex Elder 	ipa_endpoint_reset(endpoint);
151284f9bd12SAlex Elder }
151384f9bd12SAlex Elder 
151484f9bd12SAlex Elder void ipa_endpoint_setup(struct ipa *ipa)
151584f9bd12SAlex Elder {
151684f9bd12SAlex Elder 	u32 initialized = ipa->initialized;
151784f9bd12SAlex Elder 
151884f9bd12SAlex Elder 	ipa->set_up = 0;
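	/* __ffs() returns the lowest set bit, so endpoints are set up in
	 * increasing endpoint ID order
	 */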
151984f9bd12SAlex Elder 	while (initialized) {
152084f9bd12SAlex Elder 		u32 endpoint_id = __ffs(initialized);
152184f9bd12SAlex Elder 
152284f9bd12SAlex Elder 		initialized ^= BIT(endpoint_id);
152384f9bd12SAlex Elder 
152484f9bd12SAlex Elder 		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
152584f9bd12SAlex Elder 	}
152684f9bd12SAlex Elder }
152784f9bd12SAlex Elder 
152884f9bd12SAlex Elder void ipa_endpoint_teardown(struct ipa *ipa)
152984f9bd12SAlex Elder {
153084f9bd12SAlex Elder 	u32 set_up = ipa->set_up;
153184f9bd12SAlex Elder 
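	/* __fls() returns the highest set bit, so endpoints are torn down
	 * in decreasing ID order, the reverse of setup
	 */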
153284f9bd12SAlex Elder 	while (set_up) {
153384f9bd12SAlex Elder 		u32 endpoint_id = __fls(set_up);
153484f9bd12SAlex Elder 
153584f9bd12SAlex Elder 		set_up ^= BIT(endpoint_id);
153684f9bd12SAlex Elder 
153784f9bd12SAlex Elder 		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
153884f9bd12SAlex Elder 	}
153984f9bd12SAlex Elder 	ipa->set_up = 0;
154084f9bd12SAlex Elder }
154184f9bd12SAlex Elder 
154284f9bd12SAlex Elder int ipa_endpoint_config(struct ipa *ipa)
154384f9bd12SAlex Elder {
154484f9bd12SAlex Elder 	struct device *dev = &ipa->pdev->dev;
154584f9bd12SAlex Elder 	u32 initialized;
154684f9bd12SAlex Elder 	u32 rx_base;
154784f9bd12SAlex Elder 	u32 rx_mask;
154884f9bd12SAlex Elder 	u32 tx_mask;
154984f9bd12SAlex Elder 	int ret = 0;
155084f9bd12SAlex Elder 	u32 max;
155184f9bd12SAlex Elder 	u32 val;
155284f9bd12SAlex Elder 
155384f9bd12SAlex Elder 	/* Find out about the endpoints supplied by the hardware, and ensure
155484f9bd12SAlex Elder 	 * the highest one doesn't exceed the number we support.
155584f9bd12SAlex Elder 	 */
155684f9bd12SAlex Elder 	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
155784f9bd12SAlex Elder 
155884f9bd12SAlex Elder 	/* Our RX is an IPA producer */
155984f9bd12SAlex Elder 	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
156084f9bd12SAlex Elder 	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
156184f9bd12SAlex Elder 	if (max > IPA_ENDPOINT_MAX) {
156284f9bd12SAlex Elder 		dev_err(dev, "too many endpoints (%u > %u)\n",
156384f9bd12SAlex Elder 			max, IPA_ENDPOINT_MAX);
156484f9bd12SAlex Elder 		return -EINVAL;
156584f9bd12SAlex Elder 	}
156684f9bd12SAlex Elder 	rx_mask = GENMASK(max - 1, rx_base);
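	/* Hypothetical example: rx_base 10 with 6 producer pipes gives
	 * max 16, so rx_mask is GENMASK(15, 10) = 0x0000fc00
	 */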
156784f9bd12SAlex Elder 
156884f9bd12SAlex Elder 	/* Our TX is an IPA consumer */
156984f9bd12SAlex Elder 	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
157084f9bd12SAlex Elder 	tx_mask = GENMASK(max - 1, 0);
157184f9bd12SAlex Elder 
157284f9bd12SAlex Elder 	ipa->available = rx_mask | tx_mask;
157384f9bd12SAlex Elder 
157484f9bd12SAlex Elder 	/* Check for initialized endpoints not supported by the hardware */
157584f9bd12SAlex Elder 	if (ipa->initialized & ~ipa->available) {
157684f9bd12SAlex Elder 		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
157784f9bd12SAlex Elder 			ipa->initialized & ~ipa->available);
157884f9bd12SAlex Elder 		ret = -EINVAL;		/* Report other errors too */
157984f9bd12SAlex Elder 	}
158084f9bd12SAlex Elder 
158184f9bd12SAlex Elder 	initialized = ipa->initialized;
158284f9bd12SAlex Elder 	while (initialized) {
158384f9bd12SAlex Elder 		u32 endpoint_id = __ffs(initialized);
158484f9bd12SAlex Elder 		struct ipa_endpoint *endpoint;
158584f9bd12SAlex Elder 
158684f9bd12SAlex Elder 		initialized ^= BIT(endpoint_id);
158784f9bd12SAlex Elder 
158884f9bd12SAlex Elder 		/* Make sure it's pointing in the right direction */
158984f9bd12SAlex Elder 		endpoint = &ipa->endpoint[endpoint_id];
159084f9bd12SAlex Elder 		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
159184f9bd12SAlex Elder 			dev_err(dev, "endpoint id %u wrong direction\n",
159284f9bd12SAlex Elder 				endpoint_id);
159384f9bd12SAlex Elder 			ret = -EINVAL;
159484f9bd12SAlex Elder 		}
159584f9bd12SAlex Elder 	}
159684f9bd12SAlex Elder 
159784f9bd12SAlex Elder 	return ret;
159884f9bd12SAlex Elder }
159984f9bd12SAlex Elder 
160084f9bd12SAlex Elder void ipa_endpoint_deconfig(struct ipa *ipa)
160184f9bd12SAlex Elder {
160284f9bd12SAlex Elder 	ipa->available = 0;	/* Nothing more to do */
160384f9bd12SAlex Elder }
160484f9bd12SAlex Elder 
160584f9bd12SAlex Elder static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
160684f9bd12SAlex Elder 				  const struct ipa_gsi_endpoint_data *data)
160784f9bd12SAlex Elder {
160884f9bd12SAlex Elder 	struct ipa_endpoint *endpoint;
160984f9bd12SAlex Elder 
161084f9bd12SAlex Elder 	endpoint = &ipa->endpoint[data->endpoint_id];
161184f9bd12SAlex Elder 
161284f9bd12SAlex Elder 	if (data->ee_id == GSI_EE_AP)
161384f9bd12SAlex Elder 		ipa->channel_map[data->channel_id] = endpoint;
161484f9bd12SAlex Elder 	ipa->name_map[name] = endpoint;
161584f9bd12SAlex Elder 
161684f9bd12SAlex Elder 	endpoint->ipa = ipa;
161784f9bd12SAlex Elder 	endpoint->ee_id = data->ee_id;
161884f9bd12SAlex Elder 	endpoint->seq_type = data->endpoint.seq_type;
161984f9bd12SAlex Elder 	endpoint->channel_id = data->channel_id;
162084f9bd12SAlex Elder 	endpoint->endpoint_id = data->endpoint_id;
162184f9bd12SAlex Elder 	endpoint->toward_ipa = data->toward_ipa;
162284f9bd12SAlex Elder 	endpoint->data = &data->endpoint.config;
162384f9bd12SAlex Elder 
162484f9bd12SAlex Elder 	ipa->initialized |= BIT(endpoint->endpoint_id);
162584f9bd12SAlex Elder }
162684f9bd12SAlex Elder 
162784f9bd12SAlex Elder void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
162884f9bd12SAlex Elder {
162984f9bd12SAlex Elder 	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
163084f9bd12SAlex Elder 
163184f9bd12SAlex Elder 	memset(endpoint, 0, sizeof(*endpoint));
163284f9bd12SAlex Elder }
163384f9bd12SAlex Elder 
163484f9bd12SAlex Elder void ipa_endpoint_exit(struct ipa *ipa)
163584f9bd12SAlex Elder {
163684f9bd12SAlex Elder 	u32 initialized = ipa->initialized;
163784f9bd12SAlex Elder 
163884f9bd12SAlex Elder 	while (initialized) {
163984f9bd12SAlex Elder 		u32 endpoint_id = __fls(initialized);
164084f9bd12SAlex Elder 
164184f9bd12SAlex Elder 		initialized ^= BIT(endpoint_id);
164284f9bd12SAlex Elder 
164384f9bd12SAlex Elder 		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
164484f9bd12SAlex Elder 	}
164584f9bd12SAlex Elder 	memset(ipa->name_map, 0, sizeof(ipa->name_map));
164684f9bd12SAlex Elder 	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
164784f9bd12SAlex Elder }
164884f9bd12SAlex Elder 
164984f9bd12SAlex Elder /* Returns a bitmask of endpoints that support filtering, or 0 on error */
165084f9bd12SAlex Elder u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
165184f9bd12SAlex Elder 		      const struct ipa_gsi_endpoint_data *data)
165284f9bd12SAlex Elder {
165384f9bd12SAlex Elder 	enum ipa_endpoint_name name;
165484f9bd12SAlex Elder 	u32 filter_map;
165584f9bd12SAlex Elder 
165684f9bd12SAlex Elder 	if (!ipa_endpoint_data_valid(ipa, count, data))
165784f9bd12SAlex Elder 		return 0;	/* Error */
165884f9bd12SAlex Elder 
165984f9bd12SAlex Elder 	ipa->initialized = 0;
166084f9bd12SAlex Elder 
166184f9bd12SAlex Elder 	filter_map = 0;
166284f9bd12SAlex Elder 	for (name = 0; name < count; name++, data++) {
166384f9bd12SAlex Elder 		if (ipa_gsi_endpoint_data_empty(data))
166484f9bd12SAlex Elder 			continue;	/* Skip over empty slots */
166584f9bd12SAlex Elder 
166684f9bd12SAlex Elder 		ipa_endpoint_init_one(ipa, name, data);
166784f9bd12SAlex Elder 
166884f9bd12SAlex Elder 		if (data->endpoint.filter_support)
166984f9bd12SAlex Elder 			filter_map |= BIT(data->endpoint_id);
167084f9bd12SAlex Elder 	}
167184f9bd12SAlex Elder 
167284f9bd12SAlex Elder 	if (!ipa_filter_map_valid(ipa, filter_map))
167384f9bd12SAlex Elder 		goto err_endpoint_exit;
167484f9bd12SAlex Elder 
167584f9bd12SAlex Elder 	return filter_map;	/* Non-zero bitmask */
167684f9bd12SAlex Elder 
167784f9bd12SAlex Elder err_endpoint_exit:
167884f9bd12SAlex Elder 	ipa_endpoint_exit(ipa);
167984f9bd12SAlex Elder 
168084f9bd12SAlex Elder 	return 0;	/* Error */
168184f9bd12SAlex Elder }
1682