xref: /linux/drivers/net/ipa/ipa_endpoint.c (revision 436396f26d502ada54281958db0a9f6fc12ff256)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2023 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13 
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24 #include "ipa_power.h"
25 
26 /* Hardware is told about receive buffers once a "batch" has been queued */
27 #define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
28 
29 /* The amount of RX buffer space consumed by standard skb overhead */
30 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
31 
32 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
33 #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
34 
35 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
36 
/** enum ipa_status_opcode - IPA status opcode field hardware values */
enum ipa_status_opcode {				/* *Not* a bitmask */
	/* Values are distinct powers of two, but the 8-bit opcode
	 * field holds exactly one of these codes at a time.
	 */
	IPA_STATUS_OPCODE_PACKET		= 1,
	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
	IPA_STATUS_OPCODE_LOG			= 16,
	IPA_STATUS_OPCODE_DCMP			= 32,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
};
47 
/** enum ipa_status_exception - IPA status exception field hardware values */
enum ipa_status_exception {				/* *Not* a bitmask */
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
	IPA_STATUS_EXCEPTION_UC			= 128,
	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
	/* NOTE(review): "CHEKCSUM" is a misspelling of "CHECKSUM";
	 * renaming would touch any users outside this view, so it is
	 * only flagged here -- fix in a separate rename.
	 */
	IPA_STATUS_EXCEPTION_CHEKCSUM		= 229,
};
63 
/** enum ipa_status_mask - IPA status mask field bitmask hardware values
 *
 * Unlike the opcode and exception fields, the 16-bit status mask field
 * is a true bitmask; any combination of these flags may be set.
 */
enum ipa_status_mask {
	IPA_STATUS_MASK_FRAG_PROCESS		= BIT(0),
	IPA_STATUS_MASK_FILT_PROCESS		= BIT(1),
	IPA_STATUS_MASK_NAT_PROCESS		= BIT(2),
	IPA_STATUS_MASK_ROUTE_PROCESS		= BIT(3),
	IPA_STATUS_MASK_TAG_VALID		= BIT(4),
	IPA_STATUS_MASK_FRAGMENT		= BIT(5),
	IPA_STATUS_MASK_FIRST_FRAGMENT		= BIT(6),
	IPA_STATUS_MASK_V4			= BIT(7),
	IPA_STATUS_MASK_CKSUM_PROCESS		= BIT(8),
	IPA_STATUS_MASK_AGGR_PROCESS		= BIT(9),
	IPA_STATUS_MASK_DEST_EOT		= BIT(10),
	IPA_STATUS_MASK_DEAGGR_PROCESS		= BIT(11),
	IPA_STATUS_MASK_DEAGG_FIRST		= BIT(12),
	IPA_STATUS_MASK_SRC_EOT			= BIT(13),
	IPA_STATUS_MASK_PREV_EOT		= BIT(14),
	IPA_STATUS_MASK_BYTE_LIMIT		= BIT(15),
};
83 
84 /* Special IPA filter/router rule field value indicating "rule miss" */
85 #define IPA_STATUS_RULE_MISS	0x3ff	/* 10-bit filter/router rule fields */
86 
87 /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
88 
/* enum ipa_status_field_id - IPA packet status structure field identifiers
 *
 * These abstract identifiers are passed to ipa_status_extract(), which
 * knows where each field lives in the hardware status structure for a
 * given IPA version.
 */
enum ipa_status_field_id {
	STATUS_OPCODE,			/* enum ipa_status_opcode */
	STATUS_EXCEPTION,		/* enum ipa_status_exception */
	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
	STATUS_LENGTH,
	STATUS_SRC_ENDPOINT,
	STATUS_DST_ENDPOINT,
	STATUS_METADATA,
	STATUS_FILTER_LOCAL,		/* Boolean */
	STATUS_FILTER_HASH,		/* Boolean */
	STATUS_FILTER_GLOBAL,		/* Boolean */
	STATUS_FILTER_RETAIN,		/* Boolean */
	STATUS_FILTER_RULE_INDEX,
	STATUS_ROUTER_LOCAL,		/* Boolean */
	STATUS_ROUTER_HASH,		/* Boolean */
	STATUS_UCP,			/* Boolean */
	STATUS_ROUTER_TABLE,
	STATUS_ROUTER_RULE_INDEX,
	STATUS_NAT_HIT,			/* Boolean */
	STATUS_NAT_INDEX,
	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
	STATUS_SEQUENCE,
	STATUS_TIME_OF_DAY,
	STATUS_HEADER_LOCAL,		/* Boolean */
	STATUS_HEADER_OFFSET,
	STATUS_FRAG_HIT,		/* Boolean */
	STATUS_FRAG_RULE_INDEX,
};
120 
121 /* Size in bytes of an IPA packet status structure */
122 #define IPA_STATUS_SIZE			sizeof(__le32[4])
123 
/* IPA status structure decoder; looks up field values for a structure
 *
 * @ipa:	IPA pointer (supplies the hardware version, which determines
 *		where some fields are located)
 * @data:	Pointer to a little-endian packet status structure
 * @field:	Identifier of the field whose value is wanted
 *
 * Returns the extracted field value in host byte order, or 0 (after a
 * warning) for an unrecognized field ID.
 *
 * NOTE(review): some fields are read from word[4]..word[7], beyond the
 * 4-word IPA_STATUS_SIZE -- presumably valid only where the hardware
 * provides a larger status structure; confirm buffer sizing at callers.
 */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
			      enum ipa_status_field_id field)
{
	enum ipa_version version = ipa->version;
	const __le32 *word = data;

	switch (field) {
	case STATUS_OPCODE:
		return le32_get_bits(word[0], GENMASK(7, 0));
	case STATUS_EXCEPTION:
		return le32_get_bits(word[0], GENMASK(15, 8));
	case STATUS_MASK:
		return le32_get_bits(word[0], GENMASK(31, 16));
	case STATUS_LENGTH:
		return le32_get_bits(word[1], GENMASK(15, 0));
	case STATUS_SRC_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(20, 16));
		return le32_get_bits(word[1], GENMASK(23, 16));
	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
	case STATUS_DST_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(28, 24));
		return le32_get_bits(word[7], GENMASK(23, 16));
	/* Status word 1, bits 29-31 are reserved */
	case STATUS_METADATA:
		return le32_to_cpu(word[2]);
	case STATUS_FILTER_LOCAL:
		return le32_get_bits(word[3], GENMASK(0, 0));
	case STATUS_FILTER_HASH:
		return le32_get_bits(word[3], GENMASK(1, 1));
	case STATUS_FILTER_GLOBAL:
		return le32_get_bits(word[3], GENMASK(2, 2));
	case STATUS_FILTER_RETAIN:
		return le32_get_bits(word[3], GENMASK(3, 3));
	case STATUS_FILTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(13, 4));
	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
	case STATUS_ROUTER_LOCAL:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(14, 14));
		return le32_get_bits(word[1], GENMASK(27, 27));
	case STATUS_ROUTER_HASH:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(15, 15));
		return le32_get_bits(word[1], GENMASK(28, 28));
	case STATUS_UCP:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(16, 16));
		return le32_get_bits(word[7], GENMASK(31, 31));
	case STATUS_ROUTER_TABLE:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(21, 17));
		return le32_get_bits(word[3], GENMASK(21, 14));
	case STATUS_ROUTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(31, 22));
	case STATUS_NAT_HIT:
		return le32_get_bits(word[4], GENMASK(0, 0));
	case STATUS_NAT_INDEX:
		return le32_get_bits(word[4], GENMASK(13, 1));
	case STATUS_NAT_TYPE:
		return le32_get_bits(word[4], GENMASK(15, 14));
	/* The 48-bit tag straddles words 4 and 5 */
	case STATUS_TAG_LOW32:
		return le32_get_bits(word[4], GENMASK(31, 16)) |
			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
	case STATUS_TAG_HIGH16:
		return le32_get_bits(word[5], GENMASK(31, 16));
	case STATUS_SEQUENCE:
		return le32_get_bits(word[6], GENMASK(7, 0));
	case STATUS_TIME_OF_DAY:
		return le32_get_bits(word[6], GENMASK(31, 8));
	case STATUS_HEADER_LOCAL:
		return le32_get_bits(word[7], GENMASK(0, 0));
	case STATUS_HEADER_OFFSET:
		return le32_get_bits(word[7], GENMASK(10, 1));
	case STATUS_FRAG_HIT:
		return le32_get_bits(word[7], GENMASK(11, 11));
	case STATUS_FRAG_RULE_INDEX:
		return le32_get_bits(word[7], GENMASK(15, 12));
	/* Status word 7, bits 16-30 are reserved */
	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
	default:
		WARN(true, "%s: bad field_id %u\n", __func__, field);
		return 0;
	}
}
212 
213 /* Compute the aggregation size value to use for a given buffer size */
214 static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
215 {
216 	/* A hard aggregation limit will not be crossed; aggregation closes
217 	 * if saving incoming data would cross the hard byte limit boundary.
218 	 *
219 	 * With a soft limit, aggregation closes *after* the size boundary
220 	 * has been crossed.  In that case the limit must leave enough space
221 	 * after that limit to receive a full MTU of data plus overhead.
222 	 */
223 	if (!aggr_hard_limit)
224 		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
225 
226 	/* The byte limit is encoded as a number of kilobytes */
227 
228 	return rx_buffer_size / SZ_1K;
229 }
230 
231 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
232 			    const struct ipa_gsi_endpoint_data *all_data,
233 			    const struct ipa_gsi_endpoint_data *data)
234 {
235 	const struct ipa_gsi_endpoint_data *other_data;
236 	struct device *dev = &ipa->pdev->dev;
237 	enum ipa_endpoint_name other_name;
238 
239 	if (ipa_gsi_endpoint_data_empty(data))
240 		return true;
241 
242 	if (!data->toward_ipa) {
243 		const struct ipa_endpoint_rx *rx_config;
244 		const struct ipa_reg *reg;
245 		u32 buffer_size;
246 		u32 aggr_size;
247 		u32 limit;
248 
249 		if (data->endpoint.filter_support) {
250 			dev_err(dev, "filtering not supported for "
251 					"RX endpoint %u\n",
252 				data->endpoint_id);
253 			return false;
254 		}
255 
256 		/* Nothing more to check for non-AP RX */
257 		if (data->ee_id != GSI_EE_AP)
258 			return true;
259 
260 		rx_config = &data->endpoint.config.rx;
261 
262 		/* The buffer size must hold an MTU plus overhead */
263 		buffer_size = rx_config->buffer_size;
264 		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
265 		if (buffer_size < limit) {
266 			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
267 				data->endpoint_id, buffer_size, limit);
268 			return false;
269 		}
270 
271 		if (!data->endpoint.config.aggregation) {
272 			bool result = true;
273 
274 			/* No aggregation; check for bogus aggregation data */
275 			if (rx_config->aggr_time_limit) {
276 				dev_err(dev,
277 					"time limit with no aggregation for RX endpoint %u\n",
278 					data->endpoint_id);
279 				result = false;
280 			}
281 
282 			if (rx_config->aggr_hard_limit) {
283 				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
284 					data->endpoint_id);
285 				result = false;
286 			}
287 
288 			if (rx_config->aggr_close_eof) {
289 				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
290 					data->endpoint_id);
291 				result = false;
292 			}
293 
294 			return result;	/* Nothing more to check */
295 		}
296 
297 		/* For an endpoint supporting receive aggregation, the byte
298 		 * limit defines the point at which aggregation closes.  This
299 		 * check ensures the receive buffer size doesn't result in a
300 		 * limit that exceeds what's representable in the aggregation
301 		 * byte limit field.
302 		 */
303 		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
304 					     rx_config->aggr_hard_limit);
305 		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
306 
307 		limit = ipa_reg_field_max(reg, BYTE_LIMIT);
308 		if (aggr_size > limit) {
309 			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
310 				data->endpoint_id, aggr_size, limit);
311 
312 			return false;
313 		}
314 
315 		return true;	/* Nothing more to check for RX */
316 	}
317 
318 	/* Starting with IPA v4.5 sequencer replication is obsolete */
319 	if (ipa->version >= IPA_VERSION_4_5) {
320 		if (data->endpoint.config.tx.seq_rep_type) {
321 			dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
322 				data->endpoint_id);
323 			return false;
324 		}
325 	}
326 
327 	if (data->endpoint.config.status_enable) {
328 		other_name = data->endpoint.config.tx.status_endpoint;
329 		if (other_name >= count) {
330 			dev_err(dev, "status endpoint name %u out of range "
331 					"for endpoint %u\n",
332 				other_name, data->endpoint_id);
333 			return false;
334 		}
335 
336 		/* Status endpoint must be defined... */
337 		other_data = &all_data[other_name];
338 		if (ipa_gsi_endpoint_data_empty(other_data)) {
339 			dev_err(dev, "DMA endpoint name %u undefined "
340 					"for endpoint %u\n",
341 				other_name, data->endpoint_id);
342 			return false;
343 		}
344 
345 		/* ...and has to be an RX endpoint... */
346 		if (other_data->toward_ipa) {
347 			dev_err(dev,
348 				"status endpoint for endpoint %u not RX\n",
349 				data->endpoint_id);
350 			return false;
351 		}
352 
353 		/* ...and if it's to be an AP endpoint... */
354 		if (other_data->ee_id == GSI_EE_AP) {
355 			/* ...make sure it has status enabled. */
356 			if (!other_data->endpoint.config.status_enable) {
357 				dev_err(dev,
358 					"status not enabled for endpoint %u\n",
359 					other_data->endpoint_id);
360 				return false;
361 			}
362 		}
363 	}
364 
365 	if (data->endpoint.config.dma_mode) {
366 		other_name = data->endpoint.config.dma_endpoint;
367 		if (other_name >= count) {
368 			dev_err(dev, "DMA endpoint name %u out of range "
369 					"for endpoint %u\n",
370 				other_name, data->endpoint_id);
371 			return false;
372 		}
373 
374 		other_data = &all_data[other_name];
375 		if (ipa_gsi_endpoint_data_empty(other_data)) {
376 			dev_err(dev, "DMA endpoint name %u undefined "
377 					"for endpoint %u\n",
378 				other_name, data->endpoint_id);
379 			return false;
380 		}
381 	}
382 
383 	return true;
384 }
385 
386 /* Validate endpoint configuration data.  Return max defined endpoint ID */
387 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
388 			    const struct ipa_gsi_endpoint_data *data)
389 {
390 	const struct ipa_gsi_endpoint_data *dp = data;
391 	struct device *dev = &ipa->pdev->dev;
392 	enum ipa_endpoint_name name;
393 	u32 max;
394 
395 	if (count > IPA_ENDPOINT_COUNT) {
396 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
397 			count, IPA_ENDPOINT_COUNT);
398 		return 0;
399 	}
400 
401 	/* Make sure needed endpoints have defined data */
402 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
403 		dev_err(dev, "command TX endpoint not defined\n");
404 		return 0;
405 	}
406 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
407 		dev_err(dev, "LAN RX endpoint not defined\n");
408 		return 0;
409 	}
410 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
411 		dev_err(dev, "AP->modem TX endpoint not defined\n");
412 		return 0;
413 	}
414 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
415 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
416 		return 0;
417 	}
418 
419 	max = 0;
420 	for (name = 0; name < count; name++, dp++) {
421 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
422 			return 0;
423 		max = max_t(u32, max, dp->endpoint_id);
424 	}
425 
426 	return max;
427 }
428 
429 /* Allocate a transaction to use on a non-command endpoint */
430 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
431 						  u32 tre_count)
432 {
433 	struct gsi *gsi = &endpoint->ipa->gsi;
434 	u32 channel_id = endpoint->channel_id;
435 	enum dma_data_direction direction;
436 
437 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
438 
439 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
440 }
441 
442 /* suspend_delay represents suspend for RX, delay for TX endpoints.
443  * Note that suspend is not supported starting with IPA v4.0, and
444  * delay mode should not be used starting with IPA v4.2.
445  */
446 static bool
447 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
448 {
449 	struct ipa *ipa = endpoint->ipa;
450 	const struct ipa_reg *reg;
451 	u32 field_id;
452 	u32 offset;
453 	bool state;
454 	u32 mask;
455 	u32 val;
456 
457 	if (endpoint->toward_ipa)
458 		WARN_ON(ipa->version >= IPA_VERSION_4_2);
459 	else
460 		WARN_ON(ipa->version >= IPA_VERSION_4_0);
461 
462 	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
463 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
464 	val = ioread32(ipa->reg_virt + offset);
465 
466 	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
467 	mask = ipa_reg_bit(reg, field_id);
468 
469 	state = !!(val & mask);
470 
471 	/* Don't bother if it's already in the requested state */
472 	if (suspend_delay != state) {
473 		val ^= mask;
474 		iowrite32(val, ipa->reg_virt + offset);
475 	}
476 
477 	return state;
478 }
479 
480 /* We don't care what the previous state was for delay mode */
481 static void
482 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
483 {
484 	/* Delay mode should not be used for IPA v4.2+ */
485 	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
486 	WARN_ON(!endpoint->toward_ipa);
487 
488 	(void)ipa_endpoint_init_ctrl(endpoint, enable);
489 }
490 
491 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
492 {
493 	u32 endpoint_id = endpoint->endpoint_id;
494 	struct ipa *ipa = endpoint->ipa;
495 	u32 unit = endpoint_id / 32;
496 	const struct ipa_reg *reg;
497 	u32 val;
498 
499 	WARN_ON(!test_bit(endpoint_id, ipa->available));
500 
501 	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
502 	val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
503 
504 	return !!(val & BIT(endpoint_id % 32));
505 }
506 
507 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
508 {
509 	u32 endpoint_id = endpoint->endpoint_id;
510 	u32 mask = BIT(endpoint_id % 32);
511 	struct ipa *ipa = endpoint->ipa;
512 	u32 unit = endpoint_id / 32;
513 	const struct ipa_reg *reg;
514 
515 	WARN_ON(!test_bit(endpoint_id, ipa->available));
516 
517 	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
518 	iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
519 }
520 
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 *  with an open aggregation frame.  This is to work around a hardware
 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 *  generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* Only aggregating endpoints are affected */
	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	/* Simulate the suspend interrupt the hardware failed to raise */
	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
546 
/* Enable or disable suspend mode on an RX endpoint.
 *
 * Returns the previous suspend state (true means suspend was enabled).
 * For IPA v4.0+ the INIT_CTRL suspend bit no longer exists, so nothing
 * is changed and @enable is returned as the "previous" state.
 */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	/* Suspend mode applies only to RX endpoints */
	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}
569 
570 /* Put all modem RX endpoints into suspend mode, and stop transmission
571  * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
572  * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
573  * control instead.
574  */
575 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
576 {
577 	u32 endpoint_id = 0;
578 
579 	while (endpoint_id < ipa->endpoint_count) {
580 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
581 
582 		if (endpoint->ee_id != GSI_EE_MODEM)
583 			continue;
584 
585 		if (!endpoint->toward_ipa)
586 			(void)ipa_endpoint_program_suspend(endpoint, enable);
587 		else if (ipa->version < IPA_VERSION_4_2)
588 			ipa_endpoint_program_delay(endpoint, enable);
589 		else
590 			gsi_modem_channel_flow_control(&ipa->gsi,
591 						       endpoint->channel_id,
592 						       enable);
593 	}
594 }
595 
/* Reset all modem endpoints to use the default exception endpoint.
 *
 * Builds a single command transaction that disables status on every
 * modem TX endpoint, then clears the hardware pipeline.  Returns 0 on
 * success or -EBUSY if no transaction could be allocated.
 */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	struct gsi_trans *trans;
	u32 endpoint_id;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;
		const struct ipa_reg *reg;
		u32 offset;

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		reg = ipa_reg(ipa, ENDP_STATUS);
		offset = ipa_reg_n_offset(reg, endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	/* Clear the pipeline after the register writes, then wait for
	 * the whole transaction to complete.
	 */
	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}
642 
643 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
644 {
645 	u32 endpoint_id = endpoint->endpoint_id;
646 	struct ipa *ipa = endpoint->ipa;
647 	enum ipa_cs_offload_en enabled;
648 	const struct ipa_reg *reg;
649 	u32 val = 0;
650 
651 	reg = ipa_reg(ipa, ENDP_INIT_CFG);
652 	/* FRAG_OFFLOAD_EN is 0 */
653 	if (endpoint->config.checksum) {
654 		enum ipa_version version = ipa->version;
655 
656 		if (endpoint->toward_ipa) {
657 			u32 off;
658 
659 			/* Checksum header offset is in 4-byte units */
660 			off = sizeof(struct rmnet_map_header) / sizeof(u32);
661 			val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
662 
663 			enabled = version < IPA_VERSION_4_5
664 					? IPA_CS_OFFLOAD_UL
665 					: IPA_CS_OFFLOAD_INLINE;
666 		} else {
667 			enabled = version < IPA_VERSION_4_5
668 					? IPA_CS_OFFLOAD_DL
669 					: IPA_CS_OFFLOAD_INLINE;
670 		}
671 	} else {
672 		enabled = IPA_CS_OFFLOAD_NONE;
673 	}
674 	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
675 	/* CS_GEN_QMB_MASTER_SEL is 0 */
676 
677 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
678 }
679 
680 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
681 {
682 	u32 endpoint_id = endpoint->endpoint_id;
683 	struct ipa *ipa = endpoint->ipa;
684 	const struct ipa_reg *reg;
685 	u32 val;
686 
687 	if (!endpoint->toward_ipa)
688 		return;
689 
690 	reg = ipa_reg(ipa, ENDP_INIT_NAT);
691 	val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
692 
693 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
694 }
695 
696 static u32
697 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
698 {
699 	u32 header_size = sizeof(struct rmnet_map_header);
700 
701 	/* Without checksum offload, we just have the MAP header */
702 	if (!endpoint->config.checksum)
703 		return header_size;
704 
705 	if (version < IPA_VERSION_4_5) {
706 		/* Checksum header inserted for AP TX endpoints only */
707 		if (endpoint->toward_ipa)
708 			header_size += sizeof(struct rmnet_map_ul_csum_header);
709 	} else {
710 		/* Checksum header is used in both directions */
711 		header_size += sizeof(struct rmnet_map_v5_csum_header);
712 	}
713 
714 	return header_size;
715 }
716 
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s).
 *
 * Before IPA v4.5 the header size must fit entirely in HDR_LEN; from
 * v4.5 on, the excess high-order bits go in HDR_LEN_MSB.
 */
static u32 ipa_header_size_encode(enum ipa_version version,
				  const struct ipa_reg *reg, u32 header_size)
{
	u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(header_size > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	header_size >>= hweight32(field_max);
	WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
	val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);

	return val;
}
738 
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s).
 *
 * Mirrors ipa_header_size_encode(): before IPA v4.5 the offset must fit
 * in HDR_OFST_METADATA; from v4.5 on, high-order bits go in the MSB field.
 */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
				      const struct ipa_reg *reg, u32 offset)
{
	u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(offset > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	offset >>= hweight32(field_max);
	WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
	val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);

	return val;
}
760 
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR);
	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		/* Header size may be split across HDR_LEN/HDR_LEN_MSB */
		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encode(version, reg, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;     /* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encode(version, reg, off);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);

			val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
825 
/* Initialize the HDR_EXT endpoint configuration register.
 *
 * Complements ipa_endpoint_init_hdr(): sets header endianness and
 * padding behavior for QMAP endpoints, RX pad alignment, and (for IPA
 * v4.5+) the most-significant bits of the packet-size field offset.
 */
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 pad_align = endpoint->config.rx.pad_align;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= ipa_reg_bit(reg, HDR_ENDIANNESS);	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
			u32 off;     /* Field offset within header */

			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Low bits are in the ENDP_INIT_HDR register */
			off >>= hweight32(mask);
			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
877 
878 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
879 {
880 	u32 endpoint_id = endpoint->endpoint_id;
881 	struct ipa *ipa = endpoint->ipa;
882 	const struct ipa_reg *reg;
883 	u32 val = 0;
884 	u32 offset;
885 
886 	if (endpoint->toward_ipa)
887 		return;		/* Register not valid for TX endpoints */
888 
889 	reg = ipa_reg(ipa,  ENDP_INIT_HDR_METADATA_MASK);
890 	offset = ipa_reg_n_offset(reg, endpoint_id);
891 
892 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
893 	if (endpoint->config.qmap)
894 		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
895 
896 	iowrite32(val, ipa->reg_virt + offset);
897 }
898 
899 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
900 {
901 	struct ipa *ipa = endpoint->ipa;
902 	const struct ipa_reg *reg;
903 	u32 offset;
904 	u32 val;
905 
906 	if (!endpoint->toward_ipa)
907 		return;		/* Register not valid for RX endpoints */
908 
909 	reg = ipa_reg(ipa, ENDP_INIT_MODE);
910 	if (endpoint->config.dma_mode) {
911 		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
912 		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
913 
914 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
915 		val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
916 	} else {
917 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
918 	}
919 	/* All other bits unspecified (and 0) */
920 
921 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
922 	iowrite32(val, ipa->reg_virt + offset);
923 }
924 
925 /* For IPA v4.5+, times are expressed using Qtime.  A time is represented
926  * at one of several available granularities, which are configured in
927  * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
928  * generators are set up with different "tick" periods.  A Qtime value
929  * encodes a tick count along with an indication of a pulse generator
930  * (which has a fixed tick period).  Two pulse generators are always
931  * available to the AP; a third is available starting with IPA v5.0.
932  * This function determines which pulse generator most accurately
933  * represents the time period provided, and returns the tick count to
934  * use to represent that time.
935  */
936 static u32
937 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
938 {
939 	u32 which = 0;
940 	u32 ticks;
941 
942 	/* Pulse generator 0 has 100 microsecond granularity */
943 	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
944 	if (ticks <= max)
945 		goto out;
946 
947 	/* Pulse generator 1 has millisecond granularity */
948 	which = 1;
949 	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
950 	if (ticks <= max)
951 		goto out;
952 
953 	if (ipa->version >= IPA_VERSION_5_0) {
954 		/* Pulse generator 2 has 10 millisecond granularity */
955 		which = 2;
956 		ticks = DIV_ROUND_CLOSEST(microseconds, 100);
957 	}
958 	WARN_ON(ticks > max);
959 out:
960 	*select = which;
961 
962 	return ticks;
963 }
964 
965 /* Encode the aggregation timer limit (microseconds) based on IPA version */
966 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
967 				  u32 microseconds)
968 {
969 	u32 ticks;
970 	u32 max;
971 
972 	if (!microseconds)
973 		return 0;	/* Nothing to compute if time limit is 0 */
974 
975 	max = ipa_reg_field_max(reg, TIME_LIMIT);
976 	if (ipa->version >= IPA_VERSION_4_5) {
977 		u32 select;
978 
979 		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
980 
981 		return ipa_reg_encode(reg, AGGR_GRAN_SEL, select) |
982 		       ipa_reg_encode(reg, TIME_LIMIT, ticks);
983 	}
984 
985 	/* We program aggregation granularity in ipa_hardware_config() */
986 	ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
987 	WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
988 	     microseconds, max * IPA_AGGR_GRANULARITY);
989 
990 	return ipa_reg_encode(reg, TIME_LIMIT, ticks);
991 }
992 
/* Configure aggregation for an endpoint: generic aggregation for RX,
 * QMAP deaggregation for TX, or bypass when aggregation isn't used.
 */
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct ipa_reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			u32 limit;

			/* RX endpoints use generic aggregation */
			rx_config = &endpoint->config.rx;
			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);

			/* Byte limit derived from the receive buffer size,
			 * less the skb headroom reserved at its start
			 */
			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);

			/* Time limit encoding depends on the IPA version */
			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encode(ipa, reg, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			/* Optionally close aggregation at end-of-frame */
			if (rx_config->aggr_close_eof)
				val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
		} else {
			/* TX endpoints deaggregate QMAP-aggregated input */
			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
		/* other fields ignored */
	}

	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
1037 
1038 /* The head-of-line blocking timer is defined as a tick count.  For
1039  * IPA version 4.5 the tick count is based on the Qtimer, which is
1040  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
1041  * each tick represents 128 cycles of the IPA core clock.
1042  *
1043  * Return the encoded value representing the timeout period provided
1044  * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
1045  */
1046 static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
1047 				  u32 microseconds)
1048 {
1049 	u32 width;
1050 	u32 scale;
1051 	u64 ticks;
1052 	u64 rate;
1053 	u32 high;
1054 	u32 val;
1055 
1056 	if (!microseconds)
1057 		return 0;	/* Nothing to compute if timer period is 0 */
1058 
1059 	if (ipa->version >= IPA_VERSION_4_5) {
1060 		u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
1061 		u32 select;
1062 		u32 ticks;
1063 
1064 		ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1065 
1066 		return ipa_reg_encode(reg, TIMER_GRAN_SEL, 1) |
1067 		       ipa_reg_encode(reg, TIMER_LIMIT, ticks);
1068 	}
1069 
1070 	/* Use 64 bit arithmetic to avoid overflow */
1071 	rate = ipa_core_clock_rate(ipa);
1072 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
1073 
1074 	/* We still need the result to fit into the field */
1075 	WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
1076 
1077 	/* IPA v3.5.1 through v4.1 just record the tick count */
1078 	if (ipa->version < IPA_VERSION_4_2)
1079 		return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
1080 
1081 	/* For IPA v4.2, the tick count is represented by base and
1082 	 * scale fields within the 32-bit timer register, where:
1083 	 *     ticks = base << scale;
1084 	 * The best precision is achieved when the base value is as
1085 	 * large as possible.  Find the highest set bit in the tick
1086 	 * count, and extract the number of bits in the base field
1087 	 * such that high bit is included.
1088 	 */
1089 	high = fls(ticks);		/* 1..32 (or warning above) */
1090 	width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
1091 	scale = high > width ? high - width : 0;
1092 	if (scale) {
1093 		/* If we're scaling, round up to get a closer result */
1094 		ticks += 1 << (scale - 1);
1095 		/* High bit was set, so rounding might have affected it */
1096 		if (fls(ticks) != high)
1097 			scale++;
1098 	}
1099 
1100 	val = ipa_reg_encode(reg, TIMER_SCALE, scale);
1101 	val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
1102 
1103 	return val;
1104 }
1105 
1106 /* If microseconds is 0, timeout is immediate */
1107 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
1108 					      u32 microseconds)
1109 {
1110 	u32 endpoint_id = endpoint->endpoint_id;
1111 	struct ipa *ipa = endpoint->ipa;
1112 	const struct ipa_reg *reg;
1113 	u32 val;
1114 
1115 	/* This should only be changed when HOL_BLOCK_EN is disabled */
1116 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117 	val = hol_block_timer_encode(ipa, reg, microseconds);
1118 
1119 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1120 }
1121 
1122 static void
1123 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
1124 {
1125 	u32 endpoint_id = endpoint->endpoint_id;
1126 	struct ipa *ipa = endpoint->ipa;
1127 	const struct ipa_reg *reg;
1128 	u32 offset;
1129 	u32 val;
1130 
1131 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1132 	offset = ipa_reg_n_offset(reg, endpoint_id);
1133 	val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
1134 
1135 	iowrite32(val, ipa->reg_virt + offset);
1136 
1137 	/* When enabling, the register must be written twice for IPA v4.5+ */
1138 	if (enable && ipa->version >= IPA_VERSION_4_5)
1139 		iowrite32(val, ipa->reg_virt + offset);
1140 }
1141 
/* Program the HOL-block timeout, then enable HOL blocking.
 * Assumes HOL_BLOCK is in disabled state (the timer register should
 * only be changed while blocking is disabled).
 */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	/* Timeout must be written first; 0 means an immediate timeout */
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}
1149 
/* Disable head-of-line blocking handling for an endpoint */
static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}
1154 
1155 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1156 {
1157 	u32 endpoint_id = 0;
1158 
1159 	while (endpoint_id < ipa->endpoint_count) {
1160 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1161 
1162 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1163 			continue;
1164 
1165 		ipa_endpoint_init_hol_block_disable(endpoint);
1166 		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1167 	}
1168 }
1169 
1170 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1171 {
1172 	u32 endpoint_id = endpoint->endpoint_id;
1173 	struct ipa *ipa = endpoint->ipa;
1174 	const struct ipa_reg *reg;
1175 	u32 val = 0;
1176 
1177 	if (!endpoint->toward_ipa)
1178 		return;		/* Register not valid for RX endpoints */
1179 
1180 	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1181 	/* DEAGGR_HDR_LEN is 0 */
1182 	/* PACKET_OFFSET_VALID is 0 */
1183 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1184 	/* MAX_PACKET_LEN is 0 (not enforced) */
1185 
1186 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1187 }
1188 
1189 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1190 {
1191 	u32 resource_group = endpoint->config.resource_group;
1192 	u32 endpoint_id = endpoint->endpoint_id;
1193 	struct ipa *ipa = endpoint->ipa;
1194 	const struct ipa_reg *reg;
1195 	u32 val;
1196 
1197 	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1198 	val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1199 
1200 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1201 }
1202 
1203 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1204 {
1205 	u32 endpoint_id = endpoint->endpoint_id;
1206 	struct ipa *ipa = endpoint->ipa;
1207 	const struct ipa_reg *reg;
1208 	u32 val;
1209 
1210 	if (!endpoint->toward_ipa)
1211 		return;		/* Register not valid for RX endpoints */
1212 
1213 	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1214 
1215 	/* Low-order byte configures primary packet processing */
1216 	val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1217 
1218 	/* Second byte (if supported) configures replicated packet processing */
1219 	if (ipa->version < IPA_VERSION_4_5)
1220 		val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
1221 				      endpoint->config.tx.seq_rep_type);
1222 
1223 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1224 }
1225 
1226 /**
1227  * ipa_endpoint_skb_tx() - Transmit a socket buffer
1228  * @endpoint:	Endpoint pointer
1229  * @skb:	Socket buffer to send
1230  *
1231  * Returns:	0 if successful, or a negative error code
1232  */
1233 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1234 {
1235 	struct gsi_trans *trans;
1236 	u32 nr_frags;
1237 	int ret;
1238 
1239 	/* Make sure source endpoint's TLV FIFO has enough entries to
1240 	 * hold the linear portion of the skb and all its fragments.
1241 	 * If not, see if we can linearize it before giving up.
1242 	 */
1243 	nr_frags = skb_shinfo(skb)->nr_frags;
1244 	if (nr_frags > endpoint->skb_frag_max) {
1245 		if (skb_linearize(skb))
1246 			return -E2BIG;
1247 		nr_frags = 0;
1248 	}
1249 
1250 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1251 	if (!trans)
1252 		return -EBUSY;
1253 
1254 	ret = gsi_trans_skb_add(trans, skb);
1255 	if (ret)
1256 		goto err_trans_free;
1257 	trans->data = skb;	/* transaction owns skb now */
1258 
1259 	gsi_trans_commit(trans, !netdev_xmit_more());
1260 
1261 	return 0;
1262 
1263 err_trans_free:
1264 	gsi_trans_free(trans);
1265 
1266 	return -ENOMEM;
1267 }
1268 
1269 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1270 {
1271 	u32 endpoint_id = endpoint->endpoint_id;
1272 	struct ipa *ipa = endpoint->ipa;
1273 	const struct ipa_reg *reg;
1274 	u32 val = 0;
1275 
1276 	reg = ipa_reg(ipa, ENDP_STATUS);
1277 	if (endpoint->config.status_enable) {
1278 		val |= ipa_reg_bit(reg, STATUS_EN);
1279 		if (endpoint->toward_ipa) {
1280 			enum ipa_endpoint_name name;
1281 			u32 status_endpoint_id;
1282 
1283 			name = endpoint->config.tx.status_endpoint;
1284 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1285 
1286 			val |= ipa_reg_encode(reg, STATUS_ENDP,
1287 					      status_endpoint_id);
1288 		}
1289 		/* STATUS_LOCATION is 0, meaning IPA packet status
1290 		 * precedes the packet (not present for IPA v4.5+)
1291 		 */
1292 		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1293 	}
1294 
1295 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1296 }
1297 
1298 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1299 				      struct gsi_trans *trans)
1300 {
1301 	struct page *page;
1302 	u32 buffer_size;
1303 	u32 offset;
1304 	u32 len;
1305 	int ret;
1306 
1307 	buffer_size = endpoint->config.rx.buffer_size;
1308 	page = dev_alloc_pages(get_order(buffer_size));
1309 	if (!page)
1310 		return -ENOMEM;
1311 
1312 	/* Offset the buffer to make space for skb headroom */
1313 	offset = NET_SKB_PAD;
1314 	len = buffer_size - offset;
1315 
1316 	ret = gsi_trans_page_add(trans, page, len, offset);
1317 	if (ret)
1318 		put_page(page);
1319 	else
1320 		trans->data = page;	/* transaction owns page now */
1321 
1322 	return ret;
1323 }
1324 
1325 /**
1326  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1327  * @endpoint:	Endpoint to be replenished
1328  *
1329  * The IPA hardware can hold a fixed number of receive buffers for an RX
1330  * endpoint, based on the number of entries in the underlying channel ring
1331  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1332  * more receive buffers can be supplied to the hardware.  Replenishing for
1333  * an endpoint can be disabled, in which case buffers are not queued to
1334  * the hardware.
1335  */
1336 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1337 {
1338 	struct gsi_trans *trans;
1339 
1340 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1341 		return;
1342 
1343 	/* Skip it if it's already active */
1344 	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1345 		return;
1346 
1347 	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1348 		bool doorbell;
1349 
1350 		if (ipa_endpoint_replenish_one(endpoint, trans))
1351 			goto try_again_later;
1352 
1353 
1354 		/* Ring the doorbell if we've got a full batch */
1355 		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1356 		gsi_trans_commit(trans, doorbell);
1357 	}
1358 
1359 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1360 
1361 	return;
1362 
1363 try_again_later:
1364 	gsi_trans_free(trans);
1365 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1366 
1367 	/* Whenever a receive buffer transaction completes we'll try to
1368 	 * replenish again.  It's unlikely, but if we fail to supply even
1369 	 * one buffer, nothing will trigger another replenish attempt.
1370 	 * If the hardware has no receive buffers queued, schedule work to
1371 	 * try replenishing again.
1372 	 */
1373 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1374 		schedule_delayed_work(&endpoint->replenish_work,
1375 				      msecs_to_jiffies(1));
1376 }
1377 
1378 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1379 {
1380 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1381 
1382 	/* Start replenishing if hardware currently has no buffers */
1383 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1384 		ipa_endpoint_replenish(endpoint);
1385 }
1386 
/* Prevent further receive buffer replenishing for an endpoint */
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
1391 
1392 static void ipa_endpoint_replenish_work(struct work_struct *work)
1393 {
1394 	struct delayed_work *dwork = to_delayed_work(work);
1395 	struct ipa_endpoint *endpoint;
1396 
1397 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1398 
1399 	ipa_endpoint_replenish(endpoint);
1400 }
1401 
1402 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1403 				  void *data, u32 len, u32 extra)
1404 {
1405 	struct sk_buff *skb;
1406 
1407 	if (!endpoint->netdev)
1408 		return;
1409 
1410 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1411 	if (skb) {
1412 		/* Copy the data into the socket buffer and receive it */
1413 		skb_put(skb, len);
1414 		memcpy(skb->data, data, len);
1415 		skb->truesize += extra;
1416 	}
1417 
1418 	ipa_modem_skb_rx(endpoint->netdev, skb);
1419 }
1420 
1421 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1422 				   struct page *page, u32 len)
1423 {
1424 	u32 buffer_size = endpoint->config.rx.buffer_size;
1425 	struct sk_buff *skb;
1426 
1427 	/* Nothing to do if there's no netdev */
1428 	if (!endpoint->netdev)
1429 		return false;
1430 
1431 	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1432 
1433 	skb = build_skb(page_address(page), buffer_size);
1434 	if (skb) {
1435 		/* Reserve the headroom and account for the data */
1436 		skb_reserve(skb, NET_SKB_PAD);
1437 		skb_put(skb, len);
1438 	}
1439 
1440 	/* Receive the buffer (or record drop if unable to build it) */
1441 	ipa_modem_skb_rx(endpoint->netdev, skb);
1442 
1443 	return skb != NULL;
1444 }
1445 
1446  /* The format of an IPA packet status structure is the same for several
1447   * status types (opcodes).  Other types aren't currently supported.
1448  */
1449 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1450 {
1451 	switch (opcode) {
1452 	case IPA_STATUS_OPCODE_PACKET:
1453 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1454 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1455 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1456 		return true;
1457 	default:
1458 		return false;
1459 	}
1460 }
1461 
1462 static bool
1463 ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
1464 {
1465 	struct ipa *ipa = endpoint->ipa;
1466 	enum ipa_status_opcode opcode;
1467 	u32 endpoint_id;
1468 
1469 	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1470 	if (!ipa_status_format_packet(opcode))
1471 		return true;
1472 
1473 	endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1474 	if (endpoint_id != endpoint->endpoint_id)
1475 		return true;
1476 
1477 	return false;	/* Don't skip this packet, process it */
1478 }
1479 
1480 static bool
1481 ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
1482 {
1483 	struct ipa_endpoint *command_endpoint;
1484 	enum ipa_status_mask status_mask;
1485 	struct ipa *ipa = endpoint->ipa;
1486 	u32 endpoint_id;
1487 
1488 	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1489 	if (!status_mask)
1490 		return false;	/* No valid tag */
1491 
1492 	/* The status contains a valid tag.  We know the packet was sent to
1493 	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1494 	 * If the packet came from the AP->command TX endpoint we know
1495 	 * this packet was sent as part of the pipeline clear process.
1496 	 */
1497 	endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1498 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1499 	if (endpoint_id == command_endpoint->endpoint_id) {
1500 		complete(&ipa->completion);
1501 	} else {
1502 		dev_err(&ipa->pdev->dev,
1503 			"unexpected tagged packet from endpoint %u\n",
1504 			endpoint_id);
1505 	}
1506 
1507 	return true;
1508 }
1509 
1510 /* Return whether the status indicates the packet should be dropped */
1511 static bool
1512 ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
1513 {
1514 	enum ipa_status_exception exception;
1515 	struct ipa *ipa = endpoint->ipa;
1516 	u32 rule;
1517 
1518 	/* If the status indicates a tagged transfer, we'll drop the packet */
1519 	if (ipa_endpoint_status_tag_valid(endpoint, data))
1520 		return true;
1521 
1522 	/* Deaggregation exceptions we drop; all other types we consume */
1523 	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1524 	if (exception)
1525 		return exception == IPA_STATUS_EXCEPTION_DEAGGR;
1526 
1527 	/* Drop the packet if it fails to match a routing rule; otherwise no */
1528 	rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1529 
1530 	return rule == IPA_STATUS_RULE_MISS;
1531 }
1532 
/* Walk a receive buffer in which each packet is preceded by an IPA
 * status structure, delivering valid packets to the network stack.
 */
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	struct ipa *ipa = endpoint->ipa;
	u32 resid = total_len;

	/* Process one status (plus optional packet) per iteration */
	while (resid) {
		u32 length;
		u32 align;
		u32 len;

		if (resid < IPA_STATUS_SIZE) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, IPA_STATUS_SIZE);
			break;
		}

		/* Skip over status packets that lack packet data */
		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
			data += IPA_STATUS_SIZE;
			resid -= IPA_STATUS_SIZE;
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status.  If the hardware is configured to
		 * pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = IPA_STATUS_SIZE + ALIGN(length, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, data)) {
			void *data2;
			u32 extra;

			/* Client receives only packet data (no status) */
			data2 = data + IPA_STATUS_SIZE;

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
1594 
1595 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1596 				 struct gsi_trans *trans)
1597 {
1598 	struct page *page;
1599 
1600 	if (endpoint->toward_ipa)
1601 		return;
1602 
1603 	if (trans->cancelled)
1604 		goto done;
1605 
1606 	/* Parse or build a socket buffer using the actual received length */
1607 	page = trans->data;
1608 	if (endpoint->config.status_enable)
1609 		ipa_endpoint_status_parse(endpoint, page, trans->len);
1610 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1611 		trans->data = NULL;	/* Pages have been consumed */
1612 done:
1613 	ipa_endpoint_replenish(endpoint);
1614 }
1615 
1616 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1617 				struct gsi_trans *trans)
1618 {
1619 	if (endpoint->toward_ipa) {
1620 		struct ipa *ipa = endpoint->ipa;
1621 
1622 		/* Nothing to do for command transactions */
1623 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1624 			struct sk_buff *skb = trans->data;
1625 
1626 			if (skb)
1627 				dev_kfree_skb_any(skb);
1628 		}
1629 	} else {
1630 		struct page *page = trans->data;
1631 
1632 		if (page)
1633 			put_page(page);
1634 	}
1635 }
1636 
1637 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1638 {
1639 	const struct ipa_reg *reg;
1640 	u32 val;
1641 
1642 	reg = ipa_reg(ipa, ROUTE);
1643 	/* ROUTE_DIS is 0 */
1644 	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1645 	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1646 	/* ROUTE_DEF_HDR_OFST is 0 */
1647 	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1648 	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1649 
1650 	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
1651 }
1652 
/* Clear the default route by pointing it back at endpoint 0 */
void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
1657 
1658 /**
1659  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1660  * @endpoint:	Endpoint to be reset
1661  *
1662  * If aggregation is active on an RX endpoint when a reset is performed
1663  * on its underlying GSI channel, a special sequence of actions must be
1664  * taken to ensure the IPA pipeline is properly cleared.
1665  *
1666  * Return:	0 if successful, or a negative error code
1667  */
1668 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1669 {
1670 	struct device *dev = &endpoint->ipa->pdev->dev;
1671 	struct ipa *ipa = endpoint->ipa;
1672 	struct gsi *gsi = &ipa->gsi;
1673 	bool suspended = false;
1674 	dma_addr_t addr;
1675 	u32 retries;
1676 	u32 len = 1;
1677 	void *virt;
1678 	int ret;
1679 
1680 	virt = kzalloc(len, GFP_KERNEL);
1681 	if (!virt)
1682 		return -ENOMEM;
1683 
1684 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1685 	if (dma_mapping_error(dev, addr)) {
1686 		ret = -ENOMEM;
1687 		goto out_kfree;
1688 	}
1689 
1690 	/* Force close aggregation before issuing the reset */
1691 	ipa_endpoint_force_close(endpoint);
1692 
1693 	/* Reset and reconfigure the channel with the doorbell engine
1694 	 * disabled.  Then poll until we know aggregation is no longer
1695 	 * active.  We'll re-enable the doorbell (if appropriate) when
1696 	 * we reset again below.
1697 	 */
1698 	gsi_channel_reset(gsi, endpoint->channel_id, false);
1699 
1700 	/* Make sure the channel isn't suspended */
1701 	suspended = ipa_endpoint_program_suspend(endpoint, false);
1702 
1703 	/* Start channel and do a 1 byte read */
1704 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1705 	if (ret)
1706 		goto out_suspend_again;
1707 
1708 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1709 	if (ret)
1710 		goto err_endpoint_stop;
1711 
1712 	/* Wait for aggregation to be closed on the channel */
1713 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1714 	do {
1715 		if (!ipa_endpoint_aggr_active(endpoint))
1716 			break;
1717 		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1718 	} while (retries--);
1719 
1720 	/* Check one last time */
1721 	if (ipa_endpoint_aggr_active(endpoint))
1722 		dev_err(dev, "endpoint %u still active during reset\n",
1723 			endpoint->endpoint_id);
1724 
1725 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1726 
1727 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1728 	if (ret)
1729 		goto out_suspend_again;
1730 
1731 	/* Finally, reset and reconfigure the channel again (re-enabling
1732 	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1733 	 * complete the channel reset sequence.  Finish by suspending the
1734 	 * channel again (if necessary).
1735 	 */
1736 	gsi_channel_reset(gsi, endpoint->channel_id, true);
1737 
1738 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1739 
1740 	goto out_suspend_again;
1741 
1742 err_endpoint_stop:
1743 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1744 out_suspend_again:
1745 	if (suspended)
1746 		(void)ipa_endpoint_program_suspend(endpoint, true);
1747 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1748 out_kfree:
1749 	kfree(virt);
1750 
1751 	return ret;
1752 }
1753 
1754 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1755 {
1756 	u32 channel_id = endpoint->channel_id;
1757 	struct ipa *ipa = endpoint->ipa;
1758 	bool special;
1759 	int ret = 0;
1760 
1761 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1762 	 * is active, we need to handle things specially to recover.
1763 	 * All other cases just need to reset the underlying GSI channel.
1764 	 */
1765 	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1766 			endpoint->config.aggregation;
1767 	if (special && ipa_endpoint_aggr_active(endpoint))
1768 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1769 	else
1770 		gsi_channel_reset(&ipa->gsi, channel_id, true);
1771 
1772 	if (ret)
1773 		dev_err(&ipa->pdev->dev,
1774 			"error %d resetting channel %u for endpoint %u\n",
1775 			ret, endpoint->channel_id, endpoint->endpoint_id);
1776 }
1777 
/* Program all configuration registers for an endpoint */
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		/* RX endpoints either drop immediately on head-of-line
		 * blocking or have HOL blocking disabled entirely.
		 */
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
1811 
1812 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1813 {
1814 	u32 endpoint_id = endpoint->endpoint_id;
1815 	struct ipa *ipa = endpoint->ipa;
1816 	struct gsi *gsi = &ipa->gsi;
1817 	int ret;
1818 
1819 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1820 	if (ret) {
1821 		dev_err(&ipa->pdev->dev,
1822 			"error %d starting %cX channel %u for endpoint %u\n",
1823 			ret, endpoint->toward_ipa ? 'T' : 'R',
1824 			endpoint->channel_id, endpoint_id);
1825 		return ret;
1826 	}
1827 
1828 	if (!endpoint->toward_ipa) {
1829 		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1830 		ipa_endpoint_replenish_enable(endpoint);
1831 	}
1832 
1833 	__set_bit(endpoint_id, ipa->enabled);
1834 
1835 	return 0;
1836 }
1837 
1838 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1839 {
1840 	u32 endpoint_id = endpoint->endpoint_id;
1841 	struct ipa *ipa = endpoint->ipa;
1842 	struct gsi *gsi = &ipa->gsi;
1843 	int ret;
1844 
1845 	if (!test_bit(endpoint_id, ipa->enabled))
1846 		return;
1847 
1848 	__clear_bit(endpoint_id, endpoint->ipa->enabled);
1849 
1850 	if (!endpoint->toward_ipa) {
1851 		ipa_endpoint_replenish_disable(endpoint);
1852 		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1853 	}
1854 
1855 	/* Note that if stop fails, the channel's state is not well-defined */
1856 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1857 	if (ret)
1858 		dev_err(&ipa->pdev->dev,
1859 			"error %d attempting to stop endpoint %u\n", ret,
1860 			endpoint_id);
1861 }
1862 
1863 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1864 {
1865 	struct device *dev = &endpoint->ipa->pdev->dev;
1866 	struct gsi *gsi = &endpoint->ipa->gsi;
1867 	int ret;
1868 
1869 	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1870 		return;
1871 
1872 	if (!endpoint->toward_ipa) {
1873 		ipa_endpoint_replenish_disable(endpoint);
1874 		(void)ipa_endpoint_program_suspend(endpoint, true);
1875 	}
1876 
1877 	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1878 	if (ret)
1879 		dev_err(dev, "error %d suspending channel %u\n", ret,
1880 			endpoint->channel_id);
1881 }
1882 
1883 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1884 {
1885 	struct device *dev = &endpoint->ipa->pdev->dev;
1886 	struct gsi *gsi = &endpoint->ipa->gsi;
1887 	int ret;
1888 
1889 	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1890 		return;
1891 
1892 	if (!endpoint->toward_ipa)
1893 		(void)ipa_endpoint_program_suspend(endpoint, false);
1894 
1895 	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1896 	if (ret)
1897 		dev_err(dev, "error %d resuming channel %u\n", ret,
1898 			endpoint->channel_id);
1899 	else if (!endpoint->toward_ipa)
1900 		ipa_endpoint_replenish_enable(endpoint);
1901 }
1902 
/* Suspend all AP endpoints.  The modem netdev is suspended first so
 * traffic stops before the endpoints beneath it are quiesced.  Does
 * nothing if setup never completed.
 */
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
1914 
/* Resume all AP endpoints; the inverse of ipa_endpoint_suspend().
 * Endpoints are resumed in the opposite order they were suspended,
 * with the modem netdev resumed last.  Does nothing if setup never
 * completed.
 */
void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
1926 
1927 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1928 {
1929 	struct gsi *gsi = &endpoint->ipa->gsi;
1930 	u32 channel_id = endpoint->channel_id;
1931 
1932 	/* Only AP endpoints get set up */
1933 	if (endpoint->ee_id != GSI_EE_AP)
1934 		return;
1935 
1936 	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1937 	if (!endpoint->toward_ipa) {
1938 		/* RX transactions require a single TRE, so the maximum
1939 		 * backlog is the same as the maximum outstanding TREs.
1940 		 */
1941 		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1942 		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1943 		INIT_DELAYED_WORK(&endpoint->replenish_work,
1944 				  ipa_endpoint_replenish_work);
1945 	}
1946 
1947 	ipa_endpoint_program(endpoint);
1948 
1949 	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1950 }
1951 
/* Inverse of ipa_endpoint_setup_one().  The set_up bit is cleared
 * first, then any pending RX replenish work is canceled before the
 * endpoint is reset.
 */
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}
1961 
/* Set up every defined endpoint (AP endpoints only take effect;
 * ipa_endpoint_setup_one() skips the rest).
 */
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}
1969 
/* Tear down every endpoint that was previously set up */
void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
}
1977 
1978 void ipa_endpoint_deconfig(struct ipa *ipa)
1979 {
1980 	ipa->available_count = 0;
1981 	bitmap_free(ipa->available);
1982 	ipa->available = NULL;
1983 }
1984 
/* Learn from hardware which endpoints exist (via the FLAVOR_0
 * register, where supported), build the "available" bitmap, and
 * validate that every defined endpoint is available and has the
 * right direction.  Returns 0 if successful, or a negative error
 * code.
 */
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_reg *reg;
	u32 endpoint_id;
	u32 hw_limit;
	u32 tx_count;
	u32 rx_count;
	u32 rx_base;
	u32 limit;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
		if (!ipa->available)
			return -ENOMEM;
		ipa->available_count = IPA_ENDPOINT_MAX;

		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);

		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number supported by software.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	/* Our RX is an IPA producer; our TX is an IPA consumer. */
	tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
	rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);

	/* RX endpoints have the highest numbers, so rx_base + rx_count
	 * is one more than the largest valid endpoint ID.
	 */
	limit = rx_base + rx_count;
	if (limit > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints, %u > %u\n",
			limit, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}

	/* Until IPA v5.0, the max endpoint ID was 32 */
	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
	if (limit > hw_limit) {
		dev_err(dev, "unexpected endpoint count, %u > %u\n",
			limit, hw_limit);
		return -EINVAL;
	}

	/* Allocate and initialize the available endpoint bitmap */
	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
	if (!ipa->available)
		return -ENOMEM;
	ipa->available_count = limit;

	/* Mark all supported RX and TX endpoints as available */
	bitmap_set(ipa->available, 0, tx_count);
	bitmap_set(ipa->available, rx_base, rx_count);

	/* Verify each defined endpoint against what hardware provides */
	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;

		if (endpoint_id >= limit) {
			dev_err(dev, "invalid endpoint id, %u > %u\n",
				endpoint_id, limit - 1);
			goto err_free_bitmap;
		}

		if (!test_bit(endpoint_id, ipa->available)) {
			dev_err(dev, "unavailable endpoint id %u\n",
				endpoint_id);
			goto err_free_bitmap;
		}

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->toward_ipa) {
			if (endpoint_id < tx_count)
				continue;
		} else if (endpoint_id >= rx_base) {
			continue;
		}

		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
		goto err_free_bitmap;
	}

	return 0;

err_free_bitmap:
	ipa_endpoint_deconfig(ipa);

	return -EINVAL;
}
2089 
2090 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2091 				  const struct ipa_gsi_endpoint_data *data)
2092 {
2093 	struct ipa_endpoint *endpoint;
2094 
2095 	endpoint = &ipa->endpoint[data->endpoint_id];
2096 
2097 	if (data->ee_id == GSI_EE_AP)
2098 		ipa->channel_map[data->channel_id] = endpoint;
2099 	ipa->name_map[name] = endpoint;
2100 
2101 	endpoint->ipa = ipa;
2102 	endpoint->ee_id = data->ee_id;
2103 	endpoint->channel_id = data->channel_id;
2104 	endpoint->endpoint_id = data->endpoint_id;
2105 	endpoint->toward_ipa = data->toward_ipa;
2106 	endpoint->config = data->endpoint.config;
2107 
2108 	__set_bit(endpoint->endpoint_id, ipa->defined);
2109 }
2110 
2111 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
2112 {
2113 	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2114 
2115 	memset(endpoint, 0, sizeof(*endpoint));
2116 }
2117 
/* Inverse of ipa_endpoint_init(): tear down every defined endpoint,
 * free the endpoint state bitmaps, and clear the lookup maps.
 */
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 endpoint_id;

	ipa->filtered = 0;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);

	/* Free bitmaps in the reverse of their allocation order */
	bitmap_free(ipa->enabled);
	ipa->enabled = NULL;
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
2137 
/* Initialize the endpoint array from configuration data, recording
 * the set of endpoints that support filtering in ipa->filtered.
 * Returns 0 if successful, or a negative error code.  (Note: this
 * does *not* return the filter bitmask itself.)
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filtered;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	/* Number of endpoints is one more than the maximum ID */
	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
	if (!ipa->endpoint_count)
		return -EINVAL;

	/* Initialize endpoint state bitmaps */
	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->defined)
		return -ENOMEM;

	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->set_up)
		goto err_free_defined;

	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->enabled)
		goto err_free_set_up;

	/* Accumulate the mask of filtering endpoints as we go */
	filtered = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filtered |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	/* Make sure the set of filtered endpoints is valid */
	if (!ipa_filtered_valid(ipa, filtered)) {
		ipa_endpoint_exit(ipa);

		return -EINVAL;
	}

	ipa->filtered = filtered;

	return 0;

err_free_set_up:
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
err_free_defined:
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	return -ENOMEM;
}
2198