xref: /linux/drivers/net/ipa/ipa_endpoint.c (revision 248376b1b13f7300e94a9f8d97062d43dfa4a847)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2021 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13 
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24 #include "ipa_power.h"
25 
26 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
27 
28 /* Hardware is told about receive buffers once a "batch" has been queued */
29 #define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
30 
31 /* The amount of RX buffer space consumed by standard skb overhead */
32 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
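/* SKB_MAX_ORDER(NET_SKB_PAD, 0) is the usable payload in a single page,
 * so this overhead works out to NET_SKB_PAD plus the aligned size of
 * struct skb_shared_info (a few hundred bytes on a typical 64-bit build).
 */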
33 
34 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
35 #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
36 
37 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
38 
39 /** enum ipa_status_opcode - status element opcode hardware values */
40 enum ipa_status_opcode {
41 	IPA_STATUS_OPCODE_PACKET		= 0x01,
42 	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
43 	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
44 	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
45 };
46 
47 /** enum ipa_status_exception - status element exception type */
48 enum ipa_status_exception {
49 	/* 0 means no exception */
50 	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
51 };
52 
53 /* Status element provided by hardware */
54 struct ipa_status {
55 	u8 opcode;		/* enum ipa_status_opcode */
56 	u8 exception;		/* enum ipa_status_exception */
57 	__le16 mask;
58 	__le16 pkt_len;
59 	u8 endp_src_idx;
60 	u8 endp_dst_idx;
61 	__le32 metadata;
62 	__le32 flags1;
63 	__le64 flags2;
64 	__le32 flags3;
65 	__le32 flags4;
66 };
67 
68 /* Field masks for struct ipa_status structure fields */
69 #define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
70 #define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
71 #define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
72 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
73 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
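/* Example of use (see ipa_endpoint_status_skip() below): the destination
 * endpoint ID occupies the low five bits of endp_dst_idx and is extracted
 * with u8_get_bits(status->endp_dst_idx, IPA_STATUS_DST_IDX_FMASK).
 */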
74 
75 /* Compute the aggregation size value to use for a given buffer size */
76 static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
77 {
78 	/* A hard aggregation limit will not be crossed; aggregation closes
79 	 * if saving incoming data would cross the hard byte limit boundary.
80 	 *
81 	 * With a soft limit, aggregation closes *after* the size boundary
82 	 * has been crossed, so the byte limit must leave enough room below
83 	 * the buffer size to hold a full MTU of data plus overhead.
84 	 */
85 	if (!aggr_hard_limit)
86 		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
87 
88 	/* The byte limit is encoded as a number of kilobytes */
89 
90 	return rx_buffer_size / SZ_1K;
91 }
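/* Worked example (illustrative numbers): with 8192 bytes available and a
 * hard aggregation limit, this encodes as 8192 / SZ_1K = 8.  With a soft
 * limit, IPA_MTU plus IPA_RX_BUFFER_OVERHEAD (very roughly 1.9 KB on a
 * typical 64-bit system) is subtracted first, so the same 8192 bytes
 * encode as 6.
 */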
92 
93 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
94 			    const struct ipa_gsi_endpoint_data *all_data,
95 			    const struct ipa_gsi_endpoint_data *data)
96 {
97 	const struct ipa_gsi_endpoint_data *other_data;
98 	struct device *dev = &ipa->pdev->dev;
99 	enum ipa_endpoint_name other_name;
100 
101 	if (ipa_gsi_endpoint_data_empty(data))
102 		return true;
103 
104 	if (!data->toward_ipa) {
105 		const struct ipa_endpoint_rx *rx_config;
106 		const struct ipa_reg *reg;
107 		u32 buffer_size;
108 		u32 aggr_size;
109 		u32 limit;
110 
111 		if (data->endpoint.filter_support) {
112 			dev_err(dev, "filtering not supported for "
113 					"RX endpoint %u\n",
114 				data->endpoint_id);
115 			return false;
116 		}
117 
118 		/* Nothing more to check for non-AP RX */
119 		if (data->ee_id != GSI_EE_AP)
120 			return true;
121 
122 		rx_config = &data->endpoint.config.rx;
123 
124 		/* The buffer size must hold an MTU plus overhead */
125 		buffer_size = rx_config->buffer_size;
126 		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
127 		if (buffer_size < limit) {
128 			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
129 				data->endpoint_id, buffer_size, limit);
130 			return false;
131 		}
132 
133 		if (!data->endpoint.config.aggregation) {
134 			bool result = true;
135 
136 			/* No aggregation; check for bogus aggregation data */
137 			if (rx_config->aggr_time_limit) {
138 				dev_err(dev,
139 					"time limit with no aggregation for RX endpoint %u\n",
140 					data->endpoint_id);
141 				result = false;
142 			}
143 
144 			if (rx_config->aggr_hard_limit) {
145 				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
146 					data->endpoint_id);
147 				result = false;
148 			}
149 
150 			if (rx_config->aggr_close_eof) {
151 				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
152 					data->endpoint_id);
153 				result = false;
154 			}
155 
156 			return result;	/* Nothing more to check */
157 		}
158 
159 		/* For an endpoint supporting receive aggregation, the byte
160 		 * limit defines the point at which aggregation closes.  This
161 		 * check ensures the receive buffer size doesn't result in a
162 		 * limit that exceeds what's representable in the aggregation
163 		 * byte limit field.
164 		 */
165 		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
166 					     rx_config->aggr_hard_limit);
167 		reg = ipa_reg(ipa, ENDP_INIT_AGGR);
168 
169 		limit = ipa_reg_field_max(reg, BYTE_LIMIT);
170 		if (aggr_size > limit) {
171 			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
172 				data->endpoint_id, aggr_size, limit);
173 
174 			return false;
175 		}
176 
177 		return true;	/* Nothing more to check for RX */
178 	}
179 
180 	/* Starting with IPA v4.5 sequencer replication is obsolete */
181 	if (ipa->version >= IPA_VERSION_4_5) {
182 		if (data->endpoint.config.tx.seq_rep_type) {
183 			dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
184 				data->endpoint_id);
185 			return false;
186 		}
187 	}
188 
189 	if (data->endpoint.config.status_enable) {
190 		other_name = data->endpoint.config.tx.status_endpoint;
191 		if (other_name >= count) {
192 			dev_err(dev, "status endpoint name %u out of range "
193 					"for endpoint %u\n",
194 				other_name, data->endpoint_id);
195 			return false;
196 		}
197 
198 		/* Status endpoint must be defined... */
199 		other_data = &all_data[other_name];
200 		if (ipa_gsi_endpoint_data_empty(other_data)) {
201 			dev_err(dev, "DMA endpoint name %u undefined "
202 					"for endpoint %u\n",
203 				other_name, data->endpoint_id);
204 			return false;
205 		}
206 
207 		/* ...and has to be an RX endpoint... */
208 		if (other_data->toward_ipa) {
209 			dev_err(dev,
210 				"status endpoint for endpoint %u not RX\n",
211 				data->endpoint_id);
212 			return false;
213 		}
214 
215 		/* ...and if it's to be an AP endpoint... */
216 		if (other_data->ee_id == GSI_EE_AP) {
217 			/* ...make sure it has status enabled. */
218 			if (!other_data->endpoint.config.status_enable) {
219 				dev_err(dev,
220 					"status not enabled for endpoint %u\n",
221 					other_data->endpoint_id);
222 				return false;
223 			}
224 		}
225 	}
226 
227 	if (data->endpoint.config.dma_mode) {
228 		other_name = data->endpoint.config.dma_endpoint;
229 		if (other_name >= count) {
230 			dev_err(dev, "DMA endpoint name %u out of range "
231 					"for endpoint %u\n",
232 				other_name, data->endpoint_id);
233 			return false;
234 		}
235 
236 		other_data = &all_data[other_name];
237 		if (ipa_gsi_endpoint_data_empty(other_data)) {
238 			dev_err(dev, "DMA endpoint name %u undefined "
239 					"for endpoint %u\n",
240 				other_name, data->endpoint_id);
241 			return false;
242 		}
243 	}
244 
245 	return true;
246 }
247 
248 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
249 				    const struct ipa_gsi_endpoint_data *data)
250 {
251 	const struct ipa_gsi_endpoint_data *dp = data;
252 	struct device *dev = &ipa->pdev->dev;
253 	enum ipa_endpoint_name name;
254 
255 	if (count > IPA_ENDPOINT_COUNT) {
256 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
257 			count, IPA_ENDPOINT_COUNT);
258 		return false;
259 	}
260 
261 	/* Make sure needed endpoints have defined data */
262 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
263 		dev_err(dev, "command TX endpoint not defined\n");
264 		return false;
265 	}
266 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
267 		dev_err(dev, "LAN RX endpoint not defined\n");
268 		return false;
269 	}
270 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
271 		dev_err(dev, "AP->modem TX endpoint not defined\n");
272 		return false;
273 	}
274 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
275 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
276 		return false;
277 	}
278 
279 	for (name = 0; name < count; name++, dp++)
280 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
281 			return false;
282 
283 	return true;
284 }
285 
286 /* Allocate a transaction to use on a non-command endpoint */
287 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
288 						  u32 tre_count)
289 {
290 	struct gsi *gsi = &endpoint->ipa->gsi;
291 	u32 channel_id = endpoint->channel_id;
292 	enum dma_data_direction direction;
293 
294 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
295 
296 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
297 }
298 
299 /* suspend_delay represents suspend for RX, delay for TX endpoints.
300  * Note that suspend is not supported starting with IPA v4.0, and
301  * delay mode should not be used starting with IPA v4.2.
302  */
303 static bool
304 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
305 {
306 	struct ipa *ipa = endpoint->ipa;
307 	const struct ipa_reg *reg;
308 	u32 field_id;
309 	u32 offset;
310 	bool state;
311 	u32 mask;
312 	u32 val;
313 
314 	if (endpoint->toward_ipa)
315 		WARN_ON(ipa->version >= IPA_VERSION_4_2);
316 	else
317 		WARN_ON(ipa->version >= IPA_VERSION_4_0);
318 
319 	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
320 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
321 	val = ioread32(ipa->reg_virt + offset);
322 
323 	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
324 	mask = ipa_reg_bit(reg, field_id);
325 
326 	state = !!(val & mask);
327 
328 	/* Don't bother if it's already in the requested state */
329 	if (suspend_delay != state) {
330 		val ^= mask;
331 		iowrite32(val, ipa->reg_virt + offset);
332 	}
333 
334 	return state;
335 }
336 
337 /* We don't care what the previous state was for delay mode */
338 static void
339 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
340 {
341 	/* Delay mode should not be used for IPA v4.2+ */
342 	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
343 	WARN_ON(!endpoint->toward_ipa);
344 
345 	(void)ipa_endpoint_init_ctrl(endpoint, enable);
346 }
347 
348 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
349 {
350 	u32 mask = BIT(endpoint->endpoint_id);
351 	struct ipa *ipa = endpoint->ipa;
352 	const struct ipa_reg *reg;
353 	u32 val;
354 
355 	WARN_ON(!(mask & ipa->available));
356 
357 	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
358 	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
359 
360 	return !!(val & mask);
361 }
362 
363 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
364 {
365 	u32 mask = BIT(endpoint->endpoint_id);
366 	struct ipa *ipa = endpoint->ipa;
367 	const struct ipa_reg *reg;
368 
369 	WARN_ON(!(mask & ipa->available));
370 
371 	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
372 	iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
373 }
374 
375 /**
376  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
377  * @endpoint:	Endpoint on which to emulate a suspend
378  *
379  * Emulate an IPA SUSPEND interrupt for an endpoint that was suspended
380  * with an aggregation frame open.  This works around a hardware issue
381  * in IPA version 3.5.1 where the suspend interrupt is not generated
382  * when it should be.
383  */
384 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
385 {
386 	struct ipa *ipa = endpoint->ipa;
387 
388 	if (!endpoint->config.aggregation)
389 		return;
390 
391 	/* Nothing to do if the endpoint doesn't have aggregation open */
392 	if (!ipa_endpoint_aggr_active(endpoint))
393 		return;
394 
395 	/* Force close aggregation */
396 	ipa_endpoint_force_close(endpoint);
397 
398 	ipa_interrupt_simulate_suspend(ipa->interrupt);
399 }
400 
401 /* Returns previous suspend state (true means suspend was enabled) */
402 static bool
403 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
404 {
405 	bool suspended;
406 
407 	if (endpoint->ipa->version >= IPA_VERSION_4_0)
408 		return enable;	/* For IPA v4.0+, no change made */
409 
410 	WARN_ON(endpoint->toward_ipa);
411 
412 	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
413 
414 	/* A client suspended with an open aggregation frame will not
415 	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
416 	 * ipa_endpoint_suspend_aggr() handle this.
417 	 */
418 	if (enable && !suspended)
419 		ipa_endpoint_suspend_aggr(endpoint);
420 
421 	return suspended;
422 }
423 
424 /* Put all modem RX endpoints into suspend mode, and stop transmission
425  * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
426  * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
427  * control instead.
428  */
429 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
430 {
431 	u32 endpoint_id;
432 
433 	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
434 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
435 
436 		if (endpoint->ee_id != GSI_EE_MODEM)
437 			continue;
438 
439 		if (!endpoint->toward_ipa)
440 			(void)ipa_endpoint_program_suspend(endpoint, enable);
441 		else if (ipa->version < IPA_VERSION_4_2)
442 			ipa_endpoint_program_delay(endpoint, enable);
443 		else
444 			gsi_modem_channel_flow_control(&ipa->gsi,
445 						       endpoint->channel_id,
446 						       enable);
447 	}
448 }
449 
450 /* Reset all modem endpoints to use the default exception endpoint */
451 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
452 {
453 	u32 initialized = ipa->initialized;
454 	struct gsi_trans *trans;
455 	u32 count;
456 
457 	/* We need one command per modem TX endpoint, plus the commands
458 	 * that clear the pipeline.
459 	 */
460 	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
461 	trans = ipa_cmd_trans_alloc(ipa, count);
462 	if (!trans) {
463 		dev_err(&ipa->pdev->dev,
464 			"no transaction to reset modem exception endpoints\n");
465 		return -EBUSY;
466 	}
467 
468 	while (initialized) {
469 		u32 endpoint_id = __ffs(initialized);
470 		struct ipa_endpoint *endpoint;
471 		const struct ipa_reg *reg;
472 		u32 offset;
473 
474 		initialized ^= BIT(endpoint_id);
475 
476 		/* We only reset modem TX endpoints */
477 		endpoint = &ipa->endpoint[endpoint_id];
478 		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
479 			continue;
480 
481 		reg = ipa_reg(ipa, ENDP_STATUS);
482 		offset = ipa_reg_n_offset(reg, endpoint_id);
483 
484 		/* Value written is 0, and all bits are updated.  That
485 		 * means status is disabled on the endpoint, and as a
486 		 * result all other fields in the register are ignored.
487 		 */
488 		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
489 	}
490 
491 	ipa_cmd_pipeline_clear_add(trans);
492 
493 	gsi_trans_commit_wait(trans);
494 
495 	ipa_cmd_pipeline_clear_wait(ipa);
496 
497 	return 0;
498 }
499 
500 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
501 {
502 	u32 endpoint_id = endpoint->endpoint_id;
503 	struct ipa *ipa = endpoint->ipa;
504 	enum ipa_cs_offload_en enabled;
505 	const struct ipa_reg *reg;
506 	u32 val = 0;
507 
508 	reg = ipa_reg(ipa, ENDP_INIT_CFG);
509 	/* FRAG_OFFLOAD_EN is 0 */
510 	if (endpoint->config.checksum) {
511 		enum ipa_version version = ipa->version;
512 
513 		if (endpoint->toward_ipa) {
514 			u32 off;
515 
516 			/* Checksum header offset is in 4-byte units */
517 			off = sizeof(struct rmnet_map_header) / sizeof(u32);
518 			val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
519 
520 			enabled = version < IPA_VERSION_4_5
521 					? IPA_CS_OFFLOAD_UL
522 					: IPA_CS_OFFLOAD_INLINE;
523 		} else {
524 			enabled = version < IPA_VERSION_4_5
525 					? IPA_CS_OFFLOAD_DL
526 					: IPA_CS_OFFLOAD_INLINE;
527 		}
528 	} else {
529 		enabled = IPA_CS_OFFLOAD_NONE;
530 	}
531 	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
532 	/* CS_GEN_QMB_MASTER_SEL is 0 */
533 
534 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
535 }
536 
537 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
538 {
539 	u32 endpoint_id = endpoint->endpoint_id;
540 	struct ipa *ipa = endpoint->ipa;
541 	const struct ipa_reg *reg;
542 	u32 val;
543 
544 	if (!endpoint->toward_ipa)
545 		return;
546 
547 	reg = ipa_reg(ipa, ENDP_INIT_NAT);
548 	val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
549 
550 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
551 }
552 
553 static u32
554 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
555 {
556 	u32 header_size = sizeof(struct rmnet_map_header);
557 
558 	/* Without checksum offload, we just have the MAP header */
559 	if (!endpoint->config.checksum)
560 		return header_size;
561 
562 	if (version < IPA_VERSION_4_5) {
563 		/* Checksum header inserted for AP TX endpoints only */
564 		if (endpoint->toward_ipa)
565 			header_size += sizeof(struct rmnet_map_ul_csum_header);
566 	} else {
567 		/* Checksum header is used in both directions */
568 		header_size += sizeof(struct rmnet_map_v5_csum_header);
569 	}
570 
571 	return header_size;
572 }
573 
574 /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
575 static u32 ipa_header_size_encode(enum ipa_version version,
576 				  const struct ipa_reg *reg, u32 header_size)
577 {
578 	u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
579 	u32 val;
580 
581 	/* We know field_max can be used as a mask (2^n - 1) */
582 	val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
583 	if (version < IPA_VERSION_4_5) {
584 		WARN_ON(header_size > field_max);
585 		return val;
586 	}
587 
588 	/* IPA v4.5 adds a few more most-significant bits */
589 	header_size >>= hweight32(field_max);
590 	WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
591 	val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
592 
593 	return val;
594 }
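/* Sketch of the IPA v4.5+ split (the field width is illustrative): if
 * HDR_LEN were a 6-bit field (field_max 0x3f) and header_size were 70
 * (0x46), the low six bits (0x06) would be encoded in HDR_LEN and the
 * remaining high bit (70 >> 6 = 1) in HDR_LEN_MSB.
 */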
595 
596 /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
597 static u32 ipa_metadata_offset_encode(enum ipa_version version,
598 				      const struct ipa_reg *reg, u32 offset)
599 {
600 	u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
601 	u32 val;
602 
603 	/* We know field_max can be used as a mask (2^n - 1) */
604 	val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
605 	if (version < IPA_VERSION_4_5) {
606 		WARN_ON(offset > field_max);
607 		return val;
608 	}
609 
610 	/* IPA v4.5 adds a few more most-significant bits */
611 	offset >>= hweight32(field_max);
612 	WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
613 	val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
614 
615 	return val;
616 }
617 
618 /**
619  * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
620  * @endpoint:	Endpoint pointer
621  *
622  * We program QMAP endpoints so each packet received is preceded by a QMAP
623  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
624  * packet size field, and we have the IPA hardware populate both for each
625  * received packet.  The header is configured (in the HDR_EXT register)
626  * to use big endian format.
627  *
628  * The packet size is written into the QMAP header's pkt_len field.  That
629  * location is defined here using the HDR_OFST_PKT_SIZE field.
630  *
631  * The mux_id comes from a 4-byte metadata value supplied with each packet
632  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
633  * value that we want, in its low-order byte.  A bitmask defined in the
634  * endpoint's METADATA_MASK register defines which byte within the modem
635  * metadata contains the mux_id.  And the OFST_METADATA field programmed
636  * here indicates where the extracted byte should be placed within the QMAP
637  * header.
638  */
639 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
640 {
641 	u32 endpoint_id = endpoint->endpoint_id;
642 	struct ipa *ipa = endpoint->ipa;
643 	const struct ipa_reg *reg;
644 	u32 val = 0;
645 
646 	reg = ipa_reg(ipa, ENDP_INIT_HDR);
647 	if (endpoint->config.qmap) {
648 		enum ipa_version version = ipa->version;
649 		size_t header_size;
650 
651 		header_size = ipa_qmap_header_size(version, endpoint);
652 		val = ipa_header_size_encode(version, reg, header_size);
653 
654 		/* Define how to fill fields in a received QMAP header */
655 		if (!endpoint->toward_ipa) {
656 			u32 off;     /* Field offset within header */
657 
658 			/* Where IPA will write the metadata value */
659 			off = offsetof(struct rmnet_map_header, mux_id);
660 			val |= ipa_metadata_offset_encode(version, reg, off);
661 
662 			/* Where IPA will write the length */
663 			off = offsetof(struct rmnet_map_header, pkt_len);
664 			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
665 			if (version >= IPA_VERSION_4_5)
666 				off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
667 
668 			val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
669 			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
670 		}
671 		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
672 		val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
673 
674 		/* HDR_ADDITIONAL_CONST_LEN is 0 (RX only) */
675 		/* HDR_A5_MUX is 0 */
676 		/* HDR_LEN_INC_DEAGG_HDR is 0 */
677 		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
678 	}
679 
680 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
681 }
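/* Illustration, using the rmnet_map_header layout from <linux/if_rmnet.h>:
 * mux_id is byte 1 and pkt_len is the 16-bit field at offset 2, so for an
 * RX QMAP endpoint the register ends up with HDR_OFST_METADATA = 1 and
 * HDR_OFST_PKT_SIZE = 2 (low-order bits only for IPA v4.5+, where the
 * upper bits are programmed in HDR_EXT).
 */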
682 
683 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
684 {
685 	u32 pad_align = endpoint->config.rx.pad_align;
686 	u32 endpoint_id = endpoint->endpoint_id;
687 	struct ipa *ipa = endpoint->ipa;
688 	const struct ipa_reg *reg;
689 	u32 val = 0;
690 
691 	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
692 	if (endpoint->config.qmap) {
693 		/* We have a header, so we must specify its endianness */
694 		val |= ipa_reg_bit(reg, HDR_ENDIANNESS);	/* big endian */
695 
696 		/* A QMAP header contains a 6 bit pad field at offset 0.
697 		 * The RMNet driver assumes this field is meaningful in
698 		 * packets it receives, and assumes the header's payload
699 		 * length includes that padding.  The RMNet driver does
700 		 * *not* pad packets it sends, however, so the pad field
701 		 * (although 0) should be ignored.
702 		 */
703 		if (!endpoint->toward_ipa) {
704 			val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
705 			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
706 			val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
707 			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
708 		}
709 	}
710 
711 	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
712 	if (!endpoint->toward_ipa)
713 		val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
714 
715 	/* IPA v4.5 adds some most-significant bits to a few fields,
716 	 * two of which are defined in the HDR (not HDR_EXT) register.
717 	 */
718 	if (ipa->version >= IPA_VERSION_4_5) {
719 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
720 		if (endpoint->config.qmap && !endpoint->toward_ipa) {
721 			u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
722 			u32 off;     /* Field offset within header */
723 
724 			off = offsetof(struct rmnet_map_header, pkt_len);
725 			/* Low bits are in the ENDP_INIT_HDR register */
726 			off >>= hweight32(mask);
727 			val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
728 			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
729 		}
730 	}
731 
732 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
733 }
734 
735 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
736 {
737 	u32 endpoint_id = endpoint->endpoint_id;
738 	struct ipa *ipa = endpoint->ipa;
739 	const struct ipa_reg *reg;
740 	u32 val = 0;
741 	u32 offset;
742 
743 	if (endpoint->toward_ipa)
744 		return;		/* Register not valid for TX endpoints */
745 
746 	reg = ipa_reg(ipa,  ENDP_INIT_HDR_METADATA_MASK);
747 	offset = ipa_reg_n_offset(reg, endpoint_id);
748 
749 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
750 	if (endpoint->config.qmap)
751 		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
752 
753 	iowrite32(val, ipa->reg_virt + offset);
754 }
755 
756 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
757 {
758 	struct ipa *ipa = endpoint->ipa;
759 	const struct ipa_reg *reg;
760 	u32 offset;
761 	u32 val;
762 
763 	if (!endpoint->toward_ipa)
764 		return;		/* Register not valid for RX endpoints */
765 
766 	reg = ipa_reg(ipa, ENDP_INIT_MODE);
767 	if (endpoint->config.dma_mode) {
768 		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
769 		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
770 
771 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
772 		val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
773 	} else {
774 		val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
775 	}
776 	/* All other bits unspecified (and 0) */
777 
778 	offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
779 	iowrite32(val, ipa->reg_virt + offset);
780 }
781 
782 /* For IPA v4.5+, times are expressed using Qtime.  The AP uses one of two
783  * pulse generators (0 and 1) to measure elapsed time.  In ipa_qtime_config()
784  * they're configured to have granularity 100 usec and 1 msec, respectively.
785  *
786  * The return value is the positive or negative Qtime value to use to
787  * express the (microsecond) time provided.  A positive return value
788  * means pulse generator 0 can be used; otherwise use pulse generator 1.
789  */
790 static int ipa_qtime_val(u32 microseconds, u32 max)
791 {
792 	u32 val;
793 
794 	/* Use 100 microsecond granularity if possible */
795 	val = DIV_ROUND_CLOSEST(microseconds, 100);
796 	if (val <= max)
797 		return (int)val;
798 
799 	/* Have to use pulse generator 1 (millisecond granularity) */
800 	val = DIV_ROUND_CLOSEST(microseconds, 1000);
801 	WARN_ON(val > max);
802 
803 	return (int)-val;
804 }
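/* Worked example (illustrative): with max = 31, a 2500 microsecond period
 * gives DIV_ROUND_CLOSEST(2500, 100) = 25, which fits, so 25 is returned
 * and pulse generator 0 (100 usec granularity) is used.  A 20000
 * microsecond period gives 200 > 31, so the millisecond path yields
 * DIV_ROUND_CLOSEST(20000, 1000) = 20 and -20 is returned, selecting
 * pulse generator 1.
 */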
805 
806 /* Encode the aggregation timer limit (microseconds) based on IPA version */
807 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
808 				  u32 microseconds)
809 {
810 	u32 max;
811 	u32 val;
812 
813 	if (!microseconds)
814 		return 0;	/* Nothing to compute if time limit is 0 */
815 
816 	max = ipa_reg_field_max(reg, TIME_LIMIT);
817 	if (ipa->version >= IPA_VERSION_4_5) {
818 		u32 gran_sel;
819 		int ret;
820 
821 		/* Compute the Qtime limit value to use */
822 		ret = ipa_qtime_val(microseconds, max);
823 		if (ret < 0) {
824 			val = -ret;
825 			gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
826 		} else {
827 			val = ret;
828 			gran_sel = 0;
829 		}
830 
831 		return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
832 	}
833 
834 	/* We program aggregation granularity in ipa_hardware_config() */
835 	val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
836 	WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
837 	     microseconds, max * IPA_AGGR_GRANULARITY);
838 
839 	return ipa_reg_encode(reg, TIME_LIMIT, val);
840 }
841 
842 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
843 {
844 	u32 endpoint_id = endpoint->endpoint_id;
845 	struct ipa *ipa = endpoint->ipa;
846 	const struct ipa_reg *reg;
847 	u32 val = 0;
848 
849 	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
850 	if (endpoint->config.aggregation) {
851 		if (!endpoint->toward_ipa) {
852 			const struct ipa_endpoint_rx *rx_config;
853 			u32 buffer_size;
854 			u32 limit;
855 
856 			rx_config = &endpoint->config.rx;
857 			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
858 			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
859 
860 			buffer_size = rx_config->buffer_size;
861 			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
862 						 rx_config->aggr_hard_limit);
863 			val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
864 
865 			limit = rx_config->aggr_time_limit;
866 			val |= aggr_time_limit_encode(ipa, reg, limit);
867 
868 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
869 
870 			if (rx_config->aggr_close_eof)
871 				val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
872 		} else {
873 			val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
874 			val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
875 			/* other fields ignored */
876 		}
877 		/* AGGR_FORCE_CLOSE is 0 */
878 		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
879 	} else {
880 		val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
881 		/* other fields ignored */
882 	}
883 
884 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
885 }
886 
887 /* The head-of-line blocking timer is defined as a tick count.  For
888  * IPA version 4.5 the tick count is based on the Qtimer, which is
889  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
890  * each tick represents 128 cycles of the IPA core clock.
891  *
892  * Return the encoded value to be written to the ENDP_INIT_HOL_BLOCK_TIMER
893  * register, representing the timeout period provided.
894  */
895 static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
896 				  u32 microseconds)
897 {
898 	u32 width;
899 	u32 scale;
900 	u64 ticks;
901 	u64 rate;
902 	u32 high;
903 	u32 val;
904 
905 	if (!microseconds)
906 		return 0;	/* Nothing to compute if timer period is 0 */
907 
908 	if (ipa->version >= IPA_VERSION_4_5) {
909 		u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
910 		u32 gran_sel;
911 		int ret;
912 
913 		/* Compute the Qtime limit value to use */
914 		ret = ipa_qtime_val(microseconds, max);
915 		if (ret < 0) {
916 			val = -ret;
917 			gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
918 		} else {
919 			val = ret;
920 			gran_sel = 0;
921 		}
922 
923 		return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
924 	}
925 
926 	/* Use 64 bit arithmetic to avoid overflow */
927 	rate = ipa_core_clock_rate(ipa);
928 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
929 
930 	/* We still need the result to fit into the field */
931 	WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
932 
933 	/* IPA v3.5.1 through v4.1 just record the tick count */
934 	if (ipa->version < IPA_VERSION_4_2)
935 		return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
936 
937 	/* For IPA v4.2, the tick count is represented by base and
938 	 * scale fields within the 32-bit timer register, where:
939 	 *     ticks = base << scale;
940 	 * The best precision is achieved when the base value is as
941 	 * large as possible.  Find the highest set bit in the tick
942 	 * count, and extract the number of bits in the base field
943 	 * such that high bit is included.
944 	 */
945 	high = fls(ticks);		/* 1..32 (or warning above) */
946 	width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
947 	scale = high > width ? high - width : 0;
948 	if (scale) {
949 		/* If we're scaling, round up to get a closer result */
950 		ticks += 1 << (scale - 1);
951 		/* High bit was set, so rounding might have affected it */
952 		if (fls(ticks) != high)
953 			scale++;
954 	}
955 
956 	val = ipa_reg_encode(reg, TIMER_SCALE, scale);
957 	val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
958 
959 	return val;
960 }
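/* Worked example of the IPA v4.2 base/scale encoding (the clock rate and
 * field width below are illustrative): with a 100 MHz core clock and a
 * 1000 microsecond period,
 *	ticks = 1000 * 100000000 / (128 * 1000000) = 781
 * fls(781) = 10, so with (say) a 5-bit TIMER_BASE_VALUE field the scale
 * is 10 - 5 = 5 and the stored base is (781 + 16) >> 5 = 24, which the
 * hardware expands back to roughly 24 << 5 = 768 ticks.
 */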
961 
962 /* If microseconds is 0, timeout is immediate */
963 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
964 					      u32 microseconds)
965 {
966 	u32 endpoint_id = endpoint->endpoint_id;
967 	struct ipa *ipa = endpoint->ipa;
968 	const struct ipa_reg *reg;
969 	u32 val;
970 
971 	/* This should only be changed when HOL_BLOCK_EN is disabled */
972 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
973 	val = hol_block_timer_encode(ipa, reg, microseconds);
974 
975 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
976 }
977 
978 static void
979 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
980 {
981 	u32 endpoint_id = endpoint->endpoint_id;
982 	struct ipa *ipa = endpoint->ipa;
983 	const struct ipa_reg *reg;
984 	u32 offset;
985 	u32 val;
986 
987 	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
988 	offset = ipa_reg_n_offset(reg, endpoint_id);
989 	val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
990 
991 	iowrite32(val, ipa->reg_virt + offset);
992 
993 	/* When enabling, the register must be written twice for IPA v4.5+ */
994 	if (enable && ipa->version >= IPA_VERSION_4_5)
995 		iowrite32(val, ipa->reg_virt + offset);
996 }
997 
998 /* Assumes HOL_BLOCK is in disabled state */
999 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1000 					       u32 microseconds)
1001 {
1002 	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1003 	ipa_endpoint_init_hol_block_en(endpoint, true);
1004 }
1005 
1006 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1007 {
1008 	ipa_endpoint_init_hol_block_en(endpoint, false);
1009 }
1010 
1011 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1012 {
1013 	u32 i;
1014 
1015 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
1016 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
1017 
1018 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1019 			continue;
1020 
1021 		ipa_endpoint_init_hol_block_disable(endpoint);
1022 		ipa_endpoint_init_hol_block_enable(endpoint, 0);
1023 	}
1024 }
1025 
1026 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1027 {
1028 	u32 endpoint_id = endpoint->endpoint_id;
1029 	struct ipa *ipa = endpoint->ipa;
1030 	const struct ipa_reg *reg;
1031 	u32 val = 0;
1032 
1033 	if (!endpoint->toward_ipa)
1034 		return;		/* Register not valid for RX endpoints */
1035 
1036 	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1037 	/* DEAGGR_HDR_LEN is 0 */
1038 	/* PACKET_OFFSET_VALID is 0 */
1039 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
1040 	/* MAX_PACKET_LEN is 0 (not enforced) */
1041 
1042 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1043 }
1044 
1045 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1046 {
1047 	u32 resource_group = endpoint->config.resource_group;
1048 	u32 endpoint_id = endpoint->endpoint_id;
1049 	struct ipa *ipa = endpoint->ipa;
1050 	const struct ipa_reg *reg;
1051 	u32 val;
1052 
1053 	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1054 	val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1055 
1056 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1057 }
1058 
1059 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1060 {
1061 	u32 endpoint_id = endpoint->endpoint_id;
1062 	struct ipa *ipa = endpoint->ipa;
1063 	const struct ipa_reg *reg;
1064 	u32 val;
1065 
1066 	if (!endpoint->toward_ipa)
1067 		return;		/* Register not valid for RX endpoints */
1068 
1069 	reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1070 
1071 	/* Low-order byte configures primary packet processing */
1072 	val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1073 
1074 	/* Second byte (if supported) configures replicated packet processing */
1075 	if (ipa->version < IPA_VERSION_4_5)
1076 		val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
1077 				      endpoint->config.tx.seq_rep_type);
1078 
1079 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1080 }
1081 
1082 /**
1083  * ipa_endpoint_skb_tx() - Transmit a socket buffer
1084  * @endpoint:	Endpoint pointer
1085  * @skb:	Socket buffer to send
1086  *
1087  * Returns:	0 if successful, or a negative error code
1088  */
1089 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1090 {
1091 	struct gsi_trans *trans;
1092 	u32 nr_frags;
1093 	int ret;
1094 
1095 	/* Make sure source endpoint's TLV FIFO has enough entries to
1096 	 * hold the linear portion of the skb and all its fragments.
1097 	 * If not, see if we can linearize it before giving up.
1098 	 */
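	/* For example, if skb_frag_max is 3 and an skb arrives with five
	 * fragments, the skb is linearized so its payload fits in a single
	 * TRE; if linearization fails the packet is rejected with -E2BIG.
	 */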
1099 	nr_frags = skb_shinfo(skb)->nr_frags;
1100 	if (nr_frags > endpoint->skb_frag_max) {
1101 		if (skb_linearize(skb))
1102 			return -E2BIG;
1103 		nr_frags = 0;
1104 	}
1105 
1106 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1107 	if (!trans)
1108 		return -EBUSY;
1109 
1110 	ret = gsi_trans_skb_add(trans, skb);
1111 	if (ret)
1112 		goto err_trans_free;
1113 	trans->data = skb;	/* transaction owns skb now */
1114 
1115 	gsi_trans_commit(trans, !netdev_xmit_more());
1116 
1117 	return 0;
1118 
1119 err_trans_free:
1120 	gsi_trans_free(trans);
1121 
1122 	return -ENOMEM;
1123 }
1124 
1125 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1126 {
1127 	u32 endpoint_id = endpoint->endpoint_id;
1128 	struct ipa *ipa = endpoint->ipa;
1129 	const struct ipa_reg *reg;
1130 	u32 val = 0;
1131 
1132 	reg = ipa_reg(ipa, ENDP_STATUS);
1133 	if (endpoint->config.status_enable) {
1134 		val |= ipa_reg_bit(reg, STATUS_EN);
1135 		if (endpoint->toward_ipa) {
1136 			enum ipa_endpoint_name name;
1137 			u32 status_endpoint_id;
1138 
1139 			name = endpoint->config.tx.status_endpoint;
1140 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1141 
1142 			val |= ipa_reg_encode(reg, STATUS_ENDP,
1143 					      status_endpoint_id);
1144 		}
1145 		/* STATUS_LOCATION is 0, meaning status element precedes
1146 		 * packet (not present for IPA v4.5+)
1147 		 */
1148 		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1149 	}
1150 
1151 	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1152 }
1153 
1154 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1155 				      struct gsi_trans *trans)
1156 {
1157 	struct page *page;
1158 	u32 buffer_size;
1159 	u32 offset;
1160 	u32 len;
1161 	int ret;
1162 
1163 	buffer_size = endpoint->config.rx.buffer_size;
1164 	page = dev_alloc_pages(get_order(buffer_size));
1165 	if (!page)
1166 		return -ENOMEM;
1167 
1168 	/* Offset the buffer to make space for skb headroom */
1169 	offset = NET_SKB_PAD;
1170 	len = buffer_size - offset;
1171 
1172 	ret = gsi_trans_page_add(trans, page, len, offset);
1173 	if (ret)
1174 		put_page(page);
1175 	else
1176 		trans->data = page;	/* transaction owns page now */
1177 
1178 	return ret;
1179 }
1180 
1181 /**
1182  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1183  * @endpoint:	Endpoint to be replenished
1184  *
1185  * The IPA hardware can hold a fixed number of receive buffers for an RX
1186  * endpoint, based on the number of entries in the underlying channel ring
1187  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1188  * more receive buffers can be supplied to the hardware.  Replenishing for
1189  * an endpoint can be disabled, in which case buffers are not queued to
1190  * the hardware.
1191  */
1192 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1193 {
1194 	struct gsi_trans *trans;
1195 
1196 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1197 		return;
1198 
1199 	/* Skip it if it's already active */
1200 	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1201 		return;
1202 
1203 	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1204 		bool doorbell;
1205 
1206 		if (ipa_endpoint_replenish_one(endpoint, trans))
1207 			goto try_again_later;
1208 
1210 		/* Ring the doorbell if we've got a full batch */
1211 		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1212 		gsi_trans_commit(trans, doorbell);
1213 	}
1214 
1215 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1216 
1217 	return;
1218 
1219 try_again_later:
1220 	gsi_trans_free(trans);
1221 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1222 
1223 	/* Whenever a receive buffer transaction completes we'll try to
1224 	 * replenish again.  It's unlikely, but if we fail to supply even
1225 	 * one buffer, nothing will trigger another replenish attempt.
1226 	 * If the hardware has no receive buffers queued, schedule work to
1227 	 * try replenishing again.
1228 	 */
1229 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1230 		schedule_delayed_work(&endpoint->replenish_work,
1231 				      msecs_to_jiffies(1));
1232 }
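/* Batching, illustrated: with IPA_REPLENISH_BATCH set to 16, the doorbell
 * is rung only on every 16th queued receive buffer (whenever
 * replenish_count is a multiple of the batch size), so the hardware is
 * told about new buffers in groups rather than one at a time.
 */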
1233 
1234 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1235 {
1236 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1237 
1238 	/* Start replenishing if hardware currently has no buffers */
1239 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1240 		ipa_endpoint_replenish(endpoint);
1241 }
1242 
1243 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1244 {
1245 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1246 }
1247 
1248 static void ipa_endpoint_replenish_work(struct work_struct *work)
1249 {
1250 	struct delayed_work *dwork = to_delayed_work(work);
1251 	struct ipa_endpoint *endpoint;
1252 
1253 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1254 
1255 	ipa_endpoint_replenish(endpoint);
1256 }
1257 
1258 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1259 				  void *data, u32 len, u32 extra)
1260 {
1261 	struct sk_buff *skb;
1262 
1263 	if (!endpoint->netdev)
1264 		return;
1265 
1266 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1267 	if (skb) {
1268 		/* Copy the data into the socket buffer and receive it */
1269 		skb_put(skb, len);
1270 		memcpy(skb->data, data, len);
1271 		skb->truesize += extra;
1272 	}
1273 
1274 	ipa_modem_skb_rx(endpoint->netdev, skb);
1275 }
1276 
1277 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1278 				   struct page *page, u32 len)
1279 {
1280 	u32 buffer_size = endpoint->config.rx.buffer_size;
1281 	struct sk_buff *skb;
1282 
1283 	/* Nothing to do if there's no netdev */
1284 	if (!endpoint->netdev)
1285 		return false;
1286 
1287 	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1288 
1289 	skb = build_skb(page_address(page), buffer_size);
1290 	if (skb) {
1291 		/* Reserve the headroom and account for the data */
1292 		skb_reserve(skb, NET_SKB_PAD);
1293 		skb_put(skb, len);
1294 	}
1295 
1296 	/* Receive the buffer (or record drop if unable to build it) */
1297 	ipa_modem_skb_rx(endpoint->netdev, skb);
1298 
1299 	return skb != NULL;
1300 }
1301 
1302 /* The format of a packet status element is the same for several status
1303  * types (opcodes).  Other types aren't currently supported.
1304  */
1305 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1306 {
1307 	switch (opcode) {
1308 	case IPA_STATUS_OPCODE_PACKET:
1309 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1310 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1311 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1312 		return true;
1313 	default:
1314 		return false;
1315 	}
1316 }
1317 
1318 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1319 				     const struct ipa_status *status)
1320 {
1321 	u32 endpoint_id;
1322 
1323 	if (!ipa_status_format_packet(status->opcode))
1324 		return true;
1325 	if (!status->pkt_len)
1326 		return true;
1327 	endpoint_id = u8_get_bits(status->endp_dst_idx,
1328 				  IPA_STATUS_DST_IDX_FMASK);
1329 	if (endpoint_id != endpoint->endpoint_id)
1330 		return true;
1331 
1332 	return false;	/* Don't skip this packet, process it */
1333 }
1334 
1335 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1336 				    const struct ipa_status *status)
1337 {
1338 	struct ipa_endpoint *command_endpoint;
1339 	struct ipa *ipa = endpoint->ipa;
1340 	u32 endpoint_id;
1341 
1342 	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1343 		return false;	/* No valid tag */
1344 
1345 	/* The status contains a valid tag.  We know the packet was sent to
1346 	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1347 	 * If the packet came from the AP->command TX endpoint we know
1348 	 * this packet was sent as part of the pipeline clear process.
1349 	 */
1350 	endpoint_id = u8_get_bits(status->endp_src_idx,
1351 				  IPA_STATUS_SRC_IDX_FMASK);
1352 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1353 	if (endpoint_id == command_endpoint->endpoint_id) {
1354 		complete(&ipa->completion);
1355 	} else {
1356 		dev_err(&ipa->pdev->dev,
1357 			"unexpected tagged packet from endpoint %u\n",
1358 			endpoint_id);
1359 	}
1360 
1361 	return true;
1362 }
1363 
1364 /* Return whether the status indicates the packet should be dropped */
1365 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1366 				     const struct ipa_status *status)
1367 {
1368 	u32 val;
1369 
1370 	/* If the status indicates a tagged transfer, we'll drop the packet */
1371 	if (ipa_endpoint_status_tag(endpoint, status))
1372 		return true;
1373 
1374 	/* Deaggregation exceptions we drop; all other types we consume */
1375 	if (status->exception)
1376 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1377 
1378 	/* Drop the packet if it fails to match a routing rule; otherwise no */
1379 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1380 
1381 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1382 }
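/* A packet that matched no routing rule carries the all-ones rule ID in
 * flags1 (0x3ff for this 10-bit field), which is why equality with the
 * field maximum above marks the packet for dropping.
 */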
1383 
1384 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1385 				      struct page *page, u32 total_len)
1386 {
1387 	u32 buffer_size = endpoint->config.rx.buffer_size;
1388 	void *data = page_address(page) + NET_SKB_PAD;
1389 	u32 unused = buffer_size - total_len;
1390 	u32 resid = total_len;
1391 
1392 	while (resid) {
1393 		const struct ipa_status *status = data;
1394 		u32 align;
1395 		u32 len;
1396 
1397 		if (resid < sizeof(*status)) {
1398 			dev_err(&endpoint->ipa->pdev->dev,
1399 				"short message (%u bytes < %zu byte status)\n",
1400 				resid, sizeof(*status));
1401 			break;
1402 		}
1403 
1404 		/* Skip over status packets that lack packet data */
1405 		if (ipa_endpoint_status_skip(endpoint, status)) {
1406 			data += sizeof(*status);
1407 			resid -= sizeof(*status);
1408 			continue;
1409 		}
1410 
1411 		/* Compute the amount of buffer space consumed by the packet,
1412 		 * including the status element.  If the hardware is configured
1413 		 * to pad packet data to an aligned boundary, account for that.
1414 		 * And if checksum offload is enabled a trailer containing
1415 		 * computed checksum information will be appended.
1416 		 */
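		/* Example, using the 32-byte status element defined above:
		 * a 1500-byte packet on an endpoint with pad_align = 4 and
		 * checksum offload enabled consumes
		 *	32 + ALIGN(1500, 4) + sizeof(struct rmnet_map_dl_csum_trailer)
		 * bytes of the receive buffer.
		 */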
1417 		align = endpoint->config.rx.pad_align ? : 1;
1418 		len = le16_to_cpu(status->pkt_len);
1419 		len = sizeof(*status) + ALIGN(len, align);
1420 		if (endpoint->config.checksum)
1421 			len += sizeof(struct rmnet_map_dl_csum_trailer);
1422 
1423 		if (!ipa_endpoint_status_drop(endpoint, status)) {
1424 			void *data2;
1425 			u32 extra;
1426 			u32 len2;
1427 
1428 			/* Client receives only packet data (no status) */
1429 			data2 = data + sizeof(*status);
1430 			len2 = le16_to_cpu(status->pkt_len);
1431 
1432 			/* Have the true size reflect the extra unused space in
1433 			 * the original receive buffer.  Distribute the "cost"
1434 			 * proportionately across all aggregated packets in the
1435 			 * buffer.
1436 			 */
1437 			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1438 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1439 		}
1440 
1441 		/* Consume status and the full packet it describes */
1442 		data += len;
1443 		resid -= len;
1444 	}
1445 }
1446 
1447 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1448 				 struct gsi_trans *trans)
1449 {
1450 	struct page *page;
1451 
1452 	if (endpoint->toward_ipa)
1453 		return;
1454 
1455 	if (trans->cancelled)
1456 		goto done;
1457 
1458 	/* Parse or build a socket buffer using the actual received length */
1459 	page = trans->data;
1460 	if (endpoint->config.status_enable)
1461 		ipa_endpoint_status_parse(endpoint, page, trans->len);
1462 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1463 		trans->data = NULL;	/* Pages have been consumed */
1464 done:
1465 	ipa_endpoint_replenish(endpoint);
1466 }
1467 
1468 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1469 				struct gsi_trans *trans)
1470 {
1471 	if (endpoint->toward_ipa) {
1472 		struct ipa *ipa = endpoint->ipa;
1473 
1474 		/* Nothing to do for command transactions */
1475 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1476 			struct sk_buff *skb = trans->data;
1477 
1478 			if (skb)
1479 				dev_kfree_skb_any(skb);
1480 		}
1481 	} else {
1482 		struct page *page = trans->data;
1483 
1484 		if (page)
1485 			put_page(page);
1486 	}
1487 }
1488 
1489 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1490 {
1491 	const struct ipa_reg *reg;
1492 	u32 val;
1493 
1494 	reg = ipa_reg(ipa, ROUTE);
1495 	/* ROUTE_DIS is 0 */
1496 	val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1497 	val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1498 	/* ROUTE_DEF_HDR_OFST is 0 */
1499 	val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1500 	val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1501 
1502 	iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
1503 }
1504 
1505 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1506 {
1507 	ipa_endpoint_default_route_set(ipa, 0);
1508 }
1509 
1510 /**
1511  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1512  * @endpoint:	Endpoint to be reset
1513  *
1514  * If aggregation is active on an RX endpoint when a reset is performed
1515  * on its underlying GSI channel, a special sequence of actions must be
1516  * taken to ensure the IPA pipeline is properly cleared.
1517  *
1518  * Return:	0 if successful, or a negative error code
1519  */
1520 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1521 {
1522 	struct device *dev = &endpoint->ipa->pdev->dev;
1523 	struct ipa *ipa = endpoint->ipa;
1524 	struct gsi *gsi = &ipa->gsi;
1525 	bool suspended = false;
1526 	dma_addr_t addr;
1527 	u32 retries;
1528 	u32 len = 1;
1529 	void *virt;
1530 	int ret;
1531 
1532 	virt = kzalloc(len, GFP_KERNEL);
1533 	if (!virt)
1534 		return -ENOMEM;
1535 
1536 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1537 	if (dma_mapping_error(dev, addr)) {
1538 		ret = -ENOMEM;
1539 		goto out_kfree;
1540 	}
1541 
1542 	/* Force close aggregation before issuing the reset */
1543 	ipa_endpoint_force_close(endpoint);
1544 
1545 	/* Reset and reconfigure the channel with the doorbell engine
1546 	 * disabled.  Then poll until we know aggregation is no longer
1547 	 * active.  We'll re-enable the doorbell (if appropriate) when
1548 	 * we reset again below.
1549 	 */
1550 	gsi_channel_reset(gsi, endpoint->channel_id, false);
1551 
1552 	/* Make sure the channel isn't suspended */
1553 	suspended = ipa_endpoint_program_suspend(endpoint, false);
1554 
1555 	/* Start channel and do a 1 byte read */
1556 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1557 	if (ret)
1558 		goto out_suspend_again;
1559 
1560 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1561 	if (ret)
1562 		goto err_endpoint_stop;
1563 
1564 	/* Wait for aggregation to be closed on the channel */
1565 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1566 	do {
1567 		if (!ipa_endpoint_aggr_active(endpoint))
1568 			break;
1569 		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1570 	} while (retries--);
1571 
1572 	/* Check one last time */
1573 	if (ipa_endpoint_aggr_active(endpoint))
1574 		dev_err(dev, "endpoint %u still active during reset\n",
1575 			endpoint->endpoint_id);
1576 
1577 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1578 
1579 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1580 	if (ret)
1581 		goto out_suspend_again;
1582 
1583 	/* Finally, reset and reconfigure the channel again (re-enabling
1584 	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1585 	 * complete the channel reset sequence.  Finish by suspending the
1586 	 * channel again (if necessary).
1587 	 */
1588 	gsi_channel_reset(gsi, endpoint->channel_id, true);
1589 
1590 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1591 
1592 	goto out_suspend_again;
1593 
1594 err_endpoint_stop:
1595 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1596 out_suspend_again:
1597 	if (suspended)
1598 		(void)ipa_endpoint_program_suspend(endpoint, true);
1599 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1600 out_kfree:
1601 	kfree(virt);
1602 
1603 	return ret;
1604 }
1605 
1606 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1607 {
1608 	u32 channel_id = endpoint->channel_id;
1609 	struct ipa *ipa = endpoint->ipa;
1610 	bool special;
1611 	int ret = 0;
1612 
1613 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1614 	 * is active, we need to handle things specially to recover.
1615 	 * All other cases just need to reset the underlying GSI channel.
1616 	 */
1617 	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1618 			endpoint->config.aggregation;
1619 	if (special && ipa_endpoint_aggr_active(endpoint))
1620 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1621 	else
1622 		gsi_channel_reset(&ipa->gsi, channel_id, true);
1623 
1624 	if (ret)
1625 		dev_err(&ipa->pdev->dev,
1626 			"error %d resetting channel %u for endpoint %u\n",
1627 			ret, endpoint->channel_id, endpoint->endpoint_id);
1628 }
1629 
1630 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1631 {
1632 	if (endpoint->toward_ipa) {
1633 		/* Newer versions of IPA use GSI channel flow control
1634 		 * instead of endpoint DELAY mode to prevent sending data.
1635 		 * Flow control is disabled for newly-allocated channels,
1636 		 * and we can assume flow control is not (ever) enabled
1637 		 * for AP TX channels.
1638 		 */
1639 		if (endpoint->ipa->version < IPA_VERSION_4_2)
1640 			ipa_endpoint_program_delay(endpoint, false);
1641 	} else {
1642 		/* Ensure suspend mode is off on all AP RX endpoints */
1643 		(void)ipa_endpoint_program_suspend(endpoint, false);
1644 	}
1645 	ipa_endpoint_init_cfg(endpoint);
1646 	ipa_endpoint_init_nat(endpoint);
1647 	ipa_endpoint_init_hdr(endpoint);
1648 	ipa_endpoint_init_hdr_ext(endpoint);
1649 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1650 	ipa_endpoint_init_mode(endpoint);
1651 	ipa_endpoint_init_aggr(endpoint);
1652 	if (!endpoint->toward_ipa) {
1653 		if (endpoint->config.rx.holb_drop)
1654 			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1655 		else
1656 			ipa_endpoint_init_hol_block_disable(endpoint);
1657 	}
1658 	ipa_endpoint_init_deaggr(endpoint);
1659 	ipa_endpoint_init_rsrc_grp(endpoint);
1660 	ipa_endpoint_init_seq(endpoint);
1661 	ipa_endpoint_status(endpoint);
1662 }
1663 
1664 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1665 {
1666 	struct ipa *ipa = endpoint->ipa;
1667 	struct gsi *gsi = &ipa->gsi;
1668 	int ret;
1669 
1670 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1671 	if (ret) {
1672 		dev_err(&ipa->pdev->dev,
1673 			"error %d starting %cX channel %u for endpoint %u\n",
1674 			ret, endpoint->toward_ipa ? 'T' : 'R',
1675 			endpoint->channel_id, endpoint->endpoint_id);
1676 		return ret;
1677 	}
1678 
1679 	if (!endpoint->toward_ipa) {
1680 		ipa_interrupt_suspend_enable(ipa->interrupt,
1681 					     endpoint->endpoint_id);
1682 		ipa_endpoint_replenish_enable(endpoint);
1683 	}
1684 
1685 	ipa->enabled |= BIT(endpoint->endpoint_id);
1686 
1687 	return 0;
1688 }
1689 
1690 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1691 {
1692 	u32 mask = BIT(endpoint->endpoint_id);
1693 	struct ipa *ipa = endpoint->ipa;
1694 	struct gsi *gsi = &ipa->gsi;
1695 	int ret;
1696 
1697 	if (!(ipa->enabled & mask))
1698 		return;
1699 
1700 	ipa->enabled ^= mask;
1701 
1702 	if (!endpoint->toward_ipa) {
1703 		ipa_endpoint_replenish_disable(endpoint);
1704 		ipa_interrupt_suspend_disable(ipa->interrupt,
1705 					      endpoint->endpoint_id);
1706 	}
1707 
1708 	/* Note that if stop fails, the channel's state is not well-defined */
1709 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1710 	if (ret)
1711 		dev_err(&ipa->pdev->dev,
1712 			"error %d attempting to stop endpoint %u\n", ret,
1713 			endpoint->endpoint_id);
1714 }
1715 
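/* Suspend a single enabled endpoint.  RX endpoints stop replenishing
 * and request endpoint SUSPEND mode before the underlying GSI channel
 * is suspended.
 */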
1716 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1717 {
1718 	struct device *dev = &endpoint->ipa->pdev->dev;
1719 	struct gsi *gsi = &endpoint->ipa->gsi;
1720 	int ret;
1721 
1722 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1723 		return;
1724 
1725 	if (!endpoint->toward_ipa) {
1726 		ipa_endpoint_replenish_disable(endpoint);
1727 		(void)ipa_endpoint_program_suspend(endpoint, true);
1728 	}
1729 
1730 	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1731 	if (ret)
1732 		dev_err(dev, "error %d suspending channel %u\n", ret,
1733 			endpoint->channel_id);
1734 }
1735 
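/* Resume a single enabled endpoint, reversing ipa_endpoint_suspend_one().
 * SUSPEND mode is cleared on RX endpoints before the GSI channel is
 * resumed, and replenishing is restarted if the resume succeeded.
 */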
1736 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1737 {
1738 	struct device *dev = &endpoint->ipa->pdev->dev;
1739 	struct gsi *gsi = &endpoint->ipa->gsi;
1740 	int ret;
1741 
1742 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1743 		return;
1744 
1745 	if (!endpoint->toward_ipa)
1746 		(void)ipa_endpoint_program_suspend(endpoint, false);
1747 
1748 	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1749 	if (ret)
1750 		dev_err(dev, "error %d resuming channel %u\n", ret,
1751 			endpoint->channel_id);
1752 	else if (!endpoint->toward_ipa)
1753 		ipa_endpoint_replenish_enable(endpoint);
1754 }
1755 
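/* Suspend all AP endpoints.  The modem network device is suspended
 * first, followed by the AP LAN RX and command TX endpoints.
 */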
1756 void ipa_endpoint_suspend(struct ipa *ipa)
1757 {
1758 	if (!ipa->setup_complete)
1759 		return;
1760 
1761 	if (ipa->modem_netdev)
1762 		ipa_modem_suspend(ipa->modem_netdev);
1763 
1764 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1765 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1766 }
1767 
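/* Resume all AP endpoints, in the reverse order used for suspend:
 * command TX and LAN RX first, then the modem network device.
 */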
1768 void ipa_endpoint_resume(struct ipa *ipa)
1769 {
1770 	if (!ipa->setup_complete)
1771 		return;
1772 
1773 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1774 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1775 
1776 	if (ipa->modem_netdev)
1777 		ipa_modem_resume(ipa->modem_netdev);
1778 }
1779 
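/* Set up an AP endpoint before it is used: record the maximum number
 * of socket buffer fragments its channel can handle, initialize RX
 * replenish state, and program the endpoint's configuration registers.
 */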
1780 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1781 {
1782 	struct gsi *gsi = &endpoint->ipa->gsi;
1783 	u32 channel_id = endpoint->channel_id;
1784 
1785 	/* Only AP endpoints get set up */
1786 	if (endpoint->ee_id != GSI_EE_AP)
1787 		return;
1788 
1789 	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1790 	if (!endpoint->toward_ipa) {
1791 		/* RX transactions require a single TRE, so the maximum
1792 		 * backlog is the same as the maximum outstanding TREs.
1793 		 */
1794 		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1795 		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1796 		INIT_DELAYED_WORK(&endpoint->replenish_work,
1797 				  ipa_endpoint_replenish_work);
1798 	}
1799 
1800 	ipa_endpoint_program(endpoint);
1801 
1802 	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1803 }
1804 
1805 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1806 {
1807 	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1808 
1809 	if (!endpoint->toward_ipa)
1810 		cancel_delayed_work_sync(&endpoint->replenish_work);
1811 
1812 	ipa_endpoint_reset(endpoint);
1813 }
1814 
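/* Set up all initialized endpoints, working from the lowest endpoint
 * ID to the highest (__ffs() finds the lowest set bit each pass).
 */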
1815 void ipa_endpoint_setup(struct ipa *ipa)
1816 {
1817 	u32 initialized = ipa->initialized;
1818 
1819 	ipa->set_up = 0;
1820 	while (initialized) {
1821 		u32 endpoint_id = __ffs(initialized);
1822 
1823 		initialized ^= BIT(endpoint_id);
1824 
1825 		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1826 	}
1827 }
1828 
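/* Tear down all endpoints that were set up, in the reverse order used
 * for setup (__fls() finds the highest set bit each pass).
 */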
1829 void ipa_endpoint_teardown(struct ipa *ipa)
1830 {
1831 	u32 set_up = ipa->set_up;
1832 
1833 	while (set_up) {
1834 		u32 endpoint_id = __fls(set_up);
1835 
1836 		set_up ^= BIT(endpoint_id);
1837 
1838 		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1839 	}
1840 	ipa->set_up = 0;
1841 }
1842 
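/* Determine which endpoints the hardware actually supplies (using the
 * FLAVOR_0 register where available), record them in ipa->available,
 * and verify that every initialized endpoint exists and points in the
 * expected direction.
 */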
1843 int ipa_endpoint_config(struct ipa *ipa)
1844 {
1845 	struct device *dev = &ipa->pdev->dev;
1846 	const struct ipa_reg *reg;
1847 	u32 initialized;
1848 	u32 rx_base;
1849 	u32 rx_mask;
1850 	u32 tx_mask;
1851 	int ret = 0;
1852 	u32 max;
1853 	u32 val;
1854 
1855 	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1856 	 * Furthermore, the endpoints were not grouped such that TX
1857 	 * endpoint numbers started with 0 and RX endpoints had numbers
1858 	 * higher than all TX endpoints, so we can't do the simple
1859 	 * direction check used for newer hardware below.
1860 	 *
1861 	 * For hardware that doesn't support the FLAVOR_0 register,
1862 	 * just set the available mask to support any endpoint, and
1863 	 * assume the configuration is valid.
1864 	 */
1865 	if (ipa->version < IPA_VERSION_3_5) {
1866 		ipa->available = ~0;
1867 		return 0;
1868 	}
1869 
1870 	/* Find out about the endpoints supplied by the hardware, and ensure
1871 	 * the highest one doesn't exceed the number we support.
1872 	 */
1873 	reg = ipa_reg(ipa, FLAVOR_0);
1874 	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
1875 
1876 	/* Our RX is an IPA producer */
1877 	rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
1878 	max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
1879 	if (max > IPA_ENDPOINT_MAX) {
1880 		dev_err(dev, "too many endpoints (%u > %u)\n",
1881 			max, IPA_ENDPOINT_MAX);
1882 		return -EINVAL;
1883 	}
1884 	rx_mask = GENMASK(max - 1, rx_base);
1885 
1886 	/* Our TX is an IPA consumer */
1887 	max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
1888 	tx_mask = GENMASK(max - 1, 0);
1889 
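	/* Illustrative example (values are hypothetical, not taken from
	 * any particular SoC): if PROD_LOWEST decodes to 13 and
	 * MAX_PROD_PIPES to 5, then rx_mask is GENMASK(17, 13), or
	 * 0x0003e000; if MAX_CONS_PIPES decodes to 10, tx_mask is
	 * GENMASK(9, 0), or 0x000003ff, so the available mask below
	 * becomes 0x0003e3ff.
	 */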
1890 	ipa->available = rx_mask | tx_mask;
1891 
1892 	/* Check for initialized endpoints not supported by the hardware */
1893 	if (ipa->initialized & ~ipa->available) {
1894 		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1895 			ipa->initialized & ~ipa->available);
1896 		ret = -EINVAL;		/* Report other errors too */
1897 	}
1898 
1899 	initialized = ipa->initialized;
1900 	while (initialized) {
1901 		u32 endpoint_id = __ffs(initialized);
1902 		struct ipa_endpoint *endpoint;
1903 
1904 		initialized ^= BIT(endpoint_id);
1905 
1906 		/* Make sure it's pointing in the right direction */
1907 		endpoint = &ipa->endpoint[endpoint_id];
1908 		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
1909 			dev_err(dev, "endpoint id %u wrong direction\n",
1910 				endpoint_id);
1911 			ret = -EINVAL;
1912 		}
1913 	}
1914 
1915 	return ret;
1916 }
1917 
1918 void ipa_endpoint_deconfig(struct ipa *ipa)
1919 {
1920 	ipa->available = 0;	/* Nothing more to do */
1921 }
1922 
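/* Record one endpoint described by configuration data: fill in the
 * name map (and, for AP endpoints, the channel map), cache the
 * endpoint's configuration, and mark the endpoint initialized.
 */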
1923 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1924 				  const struct ipa_gsi_endpoint_data *data)
1925 {
1926 	struct ipa_endpoint *endpoint;
1927 
1928 	endpoint = &ipa->endpoint[data->endpoint_id];
1929 
1930 	if (data->ee_id == GSI_EE_AP)
1931 		ipa->channel_map[data->channel_id] = endpoint;
1932 	ipa->name_map[name] = endpoint;
1933 
1934 	endpoint->ipa = ipa;
1935 	endpoint->ee_id = data->ee_id;
1936 	endpoint->channel_id = data->channel_id;
1937 	endpoint->endpoint_id = data->endpoint_id;
1938 	endpoint->toward_ipa = data->toward_ipa;
1939 	endpoint->config = data->endpoint.config;
1940 
1941 	ipa->initialized |= BIT(endpoint->endpoint_id);
1942 }
1943 
1944 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1945 {
1946 	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1947 
1948 	memset(endpoint, 0, sizeof(*endpoint));
1949 }
1950 
1951 void ipa_endpoint_exit(struct ipa *ipa)
1952 {
1953 	u32 initialized = ipa->initialized;
1954 
1955 	while (initialized) {
1956 		u32 endpoint_id = __fls(initialized);
1957 
1958 		initialized ^= BIT(endpoint_id);
1959 
1960 		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1961 	}
1962 	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1963 	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1964 }
1965 
1966 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1967 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1968 		      const struct ipa_gsi_endpoint_data *data)
1969 {
1970 	enum ipa_endpoint_name name;
1971 	u32 filter_map;
1972 
1973 	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
1974 
1975 	if (!ipa_endpoint_data_valid(ipa, count, data))
1976 		return 0;	/* Error */
1977 
1978 	ipa->initialized = 0;
1979 
1980 	filter_map = 0;
1981 	for (name = 0; name < count; name++, data++) {
1982 		if (ipa_gsi_endpoint_data_empty(data))
1983 			continue;	/* Skip over empty slots */
1984 
1985 		ipa_endpoint_init_one(ipa, name, data);
1986 
1987 		if (data->endpoint.filter_support)
1988 			filter_map |= BIT(data->endpoint_id);
1989 		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
1990 			ipa->modem_tx_count++;
1991 	}
1992 
1993 	if (!ipa_filter_map_valid(ipa, filter_map))
1994 		goto err_endpoint_exit;
1995 
1996 	return filter_map;	/* Non-zero bitmask */
1997 
1998 err_endpoint_exit:
1999 	ipa_endpoint_exit(ipa);
2000 
2001 	return 0;	/* Error */
2002 }
2003