xref: /linux/drivers/net/ipa/ipa_endpoint.c (revision 6387bf7c390a17a03f05a70099e135f61c7cb437)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2019-2021 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
13 
14 #include "gsi.h"
15 #include "gsi_trans.h"
16 #include "ipa.h"
17 #include "ipa_data.h"
18 #include "ipa_endpoint.h"
19 #include "ipa_cmd.h"
20 #include "ipa_mem.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
23 #include "ipa_gsi.h"
24 #include "ipa_power.h"
25 
26 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
27 
28 /* Hardware is told about receive buffers once a "batch" has been queued */
29 #define IPA_REPLENISH_BATCH	16		/* Must be non-zero */
30 
31 /* The amount of RX buffer space consumed by standard skb overhead */
32 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
33 
34 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
35 #define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */
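
/* Editor's sketch (not part of the driver, never called): how the mask
 * above selects the mux_id from the 4-byte modem-supplied metadata.  The
 * helper name is hypothetical; it only illustrates that the mux_id
 * occupies the low-order byte, in host byte order.
 */
static inline u8 ipa_qmap_mux_id_sketch(u32 metadata)
{
	return metadata & IPA_ENDPOINT_QMAP_METADATA_MASK;
}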
36 
37 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
38 
39 /** enum ipa_status_opcode - status element opcode hardware values */
40 enum ipa_status_opcode {
41 	IPA_STATUS_OPCODE_PACKET		= 0x01,
42 	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
43 	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
44 	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
45 };
46 
47 /** enum ipa_status_exception - status element exception type */
48 enum ipa_status_exception {
49 	/* 0 means no exception */
50 	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
51 };
52 
53 /* Status element provided by hardware */
54 struct ipa_status {
55 	u8 opcode;		/* enum ipa_status_opcode */
56 	u8 exception;		/* enum ipa_status_exception */
57 	__le16 mask;
58 	__le16 pkt_len;
59 	u8 endp_src_idx;
60 	u8 endp_dst_idx;
61 	__le32 metadata;
62 	__le32 flags1;
63 	__le64 flags2;
64 	__le32 flags3;
65 	__le32 flags4;
66 };
67 
68 /* Field masks for struct ipa_status structure fields */
69 #define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
70 #define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
71 #define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
72 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
73 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
74 
75 static u32 aggr_byte_limit_max(enum ipa_version version)
76 {
77 	if (version < IPA_VERSION_4_5)
78 		return field_max(aggr_byte_limit_fmask(true));
79 
80 	return field_max(aggr_byte_limit_fmask(false));
81 }
82 
83 /* Compute the aggregation size value to use for a given buffer size */
84 static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
85 {
86 	/* A hard aggregation limit will not be crossed; aggregation closes
87 	 * if saving incoming data would cross the hard byte limit boundary.
88 	 *
89 	 * With a soft limit, aggregation closes *after* the size boundary
90 	 * has been crossed.  In that case the limit must leave enough space
91 	 * beyond the boundary to receive a full MTU of data plus overhead.
92 	 */
93 	if (!aggr_hard_limit)
94 		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
95 
96 	/* The byte limit is encoded as a number of kilobytes */
97 
98 	return rx_buffer_size / SZ_1K;
99 }
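
/* Editor's sketch (not called by the driver): choosing the aggregation
 * byte limit for a hypothetical 8192-byte receive buffer, mirroring the
 * call made in ipa_endpoint_init_aggr() below.  With a soft limit, space
 * for one more MTU plus overhead is reserved first, so the encoded limit
 * is smaller than the buffer itself.
 */
static inline u32 ipa_aggr_size_kb_sketch(void)
{
	u32 buffer_size = 8192;		/* hypothetical RX buffer size */

	return ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, false);
}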
100 
101 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
102 			    const struct ipa_gsi_endpoint_data *all_data,
103 			    const struct ipa_gsi_endpoint_data *data)
104 {
105 	const struct ipa_gsi_endpoint_data *other_data;
106 	struct device *dev = &ipa->pdev->dev;
107 	enum ipa_endpoint_name other_name;
108 
109 	if (ipa_gsi_endpoint_data_empty(data))
110 		return true;
111 
112 	if (!data->toward_ipa) {
113 		const struct ipa_endpoint_rx *rx_config;
114 		u32 buffer_size;
115 		u32 aggr_size;
116 		u32 limit;
117 
118 		if (data->endpoint.filter_support) {
119 			dev_err(dev, "filtering not supported for "
120 					"RX endpoint %u\n",
121 				data->endpoint_id);
122 			return false;
123 		}
124 
125 		/* Nothing more to check for non-AP RX */
126 		if (data->ee_id != GSI_EE_AP)
127 			return true;
128 
129 		rx_config = &data->endpoint.config.rx;
130 
131 		/* The buffer size must hold an MTU plus overhead */
132 		buffer_size = rx_config->buffer_size;
133 		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
134 		if (buffer_size < limit) {
135 			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
136 				data->endpoint_id, buffer_size, limit);
137 			return false;
138 		}
139 
140 		if (!data->endpoint.config.aggregation) {
141 			bool result = true;
142 
143 			/* No aggregation; check for bogus aggregation data */
144 			if (rx_config->aggr_time_limit) {
145 				dev_err(dev,
146 					"time limit with no aggregation for RX endpoint %u\n",
147 					data->endpoint_id);
148 				result = false;
149 			}
150 
151 			if (rx_config->aggr_hard_limit) {
152 				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
153 					data->endpoint_id);
154 				result = false;
155 			}
156 
157 			if (rx_config->aggr_close_eof) {
158 				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
159 					data->endpoint_id);
160 				result = false;
161 			}
162 
163 			return result;	/* Nothing more to check */
164 		}
165 
166 		/* For an endpoint supporting receive aggregation, the byte
167 		 * limit defines the point at which aggregation closes.  This
168 		 * check ensures the receive buffer size doesn't result in a
169 		 * limit that exceeds what's representable in the aggregation
170 		 * byte limit field.
171 		 */
172 		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
173 					     rx_config->aggr_hard_limit);
174 		limit = aggr_byte_limit_max(ipa->version);
175 		if (aggr_size > limit) {
176 			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
177 				data->endpoint_id, aggr_size, limit);
178 
179 			return false;
180 		}
181 
182 		return true;	/* Nothing more to check for RX */
183 	}
184 
185 	/* Starting with IPA v4.5 sequencer replication is obsolete */
186 	if (ipa->version >= IPA_VERSION_4_5) {
187 		if (data->endpoint.config.tx.seq_rep_type) {
188 			dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
189 				data->endpoint_id);
190 			return false;
191 		}
192 	}
193 
194 	if (data->endpoint.config.status_enable) {
195 		other_name = data->endpoint.config.tx.status_endpoint;
196 		if (other_name >= count) {
197 			dev_err(dev, "status endpoint name %u out of range "
198 					"for endpoint %u\n",
199 				other_name, data->endpoint_id);
200 			return false;
201 		}
202 
203 		/* Status endpoint must be defined... */
204 		other_data = &all_data[other_name];
205 		if (ipa_gsi_endpoint_data_empty(other_data)) {
206 			dev_err(dev, "DMA endpoint name %u undefined "
207 					"for endpoint %u\n",
208 				other_name, data->endpoint_id);
209 			return false;
210 		}
211 
212 		/* ...and has to be an RX endpoint... */
213 		if (other_data->toward_ipa) {
214 			dev_err(dev,
215 				"status endpoint for endpoint %u not RX\n",
216 				data->endpoint_id);
217 			return false;
218 		}
219 
220 		/* ...and if it's to be an AP endpoint... */
221 		if (other_data->ee_id == GSI_EE_AP) {
222 			/* ...make sure it has status enabled. */
223 			if (!other_data->endpoint.config.status_enable) {
224 				dev_err(dev,
225 					"status not enabled for endpoint %u\n",
226 					other_data->endpoint_id);
227 				return false;
228 			}
229 		}
230 	}
231 
232 	if (data->endpoint.config.dma_mode) {
233 		other_name = data->endpoint.config.dma_endpoint;
234 		if (other_name >= count) {
235 			dev_err(dev, "DMA endpoint name %u out of range "
236 					"for endpoint %u\n",
237 				other_name, data->endpoint_id);
238 			return false;
239 		}
240 
241 		other_data = &all_data[other_name];
242 		if (ipa_gsi_endpoint_data_empty(other_data)) {
243 			dev_err(dev, "DMA endpoint name %u undefined "
244 					"for endpoint %u\n",
245 				other_name, data->endpoint_id);
246 			return false;
247 		}
248 	}
249 
250 	return true;
251 }
252 
253 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
254 				    const struct ipa_gsi_endpoint_data *data)
255 {
256 	const struct ipa_gsi_endpoint_data *dp = data;
257 	struct device *dev = &ipa->pdev->dev;
258 	enum ipa_endpoint_name name;
259 
260 	if (count > IPA_ENDPOINT_COUNT) {
261 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
262 			count, IPA_ENDPOINT_COUNT);
263 		return false;
264 	}
265 
266 	/* Make sure needed endpoints have defined data */
267 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
268 		dev_err(dev, "command TX endpoint not defined\n");
269 		return false;
270 	}
271 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
272 		dev_err(dev, "LAN RX endpoint not defined\n");
273 		return false;
274 	}
275 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
276 		dev_err(dev, "AP->modem TX endpoint not defined\n");
277 		return false;
278 	}
279 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
280 		dev_err(dev, "AP<-modem RX endpoint not defined\n");
281 		return false;
282 	}
283 
284 	for (name = 0; name < count; name++, dp++)
285 		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
286 			return false;
287 
288 	return true;
289 }
290 
291 /* Allocate a transaction to use on a non-command endpoint */
292 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
293 						  u32 tre_count)
294 {
295 	struct gsi *gsi = &endpoint->ipa->gsi;
296 	u32 channel_id = endpoint->channel_id;
297 	enum dma_data_direction direction;
298 
299 	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
300 
301 	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
302 }
303 
304 /* suspend_delay represents suspend for RX, delay for TX endpoints.
305  * Note that suspend is not supported starting with IPA v4.0, and
306  * delay mode should not be used starting with IPA v4.2.
307  */
308 static bool
309 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
310 {
311 	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
312 	struct ipa *ipa = endpoint->ipa;
313 	bool state;
314 	u32 mask;
315 	u32 val;
316 
317 	if (endpoint->toward_ipa)
318 		WARN_ON(ipa->version >= IPA_VERSION_4_2);
319 	else
320 		WARN_ON(ipa->version >= IPA_VERSION_4_0);
321 
322 	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
323 
324 	val = ioread32(ipa->reg_virt + offset);
325 	state = !!(val & mask);
326 
327 	/* Don't bother if it's already in the requested state */
328 	if (suspend_delay != state) {
329 		val ^= mask;
330 		iowrite32(val, ipa->reg_virt + offset);
331 	}
332 
333 	return state;
334 }
335 
336 /* We don't care what the previous state was for delay mode */
337 static void
338 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
339 {
340 	/* Delay mode should not be used for IPA v4.2+ */
341 	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
342 	WARN_ON(!endpoint->toward_ipa);
343 
344 	(void)ipa_endpoint_init_ctrl(endpoint, enable);
345 }
346 
347 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
348 {
349 	u32 mask = BIT(endpoint->endpoint_id);
350 	struct ipa *ipa = endpoint->ipa;
351 	u32 offset;
352 	u32 val;
353 
354 	WARN_ON(!(mask & ipa->available));
355 
356 	offset = ipa_reg_state_aggr_active_offset(ipa->version);
357 	val = ioread32(ipa->reg_virt + offset);
358 
359 	return !!(val & mask);
360 }
361 
362 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
363 {
364 	u32 mask = BIT(endpoint->endpoint_id);
365 	struct ipa *ipa = endpoint->ipa;
366 
367 	WARN_ON(!(mask & ipa->available));
368 
369 	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
370 }
371 
372 /**
373  * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
374  * @endpoint:	Endpoint on which to emulate a suspend
375  *
376  *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
377  *  with an open aggregation frame.  This is to work around a hardware
378  *  issue in IPA version 3.5.1 where the suspend interrupt will not be
379  *  generated when it should be.
380  */
381 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
382 {
383 	struct ipa *ipa = endpoint->ipa;
384 
385 	if (!endpoint->config.aggregation)
386 		return;
387 
388 	/* Nothing to do if the endpoint doesn't have aggregation open */
389 	if (!ipa_endpoint_aggr_active(endpoint))
390 		return;
391 
392 	/* Force close aggregation */
393 	ipa_endpoint_force_close(endpoint);
394 
395 	ipa_interrupt_simulate_suspend(ipa->interrupt);
396 }
397 
398 /* Returns previous suspend state (true means suspend was enabled) */
399 static bool
400 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
401 {
402 	bool suspended;
403 
404 	if (endpoint->ipa->version >= IPA_VERSION_4_0)
405 		return enable;	/* For IPA v4.0+, no change made */
406 
407 	WARN_ON(endpoint->toward_ipa);
408 
409 	suspended = ipa_endpoint_init_ctrl(endpoint, enable);
410 
411 	/* A client suspended with an open aggregation frame will not
412 	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
413 	 * ipa_endpoint_suspend_aggr() handle this.
414 	 */
415 	if (enable && !suspended)
416 		ipa_endpoint_suspend_aggr(endpoint);
417 
418 	return suspended;
419 }
420 
421 /* Put all modem RX endpoints into suspend mode, and stop transmission
422  * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
423  * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
424  * control instead.
425  */
426 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
427 {
428 	u32 endpoint_id;
429 
430 	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
431 		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
432 
433 		if (endpoint->ee_id != GSI_EE_MODEM)
434 			continue;
435 
436 		if (!endpoint->toward_ipa)
437 			(void)ipa_endpoint_program_suspend(endpoint, enable);
438 		else if (ipa->version < IPA_VERSION_4_2)
439 			ipa_endpoint_program_delay(endpoint, enable);
440 		else
441 			gsi_modem_channel_flow_control(&ipa->gsi,
442 						       endpoint->channel_id,
443 						       enable);
444 	}
445 }
446 
447 /* Reset all modem endpoints to use the default exception endpoint */
448 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
449 {
450 	u32 initialized = ipa->initialized;
451 	struct gsi_trans *trans;
452 	u32 count;
453 
454 	/* We need one command per modem TX endpoint, plus the commands
455 	 * that clear the pipeline.
456 	 */
457 	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
458 	trans = ipa_cmd_trans_alloc(ipa, count);
459 	if (!trans) {
460 		dev_err(&ipa->pdev->dev,
461 			"no transaction to reset modem exception endpoints\n");
462 		return -EBUSY;
463 	}
464 
465 	while (initialized) {
466 		u32 endpoint_id = __ffs(initialized);
467 		struct ipa_endpoint *endpoint;
468 		u32 offset;
469 
470 		initialized ^= BIT(endpoint_id);
471 
472 		/* We only reset modem TX endpoints */
473 		endpoint = &ipa->endpoint[endpoint_id];
474 		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
475 			continue;
476 
477 		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
478 
479 		/* Value written is 0, and all bits are updated.  That
480 		 * means status is disabled on the endpoint, and as a
481 		 * result all other fields in the register are ignored.
482 		 */
483 		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
484 	}
485 
486 	ipa_cmd_pipeline_clear_add(trans);
487 
488 	gsi_trans_commit_wait(trans);
489 
490 	ipa_cmd_pipeline_clear_wait(ipa);
491 
492 	return 0;
493 }
494 
495 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
496 {
497 	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
498 	enum ipa_cs_offload_en enabled;
499 	u32 val = 0;
500 
501 	/* FRAG_OFFLOAD_EN is 0 */
502 	if (endpoint->config.checksum) {
503 		enum ipa_version version = endpoint->ipa->version;
504 
505 		if (endpoint->toward_ipa) {
506 			u32 off;
507 
508 			/* Checksum header offset is in 4-byte units */
509 			off = sizeof(struct rmnet_map_header);
510 			off /= sizeof(u32);
511 			val |= u32_encode_bits(off,
512 					       CS_METADATA_HDR_OFFSET_FMASK);
513 
514 			enabled = version < IPA_VERSION_4_5
515 					? IPA_CS_OFFLOAD_UL
516 					: IPA_CS_OFFLOAD_INLINE;
517 		} else {
518 			enabled = version < IPA_VERSION_4_5
519 					? IPA_CS_OFFLOAD_DL
520 					: IPA_CS_OFFLOAD_INLINE;
521 		}
522 	} else {
523 		enabled = IPA_CS_OFFLOAD_NONE;
524 	}
525 	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
526 	/* CS_GEN_QMB_MASTER_SEL is 0 */
527 
528 	iowrite32(val, endpoint->ipa->reg_virt + offset);
529 }
530 
531 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
532 {
533 	u32 offset;
534 	u32 val;
535 
536 	if (!endpoint->toward_ipa)
537 		return;
538 
539 	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
540 	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
541 
542 	iowrite32(val, endpoint->ipa->reg_virt + offset);
543 }
544 
545 static u32
546 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
547 {
548 	u32 header_size = sizeof(struct rmnet_map_header);
549 
550 	/* Without checksum offload, we just have the MAP header */
551 	if (!endpoint->config.checksum)
552 		return header_size;
553 
554 	if (version < IPA_VERSION_4_5) {
555 		/* Checksum header inserted for AP TX endpoints only */
556 		if (endpoint->toward_ipa)
557 			header_size += sizeof(struct rmnet_map_ul_csum_header);
558 	} else {
559 		/* Checksum header is used in both directions */
560 		header_size += sizeof(struct rmnet_map_v5_csum_header);
561 	}
562 
563 	return header_size;
564 }
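
/* Editor's note (illustrative, assuming the rmnet layouts in
 * <linux/if_rmnet.h>): the MAP header is 4 bytes and both checksum
 * headers are 4 bytes, so the function above yields 4 without checksum
 * offload and 8 with it (TX only before IPA v4.5, both directions
 * after).  The checks below compile away and are never called.
 */
static inline void ipa_qmap_header_size_sketch(void)
{
	BUILD_BUG_ON(sizeof(struct rmnet_map_header) != 4);
	BUILD_BUG_ON(sizeof(struct rmnet_map_ul_csum_header) != 4);
	BUILD_BUG_ON(sizeof(struct rmnet_map_v5_csum_header) != 4);
}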
565 
566 /**
567  * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
568  * @endpoint:	Endpoint pointer
569  *
570  * We program QMAP endpoints so each packet received is preceded by a QMAP
571  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
572  * packet size field, and we have the IPA hardware populate both for each
573  * received packet.  The header is configured (in the HDR_EXT register)
574  * to use big endian format.
575  *
576  * The packet size is written into the QMAP header's pkt_len field.  That
577  * location is defined here using the HDR_OFST_PKT_SIZE field.
578  *
579  * The mux_id comes from a 4-byte metadata value supplied with each packet
580  * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
581  * value that we want, in its low-order byte.  A bitmask defined in the
582  * endpoint's METADATA_MASK register defines which byte within the modem
583  * metadata contains the mux_id.  And the OFST_METADATA field programmed
584  * here indicates where the extracted byte should be placed within the QMAP
585  * header.
586  */
587 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
588 {
589 	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
590 	struct ipa *ipa = endpoint->ipa;
591 	u32 val = 0;
592 
593 	if (endpoint->config.qmap) {
594 		enum ipa_version version = ipa->version;
595 		size_t header_size;
596 
597 		header_size = ipa_qmap_header_size(version, endpoint);
598 		val = ipa_header_size_encoded(version, header_size);
599 
600 		/* Define how to fill fields in a received QMAP header */
601 		if (!endpoint->toward_ipa) {
602 			u32 off;	/* Field offset within header */
603 
604 			/* Where IPA will write the metadata value */
605 			off = offsetof(struct rmnet_map_header, mux_id);
606 			val |= ipa_metadata_offset_encoded(version, off);
607 
608 			/* Where IPA will write the length */
609 			off = offsetof(struct rmnet_map_header, pkt_len);
610 			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
611 			if (version >= IPA_VERSION_4_5)
612 				off &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
613 
614 			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
615 			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
616 		}
617 		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
618 		val |= HDR_OFST_METADATA_VALID_FMASK;
619 
620 		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
621 		/* HDR_A5_MUX is 0 */
622 		/* HDR_LEN_INC_DEAGG_HDR is 0 */
623 		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
624 	}
625 
626 	iowrite32(val, ipa->reg_virt + offset);
627 }
628 
629 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
630 {
631 	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
632 	u32 pad_align = endpoint->config.rx.pad_align;
633 	struct ipa *ipa = endpoint->ipa;
634 	u32 val = 0;
635 
636 	if (endpoint->config.qmap) {
637 		/* We have a header, so we must specify its endianness */
638 		val |= HDR_ENDIANNESS_FMASK;	/* big endian */
639 
640 		/* A QMAP header contains a 6 bit pad field at offset 0.
641 		 * The RMNet driver assumes this field is meaningful in
642 		 * packets it receives, and assumes the header's payload
643 		 * length includes that padding.  The RMNet driver does
644 		 * *not* pad packets it sends, however, so the pad field
645 		 * (although 0) should be ignored.
646 		 */
647 		if (!endpoint->toward_ipa) {
648 			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
649 			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
650 			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
651 			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
652 		}
653 	}
654 
655 	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
656 	if (!endpoint->toward_ipa)
657 		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
658 
659 	/* IPA v4.5 adds some most-significant bits to a few fields,
660 	 * two of which are defined in the HDR (not HDR_EXT) register.
661 	 */
662 	if (ipa->version >= IPA_VERSION_4_5) {
663 		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
664 		if (endpoint->config.qmap && !endpoint->toward_ipa) {
665 			u32 off;
666 
667 			off = offsetof(struct rmnet_map_header, pkt_len);
668 			off >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
669 			val |= u32_encode_bits(off,
670 					       HDR_OFST_PKT_SIZE_MSB_FMASK);
671 			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
672 		}
673 	}
674 	iowrite32(val, ipa->reg_virt + offset);
675 }
676 
677 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
678 {
679 	u32 endpoint_id = endpoint->endpoint_id;
680 	u32 val = 0;
681 	u32 offset;
682 
683 	if (endpoint->toward_ipa)
684 		return;		/* Register not valid for TX endpoints */
685 
686 	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
687 
688 	/* Note that HDR_ENDIANNESS indicates big endian header fields */
689 	if (endpoint->config.qmap)
690 		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
691 
692 	iowrite32(val, endpoint->ipa->reg_virt + offset);
693 }
694 
695 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
696 {
697 	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
698 	u32 val;
699 
700 	if (!endpoint->toward_ipa)
701 		return;		/* Register not valid for RX endpoints */
702 
703 	if (endpoint->config.dma_mode) {
704 		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
705 		u32 dma_endpoint_id;
706 
707 		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
708 
709 		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
710 		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
711 	} else {
712 		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
713 	}
714 	/* All other bits unspecified (and 0) */
715 
716 	iowrite32(val, endpoint->ipa->reg_virt + offset);
717 }
718 
719 /* Encoded values for AGGR endpoint register fields */
720 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
721 {
722 	if (version < IPA_VERSION_4_5)
723 		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
724 
725 	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
726 }
727 
728 /* For IPA v4.5+, times are expressed using Qtime.  The AP uses one of two
729  * pulse generators (0 and 1) to measure elapsed time.  In ipa_qtime_config()
730  * they're configured to have granularity 100 usec and 1 msec, respectively.
731  *
732  * The return value is the positive or negative Qtime value to use to
733  * express the (microsecond) time provided.  A positive return value
734  * means pulse generator 0 can be used; otherwise use pulse generator 1.
735  */
736 static int ipa_qtime_val(u32 microseconds, u32 max)
737 {
738 	u32 val;
739 
740 	/* Use 100 microsecond granularity if possible */
741 	val = DIV_ROUND_CLOSEST(microseconds, 100);
742 	if (val <= max)
743 		return (int)val;
744 
745 	/* Have to use pulse generator 1 (millisecond granularity) */
746 	val = DIV_ROUND_CLOSEST(microseconds, 1000);
747 	WARN_ON(val > max);
748 
749 	return (int)-val;
750 }
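
/* Editor's worked sketch (hypothetical field width, never called): with
 * a 5-bit limit field (max value 31), a 10 millisecond period cannot be
 * expressed at 100 usec granularity (10000 / 100 = 100 > 31), so pulse
 * generator 1 is chosen and the value is returned negated.
 */
static inline int ipa_qtime_val_sketch(void)
{
	return ipa_qtime_val(10 * USEC_PER_MSEC, 31);	/* yields -10 */
}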
751 
752 /* Encode the aggregation timer limit (microseconds) based on IPA version */
753 static u32 aggr_time_limit_encode(enum ipa_version version, u32 microseconds)
754 {
755 	u32 fmask;
756 	u32 val;
757 
758 	if (!microseconds)
759 		return 0;	/* Nothing to compute if time limit is 0 */
760 
761 	if (version >= IPA_VERSION_4_5) {
762 		u32 gran_sel;
763 		int ret;
764 
765 		/* Compute the Qtime limit value to use */
766 		fmask = aggr_time_limit_fmask(false);
767 		ret = ipa_qtime_val(microseconds, field_max(fmask));
768 		if (ret < 0) {
769 			val = -ret;
770 			gran_sel = AGGR_GRAN_SEL_FMASK;
771 		} else {
772 			val = ret;
773 			gran_sel = 0;
774 		}
775 
776 		return gran_sel | u32_encode_bits(val, fmask);
777 	}
778 
779 	/* We set aggregation granularity in ipa_hardware_config() */
780 	fmask = aggr_time_limit_fmask(true);
781 	val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
782 	WARN(val > field_max(fmask),
783 	     "aggr_time_limit too large (%u > %u usec)\n",
784 	     val, field_max(fmask) * IPA_AGGR_GRANULARITY);
785 
786 	return u32_encode_bits(val, fmask);
787 }
788 
789 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
790 {
791 	u32 val = enabled ? 1 : 0;
792 
793 	if (version < IPA_VERSION_4_5)
794 		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
795 
796 	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
797 }
798 
799 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
800 {
801 	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
802 	enum ipa_version version = endpoint->ipa->version;
803 	u32 val = 0;
804 
805 	if (endpoint->config.aggregation) {
806 		if (!endpoint->toward_ipa) {
807 			const struct ipa_endpoint_rx *rx_config;
808 			u32 buffer_size;
809 			bool close_eof;
810 			u32 limit;
811 
812 			rx_config = &endpoint->config.rx;
813 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
814 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
815 
816 			buffer_size = rx_config->buffer_size;
817 			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
818 						 rx_config->aggr_hard_limit);
819 			val |= aggr_byte_limit_encoded(version, limit);
820 
821 			limit = rx_config->aggr_time_limit;
822 			val |= aggr_time_limit_encode(version, limit);
823 
824 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
825 
826 			close_eof = rx_config->aggr_close_eof;
827 			val |= aggr_sw_eof_active_encoded(version, close_eof);
828 		} else {
829 			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
830 					       AGGR_EN_FMASK);
831 			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
832 			/* other fields ignored */
833 		}
834 		/* AGGR_FORCE_CLOSE is 0 */
835 		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
836 	} else {
837 		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
838 		/* other fields ignored */
839 	}
840 
841 	iowrite32(val, endpoint->ipa->reg_virt + offset);
842 }
843 
844 /* The head-of-line blocking timer is defined as a tick count.  For
845  * IPA v4.5+ the tick count is based on the Qtimer, which is
846  * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
847  * each tick represents 128 cycles of the IPA core clock.
848  *
849  * Return the encoded value, representing the timeout period provided,
850  * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
851  */
852 static u32 hol_block_timer_encode(struct ipa *ipa, u32 microseconds)
853 {
854 	u32 width;
855 	u32 scale;
856 	u64 ticks;
857 	u64 rate;
858 	u32 high;
859 	u32 val;
860 
861 	if (!microseconds)
862 		return 0;	/* Nothing to compute if timer period is 0 */
863 
864 	if (ipa->version >= IPA_VERSION_4_5) {
865 		u32 gran_sel;
866 		int ret;
867 
868 		/* Compute the Qtime limit value to use */
869 		ret = ipa_qtime_val(microseconds, field_max(TIME_LIMIT_FMASK));
870 		if (ret < 0) {
871 			val = -ret;
872 			gran_sel = GRAN_SEL_FMASK;
873 		} else {
874 			val = ret;
875 			gran_sel = 0;
876 		}
877 
878 		return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
879 	}
880 
881 	/* Use 64 bit arithmetic to avoid overflow... */
882 	rate = ipa_core_clock_rate(ipa);
883 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
884 	/* ...but we still need to fit into a 32-bit register */
885 	WARN_ON(ticks > U32_MAX);
886 
887 	/* IPA v3.5.1 through v4.1 just record the tick count */
888 	if (ipa->version < IPA_VERSION_4_2)
889 		return (u32)ticks;
890 
891 	/* For IPA v4.2, the tick count is represented by base and
892 	 * scale fields within the 32-bit timer register, where:
893 	 *     ticks = base << scale;
894 	 * The best precision is achieved when the base value is as
895 	 * large as possible.  Find the highest set bit in the tick
896 	 * count, and extract the number of bits in the base field
897 	 * such that high bit is included.
898 	 */
899 	high = fls(ticks);		/* 1..32 */
900 	width = HWEIGHT32(BASE_VALUE_FMASK);
901 	scale = high > width ? high - width : 0;
902 	if (scale) {
903 		/* If we're scaling, round up to get a closer result */
904 		ticks += 1 << (scale - 1);
905 		/* High bit was set, so rounding might have affected it */
906 		if (fls(ticks) != high)
907 			scale++;
908 	}
909 
910 	val = u32_encode_bits(scale, SCALE_FMASK);
911 	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
912 
913 	return val;
914 }
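
/* Editor's worked sketch of the IPA v4.2 base/scale split, assuming a
 * hypothetical 5-bit base field: for ticks = 1000, fls() is 10, so
 * scale is 10 - 5 = 5; the rounding step makes ticks 1016, and the
 * stored base is 1016 >> 5 = 31, representing 31 << 5 = 992 ticks --
 * the closest count the register can express.
 */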
915 
916 /* If microseconds is 0, timeout is immediate */
917 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
918 					      u32 microseconds)
919 {
920 	u32 endpoint_id = endpoint->endpoint_id;
921 	struct ipa *ipa = endpoint->ipa;
922 	u32 offset;
923 	u32 val;
924 
925 	/* This should only be changed when HOL_BLOCK_EN is disabled */
926 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
927 	val = hol_block_timer_encode(ipa, microseconds);
928 	iowrite32(val, ipa->reg_virt + offset);
929 }
930 
931 static void
932 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
933 {
934 	u32 endpoint_id = endpoint->endpoint_id;
935 	u32 offset;
936 	u32 val;
937 
938 	val = enable ? HOL_BLOCK_EN_FMASK : 0;
939 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
940 	iowrite32(val, endpoint->ipa->reg_virt + offset);
941 	/* When enabling, the register must be written twice for IPA v4.5+ */
942 	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
943 		iowrite32(val, endpoint->ipa->reg_virt + offset);
944 }
945 
946 /* Assumes HOL_BLOCK is in disabled state */
947 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
948 					       u32 microseconds)
949 {
950 	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
951 	ipa_endpoint_init_hol_block_en(endpoint, true);
952 }
953 
954 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
955 {
956 	ipa_endpoint_init_hol_block_en(endpoint, false);
957 }
958 
959 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
960 {
961 	u32 i;
962 
963 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
964 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
965 
966 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
967 			continue;
968 
969 		ipa_endpoint_init_hol_block_disable(endpoint);
970 		ipa_endpoint_init_hol_block_enable(endpoint, 0);
971 	}
972 }
973 
974 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
975 {
976 	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
977 	u32 val = 0;
978 
979 	if (!endpoint->toward_ipa)
980 		return;		/* Register not valid for RX endpoints */
981 
982 	/* DEAGGR_HDR_LEN is 0 */
983 	/* PACKET_OFFSET_VALID is 0 */
984 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
985 	/* MAX_PACKET_LEN is 0 (not enforced) */
986 
987 	iowrite32(val, endpoint->ipa->reg_virt + offset);
988 }
989 
990 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
991 {
992 	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
993 	struct ipa *ipa = endpoint->ipa;
994 	u32 val;
995 
996 	val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
997 	iowrite32(val, ipa->reg_virt + offset);
998 }
999 
1000 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1001 {
1002 	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
1003 	u32 val = 0;
1004 
1005 	if (!endpoint->toward_ipa)
1006 		return;		/* Register not valid for RX endpoints */
1007 
1008 	/* Low-order byte configures primary packet processing */
1009 	val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
1010 
1011 	/* Second byte (if supported) configures replicated packet processing */
1012 	if (endpoint->ipa->version < IPA_VERSION_4_5)
1013 		val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
1014 				       SEQ_REP_TYPE_FMASK);
1015 
1016 	iowrite32(val, endpoint->ipa->reg_virt + offset);
1017 }
1018 
1019 /**
1020  * ipa_endpoint_skb_tx() - Transmit a socket buffer
1021  * @endpoint:	Endpoint pointer
1022  * @skb:	Socket buffer to send
1023  *
1024  * Returns:	0 if successful, or a negative error code
1025  */
1026 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1027 {
1028 	struct gsi_trans *trans;
1029 	u32 nr_frags;
1030 	int ret;
1031 
1032 	/* Make sure source endpoint's TLV FIFO has enough entries to
1033 	 * hold the linear portion of the skb and all its fragments.
1034 	 * If not, see if we can linearize it before giving up.
1035 	 */
1036 	nr_frags = skb_shinfo(skb)->nr_frags;
1037 	if (nr_frags > endpoint->skb_frag_max) {
1038 		if (skb_linearize(skb))
1039 			return -E2BIG;
1040 		nr_frags = 0;
1041 	}
1042 
1043 	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1044 	if (!trans)
1045 		return -EBUSY;
1046 
1047 	ret = gsi_trans_skb_add(trans, skb);
1048 	if (ret)
1049 		goto err_trans_free;
1050 	trans->data = skb;	/* transaction owns skb now */
1051 
1052 	gsi_trans_commit(trans, !netdev_xmit_more());
1053 
1054 	return 0;
1055 
1056 err_trans_free:
1057 	gsi_trans_free(trans);
1058 
1059 	return -ENOMEM;
1060 }
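
/* Editor's sketch of a caller (hypothetical helper, loosely mirroring
 * the modem netdev transmit path): an error from ipa_endpoint_skb_tx()
 * -- e.g. -EBUSY when no transaction is available -- maps to
 * NETDEV_TX_BUSY so the stack retries, while success means the
 * transaction now owns the skb.
 */
static inline netdev_tx_t ipa_skb_tx_sketch(struct ipa_endpoint *endpoint,
					    struct sk_buff *skb)
{
	if (ipa_endpoint_skb_tx(endpoint, skb))
		return NETDEV_TX_BUSY;	/* a real caller would also
					 * distinguish fatal errors */

	return NETDEV_TX_OK;
}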
1061 
1062 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1063 {
1064 	u32 endpoint_id = endpoint->endpoint_id;
1065 	struct ipa *ipa = endpoint->ipa;
1066 	u32 val = 0;
1067 	u32 offset;
1068 
1069 	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
1070 
1071 	if (endpoint->config.status_enable) {
1072 		val |= STATUS_EN_FMASK;
1073 		if (endpoint->toward_ipa) {
1074 			enum ipa_endpoint_name name;
1075 			u32 status_endpoint_id;
1076 
1077 			name = endpoint->config.tx.status_endpoint;
1078 			status_endpoint_id = ipa->name_map[name]->endpoint_id;
1079 
1080 			val |= u32_encode_bits(status_endpoint_id,
1081 					       STATUS_ENDP_FMASK);
1082 		}
1083 		/* STATUS_LOCATION is 0, meaning status element precedes
1084 		 * packet (not present for IPA v4.5)
1085 		 */
1086 		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
1087 	}
1088 
1089 	iowrite32(val, ipa->reg_virt + offset);
1090 }
1091 
1092 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1093 				      struct gsi_trans *trans)
1094 {
1095 	struct page *page;
1096 	u32 buffer_size;
1097 	u32 offset;
1098 	u32 len;
1099 	int ret;
1100 
1101 	buffer_size = endpoint->config.rx.buffer_size;
1102 	page = dev_alloc_pages(get_order(buffer_size));
1103 	if (!page)
1104 		return -ENOMEM;
1105 
1106 	/* Offset the buffer to make space for skb headroom */
1107 	offset = NET_SKB_PAD;
1108 	len = buffer_size - offset;
1109 
1110 	ret = gsi_trans_page_add(trans, page, len, offset);
1111 	if (ret)
1112 		put_page(page);
1113 	else
1114 		trans->data = page;	/* transaction owns page now */
1115 
1116 	return ret;
1117 }
1118 
1119 /**
1120  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1121  * @endpoint:	Endpoint to be replenished
1122  *
1123  * The IPA hardware can hold a fixed number of receive buffers for an RX
1124  * endpoint, based on the number of entries in the underlying channel ring
1125  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
1126  * more receive buffers can be supplied to the hardware.  Replenishing for
1127  * an endpoint can be disabled, in which case buffers are not queued to
1128  * the hardware.
1129  */
1130 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1131 {
1132 	struct gsi_trans *trans;
1133 
1134 	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1135 		return;
1136 
1137 	/* Skip it if it's already active */
1138 	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1139 		return;
1140 
1141 	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1142 		bool doorbell;
1143 
1144 		if (ipa_endpoint_replenish_one(endpoint, trans))
1145 			goto try_again_later;
1146 
1147 
1149 		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1150 		gsi_trans_commit(trans, doorbell);
1151 	}
1152 
1153 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1154 
1155 	return;
1156 
1157 try_again_later:
1158 	gsi_trans_free(trans);
1159 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1160 
1161 	/* Whenever a receive buffer transaction completes we'll try to
1162 	 * replenish again.  It's unlikely, but if we fail to supply even
1163 	 * one buffer, nothing will trigger another replenish attempt.
1164 	 * If the hardware has no receive buffers queued, schedule work to
1165 	 * try replenishing again.
1166 	 */
1167 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1168 		schedule_delayed_work(&endpoint->replenish_work,
1169 				      msecs_to_jiffies(1));
1170 }
1171 
1172 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1173 {
1174 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1175 
1176 	/* Start replenishing if hardware currently has no buffers */
1177 	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1178 		ipa_endpoint_replenish(endpoint);
1179 }
1180 
1181 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1182 {
1183 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1184 }
1185 
1186 static void ipa_endpoint_replenish_work(struct work_struct *work)
1187 {
1188 	struct delayed_work *dwork = to_delayed_work(work);
1189 	struct ipa_endpoint *endpoint;
1190 
1191 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1192 
1193 	ipa_endpoint_replenish(endpoint);
1194 }
1195 
1196 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1197 				  void *data, u32 len, u32 extra)
1198 {
1199 	struct sk_buff *skb;
1200 
1201 	if (!endpoint->netdev)
1202 		return;
1203 
1204 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
1205 	if (skb) {
1206 		/* Copy the data into the socket buffer and receive it */
1207 		skb_put(skb, len);
1208 		memcpy(skb->data, data, len);
1209 		skb->truesize += extra;
1210 	}
1211 
1212 	ipa_modem_skb_rx(endpoint->netdev, skb);
1213 }
1214 
1215 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1216 				   struct page *page, u32 len)
1217 {
1218 	u32 buffer_size = endpoint->config.rx.buffer_size;
1219 	struct sk_buff *skb;
1220 
1221 	/* Nothing to do if there's no netdev */
1222 	if (!endpoint->netdev)
1223 		return false;
1224 
1225 	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1226 
1227 	skb = build_skb(page_address(page), buffer_size);
1228 	if (skb) {
1229 		/* Reserve the headroom and account for the data */
1230 		skb_reserve(skb, NET_SKB_PAD);
1231 		skb_put(skb, len);
1232 	}
1233 
1234 	/* Receive the buffer (or record drop if unable to build it) */
1235 	ipa_modem_skb_rx(endpoint->netdev, skb);
1236 
1237 	return skb != NULL;
1238 }
1239 
1240 /* The format of a packet status element is the same for several status
1241  * types (opcodes).  Other types aren't currently supported.
1242  */
1243 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1244 {
1245 	switch (opcode) {
1246 	case IPA_STATUS_OPCODE_PACKET:
1247 	case IPA_STATUS_OPCODE_DROPPED_PACKET:
1248 	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1249 	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1250 		return true;
1251 	default:
1252 		return false;
1253 	}
1254 }
1255 
1256 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1257 				     const struct ipa_status *status)
1258 {
1259 	u32 endpoint_id;
1260 
1261 	if (!ipa_status_format_packet(status->opcode))
1262 		return true;
1263 	if (!status->pkt_len)
1264 		return true;
1265 	endpoint_id = u8_get_bits(status->endp_dst_idx,
1266 				  IPA_STATUS_DST_IDX_FMASK);
1267 	if (endpoint_id != endpoint->endpoint_id)
1268 		return true;
1269 
1270 	return false;	/* Don't skip this packet, process it */
1271 }
1272 
1273 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1274 				    const struct ipa_status *status)
1275 {
1276 	struct ipa_endpoint *command_endpoint;
1277 	struct ipa *ipa = endpoint->ipa;
1278 	u32 endpoint_id;
1279 
1280 	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1281 		return false;	/* No valid tag */
1282 
1283 	/* The status contains a valid tag.  We know the packet was sent to
1284 	 * this endpoint (already verified by ipa_endpoint_status_skip()).
1285 	 * If the packet came from the AP->command TX endpoint we know
1286 	 * this packet was sent as part of the pipeline clear process.
1287 	 */
1288 	endpoint_id = u8_get_bits(status->endp_src_idx,
1289 				  IPA_STATUS_SRC_IDX_FMASK);
1290 	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1291 	if (endpoint_id == command_endpoint->endpoint_id) {
1292 		complete(&ipa->completion);
1293 	} else {
1294 		dev_err(&ipa->pdev->dev,
1295 			"unexpected tagged packet from endpoint %u\n",
1296 			endpoint_id);
1297 	}
1298 
1299 	return true;
1300 }
1301 
1302 /* Return whether the status indicates the packet should be dropped */
1303 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1304 				     const struct ipa_status *status)
1305 {
1306 	u32 val;
1307 
1308 	/* If the status indicates a tagged transfer, we'll drop the packet */
1309 	if (ipa_endpoint_status_tag(endpoint, status))
1310 		return true;
1311 
1312 	/* Deaggregation exceptions we drop; all other types we consume */
1313 	if (status->exception)
1314 		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1315 
1316 	/* Drop the packet if it fails to match a routing rule; otherwise no */
1317 	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1318 
1319 	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1320 }
1321 
1322 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1323 				      struct page *page, u32 total_len)
1324 {
1325 	u32 buffer_size = endpoint->config.rx.buffer_size;
1326 	void *data = page_address(page) + NET_SKB_PAD;
1327 	u32 unused = buffer_size - total_len;
1328 	u32 resid = total_len;
1329 
1330 	while (resid) {
1331 		const struct ipa_status *status = data;
1332 		u32 align;
1333 		u32 len;
1334 
1335 		if (resid < sizeof(*status)) {
1336 			dev_err(&endpoint->ipa->pdev->dev,
1337 				"short message (%u bytes < %zu byte status)\n",
1338 				resid, sizeof(*status));
1339 			break;
1340 		}
1341 
1342 		/* Skip over status packets that lack packet data */
1343 		if (ipa_endpoint_status_skip(endpoint, status)) {
1344 			data += sizeof(*status);
1345 			resid -= sizeof(*status);
1346 			continue;
1347 		}
1348 
1349 		/* Compute the amount of buffer space consumed by the packet,
1350 		 * including the status element.  If the hardware is configured
1351 		 * to pad packet data to an aligned boundary, account for that.
1352 		 * And if checksum offload is enabled a trailer containing
1353 		 * computed checksum information will be appended.
1354 		 */
1355 		align = endpoint->config.rx.pad_align ? : 1;
1356 		len = le16_to_cpu(status->pkt_len);
1357 		len = sizeof(*status) + ALIGN(len, align);
1358 		if (endpoint->config.checksum)
1359 			len += sizeof(struct rmnet_map_dl_csum_trailer);
1360 
1361 		if (!ipa_endpoint_status_drop(endpoint, status)) {
1362 			void *data2;
1363 			u32 extra;
1364 			u32 len2;
1365 
1366 			/* Client receives only packet data (no status) */
1367 			data2 = data + sizeof(*status);
1368 			len2 = le16_to_cpu(status->pkt_len);
1369 
1370 			/* Have the true size reflect the extra unused space in
1371 			 * the original receive buffer.  Distribute the "cost"
1372 			 * proportionately across all aggregated packets in the
1373 			 * buffer.
1374 			 */
1375 			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1376 			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1377 		}
1378 
1379 		/* Consume status and the full packet it describes */
1380 		data += len;
1381 		resid -= len;
1382 	}
1383 }
1384 
1385 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1386 				 struct gsi_trans *trans)
1387 {
1388 	struct page *page;
1389 
1390 	if (endpoint->toward_ipa)
1391 		return;
1392 
1393 	if (trans->cancelled)
1394 		goto done;
1395 
1396 	/* Parse or build a socket buffer using the actual received length */
1397 	page = trans->data;
1398 	if (endpoint->config.status_enable)
1399 		ipa_endpoint_status_parse(endpoint, page, trans->len);
1400 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1401 		trans->data = NULL;	/* Pages have been consumed */
1402 done:
1403 	ipa_endpoint_replenish(endpoint);
1404 }
1405 
1406 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1407 				struct gsi_trans *trans)
1408 {
1409 	if (endpoint->toward_ipa) {
1410 		struct ipa *ipa = endpoint->ipa;
1411 
1412 		/* Nothing to do for command transactions */
1413 		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1414 			struct sk_buff *skb = trans->data;
1415 
1416 			if (skb)
1417 				dev_kfree_skb_any(skb);
1418 		}
1419 	} else {
1420 		struct page *page = trans->data;
1421 
1422 		if (page)
1423 			put_page(page);
1424 	}
1425 }
1426 
1427 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1428 {
1429 	u32 val;
1430 
1431 	/* ROUTE_DIS is 0 */
1432 	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1433 	val |= ROUTE_DEF_HDR_TABLE_FMASK;
1434 	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1435 	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1436 	val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1437 
1438 	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1439 }
1440 
1441 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1442 {
1443 	ipa_endpoint_default_route_set(ipa, 0);
1444 }
1445 
1446 /**
1447  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1448  * @endpoint:	Endpoint to be reset
1449  *
1450  * If aggregation is active on an RX endpoint when a reset is performed
1451  * on its underlying GSI channel, a special sequence of actions must be
1452  * taken to ensure the IPA pipeline is properly cleared.
1453  *
1454  * Return:	0 if successful, or a negative error code
1455  */
1456 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1457 {
1458 	struct device *dev = &endpoint->ipa->pdev->dev;
1459 	struct ipa *ipa = endpoint->ipa;
1460 	struct gsi *gsi = &ipa->gsi;
1461 	bool suspended = false;
1462 	dma_addr_t addr;
1463 	u32 retries;
1464 	u32 len = 1;
1465 	void *virt;
1466 	int ret;
1467 
1468 	virt = kzalloc(len, GFP_KERNEL);
1469 	if (!virt)
1470 		return -ENOMEM;
1471 
1472 	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1473 	if (dma_mapping_error(dev, addr)) {
1474 		ret = -ENOMEM;
1475 		goto out_kfree;
1476 	}
1477 
1478 	/* Force close aggregation before issuing the reset */
1479 	ipa_endpoint_force_close(endpoint);
1480 
1481 	/* Reset and reconfigure the channel with the doorbell engine
1482 	 * disabled.  Then poll until we know aggregation is no longer
1483 	 * active.  We'll re-enable the doorbell (if appropriate) when
1484 	 * we reset again below.
1485 	 */
1486 	gsi_channel_reset(gsi, endpoint->channel_id, false);
1487 
1488 	/* Make sure the channel isn't suspended */
1489 	suspended = ipa_endpoint_program_suspend(endpoint, false);
1490 
1491 	/* Start channel and do a 1 byte read */
1492 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1493 	if (ret)
1494 		goto out_suspend_again;
1495 
1496 	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1497 	if (ret)
1498 		goto err_endpoint_stop;
1499 
1500 	/* Wait for aggregation to be closed on the channel */
1501 	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1502 	do {
1503 		if (!ipa_endpoint_aggr_active(endpoint))
1504 			break;
1505 		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1506 	} while (retries--);
1507 
1508 	/* Check one last time */
1509 	if (ipa_endpoint_aggr_active(endpoint))
1510 		dev_err(dev, "endpoint %u still active during reset\n",
1511 			endpoint->endpoint_id);
1512 
1513 	gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1514 
1515 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1516 	if (ret)
1517 		goto out_suspend_again;
1518 
1519 	/* Finally, reset and reconfigure the channel again (re-enabling
1520 	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1521 	 * complete the channel reset sequence.  Finish by suspending the
1522 	 * channel again (if necessary).
1523 	 */
1524 	gsi_channel_reset(gsi, endpoint->channel_id, true);
1525 
1526 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1527 
1528 	goto out_suspend_again;
1529 
1530 err_endpoint_stop:
1531 	(void)gsi_channel_stop(gsi, endpoint->channel_id);
1532 out_suspend_again:
1533 	if (suspended)
1534 		(void)ipa_endpoint_program_suspend(endpoint, true);
1535 	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1536 out_kfree:
1537 	kfree(virt);
1538 
1539 	return ret;
1540 }
1541 
1542 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1543 {
1544 	u32 channel_id = endpoint->channel_id;
1545 	struct ipa *ipa = endpoint->ipa;
1546 	bool special;
1547 	int ret = 0;
1548 
1549 	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1550 	 * is active, we need to handle things specially to recover.
1551 	 * All other cases just need to reset the underlying GSI channel.
1552 	 */
1553 	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1554 			endpoint->config.aggregation;
1555 	if (special && ipa_endpoint_aggr_active(endpoint))
1556 		ret = ipa_endpoint_reset_rx_aggr(endpoint);
1557 	else
1558 		gsi_channel_reset(&ipa->gsi, channel_id, true);
1559 
1560 	if (ret)
1561 		dev_err(&ipa->pdev->dev,
1562 			"error %d resetting channel %u for endpoint %u\n",
1563 			ret, endpoint->channel_id, endpoint->endpoint_id);
1564 }
1565 
1566 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1567 {
1568 	if (endpoint->toward_ipa) {
1569 		/* Newer versions of IPA use GSI channel flow control
1570 		 * instead of endpoint DELAY mode to prevent sending data.
1571 		 * Flow control is disabled for newly-allocated channels,
1572 		 * and we can assume flow control is not (ever) enabled
1573 		 * for AP TX channels.
1574 		 */
1575 		if (endpoint->ipa->version < IPA_VERSION_4_2)
1576 			ipa_endpoint_program_delay(endpoint, false);
1577 	} else {
1578 		/* Ensure suspend mode is off on all AP RX endpoints */
1579 		(void)ipa_endpoint_program_suspend(endpoint, false);
1580 	}
1581 	ipa_endpoint_init_cfg(endpoint);
1582 	ipa_endpoint_init_nat(endpoint);
1583 	ipa_endpoint_init_hdr(endpoint);
1584 	ipa_endpoint_init_hdr_ext(endpoint);
1585 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
1586 	ipa_endpoint_init_mode(endpoint);
1587 	ipa_endpoint_init_aggr(endpoint);
1588 	if (!endpoint->toward_ipa) {
1589 		if (endpoint->config.rx.holb_drop)
1590 			ipa_endpoint_init_hol_block_enable(endpoint, 0);
1591 		else
1592 			ipa_endpoint_init_hol_block_disable(endpoint);
1593 	}
1594 	ipa_endpoint_init_deaggr(endpoint);
1595 	ipa_endpoint_init_rsrc_grp(endpoint);
1596 	ipa_endpoint_init_seq(endpoint);
1597 	ipa_endpoint_status(endpoint);
1598 }
1599 
1600 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1601 {
1602 	struct ipa *ipa = endpoint->ipa;
1603 	struct gsi *gsi = &ipa->gsi;
1604 	int ret;
1605 
1606 	ret = gsi_channel_start(gsi, endpoint->channel_id);
1607 	if (ret) {
1608 		dev_err(&ipa->pdev->dev,
1609 			"error %d starting %cX channel %u for endpoint %u\n",
1610 			ret, endpoint->toward_ipa ? 'T' : 'R',
1611 			endpoint->channel_id, endpoint->endpoint_id);
1612 		return ret;
1613 	}
1614 
1615 	if (!endpoint->toward_ipa) {
1616 		ipa_interrupt_suspend_enable(ipa->interrupt,
1617 					     endpoint->endpoint_id);
1618 		ipa_endpoint_replenish_enable(endpoint);
1619 	}
1620 
1621 	ipa->enabled |= BIT(endpoint->endpoint_id);
1622 
1623 	return 0;
1624 }
1625 
1626 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1627 {
1628 	u32 mask = BIT(endpoint->endpoint_id);
1629 	struct ipa *ipa = endpoint->ipa;
1630 	struct gsi *gsi = &ipa->gsi;
1631 	int ret;
1632 
1633 	if (!(ipa->enabled & mask))
1634 		return;
1635 
1636 	ipa->enabled ^= mask;
1637 
1638 	if (!endpoint->toward_ipa) {
1639 		ipa_endpoint_replenish_disable(endpoint);
1640 		ipa_interrupt_suspend_disable(ipa->interrupt,
1641 					      endpoint->endpoint_id);
1642 	}
1643 
1644 	/* Note that if stop fails, the channel's state is not well-defined */
1645 	ret = gsi_channel_stop(gsi, endpoint->channel_id);
1646 	if (ret)
1647 		dev_err(&ipa->pdev->dev,
1648 			"error %d attempting to stop endpoint %u\n", ret,
1649 			endpoint->endpoint_id);
1650 }
1651 
1652 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1653 {
1654 	struct device *dev = &endpoint->ipa->pdev->dev;
1655 	struct gsi *gsi = &endpoint->ipa->gsi;
1656 	int ret;
1657 
1658 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1659 		return;
1660 
1661 	if (!endpoint->toward_ipa) {
1662 		ipa_endpoint_replenish_disable(endpoint);
1663 		(void)ipa_endpoint_program_suspend(endpoint, true);
1664 	}
1665 
1666 	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1667 	if (ret)
1668 		dev_err(dev, "error %d suspending channel %u\n", ret,
1669 			endpoint->channel_id);
1670 }
1671 
1672 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1673 {
1674 	struct device *dev = &endpoint->ipa->pdev->dev;
1675 	struct gsi *gsi = &endpoint->ipa->gsi;
1676 	int ret;
1677 
1678 	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1679 		return;
1680 
1681 	if (!endpoint->toward_ipa)
1682 		(void)ipa_endpoint_program_suspend(endpoint, false);
1683 
1684 	ret = gsi_channel_resume(gsi, endpoint->channel_id);
1685 	if (ret)
1686 		dev_err(dev, "error %d resuming channel %u\n", ret,
1687 			endpoint->channel_id);
1688 	else if (!endpoint->toward_ipa)
1689 		ipa_endpoint_replenish_enable(endpoint);
1690 }
1691 
1692 void ipa_endpoint_suspend(struct ipa *ipa)
1693 {
1694 	if (!ipa->setup_complete)
1695 		return;
1696 
1697 	if (ipa->modem_netdev)
1698 		ipa_modem_suspend(ipa->modem_netdev);
1699 
1700 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1701 	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1702 }
1703 
1704 void ipa_endpoint_resume(struct ipa *ipa)
1705 {
1706 	if (!ipa->setup_complete)
1707 		return;
1708 
1709 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1710 	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1711 
1712 	if (ipa->modem_netdev)
1713 		ipa_modem_resume(ipa->modem_netdev);
1714 }
1715 
1716 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1717 {
1718 	struct gsi *gsi = &endpoint->ipa->gsi;
1719 	u32 channel_id = endpoint->channel_id;
1720 
1721 	/* Only AP endpoints get set up */
1722 	if (endpoint->ee_id != GSI_EE_AP)
1723 		return;
1724 
1725 	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1726 	if (!endpoint->toward_ipa) {
1727 		/* RX transactions require a single TRE, so the maximum
1728 		 * backlog is the same as the maximum outstanding TREs.
1729 		 */
1730 		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1731 		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1732 		INIT_DELAYED_WORK(&endpoint->replenish_work,
1733 				  ipa_endpoint_replenish_work);
1734 	}
1735 
1736 	ipa_endpoint_program(endpoint);
1737 
1738 	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1739 }
1740 
1741 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1742 {
1743 	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1744 
1745 	if (!endpoint->toward_ipa)
1746 		cancel_delayed_work_sync(&endpoint->replenish_work);
1747 
1748 	ipa_endpoint_reset(endpoint);
1749 }
1750 
1751 void ipa_endpoint_setup(struct ipa *ipa)
1752 {
1753 	u32 initialized = ipa->initialized;
1754 
1755 	ipa->set_up = 0;
1756 	while (initialized) {
1757 		u32 endpoint_id = __ffs(initialized);
1758 
1759 		initialized ^= BIT(endpoint_id);
1760 
1761 		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1762 	}
1763 }
1764 
1765 void ipa_endpoint_teardown(struct ipa *ipa)
1766 {
1767 	u32 set_up = ipa->set_up;
1768 
1769 	while (set_up) {
1770 		u32 endpoint_id = __fls(set_up);
1771 
1772 		set_up ^= BIT(endpoint_id);
1773 
1774 		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1775 	}
1776 	ipa->set_up = 0;
1777 }
1778 
1779 int ipa_endpoint_config(struct ipa *ipa)
1780 {
1781 	struct device *dev = &ipa->pdev->dev;
1782 	u32 initialized;
1783 	u32 rx_base;
1784 	u32 rx_mask;
1785 	u32 tx_mask;
1786 	int ret = 0;
1787 	u32 max;
1788 	u32 val;
1789 
1790 	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1791 	 * Furthermore, the endpoints were not grouped such that TX
1792 	 * endpoint numbers started with 0 and RX endpoints had numbers
1793 	 * higher than all TX endpoints, so we can't do the simple
1794 	 * direction check used for newer hardware below.
1795 	 *
1796 	 * For hardware that doesn't support the FLAVOR_0 register,
1797 	 * just set the available mask to support any endpoint, and
1798 	 * assume the configuration is valid.
1799 	 */
1800 	if (ipa->version < IPA_VERSION_3_5) {
1801 		ipa->available = ~0;
1802 		return 0;
1803 	}
1804 
1805 	/* Find out about the endpoints supplied by the hardware, and ensure
1806 	 * the highest one doesn't exceed the number we support.
1807 	 */
1808 	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1809 
1810 	/* Our RX is an IPA producer */
1811 	rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1812 	max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1813 	if (max > IPA_ENDPOINT_MAX) {
1814 		dev_err(dev, "too many endpoints (%u > %u)\n",
1815 			max, IPA_ENDPOINT_MAX);
1816 		return -EINVAL;
1817 	}
1818 	rx_mask = GENMASK(max - 1, rx_base);
1819 
1820 	/* Our TX is an IPA consumer */
1821 	max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1822 	tx_mask = GENMASK(max - 1, 0);
1823 
1824 	ipa->available = rx_mask | tx_mask;
1825 
1826 	/* Check for initialized endpoints not supported by the hardware */
1827 	if (ipa->initialized & ~ipa->available) {
1828 		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1829 			ipa->initialized & ~ipa->available);
1830 		ret = -EINVAL;		/* Report other errors too */
1831 	}
1832 
1833 	initialized = ipa->initialized;
1834 	while (initialized) {
1835 		u32 endpoint_id = __ffs(initialized);
1836 		struct ipa_endpoint *endpoint;
1837 
1838 		initialized ^= BIT(endpoint_id);
1839 
1840 		/* Make sure it's pointing in the right direction */
1841 		endpoint = &ipa->endpoint[endpoint_id];
1842 		if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
1843 			dev_err(dev, "endpoint id %u wrong direction\n",
1844 				endpoint_id);
1845 			ret = -EINVAL;
1846 		}
1847 	}
1848 
1849 	return ret;
1850 }
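
/* Editor's worked sketch of the mask math above, with hypothetical
 * FLAVOR_0 contents: if the lowest RX (producer) endpoint is 13 and the
 * hardware reports 10 producer and 13 consumer pipes, then rx_mask is
 * GENMASK(22, 13), tx_mask is GENMASK(12, 0), and ipa->available covers
 * endpoint IDs 0 through 22.  Never called; illustration only.
 */
static inline u32 ipa_available_mask_sketch(void)
{
	u32 rx_base = 13;	/* hypothetical IPA_PROD_LOWEST */
	u32 rx_count = 10;	/* hypothetical IPA_MAX_PROD_PIPES */
	u32 tx_count = 13;	/* hypothetical IPA_MAX_CONS_PIPES */

	return GENMASK(rx_base + rx_count - 1, rx_base) |
	       GENMASK(tx_count - 1, 0);
}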
1851 
1852 void ipa_endpoint_deconfig(struct ipa *ipa)
1853 {
1854 	ipa->available = 0;	/* Nothing more to do */
1855 }
1856 
1857 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1858 				  const struct ipa_gsi_endpoint_data *data)
1859 {
1860 	struct ipa_endpoint *endpoint;
1861 
1862 	endpoint = &ipa->endpoint[data->endpoint_id];
1863 
1864 	if (data->ee_id == GSI_EE_AP)
1865 		ipa->channel_map[data->channel_id] = endpoint;
1866 	ipa->name_map[name] = endpoint;
1867 
1868 	endpoint->ipa = ipa;
1869 	endpoint->ee_id = data->ee_id;
1870 	endpoint->channel_id = data->channel_id;
1871 	endpoint->endpoint_id = data->endpoint_id;
1872 	endpoint->toward_ipa = data->toward_ipa;
1873 	endpoint->config = data->endpoint.config;
1874 
1875 	ipa->initialized |= BIT(endpoint->endpoint_id);
1876 }
1877 
1878 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1879 {
1880 	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1881 
1882 	memset(endpoint, 0, sizeof(*endpoint));
1883 }
1884 
1885 void ipa_endpoint_exit(struct ipa *ipa)
1886 {
1887 	u32 initialized = ipa->initialized;
1888 
1889 	while (initialized) {
1890 		u32 endpoint_id = __fls(initialized);
1891 
1892 		initialized ^= BIT(endpoint_id);
1893 
1894 		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1895 	}
1896 	memset(ipa->name_map, 0, sizeof(ipa->name_map));
1897 	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1898 }
1899 
1900 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1901 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1902 		      const struct ipa_gsi_endpoint_data *data)
1903 {
1904 	enum ipa_endpoint_name name;
1905 	u32 filter_map;
1906 
1907 	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
1908 
1909 	if (!ipa_endpoint_data_valid(ipa, count, data))
1910 		return 0;	/* Error */
1911 
1912 	ipa->initialized = 0;
1913 
1914 	filter_map = 0;
1915 	for (name = 0; name < count; name++, data++) {
1916 		if (ipa_gsi_endpoint_data_empty(data))
1917 			continue;	/* Skip over empty slots */
1918 
1919 		ipa_endpoint_init_one(ipa, name, data);
1920 
1921 		if (data->endpoint.filter_support)
1922 			filter_map |= BIT(data->endpoint_id);
1923 		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
1924 			ipa->modem_tx_count++;
1925 	}
1926 
1927 	if (!ipa_filter_map_valid(ipa, filter_map))
1928 		goto err_endpoint_exit;
1929 
1930 	return filter_map;	/* Non-zero bitmask */
1931 
1932 err_endpoint_exit:
1933 	ipa_endpoint_exit(ipa);
1934 
1935 	return 0;	/* Error */
1936 }
1937