xref: /linux/drivers/net/ipa/gsi.c (revision 3839a7460721b87501134697b7b90c45dcc7825d)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2018-2020 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/bits.h>
9 #include <linux/bitfield.h>
10 #include <linux/mutex.h>
11 #include <linux/completion.h>
12 #include <linux/io.h>
13 #include <linux/bug.h>
14 #include <linux/interrupt.h>
15 #include <linux/platform_device.h>
16 #include <linux/netdevice.h>
17 
18 #include "gsi.h"
19 #include "gsi_reg.h"
20 #include "gsi_private.h"
21 #include "gsi_trans.h"
22 #include "ipa_gsi.h"
23 #include "ipa_data.h"
24 
25 /**
26  * DOC: The IPA Generic Software Interface
27  *
28  * The generic software interface (GSI) is an integral component of the IPA,
29  * providing a well-defined communication layer between the AP subsystem
30  * and the IPA core.  The modem uses the GSI layer as well.
31  *
32  *	--------	     ---------
33  *	|      |	     |	     |
34  *	|  AP  +<---.	.----+ Modem |
35  *	|      +--. |	| .->+	     |
36  *	|      |  | |	| |  |	     |
37  *	--------  | |	| |  ---------
38  *		  v |	v |
39  *		--+-+---+-+--
40  *		|    GSI    |
41  *		|-----------|
42  *		|	    |
43  *		|    IPA    |
44  *		|	    |
45  *		-------------
46  *
47  * In the above diagram, the AP and Modem represent "execution environments"
48  * (EEs), which are independent operating environments that use the IPA for
49  * data transfer.
50  *
51  * Each EE uses a set of unidirectional GSI "channels," which allow transfer
52  * of data to or from the IPA.  A channel is implemented as a ring buffer,
53  * with a DRAM-resident array of "transfer elements" (TREs) available to
54  * describe transfers to or from other EEs through the IPA.  A transfer
55  * element can also contain an immediate command, requesting the IPA perform
56  * actions other than data transfer.
57  *
58  * Each TRE refers to a block of data--also located DRAM.  After writing one
59  * or more TREs to a channel, the writer (either the IPA or an EE) writes a
60  * doorbell register to inform the receiving side how many elements have
61  * been written.
62  *
63  * Each channel has a GSI "event ring" associated with it.  An event ring
64  * is implemented very much like a channel ring, but is always directed from
65  * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
66  * events by adding an entry to the event ring associated with the channel.
67  * The GSI then writes its doorbell for the event ring, causing the target
68  * EE to be interrupted.  Each entry in an event ring contains a pointer
69  * to the channel TRE whose completion the event represents.
70  *
71  * Each TRE in a channel ring has a set of flags.  One flag indicates whether
72  * the completion of the transfer operation generates an entry (and possibly
73  * an interrupt) in the channel's event ring.  Other flags allow transfer
74  * elements to be chained together, forming a single logical transaction.
75  * TRE flags are used to control whether and when interrupts are generated
76  * to signal completion of channel transfers.
77  *
78  * Elements in channel and event rings are completed (or consumed) strictly
79  * in order.  Completion of one entry implies the completion of all preceding
80  * entries.  A single completion interrupt can therefore communicate the
81  * completion of many transfers.
82  *
83  * Note that all GSI registers are little-endian, which is the assumed
84  * endianness of I/O space accesses.  The accessor functions perform byte
85  * swapping if needed (i.e., for a big endian CPU).
86  */
87 
88 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
89 #define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */
90 
91 #define GSI_CMD_TIMEOUT			5	/* seconds */
92 
93 #define GSI_CHANNEL_STOP_RX_RETRIES	10
94 
95 #define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
96 #define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */
97 
98 #define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */
99 
100 /* An entry in an event ring */
101 struct gsi_event {
102 	__le64 xfer_ptr;
103 	__le16 len;
104 	u8 reserved1;
105 	u8 code;
106 	__le16 reserved2;
107 	u8 type;
108 	u8 chid;
109 };
110 
111 /* Hardware values from the error log register error code field */
112 enum gsi_err_code {
113 	GSI_INVALID_TRE_ERR			= 0x1,
114 	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
115 	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
116 	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
117 	GSI_EVT_RING_EMPTY_ERR			= 0x5,
118 	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
119 	GSI_HWO_1_ERR				= 0x8,
120 };
121 
122 /* Hardware values from the error log register error type field */
123 enum gsi_err_type {
124 	GSI_ERR_TYPE_GLOB	= 0x1,
125 	GSI_ERR_TYPE_CHAN	= 0x2,
126 	GSI_ERR_TYPE_EVT	= 0x3,
127 };
128 
129 /* Hardware values used when programming an event ring */
130 enum gsi_evt_chtype {
131 	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
132 	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
133 	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
134 	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
135 };
136 
137 /* Hardware values used when programming a channel */
138 enum gsi_channel_protocol {
139 	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
140 	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
141 	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
142 	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
143 };
144 
145 /* Hardware values representing an event ring immediate command opcode */
146 enum gsi_evt_cmd_opcode {
147 	GSI_EVT_ALLOCATE	= 0x0,
148 	GSI_EVT_RESET		= 0x9,
149 	GSI_EVT_DE_ALLOC	= 0xa,
150 };
151 
152 /* Hardware values representing a generic immediate command opcode */
153 enum gsi_generic_cmd_opcode {
154 	GSI_GENERIC_HALT_CHANNEL	= 0x1,
155 	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
156 };
157 
158 /* Hardware values representing a channel immediate command opcode */
159 enum gsi_ch_cmd_opcode {
160 	GSI_CH_ALLOCATE	= 0x0,
161 	GSI_CH_START	= 0x1,
162 	GSI_CH_STOP	= 0x2,
163 	GSI_CH_RESET	= 0x9,
164 	GSI_CH_DE_ALLOC	= 0xa,
165 };
166 
167 /** gsi_channel_scratch_gpi - GPI protocol scratch register
168  * @max_outstanding_tre:
169  *	Defines the maximum number of TREs allowed in a single transaction
170  *	on a channel (in bytes).  This determines the amount of prefetch
171  *	performed by the hardware.  We configure this to equal the size of
172  *	the TLV FIFO for the channel.
173  * @outstanding_threshold:
174  *	Defines the threshold (in bytes) determining when the sequencer
175  *	should update the channel doorbell.  We configure this to equal
176  *	the size of two TREs.
177  */
178 struct gsi_channel_scratch_gpi {
179 	u64 reserved1;
180 	u16 reserved2;
181 	u16 max_outstanding_tre;
182 	u16 reserved3;
183 	u16 outstanding_threshold;
184 };
185 
186 /** gsi_channel_scratch - channel scratch configuration area
187  *
188  * The exact interpretation of this register is protocol-specific.
189  * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
190  */
191 union gsi_channel_scratch {
192 	struct gsi_channel_scratch_gpi gpi;
193 	struct {
194 		u32 word1;
195 		u32 word2;
196 		u32 word3;
197 		u32 word4;
198 	} data;
199 };
200 
201 /* Check things that can be validated at build time. */
202 static void gsi_validate_build(void)
203 {
204 	/* This is used as a divisor */
205 	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
206 
207 	/* Code assumes the size of channel and event ring element are
208 	 * the same (and fixed).  Make sure the size of an event ring
209 	 * element is what's expected.
210 	 */
211 	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
212 
213 	/* Hardware requires a 2^n ring size.  We ensure the number of
214 	 * elements in an event ring is a power of 2 elsewhere; this
215 	 * ensure the elements themselves meet the requirement.
216 	 */
217 	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
218 
219 	/* The channel element size must fit in this field */
220 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
221 
222 	/* The event ring element size must fit in this field */
223 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
224 }
225 
226 /* Return the channel id associated with a given channel */
227 static u32 gsi_channel_id(struct gsi_channel *channel)
228 {
229 	return channel - &channel->gsi->channel[0];
230 }
231 
232 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
233 {
234 	u32 val;
235 
236 	gsi->event_enable_bitmap |= BIT(evt_ring_id);
237 	val = gsi->event_enable_bitmap;
238 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
239 }
240 
241 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
242 {
243 	u32 val;
244 
245 	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
246 	val = gsi->event_enable_bitmap;
247 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
248 }
249 
250 /* Enable all GSI_interrupt types */
251 static void gsi_irq_enable(struct gsi *gsi)
252 {
253 	u32 val;
254 
255 	/* We don't use inter-EE channel or event interrupts */
256 	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
257 	val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
258 	val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
259 	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
260 
261 	val = GENMASK(gsi->channel_count - 1, 0);
262 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
263 
264 	val = GENMASK(gsi->evt_ring_count - 1, 0);
265 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
266 
267 	/* Each IEOB interrupt is enabled (later) as needed by channels */
268 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
269 
270 	val = GSI_CNTXT_GLOB_IRQ_ALL;
271 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
272 
273 	/* Never enable GSI_BREAK_POINT */
274 	val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
275 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
276 }
277 
278 /* Disable all GSI_interrupt types */
279 static void gsi_irq_disable(struct gsi *gsi)
280 {
281 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
282 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
283 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
284 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
285 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
286 	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
287 }
288 
289 /* Return the virtual address associated with a ring index */
290 void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
291 {
292 	/* Note: index *must* be used modulo the ring count here */
293 	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
294 }
295 
296 /* Return the 32-bit DMA address associated with a ring index */
297 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
298 {
299 	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
300 }
301 
302 /* Return the ring index of a 32-bit ring offset */
303 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
304 {
305 	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
306 }
307 
308 /* Issue a GSI command by writing a value to a register, then wait for
309  * completion to be signaled.  Returns true if the command completes
310  * or false if it times out.
311  */
312 static bool
313 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
314 {
315 	reinit_completion(completion);
316 
317 	iowrite32(val, gsi->virt + reg);
318 
319 	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
320 }
321 
322 /* Return the hardware's notion of the current state of an event ring */
323 static enum gsi_evt_ring_state
324 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
325 {
326 	u32 val;
327 
328 	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
329 
330 	return u32_get_bits(val, EV_CHSTATE_FMASK);
331 }
332 
333 /* Issue an event ring command and wait for it to complete */
334 static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
335 			    enum gsi_evt_cmd_opcode opcode)
336 {
337 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
338 	struct completion *completion = &evt_ring->completion;
339 	u32 val;
340 
341 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
342 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
343 
344 	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
345 		return 0;	/* Success! */
346 
347 	dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
348 		"(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
349 
350 	return -ETIMEDOUT;
351 }
352 
353 /* Allocate an event ring in NOT_ALLOCATED state */
354 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
355 {
356 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
357 	int ret;
358 
359 	/* Get initial event ring state */
360 	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
361 
362 	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
363 		return -EINVAL;
364 
365 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
366 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
367 		dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
368 			evt_ring->state);
369 		ret = -EIO;
370 	}
371 
372 	return ret;
373 }
374 
375 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
376 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
377 {
378 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
379 	enum gsi_evt_ring_state state = evt_ring->state;
380 	int ret;
381 
382 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
383 	    state != GSI_EVT_RING_STATE_ERROR) {
384 		dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
385 			evt_ring->state);
386 		return;
387 	}
388 
389 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
390 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
391 		dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
392 			evt_ring->state);
393 }
394 
395 /* Issue a hardware de-allocation request for an allocated event ring */
396 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
397 {
398 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
399 	int ret;
400 
401 	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
402 		dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
403 			evt_ring->state);
404 		return;
405 	}
406 
407 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
408 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
409 		dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
410 			evt_ring->state);
411 }
412 
413 /* Fetch the current state of a channel from hardware */
414 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
415 {
416 	u32 channel_id = gsi_channel_id(channel);
417 	void *virt = channel->gsi->virt;
418 	u32 val;
419 
420 	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
421 
422 	return u32_get_bits(val, CHSTATE_FMASK);
423 }
424 
425 /* Issue a channel command and wait for it to complete */
426 static int
427 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
428 {
429 	struct completion *completion = &channel->completion;
430 	u32 channel_id = gsi_channel_id(channel);
431 	struct gsi *gsi = channel->gsi;
432 	u32 val;
433 
434 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
435 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
436 
437 	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
438 		return 0;	/* Success! */
439 
440 	dev_err(gsi->dev,
441 		"GSI command %u to channel %u timed out (state is %u)\n",
442 		opcode, channel_id, gsi_channel_state(channel));
443 
444 	return -ETIMEDOUT;
445 }
446 
447 /* Allocate GSI channel in NOT_ALLOCATED state */
448 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
449 {
450 	struct gsi_channel *channel = &gsi->channel[channel_id];
451 	enum gsi_channel_state state;
452 	int ret;
453 
454 	/* Get initial channel state */
455 	state = gsi_channel_state(channel);
456 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
457 		return -EINVAL;
458 
459 	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
460 
461 	/* Channel state will normally have been updated */
462 	state = gsi_channel_state(channel);
463 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
464 		dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
465 			state);
466 		ret = -EIO;
467 	}
468 
469 	return ret;
470 }
471 
472 /* Start an ALLOCATED channel */
473 static int gsi_channel_start_command(struct gsi_channel *channel)
474 {
475 	enum gsi_channel_state state;
476 	int ret;
477 
478 	state = gsi_channel_state(channel);
479 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
480 	    state != GSI_CHANNEL_STATE_STOPPED)
481 		return -EINVAL;
482 
483 	ret = gsi_channel_command(channel, GSI_CH_START);
484 
485 	/* Channel state will normally have been updated */
486 	state = gsi_channel_state(channel);
487 	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
488 		dev_err(channel->gsi->dev,
489 			"bad channel state (%u) after start\n", state);
490 		ret = -EIO;
491 	}
492 
493 	return ret;
494 }
495 
496 /* Stop a GSI channel in STARTED state */
497 static int gsi_channel_stop_command(struct gsi_channel *channel)
498 {
499 	enum gsi_channel_state state;
500 	int ret;
501 
502 	state = gsi_channel_state(channel);
503 	if (state != GSI_CHANNEL_STATE_STARTED &&
504 	    state != GSI_CHANNEL_STATE_STOP_IN_PROC)
505 		return -EINVAL;
506 
507 	ret = gsi_channel_command(channel, GSI_CH_STOP);
508 
509 	/* Channel state will normally have been updated */
510 	state = gsi_channel_state(channel);
511 	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
512 		return ret;
513 
514 	/* We may have to try again if stop is in progress */
515 	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
516 		return -EAGAIN;
517 
518 	dev_err(channel->gsi->dev,
519 		"bad channel state (%u) after stop\n", state);
520 
521 	return -EIO;
522 }
523 
524 /* Reset a GSI channel in ALLOCATED or ERROR state. */
525 static void gsi_channel_reset_command(struct gsi_channel *channel)
526 {
527 	enum gsi_channel_state state;
528 	int ret;
529 
530 	msleep(1);	/* A short delay is required before a RESET command */
531 
532 	state = gsi_channel_state(channel);
533 	if (state != GSI_CHANNEL_STATE_STOPPED &&
534 	    state != GSI_CHANNEL_STATE_ERROR) {
535 		dev_err(channel->gsi->dev,
536 			"bad channel state (%u) before reset\n", state);
537 		return;
538 	}
539 
540 	ret = gsi_channel_command(channel, GSI_CH_RESET);
541 
542 	/* Channel state will normally have been updated */
543 	state = gsi_channel_state(channel);
544 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
545 		dev_err(channel->gsi->dev,
546 			"bad channel state (%u) after reset\n", state);
547 }
548 
549 /* Deallocate an ALLOCATED GSI channel */
550 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
551 {
552 	struct gsi_channel *channel = &gsi->channel[channel_id];
553 	enum gsi_channel_state state;
554 	int ret;
555 
556 	state = gsi_channel_state(channel);
557 	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
558 		dev_err(gsi->dev,
559 			"bad channel state (%u) before dealloc\n", state);
560 		return;
561 	}
562 
563 	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
564 
565 	/* Channel state will normally have been updated */
566 	state = gsi_channel_state(channel);
567 	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
568 		dev_err(gsi->dev,
569 			"bad channel state (%u) after dealloc\n", state);
570 }
571 
572 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
573  * The index argument (modulo the ring count) is the first unfilled entry, so
574  * we supply one less than that with the doorbell.  Update the event ring
575  * index field with the value provided.
576  */
577 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
578 {
579 	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
580 	u32 val;
581 
582 	ring->index = index;	/* Next unused entry */
583 
584 	/* Note: index *must* be used modulo the ring count here */
585 	val = gsi_ring_addr(ring, (index - 1) % ring->count);
586 	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
587 }
588 
589 /* Program an event ring for use */
590 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
591 {
592 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
593 	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
594 	u32 val;
595 
596 	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
597 	val |= EV_INTYPE_FMASK;
598 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
599 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
600 
601 	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
602 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
603 
604 	/* The context 2 and 3 registers store the low-order and
605 	 * high-order 32 bits of the address of the event ring,
606 	 * respectively.
607 	 */
608 	val = evt_ring->ring.addr & GENMASK(31, 0);
609 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
610 
611 	val = evt_ring->ring.addr >> 32;
612 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
613 
614 	/* Enable interrupt moderation by setting the moderation delay */
615 	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
616 	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
617 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
618 
619 	/* No MSI write data, and MSI address high and low address is 0 */
620 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
621 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
622 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
623 
624 	/* We don't need to get event read pointer updates */
625 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
626 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
627 
628 	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
629 	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
630 }
631 
632 /* Return the last (most recent) transaction completed on a channel. */
633 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
634 {
635 	struct gsi_trans_info *trans_info = &channel->trans_info;
636 	struct gsi_trans *trans;
637 
638 	spin_lock_bh(&trans_info->spinlock);
639 
640 	if (!list_empty(&trans_info->complete))
641 		trans = list_last_entry(&trans_info->complete,
642 					struct gsi_trans, links);
643 	else if (!list_empty(&trans_info->polled))
644 		trans = list_last_entry(&trans_info->polled,
645 					struct gsi_trans, links);
646 	else
647 		trans = NULL;
648 
649 	/* Caller will wait for this, so take a reference */
650 	if (trans)
651 		refcount_inc(&trans->refcount);
652 
653 	spin_unlock_bh(&trans_info->spinlock);
654 
655 	return trans;
656 }
657 
658 /* Wait for transaction activity on a channel to complete */
659 static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
660 {
661 	struct gsi_trans *trans;
662 
663 	/* Get the last transaction, and wait for it to complete */
664 	trans = gsi_channel_trans_last(channel);
665 	if (trans) {
666 		wait_for_completion(&trans->completion);
667 		gsi_trans_free(trans);
668 	}
669 }
670 
671 /* Stop channel activity.  Transactions may not be allocated until thawed. */
672 static void gsi_channel_freeze(struct gsi_channel *channel)
673 {
674 	gsi_channel_trans_quiesce(channel);
675 
676 	napi_disable(&channel->napi);
677 
678 	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
679 }
680 
681 /* Allow transactions to be used on the channel again. */
682 static void gsi_channel_thaw(struct gsi_channel *channel)
683 {
684 	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
685 
686 	napi_enable(&channel->napi);
687 }
688 
689 /* Program a channel for use */
690 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
691 {
692 	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
693 	u32 channel_id = gsi_channel_id(channel);
694 	union gsi_channel_scratch scr = { };
695 	struct gsi_channel_scratch_gpi *gpi;
696 	struct gsi *gsi = channel->gsi;
697 	u32 wrr_weight = 0;
698 	u32 val;
699 
700 	/* Arbitrarily pick TRE 0 as the first channel element to use */
701 	channel->tre_ring.index = 0;
702 
703 	/* We program all channels to use GPI protocol */
704 	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
705 	if (channel->toward_ipa)
706 		val |= CHTYPE_DIR_FMASK;
707 	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
708 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
709 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
710 
711 	val = u32_encode_bits(size, R_LENGTH_FMASK);
712 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
713 
714 	/* The context 2 and 3 registers store the low-order and
715 	 * high-order 32 bits of the address of the channel ring,
716 	 * respectively.
717 	 */
718 	val = channel->tre_ring.addr & GENMASK(31, 0);
719 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
720 
721 	val = channel->tre_ring.addr >> 32;
722 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
723 
724 	/* Command channel gets low weighted round-robin priority */
725 	if (channel->command)
726 		wrr_weight = field_max(WRR_WEIGHT_FMASK);
727 	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
728 
729 	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
730 
731 	/* Enable the doorbell engine if requested */
732 	if (doorbell)
733 		val |= USE_DB_ENG_FMASK;
734 
735 	if (!channel->use_prefetch)
736 		val |= USE_ESCAPE_BUF_ONLY_FMASK;
737 
738 	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
739 
740 	/* Now update the scratch registers for GPI protocol */
741 	gpi = &scr.gpi;
742 	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
743 					GSI_RING_ELEMENT_SIZE;
744 	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
745 
746 	val = scr.data.word1;
747 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
748 
749 	val = scr.data.word2;
750 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
751 
752 	val = scr.data.word3;
753 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
754 
755 	/* We must preserve the upper 16 bits of the last scratch register.
756 	 * The next sequence assumes those bits remain unchanged between the
757 	 * read and the write.
758 	 */
759 	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
760 	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
761 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
762 
763 	/* All done! */
764 }
765 
766 static void gsi_channel_deprogram(struct gsi_channel *channel)
767 {
768 	/* Nothing to do */
769 }
770 
771 /* Start an allocated GSI channel */
772 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
773 {
774 	struct gsi_channel *channel = &gsi->channel[channel_id];
775 	int ret;
776 
777 	mutex_lock(&gsi->mutex);
778 
779 	ret = gsi_channel_start_command(channel);
780 
781 	mutex_unlock(&gsi->mutex);
782 
783 	gsi_channel_thaw(channel);
784 
785 	return ret;
786 }
787 
788 /* Stop a started channel */
789 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
790 {
791 	struct gsi_channel *channel = &gsi->channel[channel_id];
792 	enum gsi_channel_state state;
793 	u32 retries;
794 	int ret;
795 
796 	gsi_channel_freeze(channel);
797 
798 	/* Channel could have entered STOPPED state since last call if the
799 	 * STOP command timed out.  We won't stop a channel if stopping it
800 	 * was successful previously (so we still want the freeze above).
801 	 */
802 	state = gsi_channel_state(channel);
803 	if (state == GSI_CHANNEL_STATE_STOPPED)
804 		return 0;
805 
806 	/* RX channels might require a little time to enter STOPPED state */
807 	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
808 
809 	mutex_lock(&gsi->mutex);
810 
811 	do {
812 		ret = gsi_channel_stop_command(channel);
813 		if (ret != -EAGAIN)
814 			break;
815 		msleep(1);
816 	} while (retries--);
817 
818 	mutex_unlock(&gsi->mutex);
819 
820 	/* Thaw the channel if we need to retry (or on error) */
821 	if (ret)
822 		gsi_channel_thaw(channel);
823 
824 	return ret;
825 }
826 
827 /* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
828 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
829 {
830 	struct gsi_channel *channel = &gsi->channel[channel_id];
831 
832 	mutex_lock(&gsi->mutex);
833 
834 	gsi_channel_reset_command(channel);
835 	/* Due to a hardware quirk we may need to reset RX channels twice. */
836 	if (legacy && !channel->toward_ipa)
837 		gsi_channel_reset_command(channel);
838 
839 	gsi_channel_program(channel, legacy);
840 	gsi_channel_trans_cancel_pending(channel);
841 
842 	mutex_unlock(&gsi->mutex);
843 }
844 
845 /* Stop a STARTED channel for suspend (using stop if requested) */
846 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
847 {
848 	struct gsi_channel *channel = &gsi->channel[channel_id];
849 
850 	if (stop)
851 		return gsi_channel_stop(gsi, channel_id);
852 
853 	gsi_channel_freeze(channel);
854 
855 	return 0;
856 }
857 
858 /* Resume a suspended channel (starting will be requested if STOPPED) */
859 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
860 {
861 	struct gsi_channel *channel = &gsi->channel[channel_id];
862 
863 	if (start)
864 		return gsi_channel_start(gsi, channel_id);
865 
866 	gsi_channel_thaw(channel);
867 
868 	return 0;
869 }
870 
871 /**
872  * gsi_channel_tx_queued() - Report queued TX transfers for a channel
873  * @channel:	Channel for which to report
874  *
875  * Report to the network stack the number of bytes and transactions that
876  * have been queued to hardware since last call.  This and the next function
877  * supply information used by the network stack for throttling.
878  *
879  * For each channel we track the number of transactions used and bytes of
880  * data those transactions represent.  We also track what those values are
881  * each time this function is called.  Subtracting the two tells us
882  * the number of bytes and transactions that have been added between
883  * successive calls.
884  *
885  * Calling this each time we ring the channel doorbell allows us to
886  * provide accurate information to the network stack about how much
887  * work we've given the hardware at any point in time.
888  */
889 void gsi_channel_tx_queued(struct gsi_channel *channel)
890 {
891 	u32 trans_count;
892 	u32 byte_count;
893 
894 	byte_count = channel->byte_count - channel->queued_byte_count;
895 	trans_count = channel->trans_count - channel->queued_trans_count;
896 	channel->queued_byte_count = channel->byte_count;
897 	channel->queued_trans_count = channel->trans_count;
898 
899 	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
900 				  trans_count, byte_count);
901 }
902 
903 /**
904  * gsi_channel_tx_update() - Report completed TX transfers
905  * @channel:	Channel that has completed transmitting packets
906  * @trans:	Last transation known to be complete
907  *
908  * Compute the number of transactions and bytes that have been transferred
909  * over a TX channel since the given transaction was committed.  Report this
910  * information to the network stack.
911  *
912  * At the time a transaction is committed, we record its channel's
913  * committed transaction and byte counts *in the transaction*.
914  * Completions are signaled by the hardware with an interrupt, and
915  * we can determine the latest completed transaction at that time.
916  *
917  * The difference between the byte/transaction count recorded in
918  * the transaction and the count last time we recorded a completion
919  * tells us exactly how much data has been transferred between
920  * completions.
921  *
922  * Calling this each time we learn of a newly-completed transaction
923  * allows us to provide accurate information to the network stack
924  * about how much work has been completed by the hardware at a given
925  * point in time.
926  */
927 static void
928 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
929 {
930 	u64 byte_count = trans->byte_count + trans->len;
931 	u64 trans_count = trans->trans_count + 1;
932 
933 	byte_count -= channel->compl_byte_count;
934 	channel->compl_byte_count += byte_count;
935 	trans_count -= channel->compl_trans_count;
936 	channel->compl_trans_count += trans_count;
937 
938 	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
939 				     trans_count, byte_count);
940 }
941 
942 /* Channel control interrupt handler */
943 static void gsi_isr_chan_ctrl(struct gsi *gsi)
944 {
945 	u32 channel_mask;
946 
947 	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
948 	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
949 
950 	while (channel_mask) {
951 		u32 channel_id = __ffs(channel_mask);
952 		struct gsi_channel *channel;
953 
954 		channel_mask ^= BIT(channel_id);
955 
956 		channel = &gsi->channel[channel_id];
957 
958 		complete(&channel->completion);
959 	}
960 }
961 
962 /* Event ring control interrupt handler */
963 static void gsi_isr_evt_ctrl(struct gsi *gsi)
964 {
965 	u32 event_mask;
966 
967 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
968 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
969 
970 	while (event_mask) {
971 		u32 evt_ring_id = __ffs(event_mask);
972 		struct gsi_evt_ring *evt_ring;
973 
974 		event_mask ^= BIT(evt_ring_id);
975 
976 		evt_ring = &gsi->evt_ring[evt_ring_id];
977 		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
978 
979 		complete(&evt_ring->completion);
980 	}
981 }
982 
983 /* Global channel error interrupt handler */
984 static void
985 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
986 {
987 	if (code == GSI_OUT_OF_RESOURCES_ERR) {
988 		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
989 		complete(&gsi->channel[channel_id].completion);
990 		return;
991 	}
992 
993 	/* Report, but otherwise ignore all other error codes */
994 	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
995 		channel_id, err_ee, code);
996 }
997 
998 /* Global event error interrupt handler */
999 static void
1000 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1001 {
1002 	if (code == GSI_OUT_OF_RESOURCES_ERR) {
1003 		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1004 		u32 channel_id = gsi_channel_id(evt_ring->channel);
1005 
1006 		complete(&evt_ring->completion);
1007 		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1008 			channel_id);
1009 		return;
1010 	}
1011 
1012 	/* Report, but otherwise ignore all other error codes */
1013 	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1014 		evt_ring_id, err_ee, code);
1015 }
1016 
1017 /* Global error interrupt handler */
1018 static void gsi_isr_glob_err(struct gsi *gsi)
1019 {
1020 	enum gsi_err_type type;
1021 	enum gsi_err_code code;
1022 	u32 which;
1023 	u32 val;
1024 	u32 ee;
1025 
1026 	/* Get the logged error, then reinitialize the log */
1027 	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
1028 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1029 	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
1030 
1031 	ee = u32_get_bits(val, ERR_EE_FMASK);
1032 	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
1033 	type = u32_get_bits(val, ERR_TYPE_FMASK);
1034 	code = u32_get_bits(val, ERR_CODE_FMASK);
1035 
1036 	if (type == GSI_ERR_TYPE_CHAN)
1037 		gsi_isr_glob_chan_err(gsi, ee, which, code);
1038 	else if (type == GSI_ERR_TYPE_EVT)
1039 		gsi_isr_glob_evt_err(gsi, ee, which, code);
1040 	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
1041 		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1042 }
1043 
1044 /* Generic EE interrupt handler */
1045 static void gsi_isr_gp_int1(struct gsi *gsi)
1046 {
1047 	u32 result;
1048 	u32 val;
1049 
1050 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1051 	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
1052 	if (result != GENERIC_EE_SUCCESS_FVAL)
1053 		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
1054 
1055 	complete(&gsi->completion);
1056 }
1057 
1058 /* Inter-EE interrupt handler */
1059 static void gsi_isr_glob_ee(struct gsi *gsi)
1060 {
1061 	u32 val;
1062 
1063 	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
1064 
1065 	if (val & ERROR_INT_FMASK)
1066 		gsi_isr_glob_err(gsi);
1067 
1068 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
1069 
1070 	val &= ~ERROR_INT_FMASK;
1071 
1072 	if (val & EN_GP_INT1_FMASK) {
1073 		val ^= EN_GP_INT1_FMASK;
1074 		gsi_isr_gp_int1(gsi);
1075 	}
1076 
1077 	if (val)
1078 		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1079 }
1080 
1081 /* I/O completion interrupt event */
1082 static void gsi_isr_ieob(struct gsi *gsi)
1083 {
1084 	u32 event_mask;
1085 
1086 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
1087 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
1088 
1089 	while (event_mask) {
1090 		u32 evt_ring_id = __ffs(event_mask);
1091 
1092 		event_mask ^= BIT(evt_ring_id);
1093 
1094 		gsi_irq_ieob_disable(gsi, evt_ring_id);
1095 		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
1096 	}
1097 }
1098 
1099 /* General event interrupts represent serious problems, so report them */
1100 static void gsi_isr_general(struct gsi *gsi)
1101 {
1102 	struct device *dev = gsi->dev;
1103 	u32 val;
1104 
1105 	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
1106 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
1107 
1108 	if (val)
1109 		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
1110 }
1111 
1112 /**
1113  * gsi_isr() - Top level GSI interrupt service routine
1114  * @irq:	Interrupt number (ignored)
1115  * @dev_id:	GSI pointer supplied to request_irq()
1116  *
1117  * This is the main handler function registered for the GSI IRQ. Each type
1118  * of interrupt has a separate handler function that is called from here.
1119  */
1120 static irqreturn_t gsi_isr(int irq, void *dev_id)
1121 {
1122 	struct gsi *gsi = dev_id;
1123 	u32 intr_mask;
1124 	u32 cnt = 0;
1125 
1126 	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
1127 		/* intr_mask contains bitmask of pending GSI interrupts */
1128 		do {
1129 			u32 gsi_intr = BIT(__ffs(intr_mask));
1130 
1131 			intr_mask ^= gsi_intr;
1132 
1133 			switch (gsi_intr) {
1134 			case CH_CTRL_FMASK:
1135 				gsi_isr_chan_ctrl(gsi);
1136 				break;
1137 			case EV_CTRL_FMASK:
1138 				gsi_isr_evt_ctrl(gsi);
1139 				break;
1140 			case GLOB_EE_FMASK:
1141 				gsi_isr_glob_ee(gsi);
1142 				break;
1143 			case IEOB_FMASK:
1144 				gsi_isr_ieob(gsi);
1145 				break;
1146 			case GENERAL_FMASK:
1147 				gsi_isr_general(gsi);
1148 				break;
1149 			default:
1150 				dev_err(gsi->dev,
1151 					"%s: unrecognized type 0x%08x\n",
1152 					__func__, gsi_intr);
1153 				break;
1154 			}
1155 		} while (intr_mask);
1156 
1157 		if (++cnt > GSI_ISR_MAX_ITER) {
1158 			dev_err(gsi->dev, "interrupt flood\n");
1159 			break;
1160 		}
1161 	}
1162 
1163 	return IRQ_HANDLED;
1164 }
1165 
1166 /* Return the transaction associated with a transfer completion event */
1167 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
1168 					 struct gsi_event *event)
1169 {
1170 	u32 tre_offset;
1171 	u32 tre_index;
1172 
1173 	/* Event xfer_ptr records the TRE it's associated with */
1174 	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
1175 	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
1176 
1177 	return gsi_channel_trans_mapped(channel, tre_index);
1178 }
1179 
1180 /**
1181  * gsi_evt_ring_rx_update() - Record lengths of received data
1182  * @evt_ring:	Event ring associated with channel that received packets
1183  * @index:	Event index in ring reported by hardware
1184  *
1185  * Events for RX channels contain the actual number of bytes received into
1186  * the buffer.  Every event has a transaction associated with it, and here
1187  * we update transactions to record their actual received lengths.
1188  *
1189  * This function is called whenever we learn that the GSI hardware has filled
1190  * new events since the last time we checked.  The ring's index field tells
1191  * the first entry in need of processing.  The index provided is the
1192  * first *unfilled* event in the ring (following the last filled one).
1193  *
1194  * Events are sequential within the event ring, and transactions are
1195  * sequential within the transaction pool.
1196  *
1197  * Note that @index always refers to an element *within* the event ring.
1198  */
1199 static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
1200 {
1201 	struct gsi_channel *channel = evt_ring->channel;
1202 	struct gsi_ring *ring = &evt_ring->ring;
1203 	struct gsi_trans_info *trans_info;
1204 	struct gsi_event *event_done;
1205 	struct gsi_event *event;
1206 	struct gsi_trans *trans;
1207 	u32 byte_count = 0;
1208 	u32 old_index;
1209 	u32 event_avail;
1210 
1211 	trans_info = &channel->trans_info;
1212 
1213 	/* We'll start with the oldest un-processed event.  RX channels
1214 	 * replenish receive buffers in single-TRE transactions, so we
1215 	 * can just map that event to its transaction.  Transactions
1216 	 * associated with completion events are consecutive.
1217 	 */
1218 	old_index = ring->index;
1219 	event = gsi_ring_virt(ring, old_index);
1220 	trans = gsi_event_trans(channel, event);
1221 
1222 	/* Compute the number of events to process before we wrap,
1223 	 * and determine when we'll be done processing events.
1224 	 */
1225 	event_avail = ring->count - old_index % ring->count;
1226 	event_done = gsi_ring_virt(ring, index);
1227 	do {
1228 		trans->len = __le16_to_cpu(event->len);
1229 		byte_count += trans->len;
1230 
1231 		/* Move on to the next event and transaction */
1232 		if (--event_avail)
1233 			event++;
1234 		else
1235 			event = gsi_ring_virt(ring, 0);
1236 		trans = gsi_trans_pool_next(&trans_info->pool, trans);
1237 	} while (event != event_done);
1238 
1239 	/* We record RX bytes when they are received */
1240 	channel->byte_count += byte_count;
1241 	channel->trans_count++;
1242 }
1243 
1244 /* Initialize a ring, including allocating DMA memory for its entries */
1245 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1246 {
1247 	size_t size = count * GSI_RING_ELEMENT_SIZE;
1248 	struct device *dev = gsi->dev;
1249 	dma_addr_t addr;
1250 
1251 	/* Hardware requires a 2^n ring size, with alignment equal to size */
1252 	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1253 	if (ring->virt && addr % size) {
1254 		dma_free_coherent(dev, size, ring->virt, ring->addr);
1255 		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
1256 				size);
1257 		return -EINVAL;	/* Not a good error value, but distinct */
1258 	} else if (!ring->virt) {
1259 		return -ENOMEM;
1260 	}
1261 	ring->addr = addr;
1262 	ring->count = count;
1263 
1264 	return 0;
1265 }
1266 
1267 /* Free a previously-allocated ring */
1268 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1269 {
1270 	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
1271 
1272 	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1273 }
1274 
1275 /* Allocate an available event ring id */
1276 static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1277 {
1278 	u32 evt_ring_id;
1279 
1280 	if (gsi->event_bitmap == ~0U) {
1281 		dev_err(gsi->dev, "event rings exhausted\n");
1282 		return -ENOSPC;
1283 	}
1284 
1285 	evt_ring_id = ffz(gsi->event_bitmap);
1286 	gsi->event_bitmap |= BIT(evt_ring_id);
1287 
1288 	return (int)evt_ring_id;
1289 }
1290 
1291 /* Free a previously-allocated event ring id */
1292 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1293 {
1294 	gsi->event_bitmap &= ~BIT(evt_ring_id);
1295 }
1296 
1297 /* Ring a channel doorbell, reporting the first un-filled entry */
1298 void gsi_channel_doorbell(struct gsi_channel *channel)
1299 {
1300 	struct gsi_ring *tre_ring = &channel->tre_ring;
1301 	u32 channel_id = gsi_channel_id(channel);
1302 	struct gsi *gsi = channel->gsi;
1303 	u32 val;
1304 
1305 	/* Note: index *must* be used modulo the ring count here */
1306 	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1307 	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1308 }
1309 
1310 /* Consult hardware, move any newly completed transactions to completed list */
1311 static void gsi_channel_update(struct gsi_channel *channel)
1312 {
1313 	u32 evt_ring_id = channel->evt_ring_id;
1314 	struct gsi *gsi = channel->gsi;
1315 	struct gsi_evt_ring *evt_ring;
1316 	struct gsi_trans *trans;
1317 	struct gsi_ring *ring;
1318 	u32 offset;
1319 	u32 index;
1320 
1321 	evt_ring = &gsi->evt_ring[evt_ring_id];
1322 	ring = &evt_ring->ring;
1323 
1324 	/* See if there's anything new to process; if not, we're done.  Note
1325 	 * that index always refers to an entry *within* the event ring.
1326 	 */
1327 	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1328 	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1329 	if (index == ring->index % ring->count)
1330 		return;
1331 
1332 	/* Get the transaction for the latest completed event.  Take a
1333 	 * reference to keep it from completing before we give the events
1334 	 * for this and previous transactions back to the hardware.
1335 	 */
1336 	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
1337 	refcount_inc(&trans->refcount);
1338 
1339 	/* For RX channels, update each completed transaction with the number
1340 	 * of bytes that were actually received.  For TX channels, report
1341 	 * the number of transactions and bytes this completion represents
1342 	 * up the network stack.
1343 	 */
1344 	if (channel->toward_ipa)
1345 		gsi_channel_tx_update(channel, trans);
1346 	else
1347 		gsi_evt_ring_rx_update(evt_ring, index);
1348 
1349 	gsi_trans_move_complete(trans);
1350 
1351 	/* Tell the hardware we've handled these events */
1352 	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
1353 
1354 	gsi_trans_free(trans);
1355 }
1356 
1357 /**
1358  * gsi_channel_poll_one() - Return a single completed transaction on a channel
1359  * @channel:	Channel to be polled
1360  *
1361  * @Return:	Transaction pointer, or null if none are available
1362  *
1363  * This function returns the first entry on a channel's completed transaction
1364  * list.  If that list is empty, the hardware is consulted to determine
1365  * whether any new transactions have completed.  If so, they're moved to the
1366  * completed list and the new first entry is returned.  If there are no more
1367  * completed transactions, a null pointer is returned.
1368  */
1369 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1370 {
1371 	struct gsi_trans *trans;
1372 
1373 	/* Get the first transaction from the completed list */
1374 	trans = gsi_channel_trans_complete(channel);
1375 	if (!trans) {
1376 		/* List is empty; see if there's more to do */
1377 		gsi_channel_update(channel);
1378 		trans = gsi_channel_trans_complete(channel);
1379 	}
1380 
1381 	if (trans)
1382 		gsi_trans_move_polled(trans);
1383 
1384 	return trans;
1385 }
1386 
1387 /**
1388  * gsi_channel_poll() - NAPI poll function for a channel
1389  * @napi:	NAPI structure for the channel
1390  * @budget:	Budget supplied by NAPI core
1391 
1392  * @Return:	 Number of items polled (<= budget)
1393  *
1394  * Single transactions completed by hardware are polled until either
1395  * the budget is exhausted, or there are no more.  Each transaction
1396  * polled is passed to gsi_trans_complete(), to perform remaining
1397  * completion processing and retire/free the transaction.
1398  */
1399 static int gsi_channel_poll(struct napi_struct *napi, int budget)
1400 {
1401 	struct gsi_channel *channel;
1402 	int count = 0;
1403 
1404 	channel = container_of(napi, struct gsi_channel, napi);
1405 	while (count < budget) {
1406 		struct gsi_trans *trans;
1407 
1408 		count++;
1409 		trans = gsi_channel_poll_one(channel);
1410 		if (!trans)
1411 			break;
1412 		gsi_trans_complete(trans);
1413 	}
1414 
1415 	if (count < budget) {
1416 		napi_complete(&channel->napi);
1417 		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
1418 	}
1419 
1420 	return count;
1421 }
1422 
1423 /* The event bitmap represents which event ids are available for allocation.
1424  * Set bits are not available, clear bits can be used.  This function
1425  * initializes the map so all events supported by the hardware are available,
1426  * then precludes any reserved events from being allocated.
1427  */
1428 static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1429 {
1430 	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1431 
1432 	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1433 
1434 	return event_bitmap;
1435 }
1436 
1437 /* Setup function for event rings */
1438 static void gsi_evt_ring_setup(struct gsi *gsi)
1439 {
1440 	/* Nothing to do */
1441 }
1442 
1443 /* Inverse of gsi_evt_ring_setup() */
1444 static void gsi_evt_ring_teardown(struct gsi *gsi)
1445 {
1446 	/* Nothing to do */
1447 }
1448 
1449 /* Setup function for a single channel */
1450 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
1451 				 bool legacy)
1452 {
1453 	struct gsi_channel *channel = &gsi->channel[channel_id];
1454 	u32 evt_ring_id = channel->evt_ring_id;
1455 	int ret;
1456 
1457 	if (!channel->gsi)
1458 		return 0;	/* Ignore uninitialized channels */
1459 
1460 	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1461 	if (ret)
1462 		return ret;
1463 
1464 	gsi_evt_ring_program(gsi, evt_ring_id);
1465 
1466 	ret = gsi_channel_alloc_command(gsi, channel_id);
1467 	if (ret)
1468 		goto err_evt_ring_de_alloc;
1469 
1470 	gsi_channel_program(channel, legacy);
1471 
1472 	if (channel->toward_ipa)
1473 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1474 				  gsi_channel_poll, NAPI_POLL_WEIGHT);
1475 	else
1476 		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1477 			       gsi_channel_poll, NAPI_POLL_WEIGHT);
1478 
1479 	return 0;
1480 
1481 err_evt_ring_de_alloc:
1482 	/* We've done nothing with the event ring yet so don't reset */
1483 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1484 
1485 	return ret;
1486 }
1487 
1488 /* Inverse of gsi_channel_setup_one() */
1489 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1490 {
1491 	struct gsi_channel *channel = &gsi->channel[channel_id];
1492 	u32 evt_ring_id = channel->evt_ring_id;
1493 
1494 	if (!channel->gsi)
1495 		return;		/* Ignore uninitialized channels */
1496 
1497 	netif_napi_del(&channel->napi);
1498 
1499 	gsi_channel_deprogram(channel);
1500 	gsi_channel_de_alloc_command(gsi, channel_id);
1501 	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1502 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1503 }
1504 
1505 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1506 			       enum gsi_generic_cmd_opcode opcode)
1507 {
1508 	struct completion *completion = &gsi->completion;
1509 	u32 val;
1510 
1511 	/* First zero the result code field */
1512 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1513 	val &= ~GENERIC_EE_RESULT_FMASK;
1514 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1515 
1516 	/* Now issue the command */
1517 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1518 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1519 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1520 
1521 	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
1522 		return 0;	/* Success! */
1523 
1524 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1525 		opcode, channel_id);
1526 
1527 	return -ETIMEDOUT;
1528 }
1529 
1530 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1531 {
1532 	return gsi_generic_command(gsi, channel_id,
1533 				   GSI_GENERIC_ALLOCATE_CHANNEL);
1534 }
1535 
1536 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1537 {
1538 	int ret;
1539 
1540 	ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
1541 	if (ret)
1542 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1543 			ret, channel_id);
1544 }
1545 
1546 /* Setup function for channels */
1547 static int gsi_channel_setup(struct gsi *gsi, bool legacy)
1548 {
1549 	u32 channel_id = 0;
1550 	u32 mask;
1551 	int ret;
1552 
1553 	gsi_evt_ring_setup(gsi);
1554 	gsi_irq_enable(gsi);
1555 
1556 	mutex_lock(&gsi->mutex);
1557 
1558 	do {
1559 		ret = gsi_channel_setup_one(gsi, channel_id, legacy);
1560 		if (ret)
1561 			goto err_unwind;
1562 	} while (++channel_id < gsi->channel_count);
1563 
1564 	/* Make sure no channels were defined that hardware does not support */
1565 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1566 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1567 
1568 		if (!channel->gsi)
1569 			continue;	/* Ignore uninitialized channels */
1570 
1571 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1572 			channel_id - 1);
1573 		channel_id = gsi->channel_count;
1574 		goto err_unwind;
1575 	}
1576 
1577 	/* Allocate modem channels if necessary */
1578 	mask = gsi->modem_channel_bitmap;
1579 	while (mask) {
1580 		u32 modem_channel_id = __ffs(mask);
1581 
1582 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1583 		if (ret)
1584 			goto err_unwind_modem;
1585 
1586 		/* Clear bit from mask only after success (for unwind) */
1587 		mask ^= BIT(modem_channel_id);
1588 	}
1589 
1590 	mutex_unlock(&gsi->mutex);
1591 
1592 	return 0;
1593 
1594 err_unwind_modem:
1595 	/* Compute which modem channels need to be deallocated */
1596 	mask ^= gsi->modem_channel_bitmap;
1597 	while (mask) {
1598 		u32 channel_id = __fls(mask);
1599 
1600 		mask ^= BIT(channel_id);
1601 
1602 		gsi_modem_channel_halt(gsi, channel_id);
1603 	}
1604 
1605 err_unwind:
1606 	while (channel_id--)
1607 		gsi_channel_teardown_one(gsi, channel_id);
1608 
1609 	mutex_unlock(&gsi->mutex);
1610 
1611 	gsi_irq_disable(gsi);
1612 	gsi_evt_ring_teardown(gsi);
1613 
1614 	return ret;
1615 }
1616 
1617 /* Inverse of gsi_channel_setup() */
1618 static void gsi_channel_teardown(struct gsi *gsi)
1619 {
1620 	u32 mask = gsi->modem_channel_bitmap;
1621 	u32 channel_id;
1622 
1623 	mutex_lock(&gsi->mutex);
1624 
1625 	while (mask) {
1626 		u32 channel_id = __fls(mask);
1627 
1628 		mask ^= BIT(channel_id);
1629 
1630 		gsi_modem_channel_halt(gsi, channel_id);
1631 	}
1632 
1633 	channel_id = gsi->channel_count - 1;
1634 	do
1635 		gsi_channel_teardown_one(gsi, channel_id);
1636 	while (channel_id--);
1637 
1638 	mutex_unlock(&gsi->mutex);
1639 
1640 	gsi_irq_disable(gsi);
1641 	gsi_evt_ring_teardown(gsi);
1642 }
1643 
1644 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1645 int gsi_setup(struct gsi *gsi, bool legacy)
1646 {
1647 	u32 val;
1648 
1649 	/* Here is where we first touch the GSI hardware */
1650 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1651 	if (!(val & ENABLED_FMASK)) {
1652 		dev_err(gsi->dev, "GSI has not been enabled\n");
1653 		return -EIO;
1654 	}
1655 
1656 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1657 
1658 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1659 	if (!gsi->channel_count) {
1660 		dev_err(gsi->dev, "GSI reports zero channels supported\n");
1661 		return -EINVAL;
1662 	}
1663 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1664 		dev_warn(gsi->dev,
1665 			"limiting to %u channels (hardware supports %u)\n",
1666 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1667 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1668 	}
1669 
1670 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1671 	if (!gsi->evt_ring_count) {
1672 		dev_err(gsi->dev, "GSI reports zero event rings supported\n");
1673 		return -EINVAL;
1674 	}
1675 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1676 		dev_warn(gsi->dev,
1677 			"limiting to %u event rings (hardware supports %u)\n",
1678 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1679 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1680 	}
1681 
1682 	/* Initialize the error log */
1683 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1684 
1685 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1686 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1687 
1688 	return gsi_channel_setup(gsi, legacy);
1689 }
1690 
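/* A minimal usage sketch (hypothetical caller; the real call sites are
 * in the IPA setup code, which runs after GSI firmware is loaded):
 *
 *	ret = gsi_setup(gsi, legacy);
 *	if (ret)
 *		return ret;
 *	...
 *	gsi_teardown(gsi);
 */
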
1691 /* Inverse of gsi_setup() */
1692 void gsi_teardown(struct gsi *gsi)
1693 {
1694 	gsi_channel_teardown(gsi);
1695 }
1696 
1697 /* Initialize a channel's event ring */
1698 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1699 {
1700 	struct gsi *gsi = channel->gsi;
1701 	struct gsi_evt_ring *evt_ring;
1702 	int ret;
1703 
1704 	ret = gsi_evt_ring_id_alloc(gsi);
1705 	if (ret < 0)
1706 		return ret;
1707 	channel->evt_ring_id = ret;
1708 
1709 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1710 	evt_ring->channel = channel;
1711 
1712 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1713 	if (!ret)
1714 		return 0;	/* Success! */
1715 
1716 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1717 		ret, gsi_channel_id(channel));
1718 
1719 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1720 
1721 	return ret;
1722 }
1723 
1724 /* Inverse of gsi_channel_evt_ring_init() */
1725 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1726 {
1727 	u32 evt_ring_id = channel->evt_ring_id;
1728 	struct gsi *gsi = channel->gsi;
1729 	struct gsi_evt_ring *evt_ring;
1730 
1731 	evt_ring = &gsi->evt_ring[evt_ring_id];
1732 	gsi_ring_free(gsi, &evt_ring->ring);
1733 	gsi_evt_ring_id_free(gsi, evt_ring_id);
1734 }
1735 
1736 /* Init function for event rings */
1737 static void gsi_evt_ring_init(struct gsi *gsi)
1738 {
1739 	u32 evt_ring_id = 0;
1740 
1741 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1742 	gsi->event_enable_bitmap = 0;
1743 	do
1744 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
1745 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1746 }
1747 
1748 /* Inverse of gsi_evt_ring_init() */
1749 static void gsi_evt_ring_exit(struct gsi *gsi)
1750 {
1751 	/* Nothing to do */
1752 }
1753 
1754 static bool gsi_channel_data_valid(struct gsi *gsi,
1755 				   const struct ipa_gsi_endpoint_data *data)
1756 {
1757 #ifdef IPA_VALIDATION
1758 	u32 channel_id = data->channel_id;
1759 	struct device *dev = gsi->dev;
1760 
1761 	/* Make sure channel ids are in the range the driver supports */
1762 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1763 		dev_err(dev, "bad channel id %u (must be less than %u)\n",
1764 			channel_id, GSI_CHANNEL_COUNT_MAX);
1765 		return false;
1766 	}
1767 
1768 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1769 		dev_err(dev, "bad EE id %u (must be AP or modem)\n", data->ee_id);
1770 		return false;
1771 	}
1772 
1773 	if (!data->channel.tlv_count ||
1774 	    data->channel.tlv_count > GSI_TLV_MAX) {
1775 		dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
1776 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1777 		return false;
1778 	}
1779 
1780 	/* We have to allow at least one maximally-sized transaction to
1781 	 * be outstanding (which would use tlv_count TREs).  Given how
1782 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
1783 	 * twice the TLV FIFO size to satisfy this requirement.
1784 	 */
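	/* Illustrative numbers: tlv_count = 8 requires tre_count >= 15;
	 * the smallest power-of-2 ring size satisfying that is 16.
	 */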
1785 	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1786 		dev_err(dev, "channel %u TRE count %u too low for TLV count %u\n",
1787 			channel_id, data->channel.tre_count,
1788 			data->channel.tlv_count);
1789 		return false;
1790 	}
1791 
1792 	if (!is_power_of_2(data->channel.tre_count)) {
1793 		dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
1794 			channel_id, data->channel.tre_count);
1795 		return false;
1796 	}
1797 
1798 	if (!is_power_of_2(data->channel.event_count)) {
1799 		dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
1800 			channel_id, data->channel.event_count);
1801 		return false;
1802 	}
1803 #endif /* IPA_VALIDATION */
1804 
1805 	return true;
1806 }
1807 
1808 /* Init function for a single channel */
1809 static int gsi_channel_init_one(struct gsi *gsi,
1810 				const struct ipa_gsi_endpoint_data *data,
1811 				bool command, bool prefetch)
1812 {
1813 	struct gsi_channel *channel;
1814 	u32 tre_count;
1815 	int ret;
1816 
1817 	if (!gsi_channel_data_valid(gsi, data))
1818 		return -EINVAL;
1819 
1820 	/* Worst case we need an event for every outstanding TRE */
1821 	if (data->channel.tre_count > data->channel.event_count) {
1822 		tre_count = data->channel.event_count;
1823 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1824 			 data->channel_id, tre_count);
1825 	} else {
1826 		tre_count = data->channel.tre_count;
1827 	}
1828 
1829 	channel = &gsi->channel[data->channel_id];
1830 	memset(channel, 0, sizeof(*channel));
1831 
1832 	channel->gsi = gsi;
1833 	channel->toward_ipa = data->toward_ipa;
1834 	channel->command = command;
1835 	channel->use_prefetch = command && prefetch;
1836 	channel->tlv_count = data->channel.tlv_count;
1837 	channel->tre_count = tre_count;
1838 	channel->event_count = data->channel.event_count;
1839 	init_completion(&channel->completion);
1840 
1841 	ret = gsi_channel_evt_ring_init(channel);
1842 	if (ret)
1843 		goto err_clear_gsi;
1844 
1845 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1846 	if (ret) {
1847 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1848 			ret, data->channel_id);
1849 		goto err_channel_evt_ring_exit;
1850 	}
1851 
1852 	ret = gsi_channel_trans_init(gsi, data->channel_id);
1853 	if (ret)
1854 		goto err_ring_free;
1855 
1856 	if (command) {
1857 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
1858 
1859 		ret = ipa_cmd_pool_init(channel, tre_max);
1860 	}
1861 	if (!ret)
1862 		return 0;	/* Success! */
1863 
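	/* Error: unwind in the reverse order of initialization */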
1864 	gsi_channel_trans_exit(channel);
1865 err_ring_free:
1866 	gsi_ring_free(gsi, &channel->tre_ring);
1867 err_channel_evt_ring_exit:
1868 	gsi_channel_evt_ring_exit(channel);
1869 err_clear_gsi:
1870 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
1871 
1872 	return ret;
1873 }
1874 
1875 /* Inverse of gsi_channel_init_one() */
1876 static void gsi_channel_exit_one(struct gsi_channel *channel)
1877 {
1878 	if (!channel->gsi)
1879 		return;		/* Ignore uninitialized channels */
1880 
1881 	if (channel->command)
1882 		ipa_cmd_pool_exit(channel);
1883 	gsi_channel_trans_exit(channel);
1884 	gsi_ring_free(channel->gsi, &channel->tre_ring);
1885 	gsi_channel_evt_ring_exit(channel);
1886 }
1887 
1888 /* Init function for channels */
1889 static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
1890 			    const struct ipa_gsi_endpoint_data *data,
1891 			    bool modem_alloc)
1892 {
1893 	int ret = 0;
1894 	u32 i;
1895 
1896 	gsi_evt_ring_init(gsi);
1897 
1898 	/* The endpoint data array is indexed by endpoint name */
1899 	for (i = 0; i < count; i++) {
1900 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
1901 
1902 		if (ipa_gsi_endpoint_data_empty(&data[i]))
1903 			continue;	/* Skip over empty slots */
1904 
1905 		/* Mark modem channels to be allocated (hardware workaround) */
1906 		if (data[i].ee_id == GSI_EE_MODEM) {
1907 			if (modem_alloc)
1908 				gsi->modem_channel_bitmap |=
1909 						BIT(data[i].channel_id);
1910 			continue;
1911 		}
1912 
1913 		ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
1914 		if (ret)
1915 			goto err_unwind;
1916 	}
1917 
1918 	return ret;
1919 
1920 err_unwind:
1921 	while (i--) {
1922 		if (ipa_gsi_endpoint_data_empty(&data[i]))
1923 			continue;
1924 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
1925 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
1926 			continue;
1927 		}
1928 		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
1929 	}
1930 	gsi_evt_ring_exit(gsi);
1931 
1932 	return ret;
1933 }
1934 
1935 /* Inverse of gsi_channel_init() */
1936 static void gsi_channel_exit(struct gsi *gsi)
1937 {
1938 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
1939 
1940 	do
1941 		gsi_channel_exit_one(&gsi->channel[channel_id]);
1942 	while (channel_id--);
1943 	gsi->modem_channel_bitmap = 0;
1944 
1945 	gsi_evt_ring_exit(gsi);
1946 }
1947 
1948 /* Init function for GSI.  GSI hardware does not need to be "ready" */
1949 int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
1950 	     u32 count, const struct ipa_gsi_endpoint_data *data,
1951 	     bool modem_alloc)
1952 {
1953 	struct resource *res;
1954 	resource_size_t size;
1955 	unsigned int irq;
1956 	int ret;
1957 
1958 	gsi_validate_build();
1959 
1960 	gsi->dev = &pdev->dev;
1961 
1962 	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
1963 	 * network device structure, but the GSI layer does not have one,
1964 	 * so we must create a dummy network device for this purpose.
1965 	 */
1966 	init_dummy_netdev(&gsi->dummy_dev);
1967 
1968 	/* Get the GSI IRQ and request for it to wake the system */
1969 	ret = platform_get_irq_byname(pdev, "gsi");
1970 	if (ret <= 0) {
1971 		dev_err(gsi->dev,
1972 			"DT error %d getting \"gsi\" IRQ property\n", ret);
1973 		return ret ? : -EINVAL;
1974 	}
1975 	irq = ret;
1976 
1977 	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1978 	if (ret) {
1979 		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1980 		return ret;
1981 	}
1982 	gsi->irq = irq;
1983 
1984 	ret = enable_irq_wake(gsi->irq);
1985 	if (ret)
1986 		dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
1987 	gsi->irq_wake_enabled = !ret;
1988 
1989 	/* Get GSI memory range and map it */
1990 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
1991 	if (!res) {
1992 		dev_err(gsi->dev,
1993 			"DT error getting \"gsi\" memory property\n");
1994 		ret = -ENODEV;
1995 		goto err_disable_irq_wake;
1996 	}
1997 
1998 	size = resource_size(res);
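	/* Reject a region whose start or end does not fit in 32 bits */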
1999 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2000 		dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
2001 		ret = -EINVAL;
2002 		goto err_disable_irq_wake;
2003 	}
2004 
2005 	gsi->virt = ioremap(res->start, size);
2006 	if (!gsi->virt) {
2007 		dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
2008 		ret = -ENOMEM;
2009 		goto err_disable_irq_wake;
2010 	}
2011 
2012 	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
2013 	if (ret)
2014 		goto err_iounmap;
2015 
2016 	mutex_init(&gsi->mutex);
2017 	init_completion(&gsi->completion);
2018 
2019 	return 0;
2020 
2021 err_iounmap:
2022 	iounmap(gsi->virt);
2023 err_disable_irq_wake:
2024 	if (gsi->irq_wake_enabled)
2025 		(void)disable_irq_wake(gsi->irq);
2026 	free_irq(gsi->irq, gsi);
2027 
2028 	return ret;
2029 }
2030 
2031 /* Inverse of gsi_init() */
2032 void gsi_exit(struct gsi *gsi)
2033 {
2034 	mutex_destroy(&gsi->mutex);
2035 	gsi_channel_exit(gsi);
2036 	if (gsi->irq_wake_enabled)
2037 		(void)disable_irq_wake(gsi->irq);
2038 	free_irq(gsi->irq, gsi);
2039 	iounmap(gsi->virt);
2040 }
2041 
2042 /* The maximum number of outstanding TREs on a channel.  This limits
2043  * a channel's maximum number of transactions outstanding (worst case
2044  * is one TRE per transaction).
2045  *
2046  * The absolute limit is the number of TREs in the channel's TRE ring,
2047  * and in theory we should be able to use all of them.  But in practice,
2048  * doing that led to the hardware reporting exhaustion of event ring
2049  * slots for writing completion information.  So the hardware limit
2050  * would be (tre_count - 1).
2051  *
2052  * We reduce it a bit further though.  Transaction resource pools are
2053  * sized to be a little larger than this maximum, to allow resource
2054  * allocations to always be contiguous.  The number of entries in a
2055  * TRE ring buffer is a power of 2, and the extra resources in a pool
2056  * tend to nearly double the memory allocated for it.  Reducing the
2057  * maximum number of outstanding TREs allows the number of entries in
2058  * a pool to avoid crossing that power-of-2 boundary, and this can
2059  * substantially reduce pool memory requirements.  The number we
2060  * reduce it by matches the number added in gsi_trans_pool_init().
2061  */
2062 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2063 {
2064 	struct gsi_channel *channel = &gsi->channel[channel_id];
2065 
2066 	/* Hardware limit is channel->tre_count - 1 */
2067 	return channel->tre_count - (channel->tlv_count - 1);
2068 }
2069 
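/* Worked example (illustrative numbers): with tre_count = 256 and
 * tlv_count = 16, gsi_channel_tre_max() returns 256 - 15 = 241.
 * The transaction pool adds the same 15 back (matching
 * gsi_trans_pool_init(), as noted above), for 256 pool entries in
 * total, staying on the power-of-2 boundary rather than nearly
 * doubling toward 512.
 */
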
2070 /* Returns the maximum number of TREs in a single transaction for a channel */
2071 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2072 {
2073 	struct gsi_channel *channel = &gsi->channel[channel_id];
2074 
2075 	return channel->tlv_count;
2076 }
2077
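
/* Usage sketch (hypothetical caller): the two limits above bound how
 * transactions are built and queued on a channel:
 *
 *	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
 *	u32 per_trans = gsi_channel_trans_tre_max(gsi, channel_id);
 *
 * No single transaction may use more than per_trans TREs, and no more
 * than tre_max TREs may be outstanding on the channel at once.
 */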