// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *	--------	     ---------
 *	|      |	     |	     |
 *	|  AP  +<---.	.----+ Modem |
 *	|      +--. |	| .->+	     |
 *	|      |  | |	| |  |	     |
 *	--------  | |	| |  ---------
 *		  v |	v |
 *		--+-+---+-+--
 *		|    GSI    |
 *		|-----------|
 *		|	    |
 *		|    IPA    |
 *		|	    |
 *		-------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/**
 * struct gsi_event - hardware entry in an event ring
 * @xfer_ptr:	DMA address of the TRE whose completion the event reports
 * @len:	Number of bytes transferred
 * @reserved1:	Reserved
 * @code:	Event completion code
 * @reserved2:	Reserved
 * @type:	Event type
 * @chid:	The id of the channel the event is associated with
 */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/**
 * struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel, expressed in bytes.  This determines the amount of
 *	prefetch performed by the hardware.  We configure this to equal
 *	the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell.  We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/**
 * union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};
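
/* The data words above overlay the GPI structure, so the scratch area
 * can be written to hardware one 32-bit register at a time (see
 * gsi_channel_program()).
 */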

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the sizes of channel and event ring elements are
	 * the same (and fixed).  Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size.  We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	u32 adjust;

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Reverse the offset adjustment for inter-EE register offsets */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports.  Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset).  A breakpoint interrupt
	 * also exists, but we don't support that.  We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
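	/* Only the low-order 32 bits of a ring address are used here;
	 * the high-order bits are fixed for a ring, and are programmed
	 * separately when the ring is configured (see
	 * gsi_evt_ring_program() and gsi_channel_program()).
	 */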
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

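	/* wait_for_completion_timeout() returns 0 on timeout, or the
	 * number of jiffies remaining (at least 1) if it completed
	 */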
	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one event ring command at a time, and event
	 * control interrupts should only occur when such a command
	 * is issued here.  Only permit *this* event ring to trigger
	 * an interrupt, and only enable the event control IRQ type
	 * when we expect it to occur.
	 *
	 * There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	val = BIT(evt_ring_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, evt_ring->state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool success;
	u32 val;

	/* We only perform one channel command at a time, and channel
	 * control interrupts should only occur when such a command is
	 * issued here.  So we only permit *this* channel to trigger
	 * an interrupt and only enable the channel control IRQ type
	 * when we expect it to occur.
	 *
	 * There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	val = BIT(channel_id);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	/* Disable the interrupt again */
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	if (success)
		return 0;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out.  If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */
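	/* (msleep(1) may sleep significantly longer than one millisecond,
	 * but a longer delay than required is harmless here)
	 */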

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and the MSI high and low addresses are 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity.  Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch.  We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the low-order 16 bits of the last scratch
	 * register; only the high-order 16 bits (the outstanding
	 * threshold) come from the scratch structure.  The sequence
	 * below assumes the preserved bits remain unchanged between
	 * the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since the last call.  This and the next
 * function supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of the two GENERIC
	 * GSI commands.  We use these to allocate and halt channels on
	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
	 * allocated, the modem "owns" these channels, and as a result we
	 * have no way of knowing the channel's state at any given time.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command.  We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 *
	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
	 * if we receive it.
	 */
	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_CHANNEL_NOT_RUNNING:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Global execution environment (GLOB_EE) interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & BIT(ERROR_INT))
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~BIT(ERROR_INT);

	if (val & BIT(GP_INT1)) {
		val ^= BIT(GP_INT1);
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case BIT(GSI_CH_CTRL):
				gsi_isr_chan_ctrl(gsi);
				break;
			case BIT(GSI_EV_CTRL):
				gsi_isr_evt_ctrl(gsi);
				break;
			case BIT(GSI_GLOB_EE):
				gsi_isr_glob_ee(gsi);
				break;
			case BIT(GSI_IEOB):
				gsi_isr_ieob(gsi);
				break;
			case BIT(GSI_GENERAL):
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned int irq;
	int ret;

	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
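		/* A zero return value (no IRQ) is also treated as an error */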
		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	return 0;
}

static void gsi_irq_exit(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

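	/* The channel's transaction map is indexed by TRE ring index */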
	return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:	Event ring associated with channel that received packets
 * @index:	Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * us the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event.  RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction.  Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* ring->addr is not yet set, so free using the local addr */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done.  Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event.  Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received.  For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:	Channel to be polled
 *
 * Return:	Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list.  If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed.  If so, they're moved to the
 * completed list and the new first entry is returned.  If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:	NAPI structure for the channel
 * @budget:	Budget supplied by NAPI core
 *
 * Return:	Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more.  Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		count++;
		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget) {
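		/* All done; re-enable the IEOB interrupt that was turned
		 * off by gsi_isr_ieob() when this poll was scheduled.
		 */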
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used.  This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
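	/* (the GENMASK() value is truncated to 32 bits by the assignment,
	 * marking event ids evt_ring_max and above as unavailable)
	 */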

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}

/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	bool success;
	u32 val;

	/* The error global interrupt type is always enabled (until
	 * teardown), so we won't change that.  A generic EE command
	 * completes with a GSI global interrupt of type GP_INT1.  We
	 * only perform one generic command at a time (to allocate or
	 * halt a modem channel), and only from this function.  So we
	 * enable the GP_INT1 IRQ type here while we're expecting it.
	 */
1652 	val = BIT(ERROR_INT) | BIT(GP_INT1);
1653 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1654 
1655 	/* First zero the result code field */
1656 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1657 	val &= ~GENERIC_EE_RESULT_FMASK;
1658 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1659 
1660 	/* Now issue the command */
1661 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1662 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1663 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1664 
1665 	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
1666 
1667 	/* Disable the GP_INT1 IRQ type again */
1668 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1669 
1670 	if (success)
1671 		return gsi->result;
1672 
1673 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1674 		opcode, channel_id);
1675 
1676 	return -ETIMEDOUT;
1677 }
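
/* Note: the GP_INT1 interrupt handler elsewhere in this file records
 * the generic command's result code in gsi->result before signaling
 * gsi->completion, which is what gsi_command() waits on above.  (This
 * is a descriptive note only; see the interrupt handling code for the
 * authoritative details.)
 */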
1678 
1679 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1680 {
1681 	return gsi_generic_command(gsi, channel_id,
1682 				   GSI_GENERIC_ALLOCATE_CHANNEL);
1683 }
1684 
1685 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1686 {
1687 	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1688 	int ret;
1689 
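	/* Retry while the command reports -EAGAIN.  The first attempt
	 * happens before retries is consumed, so the halt command is
	 * issued at most GSI_CHANNEL_MODEM_HALT_RETRIES + 1 times.
	 */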
1690 	do
1691 		ret = gsi_generic_command(gsi, channel_id,
1692 					  GSI_GENERIC_HALT_CHANNEL);
1693 	while (ret == -EAGAIN && retries--);
1694 
1695 	if (ret)
1696 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1697 			ret, channel_id);
1698 }
1699 
1700 /* Setup function for channels */
1701 static int gsi_channel_setup(struct gsi *gsi)
1702 {
1703 	u32 channel_id = 0;
1704 	u32 mask;
1705 	int ret;
1706 
1707 	gsi_evt_ring_setup(gsi);
1708 	gsi_irq_enable(gsi);
1709 
1710 	mutex_lock(&gsi->mutex);
1711 
1712 	do {
1713 		ret = gsi_channel_setup_one(gsi, channel_id);
1714 		if (ret)
1715 			goto err_unwind;
1716 	} while (++channel_id < gsi->channel_count);
1717 
	/* Make sure no channels were defined that the hardware does not support */
1719 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1720 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1721 
1722 		if (!channel->gsi)
1723 			continue;	/* Ignore uninitialized channels */
1724 
1725 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1726 			channel_id - 1);
1727 		channel_id = gsi->channel_count;
1728 		goto err_unwind;
1729 	}
1730 
1731 	/* Allocate modem channels if necessary */
1732 	mask = gsi->modem_channel_bitmap;
1733 	while (mask) {
1734 		u32 modem_channel_id = __ffs(mask);
1735 
1736 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1737 		if (ret)
1738 			goto err_unwind_modem;
1739 
1740 		/* Clear bit from mask only after success (for unwind) */
1741 		mask ^= BIT(modem_channel_id);
1742 	}
1743 
1744 	mutex_unlock(&gsi->mutex);
1745 
1746 	return 0;
1747 
1748 err_unwind_modem:
1749 	/* Compute which modem channels need to be deallocated */
1750 	mask ^= gsi->modem_channel_bitmap;
1751 	while (mask) {
1752 		channel_id = __fls(mask);
1753 
1754 		mask ^= BIT(channel_id);
1755 
1756 		gsi_modem_channel_halt(gsi, channel_id);
1757 	}
1758 
1759 err_unwind:
1760 	while (channel_id--)
1761 		gsi_channel_teardown_one(gsi, channel_id);
1762 
1763 	mutex_unlock(&gsi->mutex);
1764 
1765 	gsi_irq_disable(gsi);
1766 	gsi_evt_ring_teardown(gsi);
1767 
1768 	return ret;
1769 }
1770 
1771 /* Inverse of gsi_channel_setup() */
1772 static void gsi_channel_teardown(struct gsi *gsi)
1773 {
1774 	u32 mask = gsi->modem_channel_bitmap;
1775 	u32 channel_id;
1776 
1777 	mutex_lock(&gsi->mutex);
1778 
1779 	while (mask) {
1780 		channel_id = __fls(mask);
1781 
1782 		mask ^= BIT(channel_id);
1783 
1784 		gsi_modem_channel_halt(gsi, channel_id);
1785 	}
1786 
1787 	channel_id = gsi->channel_count - 1;
1788 	do
1789 		gsi_channel_teardown_one(gsi, channel_id);
1790 	while (channel_id--);
1791 
1792 	mutex_unlock(&gsi->mutex);
1793 
1794 	gsi_irq_disable(gsi);
1795 	gsi_evt_ring_teardown(gsi);
1796 }
1797 
1798 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1799 int gsi_setup(struct gsi *gsi)
1800 {
1801 	struct device *dev = gsi->dev;
1802 	u32 val;
1803 	int ret;
1804 
1805 	/* Here is where we first touch the GSI hardware */
1806 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1807 	if (!(val & ENABLED_FMASK)) {
1808 		dev_err(dev, "GSI has not been enabled\n");
1809 		return -EIO;
1810 	}
1811 
1812 	gsi_irq_setup(gsi);
1813 
1814 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1815 
1816 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1817 	if (!gsi->channel_count) {
1818 		dev_err(dev, "GSI reports zero channels supported\n");
1819 		return -EINVAL;
1820 	}
1821 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1822 		dev_warn(dev,
1823 			 "limiting to %u channels; hardware supports %u\n",
1824 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1825 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1826 	}
1827 
1828 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1829 	if (!gsi->evt_ring_count) {
1830 		dev_err(dev, "GSI reports zero event rings supported\n");
1831 		return -EINVAL;
1832 	}
1833 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1834 		dev_warn(dev,
1835 			 "limiting to %u event rings; hardware supports %u\n",
1836 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1837 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1838 	}
1839 
1840 	/* Initialize the error log */
1841 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1842 
1843 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1844 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1845 
1846 	ret = gsi_channel_setup(gsi);
1847 	if (ret)
1848 		gsi_irq_teardown(gsi);
1849 
1850 	return ret;
1851 }
1852 
1853 /* Inverse of gsi_setup() */
1854 void gsi_teardown(struct gsi *gsi)
1855 {
1856 	gsi_channel_teardown(gsi);
1857 	gsi_irq_teardown(gsi);
1858 }
1859 
1860 /* Initialize a channel's event ring */
1861 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1862 {
1863 	struct gsi *gsi = channel->gsi;
1864 	struct gsi_evt_ring *evt_ring;
1865 	int ret;
1866 
1867 	ret = gsi_evt_ring_id_alloc(gsi);
1868 	if (ret < 0)
1869 		return ret;
1870 	channel->evt_ring_id = ret;
1871 
1872 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1873 	evt_ring->channel = channel;
1874 
1875 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1876 	if (!ret)
1877 		return 0;	/* Success! */
1878 
1879 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1880 		ret, gsi_channel_id(channel));
1881 
1882 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1883 
1884 	return ret;
1885 }
1886 
1887 /* Inverse of gsi_channel_evt_ring_init() */
1888 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1889 {
1890 	u32 evt_ring_id = channel->evt_ring_id;
1891 	struct gsi *gsi = channel->gsi;
1892 	struct gsi_evt_ring *evt_ring;
1893 
1894 	evt_ring = &gsi->evt_ring[evt_ring_id];
1895 	gsi_ring_free(gsi, &evt_ring->ring);
1896 	gsi_evt_ring_id_free(gsi, evt_ring_id);
1897 }
1898 
1899 /* Init function for event rings */
1900 static void gsi_evt_ring_init(struct gsi *gsi)
1901 {
1902 	u32 evt_ring_id = 0;
1903 
1904 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1905 	gsi->ieob_enabled_bitmap = 0;
1906 	do
1907 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
1908 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1909 }
1910 
1911 /* Inverse of gsi_evt_ring_init() */
1912 static void gsi_evt_ring_exit(struct gsi *gsi)
1913 {
1914 	/* Nothing to do */
1915 }
1916 
1917 static bool gsi_channel_data_valid(struct gsi *gsi,
1918 				   const struct ipa_gsi_endpoint_data *data)
1919 {
1920 #ifdef IPA_VALIDATION
1921 	u32 channel_id = data->channel_id;
1922 	struct device *dev = gsi->dev;
1923 
	/* Make sure channel ids are in the range the driver supports */
1925 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1926 		dev_err(dev, "bad channel id %u; must be less than %u\n",
1927 			channel_id, GSI_CHANNEL_COUNT_MAX);
1928 		return false;
1929 	}
1930 
1931 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1932 		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1933 		return false;
1934 	}
1935 
1936 	if (!data->channel.tlv_count ||
1937 	    data->channel.tlv_count > GSI_TLV_MAX) {
1938 		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1939 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1940 		return false;
1941 	}
1942 
1943 	/* We have to allow at least one maximally-sized transaction to
1944 	 * be outstanding (which would use tlv_count TREs).  Given how
1945 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
1946 	 * twice the TLV FIFO size to satisfy this requirement.
1947 	 */
1948 	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TRE count %u too low for TLV count %u\n",
			channel_id, data->channel.tre_count,
			data->channel.tlv_count);
1952 		return false;
1953 	}
1954 
1955 	if (!is_power_of_2(data->channel.tre_count)) {
1956 		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
1957 			channel_id, data->channel.tre_count);
1958 		return false;
1959 	}
1960 
1961 	if (!is_power_of_2(data->channel.event_count)) {
1962 		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
1963 			channel_id, data->channel.event_count);
1964 		return false;
1965 	}
1966 #endif /* IPA_VALIDATION */
1967 
1968 	return true;
1969 }
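
/* Worked example of the tre_count check above (illustrative numbers):
 * with tlv_count = 16, tre_count must be at least 2 * 16 - 1 = 31.
 * Since tre_count must also be a power of 2, the smallest valid value
 * is 32, for which gsi_channel_tre_max() yields 32 - (16 - 1) = 17,
 * enough for one maximally-sized (16-TRE) transaction to be
 * outstanding.
 */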
1970 
1971 /* Init function for a single channel */
1972 static int gsi_channel_init_one(struct gsi *gsi,
1973 				const struct ipa_gsi_endpoint_data *data,
1974 				bool command)
1975 {
1976 	struct gsi_channel *channel;
1977 	u32 tre_count;
1978 	int ret;
1979 
1980 	if (!gsi_channel_data_valid(gsi, data))
1981 		return -EINVAL;
1982 
1983 	/* Worst case we need an event for every outstanding TRE */
1984 	if (data->channel.tre_count > data->channel.event_count) {
1985 		tre_count = data->channel.event_count;
1986 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1987 			 data->channel_id, tre_count);
1988 	} else {
1989 		tre_count = data->channel.tre_count;
1990 	}
1991 
1992 	channel = &gsi->channel[data->channel_id];
1993 	memset(channel, 0, sizeof(*channel));
1994 
1995 	channel->gsi = gsi;
1996 	channel->toward_ipa = data->toward_ipa;
1997 	channel->command = command;
1998 	channel->tlv_count = data->channel.tlv_count;
1999 	channel->tre_count = tre_count;
2000 	channel->event_count = data->channel.event_count;
2001 	init_completion(&channel->completion);
2002 
2003 	ret = gsi_channel_evt_ring_init(channel);
2004 	if (ret)
2005 		goto err_clear_gsi;
2006 
2007 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2008 	if (ret) {
2009 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2010 			ret, data->channel_id);
2011 		goto err_channel_evt_ring_exit;
2012 	}
2013 
2014 	ret = gsi_channel_trans_init(gsi, data->channel_id);
2015 	if (ret)
2016 		goto err_ring_free;
2017 
2018 	if (command) {
2019 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2020 
2021 		ret = ipa_cmd_pool_init(channel, tre_max);
2022 	}
2023 	if (!ret)
2024 		return 0;	/* Success! */
2025 
2026 	gsi_channel_trans_exit(channel);
2027 err_ring_free:
2028 	gsi_ring_free(gsi, &channel->tre_ring);
2029 err_channel_evt_ring_exit:
2030 	gsi_channel_evt_ring_exit(channel);
2031 err_clear_gsi:
2032 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
2033 
2034 	return ret;
2035 }
2036 
2037 /* Inverse of gsi_channel_init_one() */
2038 static void gsi_channel_exit_one(struct gsi_channel *channel)
2039 {
2040 	if (!channel->gsi)
2041 		return;		/* Ignore uninitialized channels */
2042 
2043 	if (channel->command)
2044 		ipa_cmd_pool_exit(channel);
2045 	gsi_channel_trans_exit(channel);
2046 	gsi_ring_free(channel->gsi, &channel->tre_ring);
2047 	gsi_channel_evt_ring_exit(channel);
2048 }
2049 
2050 /* Init function for channels */
2051 static int gsi_channel_init(struct gsi *gsi, u32 count,
2052 			    const struct ipa_gsi_endpoint_data *data)
2053 {
2054 	bool modem_alloc;
2055 	int ret = 0;
2056 	u32 i;
2057 
2058 	/* IPA v4.2 requires the AP to allocate channels for the modem */
2059 	modem_alloc = gsi->version == IPA_VERSION_4_2;
2060 
2061 	gsi_evt_ring_init(gsi);
2062 
2063 	/* The endpoint data array is indexed by endpoint name */
2064 	for (i = 0; i < count; i++) {
2065 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2066 
2067 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2068 			continue;	/* Skip over empty slots */
2069 
2070 		/* Mark modem channels to be allocated (hardware workaround) */
2071 		if (data[i].ee_id == GSI_EE_MODEM) {
2072 			if (modem_alloc)
2073 				gsi->modem_channel_bitmap |=
2074 						BIT(data[i].channel_id);
2075 			continue;
2076 		}
2077 
2078 		ret = gsi_channel_init_one(gsi, &data[i], command);
2079 		if (ret)
2080 			goto err_unwind;
2081 	}
2082 
2083 	return ret;
2084 
2085 err_unwind:
2086 	while (i--) {
2087 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2088 			continue;
2089 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2090 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2091 			continue;
2092 		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2094 	}
2095 	gsi_evt_ring_exit(gsi);
2096 
2097 	return ret;
2098 }
2099 
2100 /* Inverse of gsi_channel_init() */
2101 static void gsi_channel_exit(struct gsi *gsi)
2102 {
2103 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2104 
2105 	do
2106 		gsi_channel_exit_one(&gsi->channel[channel_id]);
2107 	while (channel_id--);
2108 	gsi->modem_channel_bitmap = 0;
2109 
2110 	gsi_evt_ring_exit(gsi);
2111 }
2112 
2113 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2114 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2115 	     enum ipa_version version, u32 count,
2116 	     const struct ipa_gsi_endpoint_data *data)
2117 {
2118 	struct device *dev = &pdev->dev;
2119 	struct resource *res;
2120 	resource_size_t size;
2121 	u32 adjust;
2122 	int ret;
2123 
2124 	gsi_validate_build();
2125 
2126 	gsi->dev = dev;
2127 	gsi->version = version;
2128 
	/* The GSI layer uses NAPI on all endpoints.  NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we create a dummy network device for this purpose.
	 */
2133 	init_dummy_netdev(&gsi->dummy_dev);
2134 
2135 	/* Get GSI memory range and map it */
2136 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2137 	if (!res) {
2138 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2139 		return -ENODEV;
2140 	}
2141 
2142 	size = resource_size(res);
2143 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2144 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2145 		return -EINVAL;
2146 	}
2147 
2148 	/* Make sure we can make our pointer adjustment if necessary */
2149 	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2150 	if (res->start < adjust) {
2151 		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2152 			adjust);
2153 		return -EINVAL;
2154 	}
2155 
2156 	gsi->virt = ioremap(res->start, size);
2157 	if (!gsi->virt) {
2158 		dev_err(dev, "unable to remap \"gsi\" memory\n");
2159 		return -ENOMEM;
2160 	}
2161 	/* Adjust register range pointer downward for newer IPA versions */
2162 	gsi->virt -= adjust;
2163 
2164 	init_completion(&gsi->completion);
2165 
2166 	ret = gsi_irq_init(gsi, pdev);
2167 	if (ret)
2168 		goto err_iounmap;
2169 
2170 	ret = gsi_channel_init(gsi, count, data);
2171 	if (ret)
2172 		goto err_irq_exit;
2173 
2174 	mutex_init(&gsi->mutex);
2175 
2176 	return 0;
2177 
2178 err_irq_exit:
2179 	gsi_irq_exit(gsi);
2180 err_iounmap:
2181 	iounmap(gsi->virt);
2182 
2183 	return ret;
2184 }
2185 
2186 /* Inverse of gsi_init() */
2187 void gsi_exit(struct gsi *gsi)
2188 {
2189 	mutex_destroy(&gsi->mutex);
2190 	gsi_channel_exit(gsi);
2191 	gsi_irq_exit(gsi);
2192 	iounmap(gsi->virt);
2193 }
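
/* Sketch of how the entry points above are expected to pair up; the
 * real callers live elsewhere in the IPA driver, so this is
 * illustrative only, with error handling elided:
 *
 *	ret = gsi_init(gsi, pdev, version, count, data);
 *	...
 *	ret = gsi_setup(gsi);		(GSI firmware loaded beforehand)
 *	...
 *	gsi_teardown(gsi);		(inverse of gsi_setup())
 *	gsi_exit(gsi);			(inverse of gsi_init())
 */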
2194 
2195 /* The maximum number of outstanding TREs on a channel.  This limits
2196  * a channel's maximum number of transactions outstanding (worst case
2197  * is one TRE per transaction).
2198  *
2199  * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them.  But in practice,
2201  * doing that led to the hardware reporting exhaustion of event ring
2202  * slots for writing completion information.  So the hardware limit
2203  * would be (tre_count - 1).
2204  *
2205  * We reduce it a bit further though.  Transaction resource pools are
2206  * sized to be a little larger than this maximum, to allow resource
2207  * allocations to always be contiguous.  The number of entries in a
2208  * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it.  Reducing the
2210  * maximum number of outstanding TREs allows the number of entries in
2211  * a pool to avoid crossing that power-of-2 boundary, and this can
2212  * substantially reduce pool memory requirements.  The number we
2213  * reduce it by matches the number added in gsi_trans_pool_init().
2214  */
2215 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2216 {
2217 	struct gsi_channel *channel = &gsi->channel[channel_id];
2218 
2219 	/* Hardware limit is channel->tre_count - 1 */
2220 	return channel->tre_count - (channel->tlv_count - 1);
2221 }
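
/* Worked example (illustrative numbers): with tre_count = 256 and
 * tlv_count = 20 this returns 256 - (20 - 1) = 237.  Per the comment
 * above, the transaction pool adds the same amount back, giving
 * 237 + 19 = 256 entries, exactly at the power-of-2 boundary.  Had we
 * used the hardware limit of 255 instead, the pool would need
 * 255 + 19 = 274 entries, crossing the boundary and roughly doubling
 * the pool's memory.
 */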
2222 
2223 /* Returns the maximum number of TREs in a single transaction for a channel */
2224 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2225 {
2226 	struct gsi_channel *channel = &gsi->channel[channel_id];
2227 
2228 	return channel->tlv_count;
2229 }
2230