xref: /linux/drivers/net/ipa/gsi.c (revision d4a96be65423296e42091b0b79973b8d446e7798)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2018-2021 Linaro Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <linux/bits.h>
9 #include <linux/bitfield.h>
10 #include <linux/mutex.h>
11 #include <linux/completion.h>
12 #include <linux/io.h>
13 #include <linux/bug.h>
14 #include <linux/interrupt.h>
15 #include <linux/platform_device.h>
16 #include <linux/netdevice.h>
17 
18 #include "gsi.h"
19 #include "gsi_reg.h"
20 #include "gsi_private.h"
21 #include "gsi_trans.h"
22 #include "ipa_gsi.h"
23 #include "ipa_data.h"
24 #include "ipa_version.h"
25 
26 /**
27  * DOC: The IPA Generic Software Interface
28  *
29  * The generic software interface (GSI) is an integral component of the IPA,
30  * providing a well-defined communication layer between the AP subsystem
31  * and the IPA core.  The modem uses the GSI layer as well.
32  *
33  *	--------	     ---------
34  *	|      |	     |	     |
35  *	|  AP  +<---.	.----+ Modem |
36  *	|      +--. |	| .->+	     |
37  *	|      |  | |	| |  |	     |
38  *	--------  | |	| |  ---------
39  *		  v |	v |
40  *		--+-+---+-+--
41  *		|    GSI    |
42  *		|-----------|
43  *		|	    |
44  *		|    IPA    |
45  *		|	    |
46  *		-------------
47  *
48  * In the above diagram, the AP and Modem represent "execution environments"
49  * (EEs), which are independent operating environments that use the IPA for
50  * data transfer.
51  *
52  * Each EE uses a set of unidirectional GSI "channels," which allow transfer
53  * of data to or from the IPA.  A channel is implemented as a ring buffer,
54  * with a DRAM-resident array of "transfer elements" (TREs) available to
55  * describe transfers to or from other EEs through the IPA.  A transfer
56  * element can also contain an immediate command, requesting the IPA perform
57  * actions other than data transfer.
58  *
59  * Each TRE refers to a block of data--also located in DRAM.  After writing one
60  * or more TREs to a channel, the writer (either the IPA or an EE) writes a
61  * doorbell register to inform the receiving side how many elements have
62  * been written.
63  *
64  * Each channel has a GSI "event ring" associated with it.  An event ring
65  * is implemented very much like a channel ring, but is always directed from
66  * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
67  * events by adding an entry to the event ring associated with the channel.
68  * The GSI then writes its doorbell for the event ring, causing the target
69  * EE to be interrupted.  Each entry in an event ring contains a pointer
70  * to the channel TRE whose completion the event represents.
71  *
72  * Each TRE in a channel ring has a set of flags.  One flag indicates whether
73  * the completion of the transfer operation generates an entry (and possibly
74  * an interrupt) in the channel's event ring.  Other flags allow transfer
75  * elements to be chained together, forming a single logical transaction.
76  * TRE flags are used to control whether and when interrupts are generated
77  * to signal completion of channel transfers.
78  *
79  * Elements in channel and event rings are completed (or consumed) strictly
80  * in order.  Completion of one entry implies the completion of all preceding
81  * entries.  A single completion interrupt can therefore communicate the
82  * completion of many transfers.
83  *
84  * Note that all GSI registers are little-endian, which is the assumed
85  * endianness of I/O space accesses.  The accessor functions perform byte
86  * swapping if needed (i.e., for a big endian CPU).
87  */
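
/* An illustrative walk-through of the flow described above (hypothetical
 * counts):  to queue two transfers on a TX channel, the AP fills two TREs
 * in the channel ring and writes the channel doorbell register.  When the
 * IPA completes the transfers it fills an entry in the channel's event
 * ring pointing at the last completed TRE (if an event was requested),
 * writes the event ring doorbell, and the AP takes an interrupt.
 */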
88 
89 /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
90 #define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */
91 
92 #define GSI_CMD_TIMEOUT			50	/* milliseconds */
93 
94 #define GSI_CHANNEL_STOP_RETRIES	10
95 #define GSI_CHANNEL_MODEM_HALT_RETRIES	10
96 
97 #define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
98 #define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */
99 
100 #define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */
101 
102 /* An entry in an event ring */
103 struct gsi_event {
104 	__le64 xfer_ptr;
105 	__le16 len;
106 	u8 reserved1;
107 	u8 code;
108 	__le16 reserved2;
109 	u8 type;
110 	u8 chid;
111 };
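
/* Sketch of the 16-byte layout of struct gsi_event above (little-endian,
 * per the __le types):  bytes 0-7 hold xfer_ptr (the address of the TRE
 * the event refers to), bytes 8-9 the transfer length, byte 11 the
 * completion code, byte 14 the event type, and byte 15 the channel id.
 * Its size must equal GSI_RING_ELEMENT_SIZE; see gsi_validate_build().
 */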
112 
113 /** struct gsi_channel_scratch_gpi - GPI protocol scratch register
114  * @max_outstanding_tre:
115  *	Defines the maximum number of outstanding TREs allowed in a single
116  *	transaction on a channel, expressed in bytes.  This determines the
117  *	amount of prefetch performed by the hardware.  We configure this to
118  *	equal the size of the TLV FIFO for the channel.
119  * @outstanding_threshold:
120  *	Defines the threshold (in bytes) determining when the sequencer
121  *	should update the channel doorbell.  We configure this to equal
122  *	the size of two TREs.
123  */
124 struct gsi_channel_scratch_gpi {
125 	u64 reserved1;
126 	u16 reserved2;
127 	u16 max_outstanding_tre;
128 	u16 reserved3;
129 	u16 outstanding_threshold;
130 };
131 
132 /** union gsi_channel_scratch - channel scratch configuration area
133  *
134  * The exact interpretation of this register is protocol-specific.
135  * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
136  */
137 union gsi_channel_scratch {
138 	struct gsi_channel_scratch_gpi gpi;
139 	struct {
140 		u32 word1;
141 		u32 word2;
142 		u32 word3;
143 		u32 word4;
144 	} data;
145 };
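
/* How the two views of the union line up (assuming little-endian layout):
 * word1 and word2 together overlay the 64-bit reserved1 field, while
 * max_outstanding_tre and outstanding_threshold occupy the high-order
 * 16 bits of word3 and word4, respectively.  The four words map directly
 * onto the four channel scratch registers written by gsi_channel_program().
 */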
146 
147 /* Check things that can be validated at build time. */
148 static void gsi_validate_build(void)
149 {
150 	/* This is used as a divisor */
151 	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
152 
153 	/* Code assumes the sizes of channel and event ring elements are
154 	 * the same (and fixed).  Make sure the size of an event ring
155 	 * element is what's expected.
156 	 */
157 	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
158 
159 	/* Hardware requires a 2^n ring size.  We ensure the number of
160 	 * elements in an event ring is a power of 2 elsewhere; this
161 	 * ensures the element size itself meets the requirement.
162 	 */
163 	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
164 
165 	/* The channel element size must fit in this field */
166 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
167 
168 	/* The event ring element size must fit in this field */
169 	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
170 }
171 
172 /* Return the channel id associated with a given channel */
173 static u32 gsi_channel_id(struct gsi_channel *channel)
174 {
175 	return channel - &channel->gsi->channel[0];
176 }
177 
178 /* An initialized channel has a non-null GSI pointer */
179 static bool gsi_channel_initialized(struct gsi_channel *channel)
180 {
181 	return !!channel->gsi;
182 }
183 
184 /* Update the GSI IRQ type register with the cached value */
185 static void gsi_irq_type_update(struct gsi *gsi, u32 val)
186 {
187 	gsi->type_enabled_bitmap = val;
188 	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
189 }
190 
191 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
192 {
193 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
194 }
195 
196 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
197 {
198 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
199 }
200 
201 /* Turn off all GSI interrupts initially */
202 static void gsi_irq_setup(struct gsi *gsi)
203 {
204 	/* Disable all interrupt types */
205 	gsi_irq_type_update(gsi, 0);
206 
207 	/* Clear all type-specific interrupt masks */
208 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
209 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
210 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
211 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
212 
213 	/* The inter-EE registers are in the non-adjusted address range */
214 	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
215 	iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
216 
217 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
218 }
219 
220 /* Turn off all GSI interrupts when we're all done */
221 static void gsi_irq_teardown(struct gsi *gsi)
222 {
223 	/* Nothing to do */
224 }
225 
226 /* Event ring commands are performed one at a time.  Their completion
227  * is signaled by the event ring control GSI interrupt type, which is
228  * only enabled when we issue an event ring command.  Only the event
229  * ring being operated on has this interrupt enabled.
230  */
231 static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
232 {
233 	u32 val = BIT(evt_ring_id);
234 
235 	/* There's a small chance that a previous command completed
236 	 * after the interrupt was disabled, so make sure we have no
237 	 * pending interrupts before we enable them.
238 	 */
239 	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
240 
241 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
242 	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
243 }
244 
245 /* Disable event ring control interrupts */
246 static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
247 {
248 	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
249 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
250 }
251 
252 /* Channel commands are performed one at a time.  Their completion is
253  * signaled by the channel control GSI interrupt type, which is only
254  * enabled when we issue a channel command.  Only the channel being
255  * operated on has this interrupt enabled.
256  */
257 static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
258 {
259 	u32 val = BIT(channel_id);
260 
261 	/* There's a small chance that a previous command completed
262 	 * after the interrupt was disabled, so make sure we have no
263 	 * pending interrupts before we enable them.
264 	 */
265 	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
266 
267 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
268 	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
269 }
270 
271 /* Disable channel control interrupts */
272 static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
273 {
274 	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
275 	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
276 }
277 
278 static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
279 {
280 	bool enable_ieob = !gsi->ieob_enabled_bitmap;
281 	u32 val;
282 
283 	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
284 	val = gsi->ieob_enabled_bitmap;
285 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
286 
287 	/* Enable the interrupt type if this is the first channel enabled */
288 	if (enable_ieob)
289 		gsi_irq_type_enable(gsi, GSI_IEOB);
290 }
291 
292 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
293 {
294 	u32 val;
295 
296 	gsi->ieob_enabled_bitmap &= ~event_mask;
297 
298 	/* Disable the interrupt type if this was the last enabled channel */
299 	if (!gsi->ieob_enabled_bitmap)
300 		gsi_irq_type_disable(gsi, GSI_IEOB);
301 
302 	val = gsi->ieob_enabled_bitmap;
303 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
304 }
305 
306 static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
307 {
308 	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
309 }
310 
311 /* Enable all GSI interrupt types */
312 static void gsi_irq_enable(struct gsi *gsi)
313 {
314 	u32 val;
315 
316 	/* Global interrupts include hardware error reports.  Enable
317 	 * that so we can at least report the error should it occur.
318 	 */
319 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
320 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
321 
322 	/* General GSI interrupts are reported to all EEs; if they occur
323 	 * they are unrecoverable (without reset).  A breakpoint interrupt
324 	 * also exists, but we don't support that.  We want to be notified
325 	 * of errors so we can report them, even if they can't be handled.
326 	 */
327 	val = BIT(BUS_ERROR);
328 	val |= BIT(CMD_FIFO_OVRFLOW);
329 	val |= BIT(MCS_STACK_OVRFLOW);
330 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
331 	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
332 }
333 
334 /* Disable all GSI interrupt types */
335 static void gsi_irq_disable(struct gsi *gsi)
336 {
337 	gsi_irq_type_update(gsi, 0);
338 
339 	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
340 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
341 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
342 }
343 
344 /* Return the virtual address associated with a ring index */
345 void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
346 {
347 	/* Note: index *must* be used modulo the ring count here */
348 	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
349 }
350 
351 /* Return the 32-bit DMA address associated with a ring index */
352 static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
353 {
354 	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
355 }
356 
357 /* Return the ring index of a 32-bit ring offset */
358 static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
359 {
360 	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
361 }
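
/* A worked example of the three helpers above, using hypothetical values:
 * for a ring of count 8 based at 32-bit DMA address 0x1000, with 16-byte
 * elements, gsi_ring_virt() maps index 10 to virt + (10 % 8) * 16, i.e.
 * virt + 32; gsi_ring_addr() maps index 6 to 0x1000 + 6 * 16 = 0x1060;
 * and gsi_ring_index() maps offset 0x1060 back to index 6.
 */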
362 
363 /* Issue a GSI command by writing a value to a register, then wait for
364  * completion to be signaled.  Returns true if the command completes
365  * or false if it times out.
366  */
367 static bool
368 gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
369 {
370 	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
371 
372 	reinit_completion(completion);
373 
374 	iowrite32(val, gsi->virt + reg);
375 
376 	return !!wait_for_completion_timeout(completion, timeout);
377 }
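
/* Callers of gsi_command() follow a common pattern:  enable the interrupt
 * that signals completion of the command, issue the command, then disable
 * the interrupt again.  gsi_evt_ring_command(), gsi_channel_command() and
 * gsi_generic_command() below are all instances of this pattern.
 */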
378 
379 /* Return the hardware's notion of the current state of an event ring */
380 static enum gsi_evt_ring_state
381 gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
382 {
383 	u32 val;
384 
385 	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
386 
387 	return u32_get_bits(val, EV_CHSTATE_FMASK);
388 }
389 
390 /* Issue an event ring command and wait for it to complete */
391 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
392 				 enum gsi_evt_cmd_opcode opcode)
393 {
394 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
395 	struct completion *completion = &evt_ring->completion;
396 	struct device *dev = gsi->dev;
397 	bool timeout;
398 	u32 val;
399 
400 	/* Enable the completion interrupt for the command */
401 	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
402 
403 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
404 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
405 
406 	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
407 
408 	gsi_irq_ev_ctrl_disable(gsi);
409 
410 	if (!timeout)
411 		return;
412 
413 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
414 		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
415 }
416 
417 /* Allocate an event ring in NOT_ALLOCATED state */
418 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
419 {
420 	enum gsi_evt_ring_state state;
421 
422 	/* Get initial event ring state */
423 	state = gsi_evt_ring_state(gsi, evt_ring_id);
424 	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
425 		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
426 			evt_ring_id, state);
427 		return -EINVAL;
428 	}
429 
430 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
431 
432 	/* If successful the event ring state will have changed */
433 	state = gsi_evt_ring_state(gsi, evt_ring_id);
434 	if (state == GSI_EVT_RING_STATE_ALLOCATED)
435 		return 0;
436 
437 	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
438 		evt_ring_id, state);
439 
440 	return -EIO;
441 }
442 
443 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
444 static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
445 {
446 	enum gsi_evt_ring_state state;
447 
448 	state = gsi_evt_ring_state(gsi, evt_ring_id);
449 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
450 	    state != GSI_EVT_RING_STATE_ERROR) {
451 		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
452 			evt_ring_id, state);
453 		return;
454 	}
455 
456 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
457 
458 	/* If successful the event ring state will have changed */
459 	state = gsi_evt_ring_state(gsi, evt_ring_id);
460 	if (state == GSI_EVT_RING_STATE_ALLOCATED)
461 		return;
462 
463 	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
464 		evt_ring_id, state);
465 }
466 
467 /* Issue a hardware de-allocation request for an allocated event ring */
468 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
469 {
470 	enum gsi_evt_ring_state state;
471 
472 	state = gsi_evt_ring_state(gsi, evt_ring_id);
473 	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
474 		dev_err(gsi->dev, "event ring %u bad state %u before dealloc\n",
475 			evt_ring_id, state);
476 		return;
477 	}
478 
479 	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
480 
481 	/* If successful the event ring state will have changed */
482 	state = gsi_evt_ring_state(gsi, evt_ring_id);
483 	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
484 		return;
485 
486 	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
487 		evt_ring_id, state);
488 }
489 
490 /* Fetch the current state of a channel from hardware */
491 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
492 {
493 	u32 channel_id = gsi_channel_id(channel);
494 	void __iomem *virt = channel->gsi->virt;
495 	u32 val;
496 
497 	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
498 
499 	return u32_get_bits(val, CHSTATE_FMASK);
500 }
501 
502 /* Issue a channel command and wait for it to complete */
503 static void
504 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
505 {
506 	struct completion *completion = &channel->completion;
507 	u32 channel_id = gsi_channel_id(channel);
508 	struct gsi *gsi = channel->gsi;
509 	struct device *dev = gsi->dev;
510 	bool timeout;
511 	u32 val;
512 
513 	/* Enable the completion interrupt for the command */
514 	gsi_irq_ch_ctrl_enable(gsi, channel_id);
515 
516 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
517 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
518 	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
519 
520 	gsi_irq_ch_ctrl_disable(gsi);
521 
522 	if (!timeout)
523 		return;
524 
525 	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
526 		opcode, channel_id, gsi_channel_state(channel));
527 }
528 
529 /* Allocate GSI channel in NOT_ALLOCATED state */
530 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
531 {
532 	struct gsi_channel *channel = &gsi->channel[channel_id];
533 	struct device *dev = gsi->dev;
534 	enum gsi_channel_state state;
535 
536 	/* Get initial channel state */
537 	state = gsi_channel_state(channel);
538 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
539 		dev_err(dev, "channel %u bad state %u before alloc\n",
540 			channel_id, state);
541 		return -EINVAL;
542 	}
543 
544 	gsi_channel_command(channel, GSI_CH_ALLOCATE);
545 
546 	/* If successful the channel state will have changed */
547 	state = gsi_channel_state(channel);
548 	if (state == GSI_CHANNEL_STATE_ALLOCATED)
549 		return 0;
550 
551 	dev_err(dev, "channel %u bad state %u after alloc\n",
552 		channel_id, state);
553 
554 	return -EIO;
555 }
556 
557 /* Start a channel in ALLOCATED or STOPPED state */
558 static int gsi_channel_start_command(struct gsi_channel *channel)
559 {
560 	struct device *dev = channel->gsi->dev;
561 	enum gsi_channel_state state;
562 
563 	state = gsi_channel_state(channel);
564 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
565 	    state != GSI_CHANNEL_STATE_STOPPED) {
566 		dev_err(dev, "channel %u bad state %u before start\n",
567 			gsi_channel_id(channel), state);
568 		return -EINVAL;
569 	}
570 
571 	gsi_channel_command(channel, GSI_CH_START);
572 
573 	/* If successful the channel state will have changed */
574 	state = gsi_channel_state(channel);
575 	if (state == GSI_CHANNEL_STATE_STARTED)
576 		return 0;
577 
578 	dev_err(dev, "channel %u bad state %u after start\n",
579 		gsi_channel_id(channel), state);
580 
581 	return -EIO;
582 }
583 
584 /* Stop a GSI channel in STARTED state */
585 static int gsi_channel_stop_command(struct gsi_channel *channel)
586 {
587 	struct device *dev = channel->gsi->dev;
588 	enum gsi_channel_state state;
589 
590 	state = gsi_channel_state(channel);
591 
592 	/* Channel could have entered STOPPED state since last call
593 	 * if it timed out.  If so, we're done.
594 	 */
595 	if (state == GSI_CHANNEL_STATE_STOPPED)
596 		return 0;
597 
598 	if (state != GSI_CHANNEL_STATE_STARTED &&
599 	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
600 		dev_err(dev, "channel %u bad state %u before stop\n",
601 			gsi_channel_id(channel), state);
602 		return -EINVAL;
603 	}
604 
605 	gsi_channel_command(channel, GSI_CH_STOP);
606 
607 	/* If successful the channel state will have changed */
608 	state = gsi_channel_state(channel);
609 	if (state == GSI_CHANNEL_STATE_STOPPED)
610 		return 0;
611 
612 	/* We may have to try again if stop is in progress */
613 	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
614 		return -EAGAIN;
615 
616 	dev_err(dev, "channel %u bad state %u after stop\n",
617 		gsi_channel_id(channel), state);
618 
619 	return -EIO;
620 }
621 
622 /* Reset a GSI channel in STOPPED or ERROR state. */
623 static void gsi_channel_reset_command(struct gsi_channel *channel)
624 {
625 	struct device *dev = channel->gsi->dev;
626 	enum gsi_channel_state state;
627 
628 	/* A short delay is required before a RESET command */
629 	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
630 
631 	state = gsi_channel_state(channel);
632 	if (state != GSI_CHANNEL_STATE_STOPPED &&
633 	    state != GSI_CHANNEL_STATE_ERROR) {
634 		/* No need to reset a channel already in ALLOCATED state */
635 		if (state != GSI_CHANNEL_STATE_ALLOCATED)
636 			dev_err(dev, "channel %u bad state %u before reset\n",
637 				gsi_channel_id(channel), state);
638 		return;
639 	}
640 
641 	gsi_channel_command(channel, GSI_CH_RESET);
642 
643 	/* If successful the channel state will have changed */
644 	state = gsi_channel_state(channel);
645 	if (state != GSI_CHANNEL_STATE_ALLOCATED)
646 		dev_err(dev, "channel %u bad state %u after reset\n",
647 			gsi_channel_id(channel), state);
648 }
649 
650 /* Deallocate an ALLOCATED GSI channel */
651 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
652 {
653 	struct gsi_channel *channel = &gsi->channel[channel_id];
654 	struct device *dev = gsi->dev;
655 	enum gsi_channel_state state;
656 
657 	state = gsi_channel_state(channel);
658 	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
659 		dev_err(dev, "channel %u bad state %u before dealloc\n",
660 			channel_id, state);
661 		return;
662 	}
663 
664 	gsi_channel_command(channel, GSI_CH_DE_ALLOC);
665 
666 	/* If successful the channel state will have changed */
667 	state = gsi_channel_state(channel);
668 
669 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
670 		dev_err(dev, "channel %u bad state %u after dealloc\n",
671 			channel_id, state);
672 }
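
/* Summary of the channel state transitions driven by the commands above
 * (a simplified sketch; the ERROR state can be entered asynchronously):
 *
 *	NOT_ALLOCATED --alloc--> ALLOCATED --start--> STARTED
 *	STARTED --stop--> STOPPED (possibly via STOP_IN_PROC)
 *	STOPPED or ERROR --reset--> ALLOCATED --de_alloc--> NOT_ALLOCATED
 */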
673 
674 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
675  * The index argument (modulo the ring count) is the first unfilled entry, so
676  * we supply one less than that with the doorbell.  Update the event ring
677  * index field with the value provided.
678  */
679 static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
680 {
681 	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
682 	u32 val;
683 
684 	ring->index = index;	/* Next unused entry */
685 
686 	/* Note: index *must* be used modulo the ring count here */
687 	val = gsi_ring_addr(ring, (index - 1) % ring->count);
688 	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
689 }
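
/* Note the unsigned arithmetic above:  if index is 0 on (for example) a
 * 16-element ring, index - 1 wraps to 0xffffffff, and 0xffffffff % 16
 * is 15, so the doorbell is written with the address of the ring's last
 * element--the most recently processed entry.
 */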
690 
691 /* Program an event ring for use */
692 static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
693 {
694 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
695 	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
696 	u32 val;
697 
698 	/* We program all event rings as GPI type/protocol */
699 	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
700 	val |= EV_INTYPE_FMASK;
701 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
702 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
703 
704 	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
705 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
706 
707 	/* The context 2 and 3 registers store the low-order and
708 	 * high-order 32 bits of the address of the event ring,
709 	 * respectively.
710 	 */
711 	val = evt_ring->ring.addr & GENMASK(31, 0);
712 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
713 
714 	val = evt_ring->ring.addr >> 32;
715 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
716 
717 	/* Enable interrupt moderation by setting the moderation delay */
718 	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
719 	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
720 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
721 
722 	/* No MSI write data; the MSI address high and low registers are 0 */
723 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
724 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
725 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
726 
727 	/* We don't need to get event read pointer updates */
728 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
729 	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
730 
731 	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
732 	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
733 }
734 
735 /* Find the transaction whose completion indicates a channel is quiesced */
736 static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
737 {
738 	struct gsi_trans_info *trans_info = &channel->trans_info;
739 	const struct list_head *list;
740 	struct gsi_trans *trans;
741 
742 	spin_lock_bh(&trans_info->spinlock);
743 
744 	/* There is a small chance a TX transaction got allocated just
745 	 * before we disabled transmits, so check for that.
746 	 */
747 	if (channel->toward_ipa) {
748 		list = &trans_info->alloc;
749 		if (!list_empty(list))
750 			goto done;
751 		list = &trans_info->pending;
752 		if (!list_empty(list))
753 			goto done;
754 	}
755 
756 	/* Otherwise (TX or RX) we want to wait for anything that
757 	 * has completed, or has been polled but not released yet.
758 	 */
759 	list = &trans_info->complete;
760 	if (!list_empty(list))
761 		goto done;
762 	list = &trans_info->polled;
763 	if (list_empty(list))
764 		list = NULL;
765 done:
766 	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
767 
768 	/* Caller will wait for this, so take a reference */
769 	if (trans)
770 		refcount_inc(&trans->refcount);
771 
772 	spin_unlock_bh(&trans_info->spinlock);
773 
774 	return trans;
775 }
776 
777 /* Wait for transaction activity on a channel to complete */
778 static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
779 {
780 	struct gsi_trans *trans;
781 
782 	/* Get the last transaction, and wait for it to complete */
783 	trans = gsi_channel_trans_last(channel);
784 	if (trans) {
785 		wait_for_completion(&trans->completion);
786 		gsi_trans_free(trans);
787 	}
788 }
789 
790 /* Program a channel for use */
791 static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
792 {
793 	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
794 	u32 channel_id = gsi_channel_id(channel);
795 	union gsi_channel_scratch scr = { };
796 	struct gsi_channel_scratch_gpi *gpi;
797 	struct gsi *gsi = channel->gsi;
798 	u32 wrr_weight = 0;
799 	u32 val;
800 
801 	/* Arbitrarily pick TRE 0 as the first channel element to use */
802 	channel->tre_ring.index = 0;
803 
804 	/* We program all channels as GPI type/protocol */
805 	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
806 	if (channel->toward_ipa)
807 		val |= CHTYPE_DIR_FMASK;
808 	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
809 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
810 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
811 
812 	val = u32_encode_bits(size, R_LENGTH_FMASK);
813 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
814 
815 	/* The context 2 and 3 registers store the low-order and
816 	 * high-order 32 bits of the address of the channel ring,
817 	 * respectively.
818 	 */
819 	val = channel->tre_ring.addr & GENMASK(31, 0);
820 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
821 
822 	val = channel->tre_ring.addr >> 32;
823 	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
824 
825 	/* Command channel gets low weighted round-robin priority */
826 	if (channel->command)
827 		wrr_weight = field_max(WRR_WEIGHT_FMASK);
828 	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
829 
830 	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
831 
832 	/* We enable the doorbell engine for IPA v3.5.1 */
833 	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
834 		val |= USE_DB_ENG_FMASK;
835 
836 	/* v4.0 introduces an escape buffer for prefetch.  We use it
837 	 * on all but the AP command channel.
838 	 */
839 	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
840 		/* If not otherwise set, prefetch buffers are used */
841 		if (gsi->version < IPA_VERSION_4_5)
842 			val |= USE_ESCAPE_BUF_ONLY_FMASK;
843 		else
844 			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
845 					       PREFETCH_MODE_FMASK);
846 	}
847 
848 	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
849 
850 	/* Now update the scratch registers for GPI protocol */
851 	gpi = &scr.gpi;
852 	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
853 					GSI_RING_ELEMENT_SIZE;
854 	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
855 
856 	val = scr.data.word1;
857 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
858 
859 	val = scr.data.word2;
860 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
861 
862 	val = scr.data.word3;
863 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
864 
865 	/* We must preserve the low-order 16 bits of the last scratch register;
866 	 * only its high bits (holding outstanding_threshold) are rewritten.
867 	 * This assumes the low bits don't change between the read and write.
868 	 */
869 	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
870 	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
871 	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
872 
873 	/* All done! */
874 }
875 
876 static void gsi_channel_deprogram(struct gsi_channel *channel)
877 {
878 	/* Nothing to do */
879 }
880 
881 static int __gsi_channel_start(struct gsi_channel *channel, bool start)
882 {
883 	struct gsi *gsi = channel->gsi;
884 	int ret;
885 
886 	if (!start)
887 		return 0;
888 
889 	mutex_lock(&gsi->mutex);
890 
891 	ret = gsi_channel_start_command(channel);
892 
893 	mutex_unlock(&gsi->mutex);
894 
895 	return ret;
896 }
897 
898 /* Start an allocated GSI channel */
899 int gsi_channel_start(struct gsi *gsi, u32 channel_id)
900 {
901 	struct gsi_channel *channel = &gsi->channel[channel_id];
902 	int ret;
903 
904 	/* Enable NAPI and the completion interrupt */
905 	napi_enable(&channel->napi);
906 	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
907 
908 	ret = __gsi_channel_start(channel, true);
909 	if (ret) {
910 		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
911 		napi_disable(&channel->napi);
912 	}
913 
914 	return ret;
915 }
916 
917 static int gsi_channel_stop_retry(struct gsi_channel *channel)
918 {
919 	u32 retries = GSI_CHANNEL_STOP_RETRIES;
920 	int ret;
921 
922 	do {
923 		ret = gsi_channel_stop_command(channel);
924 		if (ret != -EAGAIN)
925 			break;
926 		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
927 	} while (retries--);
928 
929 	return ret;
930 }
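
/* With GSI_CHANNEL_STOP_RETRIES of 10, the loop above issues the stop
 * command up to 11 times, sleeping 3-5 milliseconds between attempts.
 * A channel stuck in STOP_IN_PROC thus gets several tens of milliseconds
 * to quiesce before we give up and return -EAGAIN to the caller.
 */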
931 
932 static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
933 {
934 	struct gsi *gsi = channel->gsi;
935 	int ret;
936 
937 	/* Wait for any underway transactions to complete before stopping. */
938 	gsi_channel_trans_quiesce(channel);
939 
940 	if (!stop)
941 		return 0;
942 
943 	mutex_lock(&gsi->mutex);
944 
945 	ret = gsi_channel_stop_retry(channel);
946 
947 	mutex_unlock(&gsi->mutex);
948 
949 	return ret;
950 }
951 
952 /* Stop a started channel */
953 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
954 {
955 	struct gsi_channel *channel = &gsi->channel[channel_id];
956 	int ret;
957 
958 	ret = __gsi_channel_stop(channel, true);
959 	if (ret)
960 		return ret;
961 
962 	/* Disable the completion interrupt and NAPI if successful */
963 	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
964 	napi_disable(&channel->napi);
965 
966 	return 0;
967 }
968 
969 /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
970 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
971 {
972 	struct gsi_channel *channel = &gsi->channel[channel_id];
973 
974 	mutex_lock(&gsi->mutex);
975 
976 	gsi_channel_reset_command(channel);
977 	/* Due to a hardware quirk we may need to reset RX channels twice. */
978 	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
979 		gsi_channel_reset_command(channel);
980 
981 	gsi_channel_program(channel, doorbell);
982 	gsi_channel_trans_cancel_pending(channel);
983 
984 	mutex_unlock(&gsi->mutex);
985 }
986 
987 /* Stop a STARTED channel for suspend (using a stop command if requested) */
988 int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
989 {
990 	struct gsi_channel *channel = &gsi->channel[channel_id];
991 	int ret;
992 
993 	ret = __gsi_channel_stop(channel, stop);
994 	if (ret)
995 		return ret;
996 
997 	/* Ensure NAPI polling has finished. */
998 	napi_synchronize(&channel->napi);
999 
1000 	return 0;
1001 }
1002 
1003 /* Resume a suspended channel (starting it if so requested) */
1004 int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
1005 {
1006 	struct gsi_channel *channel = &gsi->channel[channel_id];
1007 
1008 	return __gsi_channel_start(channel, start);
1009 }
1010 
1011 /**
1012  * gsi_channel_tx_queued() - Report queued TX transfers for a channel
1013  * @channel:	Channel for which to report
1014  *
1015  * Report to the network stack the number of bytes and transactions that
1016  * have been queued to hardware since the last call.  This and the next function
1017  * supply information used by the network stack for throttling.
1018  *
1019  * For each channel we track the number of transactions used and bytes of
1020  * data those transactions represent.  We also track what those values are
1021  * each time this function is called.  Subtracting the two tells us
1022  * the number of bytes and transactions that have been added between
1023  * successive calls.
1024  *
1025  * Calling this each time we ring the channel doorbell allows us to
1026  * provide accurate information to the network stack about how much
1027  * work we've given the hardware at any point in time.
1028  */
1029 void gsi_channel_tx_queued(struct gsi_channel *channel)
1030 {
1031 	u32 trans_count;
1032 	u32 byte_count;
1033 
1034 	byte_count = channel->byte_count - channel->queued_byte_count;
1035 	trans_count = channel->trans_count - channel->queued_trans_count;
1036 	channel->queued_byte_count = channel->byte_count;
1037 	channel->queued_trans_count = channel->trans_count;
1038 
1039 	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
1040 				  trans_count, byte_count);
1041 }
1042 
1043 /**
1044  * gsi_channel_tx_update() - Report completed TX transfers
1045  * @channel:	Channel that has completed transmitting packets
1046  * @trans:	Last transaction known to be complete
1047  *
1048  * Compute the number of transactions and bytes that have been transferred
1049  * over a TX channel since the given transaction was committed.  Report this
1050  * information to the network stack.
1051  *
1052  * At the time a transaction is committed, we record its channel's
1053  * committed transaction and byte counts *in the transaction*.
1054  * Completions are signaled by the hardware with an interrupt, and
1055  * we can determine the latest completed transaction at that time.
1056  *
1057  * The difference between the byte/transaction count recorded in
1058  * the transaction and the count last time we recorded a completion
1059  * tells us exactly how much data has been transferred between
1060  * completions.
1061  *
1062  * Calling this each time we learn of a newly-completed transaction
1063  * allows us to provide accurate information to the network stack
1064  * about how much work has been completed by the hardware at a given
1065  * point in time.
1066  */
1067 static void
1068 gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
1069 {
1070 	u64 byte_count = trans->byte_count + trans->len;
1071 	u64 trans_count = trans->trans_count + 1;
1072 
1073 	byte_count -= channel->compl_byte_count;
1074 	channel->compl_byte_count += byte_count;
1075 	trans_count -= channel->compl_trans_count;
1076 	channel->compl_trans_count += trans_count;
1077 
1078 	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
1079 				     trans_count, byte_count);
1080 }
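
/* A numeric sketch with hypothetical values:  suppose a 500-byte
 * transaction was committed when the channel had transferred 1000 bytes
 * (so trans->byte_count is 1000), and the previous completion was
 * recorded with compl_byte_count at 800.  Its completion reports
 * 1000 + 500 - 800 = 700 newly-completed bytes to the network stack.
 */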
1081 
1082 /* Channel control interrupt handler */
1083 static void gsi_isr_chan_ctrl(struct gsi *gsi)
1084 {
1085 	u32 channel_mask;
1086 
1087 	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
1088 	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
1089 
1090 	while (channel_mask) {
1091 		u32 channel_id = __ffs(channel_mask);
1092 		struct gsi_channel *channel;
1093 
1094 		channel_mask ^= BIT(channel_id);
1095 
1096 		channel = &gsi->channel[channel_id];
1097 
1098 		complete(&channel->completion);
1099 	}
1100 }
1101 
1102 /* Event ring control interrupt handler */
1103 static void gsi_isr_evt_ctrl(struct gsi *gsi)
1104 {
1105 	u32 event_mask;
1106 
1107 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
1108 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
1109 
1110 	while (event_mask) {
1111 		u32 evt_ring_id = __ffs(event_mask);
1112 		struct gsi_evt_ring *evt_ring;
1113 
1114 		event_mask ^= BIT(evt_ring_id);
1115 
1116 		evt_ring = &gsi->evt_ring[evt_ring_id];
1117 
1118 		complete(&evt_ring->completion);
1119 	}
1120 }
1121 
1122 /* Global channel error interrupt handler */
1123 static void
1124 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
1125 {
1126 	if (code == GSI_OUT_OF_RESOURCES) {
1127 		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
1128 		complete(&gsi->channel[channel_id].completion);
1129 		return;
1130 	}
1131 
1132 	/* Report, but otherwise ignore all other error codes */
1133 	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
1134 		channel_id, err_ee, code);
1135 }
1136 
1137 /* Global event error interrupt handler */
1138 static void
1139 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1140 {
1141 	if (code == GSI_OUT_OF_RESOURCES) {
1142 		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1143 		u32 channel_id = gsi_channel_id(evt_ring->channel);
1144 
1145 		complete(&evt_ring->completion);
1146 		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1147 			channel_id);
1148 		return;
1149 	}
1150 
1151 	/* Report, but otherwise ignore all other error codes */
1152 	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1153 		evt_ring_id, err_ee, code);
1154 }
1155 
1156 /* Global error interrupt handler */
1157 static void gsi_isr_glob_err(struct gsi *gsi)
1158 {
1159 	enum gsi_err_type type;
1160 	enum gsi_err_code code;
1161 	u32 which;
1162 	u32 val;
1163 	u32 ee;
1164 
1165 	/* Get the logged error, then reinitialize the log */
1166 	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
1167 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1168 	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
1169 
1170 	ee = u32_get_bits(val, ERR_EE_FMASK);
1171 	type = u32_get_bits(val, ERR_TYPE_FMASK);
1172 	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
1173 	code = u32_get_bits(val, ERR_CODE_FMASK);
1174 
1175 	if (type == GSI_ERR_TYPE_CHAN)
1176 		gsi_isr_glob_chan_err(gsi, ee, which, code);
1177 	else if (type == GSI_ERR_TYPE_EVT)
1178 		gsi_isr_glob_evt_err(gsi, ee, which, code);
1179 	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
1180 		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1181 }
1182 
1183 /* Generic EE interrupt handler */
1184 static void gsi_isr_gp_int1(struct gsi *gsi)
1185 {
1186 	u32 result;
1187 	u32 val;
1188 
1189 	/* This interrupt is used to handle completions of the two GENERIC
1190 	 * GSI commands.  We use these to allocate and halt channels on
1191 	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
1192 	 * allocated, the modem "owns" these channels, and as a result we
1193 	 * have no way of knowing the channel's state at any given time.
1194 	 *
1195 	 * It is recommended that we halt the modem channels we allocated
1196 	 * when shutting down, but it's possible the channel isn't running
1197 	 * at the time we issue the HALT command.  We'll get an error in
1198 	 * that case, but it's harmless (the channel is already halted).
1199 	 *
1200 	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
1201 	 * if we receive it.
1202 	 */
1203 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1204 	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
1205 
1206 	switch (result) {
1207 	case GENERIC_EE_SUCCESS:
1208 	case GENERIC_EE_CHANNEL_NOT_RUNNING:
1209 		gsi->result = 0;
1210 		break;
1211 
1212 	case GENERIC_EE_RETRY:
1213 		gsi->result = -EAGAIN;
1214 		break;
1215 
1216 	default:
1217 		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
1218 		gsi->result = -EIO;
1219 		break;
1220 	}
1221 
1222 	complete(&gsi->completion);
1223 }
1224 
1225 /* Global EE interrupt handler */
1226 static void gsi_isr_glob_ee(struct gsi *gsi)
1227 {
1228 	u32 val;
1229 
1230 	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
1231 
1232 	if (val & BIT(ERROR_INT))
1233 		gsi_isr_glob_err(gsi);
1234 
1235 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
1236 
1237 	val &= ~BIT(ERROR_INT);
1238 
1239 	if (val & BIT(GP_INT1)) {
1240 		val ^= BIT(GP_INT1);
1241 		gsi_isr_gp_int1(gsi);
1242 	}
1243 
1244 	if (val)
1245 		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1246 }
1247 
1248 /* I/O completion (IEOB) interrupt handler */
1249 static void gsi_isr_ieob(struct gsi *gsi)
1250 {
1251 	u32 event_mask;
1252 
1253 	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
1254 	gsi_irq_ieob_disable(gsi, event_mask);
1255 	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
1256 
1257 	while (event_mask) {
1258 		u32 evt_ring_id = __ffs(event_mask);
1259 
1260 		event_mask ^= BIT(evt_ring_id);
1261 
1262 		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
1263 	}
1264 }
1265 
1266 /* General GSI interrupts represent serious problems, so report them */
1267 static void gsi_isr_general(struct gsi *gsi)
1268 {
1269 	struct device *dev = gsi->dev;
1270 	u32 val;
1271 
1272 	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
1273 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
1274 
1275 	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
1276 }
1277 
1278 /**
1279  * gsi_isr() - Top level GSI interrupt service routine
1280  * @irq:	Interrupt number (ignored)
1281  * @dev_id:	GSI pointer supplied to request_irq()
1282  *
1283  * This is the main handler function registered for the GSI IRQ. Each type
1284  * of interrupt has a separate handler function that is called from here.
1285  */
1286 static irqreturn_t gsi_isr(int irq, void *dev_id)
1287 {
1288 	struct gsi *gsi = dev_id;
1289 	u32 intr_mask;
1290 	u32 cnt = 0;
1291 
1292 	/* enum gsi_irq_type_id defines GSI interrupt types */
1293 	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
1294 		/* intr_mask contains bitmask of pending GSI interrupts */
1295 		do {
1296 			u32 gsi_intr = BIT(__ffs(intr_mask));
1297 
1298 			intr_mask ^= gsi_intr;
1299 
1300 			switch (gsi_intr) {
1301 			case BIT(GSI_CH_CTRL):
1302 				gsi_isr_chan_ctrl(gsi);
1303 				break;
1304 			case BIT(GSI_EV_CTRL):
1305 				gsi_isr_evt_ctrl(gsi);
1306 				break;
1307 			case BIT(GSI_GLOB_EE):
1308 				gsi_isr_glob_ee(gsi);
1309 				break;
1310 			case BIT(GSI_IEOB):
1311 				gsi_isr_ieob(gsi);
1312 				break;
1313 			case BIT(GSI_GENERAL):
1314 				gsi_isr_general(gsi);
1315 				break;
1316 			default:
1317 				dev_err(gsi->dev,
1318 					"unrecognized interrupt type 0x%08x\n",
1319 					gsi_intr);
1320 				break;
1321 			}
1322 		} while (intr_mask);
1323 
1324 		if (++cnt > GSI_ISR_MAX_ITER) {
1325 			dev_err(gsi->dev, "interrupt flood\n");
1326 			break;
1327 		}
1328 	}
1329 
1330 	return IRQ_HANDLED;
1331 }
1332 
1333 static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
1334 {
1335 	struct device *dev = &pdev->dev;
1336 	unsigned int irq;
1337 	int ret;
1338 
1339 	ret = platform_get_irq_byname(pdev, "gsi");
1340 	if (ret <= 0)
1341 		return ret ? : -EINVAL;
1342 
1343 	irq = ret;
1344 
1345 	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1346 	if (ret) {
1347 		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
1348 		return ret;
1349 	}
1350 	gsi->irq = irq;
1351 
1352 	return 0;
1353 }
1354 
1355 static void gsi_irq_exit(struct gsi *gsi)
1356 {
1357 	free_irq(gsi->irq, gsi);
1358 }
1359 
1360 /* Return the transaction associated with a transfer completion event */
1361 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
1362 					 struct gsi_event *event)
1363 {
1364 	u32 tre_offset;
1365 	u32 tre_index;
1366 
1367 	/* Event xfer_ptr records the TRE it's associated with */
1368 	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
1369 	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
1370 
1371 	return gsi_channel_trans_mapped(channel, tre_index);
1372 }
1373 
1374 /**
1375  * gsi_evt_ring_rx_update() - Record lengths of received data
1376  * @evt_ring:	Event ring associated with channel that received packets
1377  * @index:	Event index in ring reported by hardware
1378  *
1379  * Events for RX channels contain the actual number of bytes received into
1380  * the buffer.  Every event has a transaction associated with it, and here
1381  * we update transactions to record their actual received lengths.
1382  *
1383  * This function is called whenever we learn that the GSI hardware has filled
1384  * new events since the last time we checked.  The ring's index field tells
1385  * the first entry in need of processing.  The index provided is the
1386  * first *unfilled* event in the ring (following the last filled one).
1387  *
1388  * Events are sequential within the event ring, and transactions are
1389  * sequential within the transaction pool.
1390  *
1391  * Note that @index always refers to an element *within* the event ring.
1392  */
1393 static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
1394 {
1395 	struct gsi_channel *channel = evt_ring->channel;
1396 	struct gsi_ring *ring = &evt_ring->ring;
1397 	struct gsi_trans_info *trans_info;
1398 	struct gsi_event *event_done;
1399 	struct gsi_event *event;
1400 	struct gsi_trans *trans;
1401 	u32 byte_count = 0;
1402 	u32 old_index;
1403 	u32 event_avail;
1404 
1405 	trans_info = &channel->trans_info;
1406 
1407 	/* We'll start with the oldest un-processed event.  RX channels
1408 	 * replenish receive buffers in single-TRE transactions, so we
1409 	 * can just map that event to its transaction.  Transactions
1410 	 * associated with completion events are consecutive.
1411 	 */
1412 	old_index = ring->index;
1413 	event = gsi_ring_virt(ring, old_index);
1414 	trans = gsi_event_trans(channel, event);
1415 
1416 	/* Compute the number of events to process before we wrap,
1417 	 * and determine when we'll be done processing events.
1418 	 */
1419 	event_avail = ring->count - old_index % ring->count;
1420 	event_done = gsi_ring_virt(ring, index);
1421 	do {
1422 		trans->len = __le16_to_cpu(event->len);
1423 		byte_count += trans->len;
1424 
1425 		/* Move on to the next event and transaction */
1426 		if (--event_avail)
1427 			event++;
1428 		else
1429 			event = gsi_ring_virt(ring, 0);
1430 		trans = gsi_trans_pool_next(&trans_info->pool, trans);
1431 	} while (event != event_done);
1432 
1433 	/* We record RX bytes when they are received */
1434 	channel->byte_count += byte_count;
1435 	channel->trans_count++;
1436 }
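
/* An example of the wrap handling above, using hypothetical values:  on
 * a 16-element event ring with old_index 14 and index 2, event_avail is
 * 16 - 14 = 2, so events 14 and 15 are processed in place, then the scan
 * wraps to element 0 and continues through event 1 (four events total).
 */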
1437 
1438 /* Initialize a ring, including allocating DMA memory for its entries */
1439 static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1440 {
1441 	size_t size = count * GSI_RING_ELEMENT_SIZE;
1442 	struct device *dev = gsi->dev;
1443 	dma_addr_t addr;
1444 
1445 	/* Hardware requires a 2^n ring size, with alignment equal to size */
1446 	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1447 	if (ring->virt && addr % size) {
1448 		dma_free_coherent(dev, size, ring->virt, addr);
1449 		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
1450 			size);
1451 		return -EINVAL;	/* Not a good error value, but distinct */
1452 	} else if (!ring->virt) {
1453 		return -ENOMEM;
1454 	}
1455 	ring->addr = addr;
1456 	ring->count = count;
1457 
1458 	return 0;
1459 }
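
/* For example, a 128-element ring of 16-byte elements occupies 2048 bytes
 * and so must be 2048-byte aligned.  dma_alloc_coherent() buffers are at
 * least page-aligned, which satisfies the check above for any ring that
 * fits within a single page.
 */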
1460 
1461 /* Free a previously-allocated ring */
1462 static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1463 {
1464 	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
1465 
1466 	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1467 }
1468 
1469 /* Allocate an available event ring id */
1470 static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1471 {
1472 	u32 evt_ring_id;
1473 
1474 	if (gsi->event_bitmap == ~0U) {
1475 		dev_err(gsi->dev, "event rings exhausted\n");
1476 		return -ENOSPC;
1477 	}
1478 
1479 	evt_ring_id = ffz(gsi->event_bitmap);
1480 	gsi->event_bitmap |= BIT(evt_ring_id);
1481 
1482 	return (int)evt_ring_id;
1483 }
1484 
1485 /* Free a previously-allocated event ring id */
1486 static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1487 {
1488 	gsi->event_bitmap &= ~BIT(evt_ring_id);
1489 }
1490 
1491 /* Ring a channel doorbell, reporting the first un-filled entry */
1492 void gsi_channel_doorbell(struct gsi_channel *channel)
1493 {
1494 	struct gsi_ring *tre_ring = &channel->tre_ring;
1495 	u32 channel_id = gsi_channel_id(channel);
1496 	struct gsi *gsi = channel->gsi;
1497 	u32 val;
1498 
1499 	/* Note: index *must* be used modulo the ring count here */
1500 	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1501 	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1502 }
1503 
1504 /* Consult hardware, move any newly completed transactions to completed list */
1505 static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
1506 {
1507 	u32 evt_ring_id = channel->evt_ring_id;
1508 	struct gsi *gsi = channel->gsi;
1509 	struct gsi_evt_ring *evt_ring;
1510 	struct gsi_trans *trans;
1511 	struct gsi_ring *ring;
1512 	u32 offset;
1513 	u32 index;
1514 
1515 	evt_ring = &gsi->evt_ring[evt_ring_id];
1516 	ring = &evt_ring->ring;
1517 
1518 	/* See if there's anything new to process; if not, we're done.  Note
1519 	 * that index always refers to an entry *within* the event ring.
1520 	 */
1521 	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1522 	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1523 	if (index == ring->index % ring->count)
1524 		return NULL;
1525 
1526 	/* Get the transaction for the latest completed event.  Take a
1527 	 * reference to keep it from completing before we give the events
1528 	 * for this and previous transactions back to the hardware.
1529 	 */
1530 	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
1531 	refcount_inc(&trans->refcount);
1532 
1533 	/* For RX channels, update each completed transaction with the number
1534 	 * of bytes that were actually received.  For TX channels, report
1535 	 * the number of transactions and bytes this completion represents
1536 	 * up the network stack.
1537 	 */
1538 	if (channel->toward_ipa)
1539 		gsi_channel_tx_update(channel, trans);
1540 	else
1541 		gsi_evt_ring_rx_update(evt_ring, index);
1542 
1543 	gsi_trans_move_complete(trans);
1544 
1545 	/* Tell the hardware we've handled these events */
1546 	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
1547 
1548 	gsi_trans_free(trans);
1549 
1550 	return gsi_channel_trans_complete(channel);
1551 }
1552 
1553 /**
1554  * gsi_channel_poll_one() - Return a single completed transaction on a channel
1555  * @channel:	Channel to be polled
1556  *
1557  * Return:	Transaction pointer, or null if none are available
1558  *
1559  * This function returns the first entry on a channel's completed transaction
1560  * list.  If that list is empty, the hardware is consulted to determine
1561  * whether any new transactions have completed.  If so, they're moved to the
1562  * completed list and the new first entry is returned.  If there are no more
1563  * completed transactions, a null pointer is returned.
1564  */
1565 static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1566 {
1567 	struct gsi_trans *trans;
1568 
1569 	/* Get the first transaction from the completed list */
1570 	trans = gsi_channel_trans_complete(channel);
1571 	if (!trans)	/* List is empty; see if there's more to do */
1572 		trans = gsi_channel_update(channel);
1573 
1574 	if (trans)
1575 		gsi_trans_move_polled(trans);
1576 
1577 	return trans;
1578 }
1579 
1580 /**
1581  * gsi_channel_poll() - NAPI poll function for a channel
1582  * @napi:	NAPI structure for the channel
1583  * @budget:	Budget supplied by NAPI core
1584  *
1585  * Return:	Number of items polled (<= budget)
1586  *
1587  * Single transactions completed by hardware are polled until either
1588  * the budget is exhausted, or there are no more.  Each transaction
1589  * polled is passed to gsi_trans_complete(), to perform remaining
1590  * completion processing and retire/free the transaction.
1591  */
1592 static int gsi_channel_poll(struct napi_struct *napi, int budget)
1593 {
1594 	struct gsi_channel *channel;
1595 	int count;
1596 
1597 	channel = container_of(napi, struct gsi_channel, napi);
1598 	for (count = 0; count < budget; count++) {
1599 		struct gsi_trans *trans;
1600 
1601 		trans = gsi_channel_poll_one(channel);
1602 		if (!trans)
1603 			break;
1604 		gsi_trans_complete(trans);
1605 	}
1606 
1607 	if (count < budget && napi_complete(napi))
1608 		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
1609 
1610 	return count;
1611 }
1612 
1613 /* The event bitmap represents which event ids are available for allocation.
1614  * Set bits are not available, clear bits can be used.  This function
1615  * initializes the map so all events supported by the hardware are available,
1616  * then precludes any reserved events from being allocated.
1617  */
1618 static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1619 {
1620 	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1621 
1622 	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1623 
1624 	return event_bitmap;
1625 }
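
/* For example, if evt_ring_max were 20, the returned 32-bit map would
 * have bits 20-31 set (event ids the hardware doesn't support) plus bits
 * 10-16 set (the MHI-reserved ids), leaving ids 0-9 and 17-19 available
 * to gsi_evt_ring_id_alloc().
 */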
1626 
1627 /* Setup function for event rings */
1628 static void gsi_evt_ring_setup(struct gsi *gsi)
1629 {
1630 	/* Nothing to do */
1631 }
1632 
1633 /* Inverse of gsi_evt_ring_setup() */
1634 static void gsi_evt_ring_teardown(struct gsi *gsi)
1635 {
1636 	/* Nothing to do */
1637 }
1638 
1639 /* Setup function for a single channel */
1640 static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1641 {
1642 	struct gsi_channel *channel = &gsi->channel[channel_id];
1643 	u32 evt_ring_id = channel->evt_ring_id;
1644 	int ret;
1645 
1646 	if (!gsi_channel_initialized(channel))
1647 		return 0;
1648 
1649 	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1650 	if (ret)
1651 		return ret;
1652 
1653 	gsi_evt_ring_program(gsi, evt_ring_id);
1654 
1655 	ret = gsi_channel_alloc_command(gsi, channel_id);
1656 	if (ret)
1657 		goto err_evt_ring_de_alloc;
1658 
1659 	gsi_channel_program(channel, true);
1660 
1661 	if (channel->toward_ipa)
1662 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1663 				  gsi_channel_poll, NAPI_POLL_WEIGHT);
1664 	else
1665 		netif_napi_add(&gsi->dummy_dev, &channel->napi,
1666 			       gsi_channel_poll, NAPI_POLL_WEIGHT);
1667 
1668 	return 0;
1669 
1670 err_evt_ring_de_alloc:
1671 	/* We've done nothing with the event ring yet, so don't reset it */
1672 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1673 
1674 	return ret;
1675 }
1676 
1677 /* Inverse of gsi_channel_setup_one() */
1678 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1679 {
1680 	struct gsi_channel *channel = &gsi->channel[channel_id];
1681 	u32 evt_ring_id = channel->evt_ring_id;
1682 
1683 	if (!gsi_channel_initialized(channel))
1684 		return;
1685 
1686 	netif_napi_del(&channel->napi);
1687 
1688 	gsi_channel_deprogram(channel);
1689 	gsi_channel_de_alloc_command(gsi, channel_id);
1690 	gsi_evt_ring_reset_command(gsi, evt_ring_id);
1691 	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1692 }
1693 
1694 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1695 			       enum gsi_generic_cmd_opcode opcode)
1696 {
1697 	struct completion *completion = &gsi->completion;
1698 	bool timeout;
1699 	u32 val;
1700 
1701 	/* The error global interrupt type is always enabled (until we
1702 	 * tear down), so we won't change that.  A generic EE command
1703 	 * completes with a GSI global interrupt of type GP_INT1.  We
1704 	 * only perform one generic command at a time (to allocate or
1705 	 * halt a modem channel) and only from this function.  So we
1706 	 * enable the GP_INT1 IRQ type here while we're expecting it.
1707 	 */
1708 	val = BIT(ERROR_INT) | BIT(GP_INT1);
1709 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1710 
1711 	/* First zero the result code field */
1712 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1713 	val &= ~GENERIC_EE_RESULT_FMASK;
1714 	iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1715 
1716 	/* Now issue the command */
1717 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1718 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1719 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1720 
1721 	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
1722 
1723 	/* Disable the GP_INT1 IRQ type again */
1724 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1725 
1726 	if (!timeout)
1727 		return gsi->result;
1728 
1729 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1730 		opcode, channel_id);
1731 
1732 	return -ETIMEDOUT;
1733 }
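
/* The completion side of the GP_INT1 handshake above is handled in the
 * global interrupt path earlier in this file.  A rough sketch, assuming
 * the GENERIC_EE_* result codes defined for the scratch register:
 *
 *	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
 *	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
 *	gsi->result = result == GENERIC_EE_SUCCESS ? 0 :
 *		      result == GENERIC_EE_RETRY ? -EAGAIN : -EIO;
 *	complete(&gsi->completion);
 *
 * The -EAGAIN case is what drives the retry loop in
 * gsi_modem_channel_halt() below.
 */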
1734 
1735 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1736 {
1737 	return gsi_generic_command(gsi, channel_id,
1738 				   GSI_GENERIC_ALLOCATE_CHANNEL);
1739 }
1740 
1741 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1742 {
1743 	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1744 	int ret;
1745 
1746 	do
1747 		ret = gsi_generic_command(gsi, channel_id,
1748 					  GSI_GENERIC_HALT_CHANNEL);
1749 	while (ret == -EAGAIN && retries--);
1750 
1751 	if (ret)
1752 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
1753 			ret, channel_id);
1754 }
1755 
1756 /* Setup function for channels */
1757 static int gsi_channel_setup(struct gsi *gsi)
1758 {
1759 	u32 channel_id = 0;
1760 	u32 mask;
1761 	int ret;
1762 
1763 	gsi_evt_ring_setup(gsi);
1764 	gsi_irq_enable(gsi);
1765 
1766 	mutex_lock(&gsi->mutex);
1767 
1768 	do {
1769 		ret = gsi_channel_setup_one(gsi, channel_id);
1770 		if (ret)
1771 			goto err_unwind;
1772 	} while (++channel_id < gsi->channel_count);
1773 
1774 	/* Make sure no channels were defined that the hardware does not support */
1775 	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1776 		struct gsi_channel *channel = &gsi->channel[channel_id++];
1777 
1778 		if (!gsi_channel_initialized(channel))
1779 			continue;
1780 
1781 		ret = -EINVAL;
1782 		dev_err(gsi->dev, "channel %u not supported by hardware\n",
1783 			channel_id - 1);
1784 		channel_id = gsi->channel_count;
1785 		goto err_unwind;
1786 	}
1787 
1788 	/* Allocate modem channels if necessary */
1789 	mask = gsi->modem_channel_bitmap;
1790 	while (mask) {
1791 		u32 modem_channel_id = __ffs(mask);
1792 
1793 		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1794 		if (ret)
1795 			goto err_unwind_modem;
1796 
1797 		/* Clear bit from mask only after success (for unwind) */
1798 		mask ^= BIT(modem_channel_id);
1799 	}
1800 
1801 	mutex_unlock(&gsi->mutex);
1802 
1803 	return 0;
1804 
1805 err_unwind_modem:
1806 	/* Compute which modem channels need to be deallocated: mask now
	 * holds only the channels not yet allocated, so XOR with the full
	 * bitmap yields exactly those that were allocated and must be halted.
	 */
1807 	mask ^= gsi->modem_channel_bitmap;
1808 	while (mask) {
1809 		channel_id = __fls(mask);
1810 
1811 		mask ^= BIT(channel_id);
1812 
1813 		gsi_modem_channel_halt(gsi, channel_id);
1814 	}
1815 
1816 err_unwind:
1817 	while (channel_id--)
1818 		gsi_channel_teardown_one(gsi, channel_id);
1819 
1820 	mutex_unlock(&gsi->mutex);
1821 
1822 	gsi_irq_disable(gsi);
1823 	gsi_evt_ring_teardown(gsi);
1824 
1825 	return ret;
1826 }
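
/* Worked example of the modem unwind above (illustrative): suppose
 * modem_channel_bitmap is 0x6 (channels 1 and 2).  If channel 1
 * allocates successfully but channel 2 fails, mask is 0x4 at
 * err_unwind_modem; mask ^= 0x6 leaves 0x2, so exactly channel 1,
 * the only one actually allocated, gets halted.
 */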
1827 
1828 /* Inverse of gsi_channel_setup() */
1829 static void gsi_channel_teardown(struct gsi *gsi)
1830 {
1831 	u32 mask = gsi->modem_channel_bitmap;
1832 	u32 channel_id;
1833 
1834 	mutex_lock(&gsi->mutex);
1835 
1836 	while (mask) {
1837 		channel_id = __fls(mask);
1838 
1839 		mask ^= BIT(channel_id);
1840 
1841 		gsi_modem_channel_halt(gsi, channel_id);
1842 	}
1843 
1844 	channel_id = gsi->channel_count - 1;
1845 	do
1846 		gsi_channel_teardown_one(gsi, channel_id);
1847 	while (channel_id--);
1848 
1849 	mutex_unlock(&gsi->mutex);
1850 
1851 	gsi_irq_disable(gsi);
1852 	gsi_evt_ring_teardown(gsi);
1853 }
1854 
1855 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
1856 int gsi_setup(struct gsi *gsi)
1857 {
1858 	struct device *dev = gsi->dev;
1859 	u32 val;
1860 	int ret;
1861 
1862 	/* Here is where we first touch the GSI hardware */
1863 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1864 	if (!(val & ENABLED_FMASK)) {
1865 		dev_err(dev, "GSI has not been enabled\n");
1866 		return -EIO;
1867 	}
1868 
1869 	gsi_irq_setup(gsi);
1870 
1871 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1872 
1873 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1874 	if (!gsi->channel_count) {
1875 		dev_err(dev, "GSI reports zero channels supported\n");
1876 		return -EINVAL;
1877 	}
1878 	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1879 		dev_warn(dev,
1880 			 "limiting to %u channels; hardware supports %u\n",
1881 			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1882 		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1883 	}
1884 
1885 	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1886 	if (!gsi->evt_ring_count) {
1887 		dev_err(dev, "GSI reports zero event rings supported\n");
1888 		return -EINVAL;
1889 	}
1890 	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1891 		dev_warn(dev,
1892 			 "limiting to %u event rings; hardware supports %u\n",
1893 			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1894 		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1895 	}
1896 
1897 	/* Initialize the error log */
1898 	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1899 
1900 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1901 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1902 
1903 	ret = gsi_channel_setup(gsi);
1904 	if (ret)
1905 		gsi_irq_teardown(gsi);
1906 
1907 	return ret;
1908 }
1909 
1910 /* Inverse of gsi_setup() */
1911 void gsi_teardown(struct gsi *gsi)
1912 {
1913 	gsi_channel_teardown(gsi);
1914 	gsi_irq_teardown(gsi);
1915 }
1916 
1917 /* Initialize a channel's event ring */
1918 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1919 {
1920 	struct gsi *gsi = channel->gsi;
1921 	struct gsi_evt_ring *evt_ring;
1922 	int ret;
1923 
1924 	ret = gsi_evt_ring_id_alloc(gsi);
1925 	if (ret < 0)
1926 		return ret;
1927 	channel->evt_ring_id = ret;
1928 
1929 	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1930 	evt_ring->channel = channel;
1931 
1932 	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1933 	if (!ret)
1934 		return 0;	/* Success! */
1935 
1936 	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1937 		ret, gsi_channel_id(channel));
1938 
1939 	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1940 
1941 	return ret;
1942 }
1943 
1944 /* Inverse of gsi_channel_evt_ring_init() */
1945 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1946 {
1947 	u32 evt_ring_id = channel->evt_ring_id;
1948 	struct gsi *gsi = channel->gsi;
1949 	struct gsi_evt_ring *evt_ring;
1950 
1951 	evt_ring = &gsi->evt_ring[evt_ring_id];
1952 	gsi_ring_free(gsi, &evt_ring->ring);
1953 	gsi_evt_ring_id_free(gsi, evt_ring_id);
1954 }
1955 
1956 /* Init function for event rings */
1957 static void gsi_evt_ring_init(struct gsi *gsi)
1958 {
1959 	u32 evt_ring_id = 0;
1960 
1961 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1962 	gsi->ieob_enabled_bitmap = 0;
1963 	do
1964 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
1965 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1966 }
1967 
1968 /* Inverse of gsi_evt_ring_init() */
1969 static void gsi_evt_ring_exit(struct gsi *gsi)
1970 {
1971 	/* Nothing to do */
1972 }
1973 
1974 static bool gsi_channel_data_valid(struct gsi *gsi,
1975 				   const struct ipa_gsi_endpoint_data *data)
1976 {
1977 #ifdef IPA_VALIDATION
1978 	u32 channel_id = data->channel_id;
1979 	struct device *dev = gsi->dev;
1980 
1981 	/* Make sure channel ids are in the range driver supports */
1982 	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1983 		dev_err(dev, "bad channel id %u; must be less than %u\n",
1984 			channel_id, GSI_CHANNEL_COUNT_MAX);
1985 		return false;
1986 	}
1987 
1988 	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1989 		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
1990 		return false;
1991 	}
1992 
1993 	if (!data->channel.tlv_count ||
1994 	    data->channel.tlv_count > GSI_TLV_MAX) {
1995 		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
1996 			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1997 		return false;
1998 	}
1999 
2000 	/* We have to allow at least one maximally-sized transaction to
2001 	 * be outstanding (which would use tlv_count TREs).  Given how
2002 	 * gsi_channel_tre_max() is computed, tre_count has to be almost
2003 	 * twice the TLV FIFO size to satisfy this requirement.
2004 	 */
2005 	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
2006 		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
2007 			channel_id, data->channel.tlv_count,
2008 			data->channel.tre_count);
2009 		return false;
2010 	}
2011 
2012 	if (!is_power_of_2(data->channel.tre_count)) {
2013 		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
2014 			channel_id, data->channel.tre_count);
2015 		return false;
2016 	}
2017 
2018 	if (!is_power_of_2(data->channel.event_count)) {
2019 		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
2020 			channel_id, data->channel.event_count);
2021 		return false;
2022 	}
2023 #endif /* IPA_VALIDATION */
2024 
2025 	return true;
2026 }
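
/* Worked example (illustrative values): with tlv_count = 16, the
 * check above requires tre_count >= 2 * 16 - 1 = 31.  Since tre_count
 * must also be a power of 2, the smallest ring passing both checks
 * has 32 TREs, which is just enough for one maximally-sized (16-TRE)
 * transaction to always be outstanding.
 */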
2027 
2028 /* Init function for a single channel */
2029 static int gsi_channel_init_one(struct gsi *gsi,
2030 				const struct ipa_gsi_endpoint_data *data,
2031 				bool command)
2032 {
2033 	struct gsi_channel *channel;
2034 	u32 tre_count;
2035 	int ret;
2036 
2037 	if (!gsi_channel_data_valid(gsi, data))
2038 		return -EINVAL;
2039 
2040 	/* Worst case we need an event for every outstanding TRE */
2041 	if (data->channel.tre_count > data->channel.event_count) {
2042 		tre_count = data->channel.event_count;
2043 		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2044 			 data->channel_id, tre_count);
2045 	} else {
2046 		tre_count = data->channel.tre_count;
2047 	}
2048 
2049 	channel = &gsi->channel[data->channel_id];
2050 	memset(channel, 0, sizeof(*channel));
2051 
2052 	channel->gsi = gsi;
2053 	channel->toward_ipa = data->toward_ipa;
2054 	channel->command = command;
2055 	channel->tlv_count = data->channel.tlv_count;
2056 	channel->tre_count = tre_count;
2057 	channel->event_count = data->channel.event_count;
2058 	init_completion(&channel->completion);
2059 
2060 	ret = gsi_channel_evt_ring_init(channel);
2061 	if (ret)
2062 		goto err_clear_gsi;
2063 
2064 	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2065 	if (ret) {
2066 		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2067 			ret, data->channel_id);
2068 		goto err_channel_evt_ring_exit;
2069 	}
2070 
2071 	ret = gsi_channel_trans_init(gsi, data->channel_id);
2072 	if (ret)
2073 		goto err_ring_free;
2074 
2075 	if (command) {
2076 		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2077 
2078 		ret = ipa_cmd_pool_init(channel, tre_max);
2079 	}
2080 	if (!ret)
2081 		return 0;	/* Success! */
2082 
2083 	gsi_channel_trans_exit(channel);
2084 err_ring_free:
2085 	gsi_ring_free(gsi, &channel->tre_ring);
2086 err_channel_evt_ring_exit:
2087 	gsi_channel_evt_ring_exit(channel);
2088 err_clear_gsi:
2089 	channel->gsi = NULL;	/* Mark it not (fully) initialized */
2090 
2091 	return ret;
2092 }
2093 
2094 /* Inverse of gsi_channel_init_one() */
2095 static void gsi_channel_exit_one(struct gsi_channel *channel)
2096 {
2097 	if (!gsi_channel_initialized(channel))
2098 		return;
2099 
2100 	if (channel->command)
2101 		ipa_cmd_pool_exit(channel);
2102 	gsi_channel_trans_exit(channel);
2103 	gsi_ring_free(channel->gsi, &channel->tre_ring);
2104 	gsi_channel_evt_ring_exit(channel);
2105 }
2106 
2107 /* Init function for channels */
2108 static int gsi_channel_init(struct gsi *gsi, u32 count,
2109 			    const struct ipa_gsi_endpoint_data *data)
2110 {
2111 	bool modem_alloc;
2112 	int ret = 0;
2113 	u32 i;
2114 
2115 	/* IPA v4.2 requires the AP to allocate channels for the modem */
2116 	modem_alloc = gsi->version == IPA_VERSION_4_2;
2117 
2118 	gsi_evt_ring_init(gsi);
2119 
2120 	/* The endpoint data array is indexed by endpoint name */
2121 	for (i = 0; i < count; i++) {
2122 		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2123 
2124 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2125 			continue;	/* Skip over empty slots */
2126 
2127 		/* Mark modem channels to be allocated (hardware workaround) */
2128 		if (data[i].ee_id == GSI_EE_MODEM) {
2129 			if (modem_alloc)
2130 				gsi->modem_channel_bitmap |=
2131 						BIT(data[i].channel_id);
2132 			continue;
2133 		}
2134 
2135 		ret = gsi_channel_init_one(gsi, &data[i], command);
2136 		if (ret)
2137 			goto err_unwind;
2138 	}
2139 
2140 	return ret;
2141 
2142 err_unwind:
2143 	while (i--) {
2144 		if (ipa_gsi_endpoint_data_empty(&data[i]))
2145 			continue;
2146 		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2147 			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2148 			continue;
2149 		}
2150 		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2151 	}
2152 	gsi_evt_ring_exit(gsi);
2153 
2154 	return ret;
2155 }
2156 
2157 /* Inverse of gsi_channel_init() */
2158 static void gsi_channel_exit(struct gsi *gsi)
2159 {
2160 	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2161 
2162 	do
2163 		gsi_channel_exit_one(&gsi->channel[channel_id]);
2164 	while (channel_id--);
2165 	gsi->modem_channel_bitmap = 0;
2166 
2167 	gsi_evt_ring_exit(gsi);
2168 }
2169 
2170 /* Init function for GSI.  GSI hardware does not need to be "ready" */
2171 int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2172 	     enum ipa_version version, u32 count,
2173 	     const struct ipa_gsi_endpoint_data *data)
2174 {
2175 	struct device *dev = &pdev->dev;
2176 	struct resource *res;
2177 	resource_size_t size;
2178 	u32 adjust;
2179 	int ret;
2180 
2181 	gsi_validate_build();
2182 
2183 	gsi->dev = dev;
2184 	gsi->version = version;
2185 
2186 	/* GSI uses NAPI on all channels.  Create a dummy network device
2187 	 * for the channel NAPI contexts to be associated with.
2188 	 */
2189 	init_dummy_netdev(&gsi->dummy_dev);
2190 
2191 	/* Get GSI memory range and map it */
2192 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2193 	if (!res) {
2194 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
2195 		return -ENODEV;
2196 	}
2197 
2198 	size = resource_size(res);
2199 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
2200 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2201 		return -EINVAL;
2202 	}
2203 
2204 	/* Make sure we can make our pointer adjustment if necessary */
2205 	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2206 	if (res->start < adjust) {
2207 		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2208 			adjust);
2209 		return -EINVAL;
2210 	}
2211 
2212 	gsi->virt_raw = ioremap(res->start, size);
2213 	if (!gsi->virt_raw) {
2214 		dev_err(dev, "unable to remap \"gsi\" memory\n");
2215 		return -ENOMEM;
2216 	}
2217 	/* Most registers are accessed using an adjusted register range */
2218 	gsi->virt = gsi->virt_raw - adjust;
2219 
2220 	init_completion(&gsi->completion);
2221 
2222 	ret = gsi_irq_init(gsi, pdev);
2223 	if (ret)
2224 		goto err_iounmap;
2225 
2226 	ret = gsi_channel_init(gsi, count, data);
2227 	if (ret)
2228 		goto err_irq_exit;
2229 
2230 	mutex_init(&gsi->mutex);
2231 
2232 	return 0;
2233 
2234 err_irq_exit:
2235 	gsi_irq_exit(gsi);
2236 err_iounmap:
2237 	iounmap(gsi->virt_raw);
2238 
2239 	return ret;
2240 }
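
/* A note on the pointer adjustment above: for IPA v4.5+ the register
 * offsets used throughout this file already include GSI_EE_REG_ADJUST,
 * so most accesses go through gsi->virt, which points "adjust" bytes
 * below the start of the mapping.  The arithmetic works out as:
 *
 *	gsi->virt + offset == gsi->virt_raw + (offset - adjust)
 *
 * which lands back inside the region mapped by ioremap() for any
 * offset of at least "adjust".
 */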
2241 
2242 /* Inverse of gsi_init() */
2243 void gsi_exit(struct gsi *gsi)
2244 {
2245 	mutex_destroy(&gsi->mutex);
2246 	gsi_channel_exit(gsi);
2247 	gsi_irq_exit(gsi);
2248 	iounmap(gsi->virt_raw);
2249 }
2250 
2251 /* The maximum number of outstanding TREs on a channel.  This limits
2252  * a channel's maximum number of transactions outstanding (worst case
2253  * is one TRE per transaction).
2254  *
2255  * The absolute limit is the number of TREs in the channel's TRE ring,
2256  * and in theory we should be able to use all of them.  But in practice,
2257  * doing that led to the hardware reporting exhaustion of event ring
2258  * slots for writing completion information.  So the hardware limit
2259  * would be (tre_count - 1).
2260  *
2261  * We reduce it a bit further though.  Transaction resource pools are
2262  * sized to be a little larger than this maximum, to allow resource
2263  * allocations to always be contiguous.  The number of entries in a
2264  * TRE ring buffer is a power of 2, and the extra resources in a pool
2265  * tend to nearly double the memory allocated for it.  Reducing the
2266  * maximum number of outstanding TREs allows the number of entries in
2267  * a pool to avoid crossing that power-of-2 boundary, and this can
2268  * substantially reduce pool memory requirements.  The number we
2269  * reduce it by matches the number added in gsi_trans_pool_init().
2270  */
2271 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2272 {
2273 	struct gsi_channel *channel = &gsi->channel[channel_id];
2274 
2275 	/* Hardware limit is channel->tre_count - 1 */
2276 	return channel->tre_count - (channel->tlv_count - 1);
2277 }
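
/* Worked example (illustrative values): with tre_count = 256 and
 * tlv_count = 16, the hardware limit would be 255 outstanding TREs,
 * and this function returns 256 - 15 = 241.  The transaction pool
 * then adds back tlv_count - 1 = 15 entries to keep allocations
 * contiguous, for a total of 256, which stays within the power-of-2
 * boundary rather than spilling toward a 512-entry allocation.
 */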
2278 
2279 /* Returns the maximum number of TREs in a single transaction for a channel */
2280 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2281 {
2282 	struct gsi_channel *channel = &gsi->channel[channel_id];
2283 
2284 	return channel->tlv_count;
2285 }
2286