xref: /linux/drivers/soc/fsl/qe/qmc.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * QMC driver
4  *
5  * Copyright 2022 CS GROUP France
6  *
7  * Author: Herve Codina <herve.codina@bootlin.com>
8  */
9 
10 #include <soc/fsl/qe/qmc.h>
11 #include <linux/bitfield.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/firmware.h>
14 #include <linux/hdlc.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 #include <soc/fsl/cpm.h>
23 #include <soc/fsl/qe/ucc_slow.h>
24 #include <soc/fsl/qe/qe.h>
25 #include <sysdev/fsl_soc.h>
26 #include "tsa.h"
27 
28 /* SCC general mode register low (32 bits) (GUMR_L in QE) */
29 #define SCC_GSMRL	0x00
30 #define SCC_GSMRL_ENR		BIT(5)
31 #define SCC_GSMRL_ENT		BIT(4)
32 #define SCC_GSMRL_MODE_MASK	GENMASK(3, 0)
33 #define SCC_CPM1_GSMRL_MODE_QMC	FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
34 #define SCC_QE_GSMRL_MODE_QMC	FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)
35 
36 /* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
37 #define SCC_GSMRH	0x04
38 #define   SCC_GSMRH_CTSS	BIT(7)
39 #define   SCC_GSMRH_CDS		BIT(8)
40 #define   SCC_GSMRH_CTSP	BIT(9)
41 #define   SCC_GSMRH_CDP		BIT(10)
42 #define   SCC_GSMRH_TTX		BIT(11)
43 #define   SCC_GSMRH_TRX		BIT(12)
44 
45 /* SCC event register (16 bits) (identical to UCCE in QE) */
46 #define SCC_SCCE	0x10
47 #define   SCC_SCCE_IQOV		BIT(3)
48 #define   SCC_SCCE_GINT		BIT(2)
49 #define   SCC_SCCE_GUN		BIT(1)
50 #define   SCC_SCCE_GOV		BIT(0)
51 
52 /* SCC mask register (16 bits) */
53 #define SCC_SCCM	0x14
54 
55 /* UCC Extended Mode Register (8 bits, QE only) */
56 #define SCC_QE_UCC_GUEMR	0x90
57 
58 /* Multichannel base pointer (32 bits) */
59 #define QMC_GBL_MCBASE		0x00
60 /* Multichannel controller state (16 bits) */
61 #define QMC_GBL_QMCSTATE	0x04
62 /* Maximum receive buffer length (16 bits) */
63 #define QMC_GBL_MRBLR		0x06
64 /* Tx time-slot assignment table pointer (16 bits) */
65 #define QMC_GBL_TX_S_PTR	0x08
66 /* Rx pointer (16 bits) */
67 #define QMC_GBL_RXPTR		0x0A
68 /* Global receive frame threshold (16 bits) */
69 #define QMC_GBL_GRFTHR		0x0C
70 /* Global receive frame count (16 bits) */
71 #define QMC_GBL_GRFCNT		0x0E
72 /* Multichannel interrupt base address (32 bits) */
73 #define QMC_GBL_INTBASE		0x10
74 /* Multichannel interrupt pointer (32 bits) */
75 #define QMC_GBL_INTPTR		0x14
76 /* Rx time-slot assignment table pointer (16 bits) */
77 #define QMC_GBL_RX_S_PTR	0x18
78 /* Tx pointer (16 bits) */
79 #define QMC_GBL_TXPTR		0x1A
80 /* CRC constant (32 bits) */
81 #define QMC_GBL_C_MASK32	0x1C
82 /* Time slot assignment table Rx (32 x 16 bits) */
83 #define QMC_GBL_TSATRX		0x20
84 /* Time slot assignment table Tx (32 x 16 bits) */
85 #define QMC_GBL_TSATTX		0x60
86 /* CRC constant (16 bits) */
87 #define QMC_GBL_C_MASK16	0xA0
88 /* Rx framer base pointer (16 bits, QE only) */
89 #define QMC_QE_GBL_RX_FRM_BASE	0xAC
90 /* Tx framer base pointer (16 bits, QE only) */
91 #define QMC_QE_GBL_TX_FRM_BASE	0xAE
92 /* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
93 #define QMC_QE_GBL_RSV_B0_START	0xB0
94 #define QMC_QE_GBL_RSV_B0_SIZE	0x14
95 /* QMC Global Channel specific base (32 bits, QE only) */
96 #define QMC_QE_GBL_GCSBASE	0xC4
97 
98 /* TSA entry (16bit entry in TSATRX and TSATTX) */
99 #define QMC_TSA_VALID		BIT(15)
100 #define QMC_TSA_WRAP		BIT(14)
101 #define QMC_TSA_MASK_MASKH	GENMASK(13, 12)
102 #define QMC_TSA_MASK_MASKL	GENMASK(5, 0)
103 #define QMC_TSA_MASK_8BIT	(FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
104 				 FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
105 #define QMC_TSA_CHANNEL_MASK	GENMASK(11, 6)
106 #define QMC_TSA_CHANNEL(x)	FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)
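/*
 * Illustrative example: a valid entry routing a time slot to channel 5 with
 * all 8 bits of the slot enabled is
 *   QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(5)
 * i.e. the value composed in qmc_chan_setup_tsa_*() below.
 */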
107 
108 /* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
109 #define QMC_SPE_TBASE	0x00
110 
111 /* Channel mode register (16 bits) */
112 #define QMC_SPE_CHAMR	0x02
113 #define   QMC_SPE_CHAMR_MODE_MASK	GENMASK(15, 15)
114 #define   QMC_SPE_CHAMR_MODE_HDLC	FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
115 #define   QMC_SPE_CHAMR_MODE_TRANSP	(FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
116 #define   QMC_SPE_CHAMR_ENT		BIT(12)
117 #define   QMC_SPE_CHAMR_POL		BIT(8)
118 #define   QMC_SPE_CHAMR_HDLC_IDLM	BIT(13)
119 #define   QMC_SPE_CHAMR_HDLC_CRC	BIT(7)
120 #define   QMC_SPE_CHAMR_HDLC_NOF_MASK	GENMASK(3, 0)
121 #define   QMC_SPE_CHAMR_HDLC_NOF(x)	FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
122 #define   QMC_SPE_CHAMR_TRANSP_RD	BIT(14)
123 #define   QMC_SPE_CHAMR_TRANSP_SYNC	BIT(10)
124 
125 /* Tx internal state (32 bits) */
126 #define QMC_SPE_TSTATE	0x04
127 /* Tx buffer descriptor pointer (16 bits) */
128 #define QMC_SPE_TBPTR	0x0C
129 /* Zero-insertion state (32 bits) */
130 #define QMC_SPE_ZISTATE	0x14
131 /* Channel's interrupt mask flags (16 bits) */
132 #define QMC_SPE_INTMSK	0x1C
133 /* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
134 #define QMC_SPE_RBASE	0x20
135 /* HDLC: Maximum frame length register (16 bits) */
136 #define QMC_SPE_MFLR	0x22
137 /* TRANSPARENT: Transparent maximum receive length (16 bits) */
138 #define QMC_SPE_TMRBLR	0x22
139 /* Rx internal state (32 bits) */
140 #define QMC_SPE_RSTATE	0x24
141 /* Rx buffer descriptor pointer (16 bits) */
142 #define QMC_SPE_RBPTR	0x2C
143 /* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
144 #define QMC_SPE_RPACK	0x30
145 /* Zero deletion state (32 bits) */
146 #define QMC_SPE_ZDSTATE	0x34
147 
148 /* Transparent synchronization (16 bits) */
149 #define QMC_SPE_TRNSYNC 0x3C
150 #define   QMC_SPE_TRNSYNC_RX_MASK	GENMASK(15, 8)
151 #define   QMC_SPE_TRNSYNC_RX(x)		FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
152 #define   QMC_SPE_TRNSYNC_TX_MASK	GENMASK(7, 0)
153 #define   QMC_SPE_TRNSYNC_TX(x)		FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)
154 
155 /* Interrupt related registers bits */
156 #define QMC_INT_V		BIT(15)
157 #define QMC_INT_W		BIT(14)
158 #define QMC_INT_NID		BIT(13)
159 #define QMC_INT_IDL		BIT(12)
160 #define QMC_INT_CHANNEL_MASK	GENMASK(11, 6)
161 #define QMC_INT_GET_CHANNEL(x)	FIELD_GET(QMC_INT_CHANNEL_MASK, x)
162 #define QMC_INT_MRF		BIT(5)
163 #define QMC_INT_UN		BIT(4)
164 #define QMC_INT_RXF		BIT(3)
165 #define QMC_INT_BSY		BIT(2)
166 #define QMC_INT_TXB		BIT(1)
167 #define QMC_INT_RXB		BIT(0)
168 
169 /* BD related registers bits */
170 #define QMC_BD_RX_E	BIT(15)
171 #define QMC_BD_RX_W	BIT(13)
172 #define QMC_BD_RX_I	BIT(12)
173 #define QMC_BD_RX_L	BIT(11)
174 #define QMC_BD_RX_F	BIT(10)
175 #define QMC_BD_RX_CM	BIT(9)
176 #define QMC_BD_RX_UB	BIT(7)
177 #define QMC_BD_RX_LG	BIT(5)
178 #define QMC_BD_RX_NO	BIT(4)
179 #define QMC_BD_RX_AB	BIT(3)
180 #define QMC_BD_RX_CR	BIT(2)
181 
182 #define QMC_BD_TX_R		BIT(15)
183 #define QMC_BD_TX_W		BIT(13)
184 #define QMC_BD_TX_I		BIT(12)
185 #define QMC_BD_TX_L		BIT(11)
186 #define QMC_BD_TX_TC		BIT(10)
187 #define QMC_BD_TX_CM		BIT(9)
188 #define QMC_BD_TX_UB		BIT(7)
189 #define QMC_BD_TX_PAD_MASK	GENMASK(3, 0)
190 #define QMC_BD_TX_PAD(x)	FIELD_PREP(QMC_BD_TX_PAD_MASK, x)
191 
192 /* Numbers of BDs and interrupt items */
193 #define QMC_NB_TXBDS	8
194 #define QMC_NB_RXBDS	8
195 #define QMC_NB_INTS	128
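/*
 * Sizing example (assuming the 8-byte cbd_t layout): a system using all 64
 * channels needs 64 * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t) = 8 KiB
 * for the BD table and QMC_NB_INTS * sizeof(u16) = 256 bytes for the
 * interrupt table, matching the allocations done in qmc_probe().
 */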
196 
197 struct qmc_xfer_desc {
198 	union {
199 		void (*tx_complete)(void *context);
200 		void (*rx_complete)(void *context, size_t length, unsigned int flags);
201 	};
202 	void *context;
203 };
204 
205 struct qmc_chan {
206 	struct list_head list;
207 	unsigned int id;
208 	struct qmc *qmc;
209 	void __iomem *s_param;
210 	enum qmc_mode mode;
211 	spinlock_t	ts_lock; /* Protect timeslots */
212 	u64	tx_ts_mask_avail;
213 	u64	tx_ts_mask;
214 	u64	rx_ts_mask_avail;
215 	u64	rx_ts_mask;
216 	bool is_reverse_data;
217 
218 	spinlock_t	tx_lock; /* Protect Tx related data */
219 	cbd_t __iomem *txbds;
220 	cbd_t __iomem *txbd_free;
221 	cbd_t __iomem *txbd_done;
222 	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
223 	u64	nb_tx_underrun;
224 	bool	is_tx_stopped;
225 
226 	spinlock_t	rx_lock; /* Protect Rx related data */
227 	cbd_t __iomem *rxbds;
228 	cbd_t __iomem *rxbd_free;
229 	cbd_t __iomem *rxbd_done;
230 	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
231 	u64	nb_rx_busy;
232 	int	rx_pending;
233 	bool	is_rx_halted;
234 	bool	is_rx_stopped;
235 };
236 
237 enum qmc_version {
238 	QMC_CPM1,
239 	QMC_QE,
240 };
241 
242 struct qmc_data {
243 	enum qmc_version version;
244 	u32 tstate; /* Initial TSTATE value */
245 	u32 rstate; /* Initial RSTATE value */
246 	u32 zistate; /* Initial ZISTATE value */
247 	u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */
248 	u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */
249 	u32 rpack; /* Initial RPACK value */
250 };
251 
252 struct qmc {
253 	struct device *dev;
254 	const struct qmc_data *data;
255 	struct tsa_serial *tsa_serial;
256 	void __iomem *scc_regs;
257 	void __iomem *scc_pram;
258 	void __iomem *dpram;
259 	u16 scc_pram_offset;
260 	u32 dpram_offset;
261 	u32 qe_subblock;
262 	cbd_t __iomem *bd_table;
263 	dma_addr_t bd_dma_addr;
264 	size_t bd_size;
265 	u16 __iomem *int_table;
266 	u16 __iomem *int_curr;
267 	dma_addr_t int_dma_addr;
268 	size_t int_size;
269 	bool is_tsa_64rxtx;
270 	struct list_head chan_head;
271 	struct qmc_chan *chans[64];
272 };
273 
274 static void qmc_write8(void __iomem *addr, u8 val)
275 {
276 	iowrite8(val, addr);
277 }
278 
279 static void qmc_write16(void __iomem *addr, u16 val)
280 {
281 	iowrite16be(val, addr);
282 }
283 
284 static u16 qmc_read16(void __iomem *addr)
285 {
286 	return ioread16be(addr);
287 }
288 
289 static void qmc_setbits16(void __iomem *addr, u16 set)
290 {
291 	qmc_write16(addr, qmc_read16(addr) | set);
292 }
293 
294 static void qmc_clrbits16(void __iomem *addr, u16 clr)
295 {
296 	qmc_write16(addr, qmc_read16(addr) & ~clr);
297 }
298 
299 static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
300 {
301 	qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
302 }
303 
304 static void qmc_write32(void __iomem *addr, u32 val)
305 {
306 	iowrite32be(val, addr);
307 }
308 
309 static u32 qmc_read32(void __iomem *addr)
310 {
311 	return ioread32be(addr);
312 }
313 
314 static void qmc_setbits32(void __iomem *addr, u32 set)
315 {
316 	qmc_write32(addr, qmc_read32(addr) | set);
317 }
318 
319 static bool qmc_is_qe(const struct qmc *qmc)
320 {
321 	if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
322 		return qmc->data->version == QMC_QE;
323 
324 	return IS_ENABLED(CONFIG_QUICC_ENGINE);
325 }
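/*
 * Note: when only one of CONFIG_CPM or CONFIG_QUICC_ENGINE is enabled,
 * qmc_is_qe() folds to a compile-time constant thanks to IS_ENABLED(), so
 * the compiler can drop the unused CPM1 or QE code paths.
 */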
326 
327 int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
328 {
329 	struct tsa_serial_info tsa_info;
330 	unsigned long flags;
331 	int ret;
332 
333 	/* Retrieve info from the TSA related serial */
334 	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
335 	if (ret)
336 		return ret;
337 
338 	spin_lock_irqsave(&chan->ts_lock, flags);
339 
340 	info->mode = chan->mode;
341 	info->rx_fs_rate = tsa_info.rx_fs_rate;
342 	info->rx_bit_rate = tsa_info.rx_bit_rate;
343 	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
344 	info->tx_fs_rate = tsa_info.tx_fs_rate;
345 	info->tx_bit_rate = tsa_info.tx_bit_rate;
346 	info->nb_rx_ts = hweight64(chan->rx_ts_mask);
347 
348 	spin_unlock_irqrestore(&chan->ts_lock, flags);
349 
350 	return 0;
351 }
352 EXPORT_SYMBOL(qmc_chan_get_info);
353 
354 int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
355 {
356 	unsigned long flags;
357 
358 	spin_lock_irqsave(&chan->ts_lock, flags);
359 
360 	ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
361 	ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
362 	ts_info->rx_ts_mask = chan->rx_ts_mask;
363 	ts_info->tx_ts_mask = chan->tx_ts_mask;
364 
365 	spin_unlock_irqrestore(&chan->ts_lock, flags);
366 
367 	return 0;
368 }
369 EXPORT_SYMBOL(qmc_chan_get_ts_info);
370 
371 int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
372 {
373 	unsigned long flags;
374 	int ret;
375 
376 	/* Only a subset of available timeslots is allowed */
377 	if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
378 		return -EINVAL;
379 	if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
380 		return -EINVAL;
381 
382 	/* In case of common rx/tx table, rx/tx masks must be identical */
383 	if (chan->qmc->is_tsa_64rxtx) {
384 		if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
385 			return -EINVAL;
386 	}
387 
388 	spin_lock_irqsave(&chan->ts_lock, flags);
389 
390 	if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
391 	    (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
392 		dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
393 		ret = -EBUSY;
394 	} else {
395 		chan->tx_ts_mask = ts_info->tx_ts_mask;
396 		chan->rx_ts_mask = ts_info->rx_ts_mask;
397 		ret = 0;
398 	}
399 	spin_unlock_irqrestore(&chan->ts_lock, flags);
400 
401 	return ret;
402 }
403 EXPORT_SYMBOL(qmc_chan_set_ts_info);
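/*
 * Illustrative consumer-side sequence (hypothetical variable names): the
 * time slots of a channel can only be changed while the related directions
 * are stopped, as enforced by the -EBUSY check above:
 *
 *   struct qmc_chan_ts_info ts_info;
 *
 *   qmc_chan_stop(chan, QMC_CHAN_ALL);
 *   qmc_chan_get_ts_info(chan, &ts_info);
 *   ts_info.rx_ts_mask = new_rx_mask;  // subset of ts_info.rx_ts_mask_avail
 *   ts_info.tx_ts_mask = new_tx_mask;  // subset of ts_info.tx_ts_mask_avail
 *   qmc_chan_set_ts_info(chan, &ts_info);
 *   qmc_chan_start(chan, QMC_CHAN_ALL);
 */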
404 
405 int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
406 {
407 	if (param->mode != chan->mode)
408 		return -EINVAL;
409 
410 	switch (param->mode) {
411 	case QMC_HDLC:
412 		if (param->hdlc.max_rx_buf_size % 4 ||
413 		    param->hdlc.max_rx_buf_size < 8)
414 			return -EINVAL;
415 
416 		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
417 			    param->hdlc.max_rx_buf_size - 8);
418 		qmc_write16(chan->s_param + QMC_SPE_MFLR,
419 			    param->hdlc.max_rx_frame_size);
420 		if (param->hdlc.is_crc32) {
421 			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
422 				      QMC_SPE_CHAMR_HDLC_CRC);
423 		} else {
424 			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
425 				      QMC_SPE_CHAMR_HDLC_CRC);
426 		}
427 		break;
428 
429 	case QMC_TRANSPARENT:
430 		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
431 			    param->transp.max_rx_buf_size);
432 		break;
433 
434 	default:
435 		return -EINVAL;
436 	}
437 
438 	return 0;
439 }
440 EXPORT_SYMBOL(qmc_chan_set_param);
441 
442 int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
443 			  void (*complete)(void *context), void *context)
444 {
445 	struct qmc_xfer_desc *xfer_desc;
446 	unsigned long flags;
447 	cbd_t __iomem *bd;
448 	u16 ctrl;
449 	int ret;
450 
451 	/*
452 	 * R bit  UB bit
453 	 *   0       0  : The BD is free
454 	 *   1       1  : The BD is in use, waiting for transfer
455 	 *   0       1  : The BD is in use, waiting for completion
456 	 *   1       0  : Should not happen
457 	 */
458 
459 	spin_lock_irqsave(&chan->tx_lock, flags);
460 	bd = chan->txbd_free;
461 
462 	ctrl = qmc_read16(&bd->cbd_sc);
463 	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
464 		if (!(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == chan->txbd_done) {
465 			if (ctrl & QMC_BD_TX_W)
466 				chan->txbd_done = chan->txbds;
467 			else
468 				chan->txbd_done++;
469 		} else {
470 			/* We are full ... */
471 			ret = -EBUSY;
472 			goto end;
473 		}
474 	}
475 
476 	qmc_write16(&bd->cbd_datlen, length);
477 	qmc_write32(&bd->cbd_bufaddr, addr);
478 
479 	xfer_desc = &chan->tx_desc[bd - chan->txbds];
480 	xfer_desc->tx_complete = complete;
481 	xfer_desc->context = context;
482 
483 	/* Activate the descriptor */
484 	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
485 	if (complete)
486 		ctrl |= QMC_BD_TX_I;
487 	else
488 		ctrl &= ~QMC_BD_TX_I;
489 	wmb(); /* Be sure to flush the descriptor before control update */
490 	qmc_write16(&bd->cbd_sc, ctrl);
491 
492 	if (!chan->is_tx_stopped)
493 		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
494 
495 	if (ctrl & QMC_BD_TX_W)
496 		chan->txbd_free = chan->txbds;
497 	else
498 		chan->txbd_free++;
499 
500 	ret = 0;
501 
502 end:
503 	spin_unlock_irqrestore(&chan->tx_lock, flags);
504 	return ret;
505 }
506 EXPORT_SYMBOL(qmc_chan_write_submit);
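/*
 * Illustrative usage (hypothetical names, DMA mapping owned by the caller):
 *
 *   dma_addr_t addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *
 *   ret = qmc_chan_write_submit(chan, addr, len, my_tx_complete, my_context);
 *
 * my_tx_complete() is invoked from qmc_chan_write_done() below (interrupt
 * context) once the buffer descriptor has been transmitted, and is the place
 * to unmap and release the buffer.
 */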
507 
508 static void qmc_chan_write_done(struct qmc_chan *chan)
509 {
510 	struct qmc_xfer_desc *xfer_desc;
511 	void (*complete)(void *context);
512 	unsigned long flags;
513 	void *context;
514 	cbd_t __iomem *bd;
515 	u16 ctrl;
516 
517 	/*
518 	 * R bit  UB bit
519 	 *   0       0  : The BD is free
520 	 *   1       1  : The BD is in use, waiting for transfer
521 	 *   0       1  : The BD is in use, waiting for completion
522 	 *   1       0  : Should not happen
523 	 */
524 
525 	spin_lock_irqsave(&chan->tx_lock, flags);
526 	bd = chan->txbd_done;
527 
528 	ctrl = qmc_read16(&bd->cbd_sc);
529 	while (!(ctrl & QMC_BD_TX_R)) {
530 		if (!(ctrl & QMC_BD_TX_UB))
531 			goto end;
532 
533 		xfer_desc = &chan->tx_desc[bd - chan->txbds];
534 		complete = xfer_desc->tx_complete;
535 		context = xfer_desc->context;
536 		xfer_desc->tx_complete = NULL;
537 		xfer_desc->context = NULL;
538 
539 		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);
540 
541 		if (ctrl & QMC_BD_TX_W)
542 			chan->txbd_done = chan->txbds;
543 		else
544 			chan->txbd_done++;
545 
546 		if (complete) {
547 			spin_unlock_irqrestore(&chan->tx_lock, flags);
548 			complete(context);
549 			spin_lock_irqsave(&chan->tx_lock, flags);
550 		}
551 
552 		bd = chan->txbd_done;
553 		ctrl = qmc_read16(&bd->cbd_sc);
554 	}
555 
556 end:
557 	spin_unlock_irqrestore(&chan->tx_lock, flags);
558 }
559 
560 int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
561 			 void (*complete)(void *context, size_t length, unsigned int flags),
562 			 void *context)
563 {
564 	struct qmc_xfer_desc *xfer_desc;
565 	unsigned long flags;
566 	cbd_t __iomem *bd;
567 	u16 ctrl;
568 	int ret;
569 
570 	/*
571 	 * E bit  UB bit
572 	 *   0       0  : The BD is free
573 	 *   1       1  : The BD is in use, waiting for transfer
574 	 *   0       1  : The BD is in use, waiting for completion
575 	 *   1       0  : Should not happen
576 	 */
577 
578 	spin_lock_irqsave(&chan->rx_lock, flags);
579 	bd = chan->rxbd_free;
580 
581 	ctrl = qmc_read16(&bd->cbd_sc);
582 	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
583 		if (!(ctrl & (QMC_BD_RX_E | QMC_BD_RX_I)) && bd == chan->rxbd_done) {
584 			if (ctrl & QMC_BD_RX_W)
585 				chan->rxbd_done = chan->rxbds;
586 			else
587 				chan->rxbd_done++;
588 		} else {
589 			/* We are full ... */
590 			ret = -EBUSY;
591 			goto end;
592 		}
593 	}
594 
595 	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
596 	qmc_write32(&bd->cbd_bufaddr, addr);
597 
598 	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
599 	xfer_desc->rx_complete = complete;
600 	xfer_desc->context = context;
601 
602 	/* Clear previous status flags */
603 	ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
604 		  QMC_BD_RX_AB | QMC_BD_RX_CR);
605 
606 	/* Activate the descriptor */
607 	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
608 	if (complete)
609 		ctrl |= QMC_BD_RX_I;
610 	else
611 		ctrl &= ~QMC_BD_RX_I;
612 	wmb(); /* Be sure to flush data before descriptor activation */
613 	qmc_write16(&bd->cbd_sc, ctrl);
614 
615 	/* Restart receiver if needed */
616 	if (chan->is_rx_halted && !chan->is_rx_stopped) {
617 		/* Restart receiver */
618 		qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
619 		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
620 			    chan->mode == QMC_TRANSPARENT ?
621 				chan->qmc->data->zdstate_transp :
622 				chan->qmc->data->zdstate_hdlc);
623 		qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
624 		chan->is_rx_halted = false;
625 	}
626 	chan->rx_pending++;
627 
628 	if (ctrl & QMC_BD_RX_W)
629 		chan->rxbd_free = chan->rxbds;
630 	else
631 		chan->rxbd_free++;
632 
633 	ret = 0;
634 end:
635 	spin_unlock_irqrestore(&chan->rx_lock, flags);
636 	return ret;
637 }
638 EXPORT_SYMBOL(qmc_chan_read_submit);
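/*
 * Illustrative usage (hypothetical names): the consumer pre-queues empty
 * buffers and gets them back through the completion callback:
 *
 *   dma_addr_t addr = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
 *
 *   ret = qmc_chan_read_submit(chan, addr, size, my_rx_complete, buf);
 *
 * my_rx_complete() receives the real data length and the QMC_RX_FLAG_HDLC_*
 * flags, which map one-to-one to the QMC_BD_RX_* status bits (see the
 * BUILD_BUG_ON() checks in qmc_chan_read_done() below).
 */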
639 
640 static void qmc_chan_read_done(struct qmc_chan *chan)
641 {
642 	void (*complete)(void *context, size_t size, unsigned int flags);
643 	struct qmc_xfer_desc *xfer_desc;
644 	unsigned long flags;
645 	cbd_t __iomem *bd;
646 	void *context;
647 	u16 datalen;
648 	u16 ctrl;
649 
650 	/*
651 	 * E bit  UB bit
652 	 *   0       0  : The BD is free
653 	 *   1       1  : The BD is in use, waiting for transfer
654 	 *   0       1  : The BD is in use, waiting for completion
655 	 *   1       0  : Should not happen
656 	 */
657 
658 	spin_lock_irqsave(&chan->rx_lock, flags);
659 	bd = chan->rxbd_done;
660 
661 	ctrl = qmc_read16(&bd->cbd_sc);
662 	while (!(ctrl & QMC_BD_RX_E)) {
663 		if (!(ctrl & QMC_BD_RX_UB))
664 			goto end;
665 
666 		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
667 		complete = xfer_desc->rx_complete;
668 		context = xfer_desc->context;
669 		xfer_desc->rx_complete = NULL;
670 		xfer_desc->context = NULL;
671 
672 		datalen = qmc_read16(&bd->cbd_datlen);
673 		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);
674 
675 		if (ctrl & QMC_BD_RX_W)
676 			chan->rxbd_done = chan->rxbds;
677 		else
678 			chan->rxbd_done++;
679 
680 		chan->rx_pending--;
681 
682 		if (complete) {
683 			spin_unlock_irqrestore(&chan->rx_lock, flags);
684 
685 			/*
686 			 * Avoid conversion between internal hardware flags and
687 			 * the software API flags.
688 			 * -> Be sure that the software API flags are consistent
689 			 *    with the hardware flags
690 			 */
691 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST  != QMC_BD_RX_L);
692 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
693 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF   != QMC_BD_RX_LG);
694 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA   != QMC_BD_RX_NO);
695 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
696 			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC   != QMC_BD_RX_CR);
697 
698 			complete(context, datalen,
699 				 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
700 					 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
701 			spin_lock_irqsave(&chan->rx_lock, flags);
702 		}
703 
704 		bd = chan->rxbd_done;
705 		ctrl = qmc_read16(&bd->cbd_sc);
706 	}
707 
708 end:
709 	spin_unlock_irqrestore(&chan->rx_lock, flags);
710 }
711 
712 static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
713 				     bool enable)
714 {
715 	unsigned int i;
716 	u16 curr;
717 	u16 val;
718 
719 	/*
720 	 * Use a common 64-entry Tx/Rx table.
721 	 * The Tx and Rx parameters must be identical.
722 	 */
723 	if (chan->tx_ts_mask != chan->rx_ts_mask) {
724 		dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
725 		return -EINVAL;
726 	}
727 
728 	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
729 
730 	/* Check entries based on Rx stuff */
731 	for (i = 0; i < info->nb_rx_ts; i++) {
732 		if (!(chan->rx_ts_mask & (((u64)1) << i)))
733 			continue;
734 
735 		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
736 		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
737 			dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
738 				chan->id, i);
739 			return -EBUSY;
740 		}
741 	}
742 
743 	/* Set entries based on Rx stuff */
744 	for (i = 0; i < info->nb_rx_ts; i++) {
745 		if (!(chan->rx_ts_mask & (((u64)1) << i)))
746 			continue;
747 
748 		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
749 				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
750 	}
751 
752 	return 0;
753 }
754 
755 static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
756 				   bool enable)
757 {
758 	unsigned int i;
759 	u16 curr;
760 	u16 val;
761 
762 	/* Use a Rx 32 entries table */
763 
764 	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
765 
766 	/* Check entries based on Rx stuff */
767 	for (i = 0; i < info->nb_rx_ts; i++) {
768 		if (!(chan->rx_ts_mask & (((u64)1) << i)))
769 			continue;
770 
771 		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
772 		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
773 			dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
774 				chan->id, i);
775 			return -EBUSY;
776 		}
777 	}
778 
779 	/* Set entries based on Rx stuff */
780 	for (i = 0; i < info->nb_rx_ts; i++) {
781 		if (!(chan->rx_ts_mask & (((u64)1) << i)))
782 			continue;
783 
784 		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
785 				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
786 	}
787 
788 	return 0;
789 }
790 
791 static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
792 				   bool enable)
793 {
794 	unsigned int i;
795 	u16 curr;
796 	u16 val;
797 
798 	/* Use a Tx 32 entries table */
799 
800 	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
801 
802 	/* Check entries based on Tx stuff */
803 	for (i = 0; i < info->nb_tx_ts; i++) {
804 		if (!(chan->tx_ts_mask & (((u64)1) << i)))
805 			continue;
806 
807 		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
808 		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
809 			dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
810 				chan->id, i);
811 			return -EBUSY;
812 		}
813 	}
814 
815 	/* Set entries based on Tx stuff */
816 	for (i = 0; i < info->nb_tx_ts; i++) {
817 		if (!(chan->tx_ts_mask & (((u64)1) << i)))
818 			continue;
819 
820 		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
821 				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
822 	}
823 
824 	return 0;
825 }
826 
827 static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
828 {
829 	struct tsa_serial_info info;
830 	int ret;
831 
832 	/* Retrieve info from the TSA related serial */
833 	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
834 	if (ret)
835 		return ret;
836 
837 	/* Setup entries */
838 	if (chan->qmc->is_tsa_64rxtx)
839 		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
840 
841 	return qmc_chan_setup_tsa_32tx(chan, &info, enable);
842 }
843 
844 static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
845 {
846 	struct tsa_serial_info info;
847 	int ret;
848 
849 	/* Retrieve info from the TSA related serial */
850 	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
851 	if (ret)
852 		return ret;
853 
854 	/* Setup entries */
855 	if (chan->qmc->is_tsa_64rxtx)
856 		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
857 
858 	return qmc_chan_setup_tsa_32rx(chan, &info, enable);
859 }
860 
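/*
 * Issue a CPM1 command targeting a QMC channel: the channel number and the
 * QMC opcode are packed into the two cpm_command() arguments as expected by
 * the CPM1 command register (refer to the MPC860 reference manual for the
 * exact CPCR encoding).
 */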
861 static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
862 {
863 	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
864 }
865 
866 static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
867 {
868 	if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0))
869 		return -EIO;
870 	return 0;
871 }
872 
873 static int qmc_chan_stop_rx(struct qmc_chan *chan)
874 {
875 	unsigned long flags;
876 	int ret;
877 
878 	spin_lock_irqsave(&chan->rx_lock, flags);
879 
880 	if (chan->is_rx_stopped) {
881 		/* The channel is already stopped -> simply return ok */
882 		ret = 0;
883 		goto end;
884 	}
885 
886 	/* Send STOP RECEIVE command */
887 	ret = qmc_is_qe(chan->qmc) ?
888 		qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
889 		qmc_chan_cpm1_command(chan, 0x0);
890 	if (ret) {
891 		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
892 			chan->id, ret);
893 		goto end;
894 	}
895 
896 	chan->is_rx_stopped = true;
897 
898 	if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
899 		ret = qmc_chan_setup_tsa_rx(chan, false);
900 		if (ret) {
901 			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
902 				chan->id, ret);
903 			goto end;
904 		}
905 	}
906 
907 end:
908 	spin_unlock_irqrestore(&chan->rx_lock, flags);
909 	return ret;
910 }
911 
912 static int qmc_chan_stop_tx(struct qmc_chan *chan)
913 {
914 	unsigned long flags;
915 	int ret;
916 
917 	spin_lock_irqsave(&chan->tx_lock, flags);
918 
919 	if (chan->is_tx_stopped) {
920 		/* The channel is already stopped -> simply return ok */
921 		ret = 0;
922 		goto end;
923 	}
924 
925 	/* Send STOP TRANSMIT command */
926 	ret = qmc_is_qe(chan->qmc) ?
927 		qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
928 		qmc_chan_cpm1_command(chan, 0x1);
929 	if (ret) {
930 		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
931 			chan->id, ret);
932 		goto end;
933 	}
934 
935 	chan->is_tx_stopped = true;
936 
937 	if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
938 		ret = qmc_chan_setup_tsa_tx(chan, false);
939 		if (ret) {
940 			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
941 				chan->id, ret);
942 			goto end;
943 		}
944 	}
945 
946 end:
947 	spin_unlock_irqrestore(&chan->tx_lock, flags);
948 	return ret;
949 }
950 
951 static int qmc_chan_start_rx(struct qmc_chan *chan);
952 
953 int qmc_chan_stop(struct qmc_chan *chan, int direction)
954 {
955 	bool is_rx_rollback_needed = false;
956 	unsigned long flags;
957 	int ret = 0;
958 
959 	spin_lock_irqsave(&chan->ts_lock, flags);
960 
961 	if (direction & QMC_CHAN_READ) {
962 		is_rx_rollback_needed = !chan->is_rx_stopped;
963 		ret = qmc_chan_stop_rx(chan);
964 		if (ret)
965 			goto end;
966 	}
967 
968 	if (direction & QMC_CHAN_WRITE) {
969 		ret = qmc_chan_stop_tx(chan);
970 		if (ret) {
971 			/* Restart rx if needed */
972 			if (is_rx_rollback_needed)
973 				qmc_chan_start_rx(chan);
974 			goto end;
975 		}
976 	}
977 
978 end:
979 	spin_unlock_irqrestore(&chan->ts_lock, flags);
980 	return ret;
981 }
982 EXPORT_SYMBOL(qmc_chan_stop);
983 
984 static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
985 {
986 	struct tsa_serial_info info;
987 	unsigned int w_rx, w_tx;
988 	u16 first_rx, last_tx;
989 	u16 trnsync;
990 	int ret;
991 
992 	/* Retrieve info from the TSA related serial */
993 	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
994 	if (ret)
995 		return ret;
996 
997 	w_rx = hweight64(chan->rx_ts_mask);
998 	w_tx = hweight64(chan->tx_ts_mask);
999 	if (w_rx <= 1 && w_tx <= 1) {
1000 		dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
1001 		qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
1002 		return 0;
1003 	}
1004 
1005 	/* Find the first Rx TS allocated to the channel */
1006 	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
1007 
1008 	/* Find the last Tx TS allocated to the channel */
1009 	last_tx = fls64(chan->tx_ts_mask);
1010 
1011 	trnsync = 0;
1012 	if (info.nb_rx_ts)
1013 		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
1014 	if (info.nb_tx_ts)
1015 		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
1016 
1017 	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
1018 	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
1019 
1020 	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
1021 		chan->id, trnsync,
1022 		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
1023 		last_tx, info.nb_tx_ts, chan->tx_ts_mask);
1024 
1025 	return 0;
1026 }
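/*
 * Worked example for qmc_setup_chan_trnsync(): with rx_ts_mask = 0x0C
 * (TS 2 and TS 3) and nb_rx_ts = 4, first_rx = __ffs64(0x0C) + 1 = 3 and the
 * Rx part of TRNSYNC is (3 % 4) * 2 = 6, i.e. presumably an offset in bytes
 * since each time-slot table entry is 2 bytes wide. The Tx part is computed
 * the same way from the last Tx TS.
 */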
1027 
1028 static int qmc_chan_start_rx(struct qmc_chan *chan)
1029 {
1030 	unsigned long flags;
1031 	int ret;
1032 
1033 	spin_lock_irqsave(&chan->rx_lock, flags);
1034 
1035 	if (!chan->is_rx_stopped) {
1036 		/* The channel is already started -> simply return ok */
1037 		ret = 0;
1038 		goto end;
1039 	}
1040 
1041 	ret = qmc_chan_setup_tsa_rx(chan, true);
1042 	if (ret) {
1043 		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
1044 			chan->id, ret);
1045 		goto end;
1046 	}
1047 
1048 	if (chan->mode == QMC_TRANSPARENT) {
1049 		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
1050 		if (ret) {
1051 			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
1052 				chan->id, ret);
1053 			goto end;
1054 		}
1055 	}
1056 
1057 	/* Restart the receiver */
1058 	qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
1059 	qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
1060 		    chan->mode == QMC_TRANSPARENT ?
1061 			chan->qmc->data->zdstate_transp :
1062 			chan->qmc->data->zdstate_hdlc);
1063 	qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
1064 	chan->is_rx_halted = false;
1065 
1066 	chan->is_rx_stopped = false;
1067 
1068 end:
1069 	spin_unlock_irqrestore(&chan->rx_lock, flags);
1070 	return ret;
1071 }
1072 
1073 static int qmc_chan_start_tx(struct qmc_chan *chan)
1074 {
1075 	unsigned long flags;
1076 	int ret;
1077 
1078 	spin_lock_irqsave(&chan->tx_lock, flags);
1079 
1080 	if (!chan->is_tx_stopped) {
1081 		/* The channel is already started -> simply return ok */
1082 		ret = 0;
1083 		goto end;
1084 	}
1085 
1086 	ret = qmc_chan_setup_tsa_tx(chan, true);
1087 	if (ret) {
1088 		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
1089 			chan->id, ret);
1090 		goto end;
1091 	}
1092 
1093 	if (chan->mode == QMC_TRANSPARENT) {
1094 		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
1095 		if (ret) {
1096 			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
1097 				chan->id, ret);
1098 			goto end;
1099 		}
1100 	}
1101 
1102 	/*
1103 	 * Enable channel transmitter as it could be disabled if
1104 	 * qmc_chan_reset() was called.
1105 	 */
1106 	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1107 
1108 	/* Set the POL bit in the channel mode register */
1109 	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
1110 
1111 	chan->is_tx_stopped = false;
1112 
1113 end:
1114 	spin_unlock_irqrestore(&chan->tx_lock, flags);
1115 	return ret;
1116 }
1117 
1118 int qmc_chan_start(struct qmc_chan *chan, int direction)
1119 {
1120 	bool is_rx_rollback_needed = false;
1121 	unsigned long flags;
1122 	int ret = 0;
1123 
1124 	spin_lock_irqsave(&chan->ts_lock, flags);
1125 
1126 	if (direction & QMC_CHAN_READ) {
1127 		is_rx_rollback_needed = chan->is_rx_stopped;
1128 		ret = qmc_chan_start_rx(chan);
1129 		if (ret)
1130 			goto end;
1131 	}
1132 
1133 	if (direction & QMC_CHAN_WRITE) {
1134 		ret = qmc_chan_start_tx(chan);
1135 		if (ret) {
1136 			/* Stop Rx again if needed */
1137 			if (is_rx_rollback_needed)
1138 				qmc_chan_stop_rx(chan);
1139 			goto end;
1140 		}
1141 	}
1142 
1143 end:
1144 	spin_unlock_irqrestore(&chan->ts_lock, flags);
1145 	return ret;
1146 }
1147 EXPORT_SYMBOL(qmc_chan_start);
1148 
1149 static void qmc_chan_reset_rx(struct qmc_chan *chan)
1150 {
1151 	struct qmc_xfer_desc *xfer_desc;
1152 	unsigned long flags;
1153 	cbd_t __iomem *bd;
1154 	u16 ctrl;
1155 
1156 	spin_lock_irqsave(&chan->rx_lock, flags);
1157 	bd = chan->rxbds;
1158 	do {
1159 		ctrl = qmc_read16(&bd->cbd_sc);
1160 		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));
1161 
1162 		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
1163 		xfer_desc->rx_complete = NULL;
1164 		xfer_desc->context = NULL;
1165 
1166 		bd++;
1167 	} while (!(ctrl & QMC_BD_RX_W));
1168 
1169 	chan->rxbd_free = chan->rxbds;
1170 	chan->rxbd_done = chan->rxbds;
1171 	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
1172 		    qmc_read16(chan->s_param + QMC_SPE_RBASE));
1173 
1174 	chan->rx_pending = 0;
1175 
1176 	spin_unlock_irqrestore(&chan->rx_lock, flags);
1177 }
1178 
1179 static void qmc_chan_reset_tx(struct qmc_chan *chan)
1180 {
1181 	struct qmc_xfer_desc *xfer_desc;
1182 	unsigned long flags;
1183 	cbd_t __iomem *bd;
1184 	u16 ctrl;
1185 
1186 	spin_lock_irqsave(&chan->tx_lock, flags);
1187 
1188 	/* Disable the transmitter. It will be re-enabled by qmc_chan_start() */
1189 	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1190 
1191 	bd = chan->txbds;
1192 	do {
1193 		ctrl = qmc_read16(&bd->cbd_sc);
1194 		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));
1195 
1196 		xfer_desc = &chan->tx_desc[bd - chan->txbds];
1197 		xfer_desc->tx_complete = NULL;
1198 		xfer_desc->context = NULL;
1199 
1200 		bd++;
1201 	} while (!(ctrl & QMC_BD_TX_W));
1202 
1203 	chan->txbd_free = chan->txbds;
1204 	chan->txbd_done = chan->txbds;
1205 	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
1206 		    qmc_read16(chan->s_param + QMC_SPE_TBASE));
1207 
1208 	/* Reset TSTATE and ZISTATE to their initial value */
1209 	qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
1210 	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
1211 
1212 	spin_unlock_irqrestore(&chan->tx_lock, flags);
1213 }
1214 
1215 int qmc_chan_reset(struct qmc_chan *chan, int direction)
1216 {
1217 	if (direction & QMC_CHAN_READ)
1218 		qmc_chan_reset_rx(chan);
1219 
1220 	if (direction & QMC_CHAN_WRITE)
1221 		qmc_chan_reset_tx(chan);
1222 
1223 	return 0;
1224 }
1225 EXPORT_SYMBOL(qmc_chan_reset);
1226 
1227 static int qmc_check_chans(struct qmc *qmc)
1228 {
1229 	struct tsa_serial_info info;
1230 	struct qmc_chan *chan;
1231 	u64 tx_ts_assigned_mask;
1232 	u64 rx_ts_assigned_mask;
1233 	int ret;
1234 
1235 	/* Retrieve info from the TSA related serial */
1236 	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
1237 	if (ret)
1238 		return ret;
1239 
1240 	if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
1241 		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
1242 		return -EINVAL;
1243 	}
1244 
1245 	/*
1246 	 * If more than 32 TS are assigned to this serial, one common table is
1247 	 * used for Tx and Rx and so masks must be equal for all channels.
1248 	 */
1249 	if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
1250 		if (info.nb_tx_ts != info.nb_rx_ts) {
1251 			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
1252 			return -EINVAL;
1253 		}
1254 	}
1255 
1256 	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
1257 	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
1258 
1259 	list_for_each_entry(chan, &qmc->chan_head, list) {
1260 		if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
1261 			dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
1262 			return -EINVAL;
1263 		}
1264 
1265 		if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
1266 			dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
1267 			return -EINVAL;
1268 		}
1269 	}
1270 
1271 	return 0;
1272 }
1273 
1274 static unsigned int qmc_nb_chans(struct qmc *qmc)
1275 {
1276 	unsigned int count = 0;
1277 	struct qmc_chan *chan;
1278 
1279 	list_for_each_entry(chan, &qmc->chan_head, list)
1280 		count++;
1281 
1282 	return count;
1283 }
1284 
1285 static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
1286 {
1287 	struct device_node *chan_np;
1288 	struct qmc_chan *chan;
1289 	const char *mode;
1290 	u32 chan_id;
1291 	u64 ts_mask;
1292 	int ret;
1293 
1294 	for_each_available_child_of_node(np, chan_np) {
1295 		ret = of_property_read_u32(chan_np, "reg", &chan_id);
1296 		if (ret) {
1297 			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
1298 			of_node_put(chan_np);
1299 			return ret;
1300 		}
1301 		if (chan_id > 63) {
1302 			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
1303 			of_node_put(chan_np);
1304 			return -EINVAL;
1305 		}
1306 
1307 		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
1308 		if (!chan) {
1309 			of_node_put(chan_np);
1310 			return -ENOMEM;
1311 		}
1312 
1313 		chan->id = chan_id;
1314 		spin_lock_init(&chan->ts_lock);
1315 		spin_lock_init(&chan->rx_lock);
1316 		spin_lock_init(&chan->tx_lock);
1317 
1318 		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
1319 		if (ret) {
1320 			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
1321 				chan_np);
1322 			of_node_put(chan_np);
1323 			return ret;
1324 		}
1325 		chan->tx_ts_mask_avail = ts_mask;
1326 		chan->tx_ts_mask = chan->tx_ts_mask_avail;
1327 
1328 		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
1329 		if (ret) {
1330 			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
1331 				chan_np);
1332 			of_node_put(chan_np);
1333 			return ret;
1334 		}
1335 		chan->rx_ts_mask_avail = ts_mask;
1336 		chan->rx_ts_mask = chan->rx_ts_mask_avail;
1337 
1338 		mode = "transparent";
1339 		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
1340 		if (ret && ret != -EINVAL) {
1341 			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
1342 				chan_np);
1343 			of_node_put(chan_np);
1344 			return ret;
1345 		}
1346 		if (!strcmp(mode, "transparent")) {
1347 			chan->mode = QMC_TRANSPARENT;
1348 		} else if (!strcmp(mode, "hdlc")) {
1349 			chan->mode = QMC_HDLC;
1350 		} else {
1351 			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
1352 				chan_np, mode);
1353 			of_node_put(chan_np);
1354 			return -EINVAL;
1355 		}
1356 
1357 		chan->is_reverse_data = of_property_read_bool(chan_np,
1358 							      "fsl,reverse-data");
1359 
1360 		list_add_tail(&chan->list, &qmc->chan_head);
1361 		qmc->chans[chan->id] = chan;
1362 	}
1363 
1364 	return qmc_check_chans(qmc);
1365 }
1366 
1367 static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
1368 {
1369 	unsigned int i;
1370 	u16 val;
1371 
1372 	/*
1373 	 * Use a common 64-entry Tx/Rx table.
1374 	 * Everything was previously checked and the Tx and Rx parameters are
1375 	 * identical -> use the Rx parameters to build the table.
1376 	 */
1377 	qmc->is_tsa_64rxtx = true;
1378 
1379 	/* Invalidate all entries */
1380 	for (i = 0; i < 64; i++)
1381 		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1382 
1383 	/* Set Wrap bit on last entry */
1384 	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1385 		      QMC_TSA_WRAP);
1386 
1387 	/* Init pointers to the table */
1388 	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1389 	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1390 	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1391 	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1392 	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1393 
1394 	return 0;
1395 }
1396 
1397 static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
1398 {
1399 	unsigned int i;
1400 	u16 val;
1401 
1402 	/*
1403 	 * Use a 32-entry Tx table and a 32-entry Rx table.
1404 	 * Everything was previously checked.
1405 	 */
1406 	qmc->is_tsa_64rxtx = false;
1407 
1408 	/* Invalidate all entries */
1409 	for (i = 0; i < 32; i++) {
1410 		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1411 		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
1412 	}
1413 
1414 	/* Set Wrap bit on last entries */
1415 	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1416 		      QMC_TSA_WRAP);
1417 	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
1418 		      QMC_TSA_WRAP);
1419 
1420 	/* Init Rx pointers ... */
1421 	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1422 	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1423 	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1424 
1425 	/* ... and Tx pointers */
1426 	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
1427 	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1428 	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1429 
1430 	return 0;
1431 }
1432 
1433 static int qmc_init_tsa(struct qmc *qmc)
1434 {
1435 	struct tsa_serial_info info;
1436 	int ret;
1437 
1438 	/* Retrieve info from the TSA related serial */
1439 	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
1440 	if (ret)
1441 		return ret;
1442 
1443 	/*
1444 	 * Initialize one common 64-entry table or two 32-entry tables (one for
1445 	 * Rx and one for Tx) according to the number of assigned TS.
1446 	 */
1447 	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
1448 		qmc_init_tsa_64rxtx(qmc, &info) :
1449 		qmc_init_tsa_32rx_32tx(qmc, &info);
1450 }
1451 
1452 static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
1453 {
1454 	unsigned int i;
1455 	cbd_t __iomem *bd;
1456 	int ret;
1457 	u16 val;
1458 
1459 	chan->qmc = qmc;
1460 
1461 	/* Set channel specific parameter base address */
1462 	chan->s_param = qmc->dpram + (chan->id * 64);
1463 	/* 16 bd per channel (8 rx and 8 tx) */
1464 	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
1465 	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;
1466 
1467 	chan->txbd_free = chan->txbds;
1468 	chan->txbd_done = chan->txbds;
1469 	chan->rxbd_free = chan->rxbds;
1470 	chan->rxbd_done = chan->rxbds;
1471 
1472 	/* TBASE and TBPTR */
1473 	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
1474 	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
1475 	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);
1476 
1477 	/* RBASE and RBPTR */
1478 	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
1479 	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
1480 	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
1481 	qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
1482 	qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
1483 	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
1484 	qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
1485 	if (chan->mode == QMC_TRANSPARENT) {
1486 		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
1487 		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
1488 		val = QMC_SPE_CHAMR_MODE_TRANSP;
1489 		if (chan->is_reverse_data)
1490 			val |= QMC_SPE_CHAMR_TRANSP_RD;
1491 		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
1492 		ret = qmc_setup_chan_trnsync(qmc, chan);
1493 		if (ret)
1494 			return ret;
1495 	} else {
1496 		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
1497 		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
1498 		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
1499 			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
1500 	}
1501 
1502 	/* Do not enable interrupts now. They will be enabled later */
1503 	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);
1504 
1505 	/* Init Rx BDs and set Wrap bit on last descriptor */
1506 	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
1507 	for (i = 0; i < QMC_NB_RXBDS; i++) {
1508 		bd = chan->rxbds + i;
1509 		qmc_write16(&bd->cbd_sc, 0);
1510 	}
1511 	bd = chan->rxbds + QMC_NB_RXBDS - 1;
1512 	qmc_write16(&bd->cbd_sc, QMC_BD_RX_W);
1513 
1514 	/* Init Tx BDs and set Wrap bit on last descriptor */
1515 	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
1516 	if (chan->mode == QMC_HDLC)
1517 		val = QMC_BD_TX_L | QMC_BD_TX_TC;
1518 	else
1519 		val = 0;
1520 	for (i = 0; i < QMC_NB_TXBDS; i++) {
1521 		bd = chan->txbds + i;
1522 		qmc_write16(&bd->cbd_sc, val);
1523 	}
1524 	bd = chan->txbds + QMC_NB_TXBDS - 1;
1525 	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);
1526 
1527 	return 0;
1528 }
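/*
 * Layout example for qmc_setup_chan() (assuming the 8-byte cbd_t layout):
 * channel 3 gets its specific parameters at dpram + 3 * 64 = dpram + 0xC0,
 * its 8 Tx BDs at bd_table + 3 * 16 = bd_table + 48 entries and its 8 Rx BDs
 * right after, so TBASE = 3 * 16 * 8 = 0x180 and RBASE = 0x1C0 relative to
 * MCBASE.
 */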
1529 
1530 static int qmc_setup_chans(struct qmc *qmc)
1531 {
1532 	struct qmc_chan *chan;
1533 	int ret;
1534 
1535 	list_for_each_entry(chan, &qmc->chan_head, list) {
1536 		ret = qmc_setup_chan(qmc, chan);
1537 		if (ret)
1538 			return ret;
1539 	}
1540 
1541 	return 0;
1542 }
1543 
1544 static int qmc_finalize_chans(struct qmc *qmc)
1545 {
1546 	struct qmc_chan *chan;
1547 	int ret;
1548 
1549 	list_for_each_entry(chan, &qmc->chan_head, list) {
1550 		/* Unmask channel interrupts */
1551 		if (chan->mode == QMC_HDLC) {
1552 			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1553 				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
1554 				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
1555 				    QMC_INT_TXB | QMC_INT_RXB);
1556 		} else {
1557 			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1558 				    QMC_INT_UN | QMC_INT_BSY |
1559 				    QMC_INT_TXB | QMC_INT_RXB);
1560 		}
1561 
1562 		/* Force the channel to stop */
1563 		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
1564 		if (ret)
1565 			return ret;
1566 	}
1567 
1568 	return 0;
1569 }
1570 
1571 static int qmc_setup_ints(struct qmc *qmc)
1572 {
1573 	unsigned int i;
1574 	u16 __iomem *last;
1575 
1576 	/* Clear all entries */
1577 	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
1578 		qmc_write16(qmc->int_table + i, 0x0000);
1579 
1580 	/* Set Wrap bit on last entry */
1581 	if (qmc->int_size >= sizeof(u16)) {
1582 		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
1583 		qmc_write16(last, QMC_INT_W);
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 static void qmc_irq_gint(struct qmc *qmc)
1590 {
1591 	struct qmc_chan *chan;
1592 	unsigned int chan_id;
1593 	unsigned long flags;
1594 	u16 int_entry;
1595 
1596 	int_entry = qmc_read16(qmc->int_curr);
1597 	while (int_entry & QMC_INT_V) {
1598 		/* Clear all but the Wrap bit */
1599 		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);
1600 
1601 		chan_id = QMC_INT_GET_CHANNEL(int_entry);
1602 		chan = qmc->chans[chan_id];
1603 		if (!chan) {
1604 			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
1605 			goto int_next;
1606 		}
1607 
1608 		if (int_entry & QMC_INT_TXB)
1609 			qmc_chan_write_done(chan);
1610 
1611 		if (int_entry & QMC_INT_UN) {
1612 			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
1613 				 int_entry);
1614 			chan->nb_tx_underrun++;
1615 		}
1616 
1617 		if (int_entry & QMC_INT_BSY) {
1618 			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
1619 				 int_entry);
1620 			chan->nb_rx_busy++;
1621 			/* Restart the receiver if needed */
1622 			spin_lock_irqsave(&chan->rx_lock, flags);
1623 			if (chan->rx_pending && !chan->is_rx_stopped) {
1624 				qmc_write32(chan->s_param + QMC_SPE_RPACK,
1625 					    chan->qmc->data->rpack);
1626 				qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
1627 					    chan->mode == QMC_TRANSPARENT ?
1628 						chan->qmc->data->zdstate_transp :
1629 						chan->qmc->data->zdstate_hdlc);
1630 				qmc_write32(chan->s_param + QMC_SPE_RSTATE,
1631 					    chan->qmc->data->rstate);
1632 				chan->is_rx_halted = false;
1633 			} else {
1634 				chan->is_rx_halted = true;
1635 			}
1636 			spin_unlock_irqrestore(&chan->rx_lock, flags);
1637 		}
1638 
1639 		if (int_entry & QMC_INT_RXB)
1640 			qmc_chan_read_done(chan);
1641 
1642 int_next:
1643 		if (int_entry & QMC_INT_W)
1644 			qmc->int_curr = qmc->int_table;
1645 		else
1646 			qmc->int_curr++;
1647 		int_entry = qmc_read16(qmc->int_curr);
1648 	}
1649 }
1650 
1651 static irqreturn_t qmc_irq_handler(int irq, void *priv)
1652 {
1653 	struct qmc *qmc = (struct qmc *)priv;
1654 	u16 scce;
1655 
1656 	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
1657 	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);
1658 
1659 	if (unlikely(scce & SCC_SCCE_IQOV))
1660 		dev_info(qmc->dev, "IRQ queue overflow\n");
1661 
1662 	if (unlikely(scce & SCC_SCCE_GUN))
1663 		dev_err(qmc->dev, "Global transmitter underrun\n");
1664 
1665 	if (unlikely(scce & SCC_SCCE_GOV))
1666 		dev_err(qmc->dev, "Global receiver overrun\n");
1667 
1668 	/* normal interrupt */
1669 	if (likely(scce & SCC_SCCE_GINT))
1670 		qmc_irq_gint(qmc);
1671 
1672 	return IRQ_HANDLED;
1673 }
1674 
1675 static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
1676 {
1677 	struct qe_firmware_info *qe_fw_info;
1678 	const struct qe_firmware *qe_fw;
1679 	const struct firmware *fw;
1680 	const char *filename;
1681 	int ret;
1682 
1683 	ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
1684 	switch (ret) {
1685 	case 0:
1686 		break;
1687 	case -EINVAL:
1688 		/* fsl,soft-qmc property not set -> Simply do nothing */
1689 		return 0;
1690 	default:
1691 		dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
1692 			np);
1693 		return ret;
1694 	}
1695 
1696 	qe_fw_info = qe_get_firmware_info();
1697 	if (qe_fw_info) {
1698 		if (!strstr(qe_fw_info->id, "Soft-QMC")) {
1699 			dev_err(qmc->dev, "Another Firmware is already loaded\n");
1700 			return -EALREADY;
1701 		}
1702 		dev_info(qmc->dev, "Firmware already loaded\n");
1703 		return 0;
1704 	}
1705 
1706 	dev_info(qmc->dev, "Using firmware %s\n", filename);
1707 
1708 	ret = request_firmware(&fw, filename, qmc->dev);
1709 	if (ret) {
1710 		dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
1711 		return ret;
1712 	}
1713 
1714 	qe_fw = (const struct qe_firmware *)fw->data;
1715 
1716 	if (fw->size < sizeof(qe_fw->header) ||
1717 	    be32_to_cpu(qe_fw->header.length) != fw->size) {
1718 		dev_err(qmc->dev, "Invalid firmware %s\n", filename);
1719 		ret = -EINVAL;
1720 		goto end;
1721 	}
1722 
1723 	ret = qe_upload_firmware(qe_fw);
1724 	if (ret) {
1725 		dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
1726 		goto end;
1727 	}
1728 
1729 	ret = 0;
1730 end:
1731 	release_firmware(fw);
1732 	return ret;
1733 }
1734 
1735 static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
1736 {
1737 	struct resource *res;
1738 
1739 	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
1740 	if (IS_ERR(qmc->scc_regs))
1741 		return PTR_ERR(qmc->scc_regs);
1742 
1743 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
1744 	if (!res)
1745 		return -EINVAL;
1746 	qmc->scc_pram_offset = res->start - get_immrbase();
1747 	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
1748 	if (IS_ERR(qmc->scc_pram))
1749 		return PTR_ERR(qmc->scc_pram);
1750 
1751 	qmc->dpram  = devm_platform_ioremap_resource_byname(pdev, "dpram");
1752 	if (IS_ERR(qmc->dpram))
1753 		return PTR_ERR(qmc->dpram);
1754 
1755 	return 0;
1756 }
1757 
1758 static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
1759 {
1760 	struct resource *res;
1761 	int ucc_num;
1762 	s32 info;
1763 
1764 	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
1765 	if (IS_ERR(qmc->scc_regs))
1766 		return PTR_ERR(qmc->scc_regs);
1767 
1768 	ucc_num = tsa_serial_get_num(qmc->tsa_serial);
1769 	if (ucc_num < 0)
1770 		return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");
1771 
1772 	qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
1773 	if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
1774 		dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
1775 		return -EINVAL;
1776 	}
1777 	/* Allocate the 'Global Multichannel Parameters' and the
1778 	 * 'Framer parameters' areas. The 'Framer parameters' area
1779 	 * is located right after the 'Global Multichannel Parameters'.
1780 	 * The 'Framer parameters' need 1 byte per receive and transmit
1781 	 * channel. The maximum number of receive or transmit channel
1782 	 * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
1783 	 */
1784 	info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
1785 				   ALIGNMENT_OF_UCC_SLOW_PRAM);
1786 	if (info < 0)
1787 		return info;
1788 
1789 	if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
1790 			  QE_CR_PROTOCOL_UNSPECIFIED, info)) {
1791 		dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed\n");
1792 		return -EIO;
1793 	}
1794 	qmc->scc_pram = qe_muram_addr(info);
1795 	qmc->scc_pram_offset = info;
1796 
1797 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
1798 	if (!res)
1799 		return -EINVAL;
1800 	qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
1801 	qmc->dpram = devm_ioremap_resource(qmc->dev, res);
1802 	if (IS_ERR(qmc->dpram))
1803 		return PTR_ERR(qmc->dpram);
1804 
1805 	return 0;
1806 }
1807 
1808 static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
1809 {
1810 	return qmc_is_qe(qmc) ?
1811 		qmc_qe_init_resources(qmc, pdev) :
1812 		qmc_cpm1_init_resources(qmc, pdev);
1813 }
1814 
1815 static int qmc_cpm1_init_scc(struct qmc *qmc)
1816 {
1817 	u32 val;
1818 	int ret;
1819 
1820 	/* Connect the serial (SCC) to TSA */
1821 	ret = tsa_serial_connect(qmc->tsa_serial);
1822 	if (ret)
1823 		return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
1824 
1825 	/* Init GSMR_H and GSMR_L registers */
1826 	val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
1827 	qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
1828 
1829 	/* enable QMC mode */
1830 	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
1831 
1832 	/* Disable and clear interrupts */
1833 	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
1834 	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
1835 
1836 	return 0;
1837 }
1838 
1839 static int qmc_qe_init_ucc(struct qmc *qmc)
1840 {
1841 	u32 val;
1842 	int ret;
1843 
1844 	/* Set the UCC in slow mode */
1845 	qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
1846 		   UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
1847 
1848 	/* Connect the serial (UCC) to TSA */
1849 	ret = tsa_serial_connect(qmc->tsa_serial);
1850 	if (ret)
1851 		return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
1852 
1853 	/* Initialize the QMC tx startup addresses */
1854 	if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
1855 			  QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
1856 		dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed\n");
1857 		ret = -EIO;
1858 		goto err_tsa_serial_disconnect;
1859 	}
1860 
1861 	/* Initialize the QMC rx startup addresses */
1862 	if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
1863 			  QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
1864 		dev_err(qmc->dev, "QE_PUSHSCHED rx cmd failed\n");
1865 		ret = -EIO;
1866 		goto err_tsa_serial_disconnect;
1867 	}
1868 
1869 	/* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
1870 	 * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during the
1871 	 * qmc_setup_tsa() call)
1872 	 */
1873 	val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
1874 	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1875 	val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
1876 	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1877 
1878 	/* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
1879 	val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
1880 	      SCC_GSMRH_TRX | SCC_GSMRH_TTX;
1881 	qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
1882 
1883 	/* Enable QMC mode */
1884 	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);
1885 
1886 	/* Disable and clear interrupts */
1887 	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
1888 	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
1889 
1890 	return 0;
1891 
1892 err_tsa_serial_disconnect:
1893 	tsa_serial_disconnect(qmc->tsa_serial);
1894 	return ret;
1895 }
1896 
1897 static int qmc_init_xcc(struct qmc *qmc)
1898 {
1899 	return qmc_is_qe(qmc) ?
1900 		qmc_qe_init_ucc(qmc) :
1901 		qmc_cpm1_init_scc(qmc);
1902 }
1903 
1904 static void qmc_exit_xcc(struct qmc *qmc)
1905 {
1906 	/* Disconnect the serial from TSA */
1907 	tsa_serial_disconnect(qmc->tsa_serial);
1908 }
1909 
1910 static int qmc_probe(struct platform_device *pdev)
1911 {
1912 	struct device_node *np = pdev->dev.of_node;
1913 	unsigned int nb_chans;
1914 	struct qmc *qmc;
1915 	int irq;
1916 	int ret;
1917 
1918 	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
1919 	if (!qmc)
1920 		return -ENOMEM;
1921 
1922 	qmc->dev = &pdev->dev;
1923 	qmc->data = of_device_get_match_data(&pdev->dev);
1924 	if (!qmc->data) {
1925 		dev_err(qmc->dev, "Missing match data\n");
1926 		return -EINVAL;
1927 	}
1928 	INIT_LIST_HEAD(&qmc->chan_head);
1929 
1930 	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
1931 	if (IS_ERR(qmc->tsa_serial)) {
1932 		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
1933 				     "Failed to get TSA serial\n");
1934 	}
1935 
1936 	ret = qmc_init_resources(qmc, pdev);
1937 	if (ret)
1938 		return ret;
1939 
1940 	if (qmc_is_qe(qmc)) {
1941 		ret = qmc_qe_soft_qmc_init(qmc, np);
1942 		if (ret)
1943 			return ret;
1944 	}
1945 
1946 	/* Parse channel information */
1947 	ret = qmc_of_parse_chans(qmc, np);
1948 	if (ret)
1949 		return ret;
1950 
1951 	nb_chans = qmc_nb_chans(qmc);
1952 
1953 	/*
1954 	 * Allocate the buffer descriptor table:
1955 	 * 8 Rx and 8 Tx descriptors per channel
1956 	 */
1957 	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
1958 	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
1959 					    &qmc->bd_dma_addr, GFP_KERNEL);
1960 	if (!qmc->bd_table) {
1961 		dev_err(qmc->dev, "Failed to allocate bd table\n");
1962 		return -ENOMEM;
1963 	}
1964 	memset(qmc->bd_table, 0, qmc->bd_size);
1965 
1966 	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);
1967 
1968 	/* Allocate the interrupt table */
1969 	qmc->int_size = QMC_NB_INTS * sizeof(u16);
1970 	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
1971 					     &qmc->int_dma_addr, GFP_KERNEL);
1972 	if (!qmc->int_table) {
1973 		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
1974 		return -ENOMEM;
1975 	}
1976 	memset(qmc->int_table, 0, qmc->int_size);
1977 
1978 	qmc->int_curr = qmc->int_table;
1979 	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
1980 	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);
1981 
1982 	/* Set MRBLR (valid for HDLC only) max MRU + max CRC */
1983 	/* Set MRBLR (valid for HDLC only) to max MRU + max CRC size */
1984 
1985 	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
1986 	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);
1987 
1988 	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
1989 	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
1990 
1991 	if (qmc_is_qe(qmc)) {
1992 		/* Zero the reserved area */
1993 		memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
1994 			  QMC_QE_GBL_RSV_B0_SIZE);
1995 
1996 		qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
1997 
1998 		/* Init 'framer parameters' area and set the base addresses */
1999 		memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
2000 		memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
2001 		qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
2002 			    qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
2003 		qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
2004 			    qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
2005 	}
2006 
2007 	ret = qmc_init_tsa(qmc);
2008 	if (ret)
2009 		return ret;
2010 
2011 	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
2012 
2013 	ret = qmc_setup_chans(qmc);
2014 	if (ret)
2015 		return ret;
2016 
2017 	/* Init interrupt table */
2018 	ret = qmc_setup_ints(qmc);
2019 	if (ret)
2020 		return ret;
2021 
2022 	/* Init SCC (CPM1) or UCC (QE) */
2023 	ret = qmc_init_xcc(qmc);
2024 	if (ret)
2025 		return ret;
2026 
2027 	/* Set the irq handler */
2028 	irq = platform_get_irq(pdev, 0);
2029 	if (irq < 0) {
2030 		ret = irq;
2031 		goto err_exit_xcc;
2032 	}
2033 	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
2034 	if (ret < 0)
2035 		goto err_exit_xcc;
2036 
2037 	/* Enable interrupts */
2038 	qmc_write16(qmc->scc_regs + SCC_SCCM,
2039 		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
2040 
2041 	ret = qmc_finalize_chans(qmc);
2042 	if (ret < 0)
2043 		goto err_disable_intr;
2044 
2045 	/* Enable transmitter and receiver */
2046 	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
2047 
2048 	platform_set_drvdata(pdev, qmc);
2049 
2050 	/* Populate channel-related devices */
2051 	ret = devm_of_platform_populate(qmc->dev);
2052 	if (ret)
2053 		goto err_disable_txrx;
2054 
2055 	return 0;
2056 
2057 err_disable_txrx:
2058 	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
2059 
2060 err_disable_intr:
2061 	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2062 
2063 err_exit_xcc:
2064 	qmc_exit_xcc(qmc);
2065 	return ret;
2066 }
2067 
2068 static void qmc_remove(struct platform_device *pdev)
2069 {
2070 	struct qmc *qmc = platform_get_drvdata(pdev);
2071 
2072 	/* Disable transmitter and receiver */
2073 	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
2074 
2075 	/* Disable interrupts */
2076 	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2077 
2078 	/* Exit SCC (CPM1) or UCC (QE) */
2079 	qmc_exit_xcc(qmc);
2080 }
2081 
2082 static const struct qmc_data qmc_data_cpm1 __maybe_unused = {
2083 	.version = QMC_CPM1,
2084 	.tstate = 0x30000000,
2085 	.rstate = 0x31000000,
2086 	.zistate = 0x00000100,
2087 	.zdstate_hdlc = 0x00000080,
2088 	.zdstate_transp = 0x18000080,
2089 	.rpack = 0x00000000,
2090 };
2091 
2092 static const struct qmc_data qmc_data_qe __maybe_unused = {
2093 	.version = QMC_QE,
2094 	.tstate = 0x30000000,
2095 	.rstate = 0x30000000,
2096 	.zistate = 0x00000200,
2097 	.zdstate_hdlc = 0x80FFFFE0,
2098 	.zdstate_transp = 0x003FFFE2,
2099 	.rpack = 0x80000000,
2100 };
2101 
2102 static const struct of_device_id qmc_id_table[] = {
2103 #if IS_ENABLED(CONFIG_CPM1)
2104 	{ .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
2105 #endif
2106 #if IS_ENABLED(CONFIG_QUICC_ENGINE)
2107 	{ .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
2108 #endif
2109 	{} /* sentinel */
2110 };
2111 MODULE_DEVICE_TABLE(of, qmc_id_table);
2112 
2113 static struct platform_driver qmc_driver = {
2114 	.driver = {
2115 		.name = "fsl-qmc",
2116 		.of_match_table = of_match_ptr(qmc_id_table),
2117 	},
2118 	.probe = qmc_probe,
2119 	.remove = qmc_remove,
2120 };
2121 module_platform_driver(qmc_driver);
2122 
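/*
 * Resolve a channel from a QMC controller node and a channel index.
 * On success, the reference taken on the QMC platform device by
 * of_find_device_by_node() is kept; it is released later by qmc_chan_put().
 */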
2123 static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
2124 {
2125 	struct platform_device *pdev;
2126 	struct qmc_chan *qmc_chan;
2127 	struct qmc *qmc;
2128 
2129 	if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
2130 		return ERR_PTR(-EINVAL);
2131 
2132 	pdev = of_find_device_by_node(qmc_np);
2133 	if (!pdev)
2134 		return ERR_PTR(-ENODEV);
2135 
2136 	qmc = platform_get_drvdata(pdev);
2137 	if (!qmc) {
2138 		platform_device_put(pdev);
2139 		return ERR_PTR(-EPROBE_DEFER);
2140 	}
2141 
2142 	if (chan_index >= ARRAY_SIZE(qmc->chans)) {
2143 		platform_device_put(pdev);
2144 		return ERR_PTR(-EINVAL);
2145 	}
2146 
2147 	qmc_chan = qmc->chans[chan_index];
2148 	if (!qmc_chan) {
2149 		platform_device_put(pdev);
2150 		return ERR_PTR(-ENOENT);
2151 	}
2152 
2153 	return qmc_chan;
2154 }
2155 
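/*
 * Illustrative device tree usage, assuming a consumer property named
 * "fsl,qmc-chan" (the property name is defined by the consumer binding,
 * not by this driver). Each entry is <&qmc chan_index>, i.e. two cells:
 *
 *     fsl,qmc-chan = <&qmc 16>, <&qmc 17>;
 *
 * With such a property, qmc_chan_count_phandles(np, "fsl,qmc-chan")
 * returns 2.
 */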
2156 int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name)
2157 {
2158 	int count;
2159 
2160 	/* phandles are fixed-args phandles with one arg (two cells per entry) */
2161 	count = of_count_phandle_with_args(np, phandles_name, NULL);
2162 	if (count < 0)
2163 		return count;
2164 
2165 	return count / 2;
2166 }
2167 EXPORT_SYMBOL(qmc_chan_count_phandles);
2168 
2169 struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
2170 					       const char *phandles_name,
2171 					       int index)
2172 {
2173 	struct of_phandle_args out_args;
2174 	struct qmc_chan *qmc_chan;
2175 	int ret;
2176 
2177 	ret = of_parse_phandle_with_fixed_args(np, phandles_name, 1, index,
2178 					       &out_args);
2179 	if (ret < 0)
2180 		return ERR_PTR(ret);
2181 
2182 	if (out_args.args_count != 1) {
2183 		of_node_put(out_args.np);
2184 		return ERR_PTR(-EINVAL);
2185 	}
2186 
2187 	qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
2188 	of_node_put(out_args.np);
2189 	return qmc_chan;
2190 }
2191 EXPORT_SYMBOL(qmc_chan_get_byphandles_index);
2192 
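/*
 * Illustrative device tree layout for the by-child lookup (the node name is
 * an example only): the channel node is a direct child of the QMC controller
 * node and its "reg" property holds the channel index, e.g.
 *
 *     channel@10 {
 *             reg = <16>;
 *     };
 */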
2193 struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
2194 {
2195 	struct device_node *qmc_np;
2196 	u32 chan_index;
2197 	int ret;
2198 
2199 	qmc_np = np->parent;
2200 	ret = of_property_read_u32(np, "reg", &chan_index);
2201 	if (ret)
2202 		return ERR_PTR(-EINVAL);
2203 
2204 	return qmc_chan_get_from_qmc(qmc_np, chan_index);
2205 }
2206 EXPORT_SYMBOL(qmc_chan_get_bychild);
2207 
2208 void qmc_chan_put(struct qmc_chan *chan)
2209 {
2210 	put_device(chan->qmc->dev);
2211 }
2212 EXPORT_SYMBOL(qmc_chan_put);
2213 
2214 static void devm_qmc_chan_release(struct device *dev, void *res)
2215 {
2216 	struct qmc_chan **qmc_chan = res;
2217 
2218 	qmc_chan_put(*qmc_chan);
2219 }
2220 
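/*
 * Minimal consumer usage sketch (the calling device, its device_node and the
 * property name are hypothetical):
 *
 *     chan = devm_qmc_chan_get_byphandles_index(dev, dev->of_node,
 *                                                "fsl,qmc-chan", 0);
 *     if (IS_ERR(chan))
 *             return PTR_ERR(chan);
 *
 * The channel reference is dropped automatically (via qmc_chan_put()) when
 * @dev is unbound.
 */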
2221 struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
2222 						    struct device_node *np,
2223 						    const char *phandles_name,
2224 						    int index)
2225 {
2226 	struct qmc_chan *qmc_chan;
2227 	struct qmc_chan **dr;
2228 
2229 	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2230 	if (!dr)
2231 		return ERR_PTR(-ENOMEM);
2232 
2233 	qmc_chan = qmc_chan_get_byphandles_index(np, phandles_name, index);
2234 	if (!IS_ERR(qmc_chan)) {
2235 		*dr = qmc_chan;
2236 		devres_add(dev, dr);
2237 	} else {
2238 		devres_free(dr);
2239 	}
2240 
2241 	return qmc_chan;
2242 }
2243 EXPORT_SYMBOL(devm_qmc_chan_get_byphandles_index);
2244 
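/*
 * Managed variant of qmc_chan_get_bychild(): the channel reference is
 * released automatically (via qmc_chan_put()) when @dev is unbound.
 */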
2245 struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
2246 					   struct device_node *np)
2247 {
2248 	struct qmc_chan *qmc_chan;
2249 	struct qmc_chan **dr;
2250 
2251 	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2252 	if (!dr)
2253 		return ERR_PTR(-ENOMEM);
2254 
2255 	qmc_chan = qmc_chan_get_bychild(np);
2256 	if (!IS_ERR(qmc_chan)) {
2257 		*dr = qmc_chan;
2258 		devres_add(dev, dr);
2259 	} else {
2260 		devres_free(dr);
2261 	}
2262 
2263 	return qmc_chan;
2264 }
2265 EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
2266 
2267 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
2268 MODULE_DESCRIPTION("CPM/QE QMC driver");
2269 MODULE_LICENSE("GPL");
2270