1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * QMC driver
4 *
5 * Copyright 2022 CS GROUP France
6 *
7 * Author: Herve Codina <herve.codina@bootlin.com>
8 */
9
10 #include <soc/fsl/qe/qmc.h>
11 #include <linux/bitfield.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/firmware.h>
14 #include <linux/hdlc.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 #include <soc/fsl/cpm.h>
23 #include <soc/fsl/qe/ucc_slow.h>
24 #include <soc/fsl/qe/qe.h>
25 #include <sysdev/fsl_soc.h>
26 #include "tsa.h"
27
/* SCC general mode register low (32 bits) (GUMR_L in QE) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR	BIT(5)
#define SCC_GSMRL_ENT	BIT(4)
#define SCC_GSMRL_MODE_MASK	GENMASK(3, 0)
#define SCC_CPM1_GSMRL_MODE_QMC	FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
#define SCC_QE_GSMRL_MODE_QMC	FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)

/* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS	BIT(7)
#define SCC_GSMRH_CDS	BIT(8)
#define SCC_GSMRH_CTSP	BIT(9)
#define SCC_GSMRH_CDP	BIT(10)
#define SCC_GSMRH_TTX	BIT(11)
#define SCC_GSMRH_TRX	BIT(12)

/* SCC event register (16 bits) (identical to UCCE in QE) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV	BIT(3)
#define SCC_SCCE_GINT	BIT(2)
#define SCC_SCCE_GUN	BIT(1)
#define SCC_SCCE_GOV	BIT(0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14

/* UCC Extended Mode Register (8 bits, QE only) */
#define SCC_QE_UCC_GUEMR	0x90

/* --- QMC global parameter RAM area (offsets from scc_pram) --- */

/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE	0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR	0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR	0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR	0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT	0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE	0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR	0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR	0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX	0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX	0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0
/* Rx framer base pointer (16 bits, QE only) */
#define QMC_QE_GBL_RX_FRM_BASE	0xAC
/* Tx framer base pointer (16 bits, QE only) */
#define QMC_QE_GBL_TX_FRM_BASE	0xAE
/* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
#define QMC_QE_GBL_RSV_B0_START	0xB0
#define QMC_QE_GBL_RSV_B0_SIZE	0x14
/* QMC Global Channel specific base (32 bits, QE only) */
#define QMC_QE_GBL_GCSBASE	0xC4

/* TSA entry (16bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID	BIT(15)
#define QMC_TSA_WRAP	BIT(14)	/* Last entry of the table */
#define QMC_TSA_MASK_MASKH	GENMASK(13, 12)
#define QMC_TSA_MASK_MASKL	GENMASK(5, 0)
/* All mask bits set: the full 8bit timeslot belongs to the channel */
#define QMC_TSA_MASK_8BIT	(FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
				 FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
#define QMC_TSA_CHANNEL_MASK	GENMASK(11, 6)
#define QMC_TSA_CHANNEL(x)	FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)

/* --- QMC channel specific parameter RAM area (offsets from s_param) --- */

/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_MASK	GENMASK(15, 15)
#define QMC_SPE_CHAMR_MODE_HDLC	FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
#define QMC_SPE_CHAMR_MODE_TRANSP	(FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
#define QMC_SPE_CHAMR_ENT	BIT(12)	/* Enable transmitter */
#define QMC_SPE_CHAMR_POL	BIT(8)	/* Start Tx BD polling */
#define QMC_SPE_CHAMR_HDLC_IDLM	BIT(13)
#define QMC_SPE_CHAMR_HDLC_CRC	BIT(7)
#define QMC_SPE_CHAMR_HDLC_NOF_MASK	GENMASK(3, 0)
#define QMC_SPE_CHAMR_HDLC_NOF(x)	FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
#define QMC_SPE_CHAMR_TRANSP_RD	BIT(14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	BIT(10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC	0x3C
#define QMC_SPE_TRNSYNC_RX_MASK	GENMASK(15, 8)
#define QMC_SPE_TRNSYNC_RX(x)	FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
#define QMC_SPE_TRNSYNC_TX_MASK	GENMASK(7, 0)
#define QMC_SPE_TRNSYNC_TX(x)	FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)

/* Interrupt related registers bits */
#define QMC_INT_V	BIT(15)	/* Entry valid */
#define QMC_INT_W	BIT(14)	/* Last entry of the interrupt table */
#define QMC_INT_NID	BIT(13)
#define QMC_INT_IDL	BIT(12)
#define QMC_INT_CHANNEL_MASK	GENMASK(11, 6)
#define QMC_INT_GET_CHANNEL(x)	FIELD_GET(QMC_INT_CHANNEL_MASK, x)
#define QMC_INT_MRF	BIT(5)
#define QMC_INT_UN	BIT(4)	/* Tx underrun */
#define QMC_INT_RXF	BIT(3)	/* Rx frame received */
#define QMC_INT_BSY	BIT(2)	/* Rx busy (no free BD) */
#define QMC_INT_TXB	BIT(1)	/* Tx buffer transmitted */
#define QMC_INT_RXB	BIT(0)	/* Rx buffer received */

/* BD related registers bits */
#define QMC_BD_RX_E	BIT(15)	/* BD empty (owned by hardware) */
#define QMC_BD_RX_W	BIT(13)	/* Wrap, last BD of the ring */
#define QMC_BD_RX_I	BIT(12)	/* Generate interrupt on completion */
#define QMC_BD_RX_L	BIT(11)	/* Last buffer of a frame */
#define QMC_BD_RX_F	BIT(10)	/* First buffer of a frame */
#define QMC_BD_RX_CM	BIT(9)
#define QMC_BD_RX_UB	BIT(7)	/* Software flag: BD in use */
#define QMC_BD_RX_LG	BIT(5)	/* Frame length violation */
#define QMC_BD_RX_NO	BIT(4)	/* Non-octet aligned frame */
#define QMC_BD_RX_AB	BIT(3)	/* Frame aborted */
#define QMC_BD_RX_CR	BIT(2)	/* CRC error */

#define QMC_BD_TX_R	BIT(15)	/* BD ready (owned by hardware) */
#define QMC_BD_TX_W	BIT(13)	/* Wrap, last BD of the ring */
#define QMC_BD_TX_I	BIT(12)	/* Generate interrupt on completion */
#define QMC_BD_TX_L	BIT(11)	/* Last buffer of a frame */
#define QMC_BD_TX_TC	BIT(10)	/* Transmit CRC after the frame */
#define QMC_BD_TX_CM	BIT(9)
#define QMC_BD_TX_UB	BIT(7)	/* Software flag: BD in use */
#define QMC_BD_TX_PAD_MASK	GENMASK(3, 0)
#define QMC_BD_TX_PAD(x)	FIELD_PREP(QMC_BD_TX_PAD_MASK, x)

/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128
196
/*
 * Per-BD transfer descriptor keeping the user completion callback.
 * A BD is used in a single direction, so the Tx and Rx callbacks can
 * share the same storage.
 */
struct qmc_xfer_desc {
	union {
		/* Tx completion callback */
		void (*tx_complete)(void *context);
		/* Rx completion callback (receives data length and status flags) */
		void (*rx_complete)(void *context, size_t length, unsigned int flags);
	};
	void *context;	/* Opaque value passed back to the callback */
};
204
/* Per-channel state */
struct qmc_chan {
	struct list_head list;	/* Node in the controller's chan_head list */
	unsigned int id;	/* Channel number */
	struct qmc *qmc;	/* Parent controller */
	void __iomem *s_param;	/* Channel specific parameter RAM area */
	enum qmc_mode mode;	/* QMC_HDLC or QMC_TRANSPARENT */
	spinlock_t ts_lock;	/* Protect timeslots */
	u64 tx_ts_mask_avail;	/* Tx timeslots the channel may use */
	u64 tx_ts_mask;		/* Tx timeslots currently assigned */
	u64 rx_ts_mask_avail;	/* Rx timeslots the channel may use */
	u64 rx_ts_mask;		/* Rx timeslots currently assigned */
	bool is_reverse_data;

	spinlock_t tx_lock;	/* Protect Tx related data */
	cbd_t __iomem *txbds;		/* Tx BD ring base */
	cbd_t __iomem *txbd_free;	/* Next BD available for submission */
	cbd_t __iomem *txbd_done;	/* Next BD expected to complete */
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];	/* Callbacks, one per Tx BD */
	u64 nb_tx_underrun;	/* Tx underrun counter */
	bool is_tx_stopped;

	spinlock_t rx_lock;	/* Protect Rx related data */
	cbd_t __iomem *rxbds;		/* Rx BD ring base */
	cbd_t __iomem *rxbd_free;	/* Next BD available for submission */
	cbd_t __iomem *rxbd_done;	/* Next BD expected to complete */
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];	/* Callbacks, one per Rx BD */
	u64 nb_rx_busy;		/* Rx busy counter */
	int rx_pending;		/* Number of submitted, not yet completed, Rx BDs */
	bool is_rx_halted;	/* Receiver halted, needs a restart on next submit */
	bool is_rx_stopped;
};
236
/* Supported controller flavours */
enum qmc_version {
	QMC_CPM1,	/* QMC hosted in a CPM1 SCC */
	QMC_QE,		/* QMC hosted in a QUICC Engine UCC */
};

/* Per-flavour constants used to (re)initialize channel internal states */
struct qmc_data {
	enum qmc_version version;
	u32 tstate;		/* Initial TSTATE value */
	u32 rstate;		/* Initial RSTATE value */
	u32 zistate;		/* Initial ZISTATE value */
	u32 zdstate_hdlc;	/* Initial ZDSTATE value (HDLC mode) */
	u32 zdstate_transp;	/* Initial ZDSTATE value (Transparent mode) */
	u32 rpack;		/* Initial RPACK value */
};
251
/* Controller state */
struct qmc {
	struct device *dev;
	const struct qmc_data *data;	/* CPM1/QE flavour specific constants */
	struct tsa_serial *tsa_serial;	/* Underlying TSA serial */
	void __iomem *scc_regs;		/* SCC registers */
	void __iomem *scc_pram;		/* QMC global parameter RAM area */
	void __iomem *dpram;
	u16 scc_pram_offset;
	u32 dpram_offset;
	u32 qe_subblock;		/* QE command sub-block id (QE only) */
	cbd_t __iomem *bd_table;	/* Tx/Rx BD rings for all channels */
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;		/* Interrupt circular table */
	u16 __iomem *int_curr;		/* Current interrupt table entry */
	dma_addr_t int_dma_addr;
	size_t int_size;
	bool is_tsa_64rxtx;		/* Common 64-entry Rx/Tx TSA table in use */
	struct list_head chan_head;	/* All channels of this controller */
	struct qmc_chan *chans[64];	/* Channels indexed by channel number */
};
273
/* Write an 8bit value to a memory-mapped QMC location */
static void qmc_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}
278
/* Write a 16bit value (big-endian) to a memory-mapped QMC location */
static void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}
283
/* Read a 16bit value (big-endian) from a memory-mapped QMC location */
static u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}
288
/* Read-modify-write: set the given bits in a 16bit QMC location */
static void qmc_setbits16(void __iomem *addr, u16 set)
{
	u16 cur;

	cur = qmc_read16(addr);
	qmc_write16(addr, cur | set);
}
293
/* Read-modify-write: clear the given bits in a 16bit QMC location */
static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	u16 cur;

	cur = qmc_read16(addr);
	qmc_write16(addr, cur & ~clr);
}
298
/* Read-modify-write: clear @clr then set @set in a 16bit QMC location */
static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
{
	u16 cur;

	cur = qmc_read16(addr);
	cur &= ~clr;
	cur |= set;
	qmc_write16(addr, cur);
}
303
/* Write a 32bit value (big-endian) to a memory-mapped QMC location */
static void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}
308
/* Read a 32bit value (big-endian) from a memory-mapped QMC location */
static u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}
313
/* Read-modify-write: set the given bits in a 32bit QMC location */
static void qmc_setbits32(void __iomem *addr, u32 set)
{
	u32 cur;

	cur = qmc_read32(addr);
	qmc_write32(addr, cur | set);
}
318
qmc_is_qe(const struct qmc * qmc)319 static bool qmc_is_qe(const struct qmc *qmc)
320 {
321 if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
322 return qmc->data->version == QMC_QE;
323
324 return IS_ENABLED(CONFIG_QUICC_ENGINE);
325 }
326
/*
 * qmc_chan_get_info - Fill @info with the channel mode, the number of
 * Rx/Tx timeslots assigned to the channel and the rates reported by the
 * underlying TSA serial.
 *
 * Returns 0 on success or the tsa_serial_get_info() error code.
 */
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	unsigned long flags;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	/* Snapshot the timeslot masks under ts_lock for a coherent view */
	spin_lock_irqsave(&chan->ts_lock, flags);

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
353
qmc_chan_get_ts_info(struct qmc_chan * chan,struct qmc_chan_ts_info * ts_info)354 int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
355 {
356 unsigned long flags;
357
358 spin_lock_irqsave(&chan->ts_lock, flags);
359
360 ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
361 ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
362 ts_info->rx_ts_mask = chan->rx_ts_mask;
363 ts_info->tx_ts_mask = chan->tx_ts_mask;
364
365 spin_unlock_irqrestore(&chan->ts_lock, flags);
366
367 return 0;
368 }
369 EXPORT_SYMBOL(qmc_chan_get_ts_info);
370
/*
 * qmc_chan_set_ts_info - Change the timeslots assigned to the channel.
 *
 * The new masks must be subsets of the available masks and, when the
 * controller uses a common 64-entry Rx/Tx TSA table, Rx and Tx masks
 * must be identical. The affected direction(s) must be stopped.
 *
 * Returns 0 on success, -EINVAL on invalid masks, -EBUSY if the channel
 * is not stopped.
 */
int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
{
	unsigned long flags;
	int ret;

	/*
	 * Only a subset of available timeslots is allowed.
	 * NOTE(review): the *_avail masks are read without ts_lock here;
	 * this assumes they are fixed after channel setup — confirm.
	 */
	if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
		return -EINVAL;
	if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
		return -EINVAL;

	/* In case of common rx/tx table, rx/tx masks must be identical */
	if (chan->qmc->is_tsa_64rxtx) {
		if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
			return -EINVAL;
	}

	spin_lock_irqsave(&chan->ts_lock, flags);

	/* A direction whose mask changes must be stopped first */
	if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
	    (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
		dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
		ret = -EBUSY;
	} else {
		chan->tx_ts_mask = ts_info->tx_ts_mask;
		chan->rx_ts_mask = ts_info->rx_ts_mask;
		ret = 0;
	}
	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qmc_chan_set_ts_info);
404
/*
 * qmc_chan_set_param - Apply mode specific parameters to the channel.
 *
 * @param->mode must match the channel mode set at channel creation.
 * Returns 0 on success, -EINVAL on mode mismatch or invalid values.
 */
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		/* HDLC Rx buffer size must be a multiple of 4 and at least 8 */
		if (param->hdlc.max_rx_buf_size % 4 ||
		    param->hdlc.max_rx_buf_size < 8)
			return -EINVAL;

		/*
		 * MRBLR lives in the global parameter RAM area and is thus
		 * shared by all channels of the controller — TODO confirm
		 * intended for per-channel API.
		 */
		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);
441
/*
 * qmc_chan_write_submit - Queue a Tx buffer on the channel.
 * @chan: channel
 * @addr: DMA address of the data to transmit
 * @length: number of bytes to transmit
 * @complete: optional completion callback (called outside any lock)
 * @context: opaque value passed to @complete
 *
 * Returns 0 on success, -EBUSY when no Tx BD is available.
 */
int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * BD states (R = ready, UB = software "in use" flag):
	 * R bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/*
		 * A BD submitted without a completion callback has no I bit
		 * and thus raises no completion interrupt. If such a BD has
		 * been transmitted (R cleared) and txbd_done still points at
		 * it, reclaim it here so the slot can be reused.
		 */
		if (!(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == chan->txbd_done) {
			if (ctrl & QMC_BD_TX_W)
				chan->txbd_done = chan->txbds;
			else
				chan->txbd_done++;
		} else {
			/* We are full ... */
			ret = -EBUSY;
			goto end;
		}
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	/* Record the completion callback for this BD slot */
	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	if (complete)
		ctrl |= QMC_BD_TX_I;
	else
		ctrl &= ~QMC_BD_TX_I;
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Kick Tx BD polling (POL bit) unless the channel is stopped */
	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	/* Advance the free pointer, wrapping at the end of the ring */
	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
507
/*
 * Complete all transmitted Tx BDs: clear their UB flag, advance
 * txbd_done and invoke the user completion callbacks.
 *
 * Callbacks are invoked with tx_lock released, so a callback may submit
 * new buffers; the loop state is reloaded after each callback.
 */
static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * BD states (R = ready, UB = software "in use" flag):
	 * R bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	/* Stop at the first BD still owned by the hardware */
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;	/* BD not in use: nothing more to complete */

		/* Detach the callback before releasing the BD slot */
		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			/* Call the user callback without holding tx_lock */
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		/* Reload: the callback may have changed the ring state */
		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}
559
/*
 * qmc_chan_read_submit - Queue an Rx buffer on the channel.
 * @chan: channel
 * @addr: DMA address of the receive buffer
 * @length: buffer size (the actual data length is written by the QMC)
 * @complete: optional completion callback (called outside any lock)
 * @context: opaque value passed to @complete
 *
 * Also restarts the receiver if it was halted (e.g. after running out
 * of Rx BDs). Returns 0 on success, -EBUSY when no Rx BD is available.
 */
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length, unsigned int flags),
			 void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * BD states (E = empty, UB = software "in use" flag):
	 * E bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/*
		 * A BD submitted without a completion callback (no I bit)
		 * raises no completion interrupt. If such a BD has been
		 * filled (E cleared) and rxbd_done still points at it,
		 * reclaim it here so the slot can be reused.
		 */
		if (!(ctrl & (QMC_BD_RX_E | QMC_BD_RX_I)) && bd == chan->rxbd_done) {
			if (ctrl & QMC_BD_RX_W)
				chan->rxbd_done = chan->rxbds;
			else
				chan->rxbd_done++;
		} else {
			/* We are full ... */
			ret = -EBUSY;
			goto end;
		}
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	/* Record the completion callback for this BD slot */
	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Clear previous status flags */
	ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
		  QMC_BD_RX_AB | QMC_BD_RX_CR);

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	if (complete)
		ctrl |= QMC_BD_RX_I;
	else
		ctrl &= ~QMC_BD_RX_I;
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver: reload the initial channel internal states */
		qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
			    chan->mode == QMC_TRANSPARENT ?
				chan->qmc->data->zdstate_transp :
				chan->qmc->data->zdstate_hdlc);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	/* Advance the free pointer, wrapping at the end of the ring */
	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
639
/*
 * Complete all filled Rx BDs: clear their UB flag, advance rxbd_done
 * and invoke the user completion callbacks with the received length and
 * status flags.
 *
 * Callbacks are invoked with rx_lock released, so a callback may submit
 * new buffers; the loop state is reloaded after each callback.
 */
static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size, unsigned int flags);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * BD states (E = empty, UB = software "in use" flag):
	 * E bit  UB bit
	 *   0       0  : The BD is free
	 *   1       1  : The BD is in use, waiting for transfer
	 *   0       1  : The BD is in use, waiting for completion
	 *   1       0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	/* Stop at the first BD still owned by the hardware */
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;	/* BD not in use: nothing more to complete */

		/* Detach the callback before releasing the BD slot */
		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			/* Call the user callback without holding rx_lock */
			spin_unlock_irqrestore(&chan->rx_lock, flags);

			/*
			 * Avoid conversion between internal hardware flags and
			 * the software API flags.
			 * -> Be sure that the software API flags are consistent
			 *    with the hardware flags
			 */
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST  != QMC_BD_RX_L);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF   != QMC_BD_RX_LG);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA   != QMC_BD_RX_NO);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC   != QMC_BD_RX_CR);

			complete(context, datalen,
				 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
					 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		/* Reload: the callback may have changed the ring state */
		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}
711
/*
 * Enable or disable the channel entries in the common 64-entry Rx/Tx
 * time-slot assignment table. With a shared table the channel Rx and Tx
 * timeslot masks must be identical.
 *
 * Returns 0 on success, -EINVAL on Rx/Tx mask mismatch, -EBUSY if an
 * entry is already routed to another channel.
 *
 * Fix: @i is unsigned int, print it with %u instead of %d (printk
 * format specifiers must match the argument type).
 */
static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				     bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/*
	 * Use a common Tx/Rx 64 entries table.
	 * Tx and Rx related stuffs must be identical
	 */
	if (chan->tx_ts_mask != chan->rx_ts_mask) {
		dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
		return -EINVAL;
	}

	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on Rx stuff*/
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		/* An entry already routed to another channel cannot be reused */
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u TxRx entry %u already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on Rx stuff*/
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		/* Preserve the WRAP bit which marks the end of the table */
		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}
754
/*
 * Enable or disable the channel entries in the dedicated 32-entry Rx
 * time-slot assignment table.
 *
 * Returns 0 on success, -EBUSY if an entry is already routed to another
 * channel.
 *
 * Fix: @i is unsigned int, print it with %u instead of %d (printk
 * format specifiers must match the argument type).
 */
static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Rx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		/* An entry already routed to another channel cannot be reused */
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Rx entry %u already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on Rx stuff */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		/* Preserve the WRAP bit which marks the end of the table */
		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}
790
/*
 * Enable or disable the channel entries in the dedicated 32-entry Tx
 * time-slot assignment table.
 *
 * Returns 0 on success, -EBUSY if an entry is already routed to another
 * channel.
 *
 * Fix: @i is unsigned int, print it with %u instead of %d (printk
 * format specifiers must match the argument type).
 */
static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Tx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on Tx stuff */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
		/* An entry already routed to another channel cannot be reused */
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Tx entry %u already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on Tx stuff */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		/* Preserve the WRAP bit which marks the end of the table */
		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
				 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}
826
/*
 * Enable or disable the Tx TSA entries of the channel, dispatching to
 * the common 64-entry table or the dedicated Tx table depending on the
 * controller configuration.
 */
static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	return chan->qmc->is_tsa_64rxtx ?
		qmc_chan_setup_tsa_64rxtx(chan, &info, enable) :
		qmc_chan_setup_tsa_32tx(chan, &info, enable);
}
843
/*
 * Enable or disable the Rx TSA entries of the channel, dispatching to
 * the common 64-entry table or the dedicated Rx table depending on the
 * controller configuration.
 */
static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	return chan->qmc->is_tsa_64rxtx ?
		qmc_chan_setup_tsa_64rxtx(chan, &info, enable) :
		qmc_chan_setup_tsa_32rx(chan, &info, enable);
}
860
/* Issue a QMC channel command on a CPM1 based controller */
static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	u8 op = (qmc_opcode << 4) | 0x0E;

	return cpm_command(chan->id << 2, op);
}
865
/* Issue a QMC channel command on a QE based controller */
static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
{
	/* qe_issue_cmd() returns true on success */
	return qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0) ? 0 : -EIO;
}
872
/*
 * Stop the channel receiver: issue the STOP RECEIVE command and, when
 * possible, disable the channel TSA entries.
 *
 * With a common 64-entry Rx/Tx TSA table, the entries are shared by
 * both directions and are only disabled once both Rx and Tx are
 * stopped. Idempotent: returns 0 if already stopped.
 */
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (chan->is_rx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP RECEIVE command */
	ret = qmc_is_qe(chan->qmc) ?
		qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
		qmc_chan_cpm1_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

	/* Disable the TSA entries unless Tx still needs the shared table */
	if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
		ret = qmc_chan_setup_tsa_rx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
911
/*
 * Stop the channel transmitter: issue the STOP TRANSMIT command and,
 * when possible, disable the channel TSA entries.
 *
 * With a common 64-entry Rx/Tx TSA table, the entries are shared by
 * both directions and are only disabled once both Rx and Tx are
 * stopped. Idempotent: returns 0 if already stopped.
 */
static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (chan->is_tx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP TRANSMIT command */
	ret = qmc_is_qe(chan->qmc) ?
		qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
		qmc_chan_cpm1_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

	/* Disable the TSA entries unless Rx still needs the shared table */
	if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
		ret = qmc_chan_setup_tsa_tx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
950
/* Forward declaration: needed to roll back Rx on Tx stop failure */
static int qmc_chan_start_rx(struct qmc_chan *chan);

/*
 * qmc_chan_stop - Stop the channel in the requested direction(s).
 * @direction: bitmask of QMC_CHAN_READ and/or QMC_CHAN_WRITE
 *
 * When both directions are requested and stopping Tx fails, Rx is
 * restarted so the channel is left in its original state.
 * Holds ts_lock so the timeslot configuration cannot change while the
 * channel state is being modified.
 */
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	bool is_rx_rollback_needed = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&chan->ts_lock, flags);

	if (direction & QMC_CHAN_READ) {
		/* Remember whether Rx was running, for rollback on Tx failure */
		is_rx_rollback_needed = !chan->is_rx_stopped;
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			goto end;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret) {
			/* Restart rx if needed */
			if (is_rx_rollback_needed)
				qmc_chan_start_rx(chan);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->ts_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
983
/*
 * Configure the TRNSYNC register used in transparent mode to keep the
 * Rx and Tx timeslots of the channel synchronized.
 *
 * When the channel uses at most one timeslot in each direction, no
 * synchronization is needed and TRANSP_SYNC is disabled instead.
 * Returns 0 on success or the tsa_serial_get_info() error code.
 */
static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	unsigned int w_rx, w_tx;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Number of timeslots used in each direction */
	w_rx = hweight64(chan->rx_ts_mask);
	w_tx = hweight64(chan->tx_ts_mask);
	if (w_rx <= 1 && w_tx <= 1) {
		dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
		qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
		return 0;
	}

	/* Find the first Rx TS allocated to the channel (1-based, 0 if none) */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	/*
	 * Build the TRNSYNC value. The "* 2" scales the timeslot index to
	 * the byte offset expected by the hardware — presumably 2 bytes
	 * per timeslot entry; confirm against the QMC reference manual.
	 */
	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}
1027
/*
 * Start the channel receiver: enable the TSA entries, set up transparent
 * mode synchronization if needed, and reload the Rx internal states.
 * Idempotent: returns 0 if already started.
 */
static int qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (!chan->is_rx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	ret = qmc_chan_setup_tsa_rx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	/* Transparent mode needs Rx/Tx timeslot synchronization */
	if (chan->mode == QMC_TRANSPARENT) {
		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

	/* Restart the receiver: reload the initial channel internal states */
	qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
	qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
		    chan->mode == QMC_TRANSPARENT ?
			chan->qmc->data->zdstate_transp :
			chan->qmc->data->zdstate_hdlc);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
1072
/*
 * (Re)start the transmitter side of a channel.
 * Re-enables the channel's TSA entries, reprograms TRNSYNC for transparent
 * mode and re-enables the transmitter in CHAMR.
 * Called with IRQs enabled; takes chan->tx_lock.
 * Returns 0 on success (including when already started) or a negative errno.
 */
static int qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (!chan->is_tx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Re-enable this channel's time-slot entries in the TSA */
	ret = qmc_chan_setup_tsa_tx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	/* Transparent mode needs TRNSYNC reprogrammed before restarting */
	if (chan->mode == QMC_TRANSPARENT) {
		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

	/*
	 * Enable channel transmitter as it could be disabled if
	 * qmc_chan_reset() was called.
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
1117
qmc_chan_start(struct qmc_chan * chan,int direction)1118 int qmc_chan_start(struct qmc_chan *chan, int direction)
1119 {
1120 bool is_rx_rollback_needed = false;
1121 unsigned long flags;
1122 int ret = 0;
1123
1124 spin_lock_irqsave(&chan->ts_lock, flags);
1125
1126 if (direction & QMC_CHAN_READ) {
1127 is_rx_rollback_needed = chan->is_rx_stopped;
1128 ret = qmc_chan_start_rx(chan);
1129 if (ret)
1130 goto end;
1131 }
1132
1133 if (direction & QMC_CHAN_WRITE) {
1134 ret = qmc_chan_start_tx(chan);
1135 if (ret) {
1136 /* Restop rx if needed */
1137 if (is_rx_rollback_needed)
1138 qmc_chan_stop_rx(chan);
1139 goto end;
1140 }
1141 }
1142
1143 end:
1144 spin_unlock_irqrestore(&chan->ts_lock, flags);
1145 return ret;
1146 }
1147 EXPORT_SYMBOL(qmc_chan_start);
1148
qmc_chan_reset_rx(struct qmc_chan * chan)1149 static void qmc_chan_reset_rx(struct qmc_chan *chan)
1150 {
1151 struct qmc_xfer_desc *xfer_desc;
1152 unsigned long flags;
1153 cbd_t __iomem *bd;
1154 u16 ctrl;
1155
1156 spin_lock_irqsave(&chan->rx_lock, flags);
1157 bd = chan->rxbds;
1158 do {
1159 ctrl = qmc_read16(&bd->cbd_sc);
1160 qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));
1161
1162 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
1163 xfer_desc->rx_complete = NULL;
1164 xfer_desc->context = NULL;
1165
1166 bd++;
1167 } while (!(ctrl & QMC_BD_RX_W));
1168
1169 chan->rxbd_free = chan->rxbds;
1170 chan->rxbd_done = chan->rxbds;
1171 qmc_write16(chan->s_param + QMC_SPE_RBPTR,
1172 qmc_read16(chan->s_param + QMC_SPE_RBASE));
1173
1174 chan->rx_pending = 0;
1175
1176 spin_unlock_irqrestore(&chan->rx_lock, flags);
1177 }
1178
qmc_chan_reset_tx(struct qmc_chan * chan)1179 static void qmc_chan_reset_tx(struct qmc_chan *chan)
1180 {
1181 struct qmc_xfer_desc *xfer_desc;
1182 unsigned long flags;
1183 cbd_t __iomem *bd;
1184 u16 ctrl;
1185
1186 spin_lock_irqsave(&chan->tx_lock, flags);
1187
1188 /* Disable transmitter. It will be re-enable on qmc_chan_start() */
1189 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1190
1191 bd = chan->txbds;
1192 do {
1193 ctrl = qmc_read16(&bd->cbd_sc);
1194 qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));
1195
1196 xfer_desc = &chan->tx_desc[bd - chan->txbds];
1197 xfer_desc->tx_complete = NULL;
1198 xfer_desc->context = NULL;
1199
1200 bd++;
1201 } while (!(ctrl & QMC_BD_TX_W));
1202
1203 chan->txbd_free = chan->txbds;
1204 chan->txbd_done = chan->txbds;
1205 qmc_write16(chan->s_param + QMC_SPE_TBPTR,
1206 qmc_read16(chan->s_param + QMC_SPE_TBASE));
1207
1208 /* Reset TSTATE and ZISTATE to their initial value */
1209 qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
1210 qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
1211
1212 spin_unlock_irqrestore(&chan->tx_lock, flags);
1213 }
1214
qmc_chan_reset(struct qmc_chan * chan,int direction)1215 int qmc_chan_reset(struct qmc_chan *chan, int direction)
1216 {
1217 if (direction & QMC_CHAN_READ)
1218 qmc_chan_reset_rx(chan);
1219
1220 if (direction & QMC_CHAN_WRITE)
1221 qmc_chan_reset_tx(chan);
1222
1223 return 0;
1224 }
1225 EXPORT_SYMBOL(qmc_chan_reset);
1226
qmc_check_chans(struct qmc * qmc)1227 static int qmc_check_chans(struct qmc *qmc)
1228 {
1229 struct tsa_serial_info info;
1230 struct qmc_chan *chan;
1231 u64 tx_ts_assigned_mask;
1232 u64 rx_ts_assigned_mask;
1233 int ret;
1234
1235 /* Retrieve info from the TSA related serial */
1236 ret = tsa_serial_get_info(qmc->tsa_serial, &info);
1237 if (ret)
1238 return ret;
1239
1240 if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
1241 dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
1242 return -EINVAL;
1243 }
1244
1245 /*
1246 * If more than 32 TS are assigned to this serial, one common table is
1247 * used for Tx and Rx and so masks must be equal for all channels.
1248 */
1249 if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
1250 if (info.nb_tx_ts != info.nb_rx_ts) {
1251 dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
1252 return -EINVAL;
1253 }
1254 }
1255
1256 tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
1257 rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
1258
1259 list_for_each_entry(chan, &qmc->chan_head, list) {
1260 if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
1261 dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
1262 return -EINVAL;
1263 }
1264
1265 if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
1266 dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
1267 return -EINVAL;
1268 }
1269 }
1270
1271 return 0;
1272 }
1273
qmc_nb_chans(struct qmc * qmc)1274 static unsigned int qmc_nb_chans(struct qmc *qmc)
1275 {
1276 unsigned int count = 0;
1277 struct qmc_chan *chan;
1278
1279 list_for_each_entry(chan, &qmc->chan_head, list)
1280 count++;
1281
1282 return count;
1283 }
1284
/*
 * Parse all available child nodes of the controller node, one per channel.
 * Each child provides: "reg" (channel id, 0..63), "fsl,tx-ts-mask" and
 * "fsl,rx-ts-mask" (64-bit time-slot masks), optional "fsl,operational-mode"
 * ("transparent" by default, or "hdlc") and optional "fsl,reverse-data".
 * Channels are devm-allocated, linked into qmc->chan_head and indexed in
 * qmc->chans[]. Ends by cross-checking all channels via qmc_check_chans().
 * Returns 0 on success or a negative errno.
 */
static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node_scoped(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			return ret;
		}
		/* Hardware supports 64 channels max (ids 0..63) */
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan)
			return -ENOMEM;

		chan->id = chan_id;
		spin_lock_init(&chan->ts_lock);
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			return ret;
		}
		/* Initially, all available time-slots are in use */
		chan->tx_ts_mask_avail = ts_mask;
		chan->tx_ts_mask = chan->tx_ts_mask_avail;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			return ret;
		}
		chan->rx_ts_mask_avail = ts_mask;
		chan->rx_ts_mask = chan->rx_ts_mask_avail;

		/* Missing property (-EINVAL) falls back to "transparent" */
		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}
1357
qmc_init_tsa_64rxtx(struct qmc * qmc,const struct tsa_serial_info * info)1358 static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
1359 {
1360 unsigned int i;
1361 u16 val;
1362
1363 /*
1364 * Use a common Tx/Rx 64 entries table.
1365 * Everything was previously checked, Tx and Rx related stuffs are
1366 * identical -> Used Rx related stuff to build the table
1367 */
1368 qmc->is_tsa_64rxtx = true;
1369
1370 /* Invalidate all entries */
1371 for (i = 0; i < 64; i++)
1372 qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1373
1374 /* Set Wrap bit on last entry */
1375 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1376 QMC_TSA_WRAP);
1377
1378 /* Init pointers to the table */
1379 val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1380 qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1381 qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1382 qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1383 qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1384
1385 return 0;
1386 }
1387
qmc_init_tsa_32rx_32tx(struct qmc * qmc,const struct tsa_serial_info * info)1388 static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
1389 {
1390 unsigned int i;
1391 u16 val;
1392
1393 /*
1394 * Use a Tx 32 entries table and a Rx 32 entries table.
1395 * Everything was previously checked.
1396 */
1397 qmc->is_tsa_64rxtx = false;
1398
1399 /* Invalidate all entries */
1400 for (i = 0; i < 32; i++) {
1401 qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1402 qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
1403 }
1404
1405 /* Set Wrap bit on last entries */
1406 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1407 QMC_TSA_WRAP);
1408 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
1409 QMC_TSA_WRAP);
1410
1411 /* Init Rx pointers ...*/
1412 val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1413 qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1414 qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1415
1416 /* ... and Tx pointers */
1417 val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
1418 qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1419 qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1420
1421 return 0;
1422 }
1423
/*
 * Initialize the time-slot assignment tables according to the number of
 * time-slots the TSA assigned to this serial.
 * Returns 0 on success or a negative errno.
 */
static int qmc_init_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Initialize one common 64 entries table or two 32 entries (one for Rx
	 * and one for Tx) according to assigned TS numbers.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_init_tsa_64rxtx(qmc, &info) :
		qmc_init_tsa_32rx_32tx(qmc, &info);
}
1442
/*
 * Set up one channel: compute its parameter RAM and BD ring addresses,
 * program the channel-specific parameters for its mode (transparent or
 * HDLC) and initialize all Rx/Tx buffer descriptors.
 * Interrupts are left masked; they are enabled later by
 * qmc_finalize_chans().
 * Returns 0 on success or a negative errno.
 */
static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 bd per channel (8 rx and 8 tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR: byte offset of the Tx BD ring in the BD table */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR: byte offset of the Rx BD ring in the BD table */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	/* Initial controller state values (CPM1/QE specific) */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
	qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, 0);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, QMC_BD_RX_W);

	/* Init Tx BDs and set Wrap bit on last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	if (chan->mode == QMC_HDLC)
		val = QMC_BD_TX_L | QMC_BD_TX_TC;
	else
		val = 0;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}
1520
qmc_setup_chans(struct qmc * qmc)1521 static int qmc_setup_chans(struct qmc *qmc)
1522 {
1523 struct qmc_chan *chan;
1524 int ret;
1525
1526 list_for_each_entry(chan, &qmc->chan_head, list) {
1527 ret = qmc_setup_chan(qmc, chan);
1528 if (ret)
1529 return ret;
1530 }
1531
1532 return 0;
1533 }
1534
qmc_finalize_chans(struct qmc * qmc)1535 static int qmc_finalize_chans(struct qmc *qmc)
1536 {
1537 struct qmc_chan *chan;
1538 int ret;
1539
1540 list_for_each_entry(chan, &qmc->chan_head, list) {
1541 /* Unmask channel interrupts */
1542 if (chan->mode == QMC_HDLC) {
1543 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1544 QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
1545 QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
1546 QMC_INT_TXB | QMC_INT_RXB);
1547 } else {
1548 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1549 QMC_INT_UN | QMC_INT_BSY |
1550 QMC_INT_TXB | QMC_INT_RXB);
1551 }
1552
1553 /* Forced stop the channel */
1554 ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
1555 if (ret)
1556 return ret;
1557 }
1558
1559 return 0;
1560 }
1561
qmc_setup_ints(struct qmc * qmc)1562 static int qmc_setup_ints(struct qmc *qmc)
1563 {
1564 unsigned int i;
1565 u16 __iomem *last;
1566
1567 /* Raz all entries */
1568 for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
1569 qmc_write16(qmc->int_table + i, 0x0000);
1570
1571 /* Set Wrap bit on last entry */
1572 if (qmc->int_size >= sizeof(u16)) {
1573 last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
1574 qmc_write16(last, QMC_INT_W);
1575 }
1576
1577 return 0;
1578 }
1579
/*
 * Handle the global interrupt (GINT): walk the circular interrupt table
 * starting at qmc->int_curr, process each valid entry (Tx done, Tx
 * underrun, Rx busy, Rx done) and advance the current pointer, wrapping
 * back to the table start when the W bit is seen.
 */
static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		/* Tx buffer transmitted -> complete pending writes */
		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		/* Tx underrun -> account it */
		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		/*
		 * Rx busy (frame received with no available buffer): the
		 * hardware halted the receiver; restart it right away if
		 * buffers are pending, otherwise remember it is halted.
		 */
		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				qmc_write32(chan->s_param + QMC_SPE_RPACK,
					    chan->qmc->data->rpack);
				qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
					    chan->mode == QMC_TRANSPARENT ?
						chan->qmc->data->zdstate_transp :
						chan->qmc->data->zdstate_hdlc);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE,
					    chan->qmc->data->rstate);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		/* Rx buffer received -> complete pending reads */
		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}
1641
qmc_irq_handler(int irq,void * priv)1642 static irqreturn_t qmc_irq_handler(int irq, void *priv)
1643 {
1644 struct qmc *qmc = (struct qmc *)priv;
1645 u16 scce;
1646
1647 scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
1648 qmc_write16(qmc->scc_regs + SCC_SCCE, scce);
1649
1650 if (unlikely(scce & SCC_SCCE_IQOV))
1651 dev_info(qmc->dev, "IRQ queue overflow\n");
1652
1653 if (unlikely(scce & SCC_SCCE_GUN))
1654 dev_err(qmc->dev, "Global transmitter underrun\n");
1655
1656 if (unlikely(scce & SCC_SCCE_GOV))
1657 dev_err(qmc->dev, "Global receiver overrun\n");
1658
1659 /* normal interrupt */
1660 if (likely(scce & SCC_SCCE_GINT))
1661 qmc_irq_gint(qmc);
1662
1663 return IRQ_HANDLED;
1664 }
1665
/*
 * Load the optional Soft-QMC microcode on QE-based controllers.
 * The firmware filename comes from the "fsl,soft-qmc" DT property; when
 * the property is absent this is a no-op. If a firmware is already loaded
 * in the QE, it must be a Soft-QMC one, otherwise -EALREADY is returned.
 * Returns 0 on success or a negative errno.
 */
static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
{
	struct qe_firmware_info *qe_fw_info;
	const struct qe_firmware *qe_fw;
	const struct firmware *fw;
	const char *filename;
	int ret;

	ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
	switch (ret) {
	case 0:
		break;
	case -EINVAL:
		/* fsl,soft-qmc property not set -> Simply do nothing */
		return 0;
	default:
		dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
			np);
		return ret;
	}

	/* Check whether a compatible firmware is already present in the QE */
	qe_fw_info = qe_get_firmware_info();
	if (qe_fw_info) {
		if (!strstr(qe_fw_info->id, "Soft-QMC")) {
			dev_err(qmc->dev, "Another Firmware is already loaded\n");
			return -EALREADY;
		}
		dev_info(qmc->dev, "Firmware already loaded\n");
		return 0;
	}

	dev_info(qmc->dev, "Using firmware %s\n", filename);

	ret = request_firmware(&fw, filename, qmc->dev);
	if (ret) {
		dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
		return ret;
	}

	qe_fw = (const struct qe_firmware *)fw->data;

	/* Sanity check: header must fit and declared length must match */
	if (fw->size < sizeof(qe_fw->header) ||
	    be32_to_cpu(qe_fw->header.length) != fw->size) {
		dev_err(qmc->dev, "Invalid firmware %s\n", filename);
		ret = -EINVAL;
		goto end;
	}

	ret = qe_upload_firmware(qe_fw);
	if (ret) {
		dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
		goto end;
	}

	ret = 0;
end:
	release_firmware(fw);
	return ret;
}
1725
qmc_cpm1_init_resources(struct qmc * qmc,struct platform_device * pdev)1726 static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
1727 {
1728 struct resource *res;
1729
1730 qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
1731 if (IS_ERR(qmc->scc_regs))
1732 return PTR_ERR(qmc->scc_regs);
1733
1734 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
1735 if (!res)
1736 return -EINVAL;
1737 qmc->scc_pram_offset = res->start - get_immrbase();
1738 qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
1739 if (IS_ERR(qmc->scc_pram))
1740 return PTR_ERR(qmc->scc_pram);
1741
1742 qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
1743 if (IS_ERR(qmc->dpram))
1744 return PTR_ERR(qmc->dpram);
1745
1746 return 0;
1747 }
1748
/*
 * Map and allocate the QE (UCC based) resources: UCC registers, the QE
 * subblock for commands, the muram area used as parameter RAM (assigned
 * to the device via QE_ASSIGN_PAGE_TO_DEVICE) and the dual-port RAM used
 * for per-channel parameters.
 * Returns 0 on success or a negative errno.
 */
static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
	struct resource *res;
	int ucc_num;
	s32 info;

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	/* The QE command subblock is derived from the UCC number */
	ucc_num = tsa_serial_get_num(qmc->tsa_serial);
	if (ucc_num < 0)
		return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");

	qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
	if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
		dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
		return -EINVAL;
	}
	/* Allocate the 'Global Multichannel Parameters' and the
	 * 'Framer parameters' areas. The 'Framer parameters' area
	 * is located right after the 'Global Multichannel Parameters'.
	 * The 'Framer parameters' need 1 byte per receive and transmit
	 * channel. The maximum number of receive or transmit channel
	 * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
	 */
	info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
				   ALIGNMENT_OF_UCC_SLOW_PRAM);
	if (info < 0)
		return info;

	/* Assign the allocated muram page to this UCC device */
	if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
			  QE_CR_PROTOCOL_UNSPECIFIED, info)) {
		dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed");
		return -EIO;
	}
	qmc->scc_pram = qe_muram_addr(info);
	qmc->scc_pram_offset = info;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
	if (!res)
		return -EINVAL;
	/* Offset of the dpram area relative to the muram base */
	qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
	qmc->dpram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	return 0;
}
1798
/* Dispatch resource initialization to the QE (UCC) or CPM1 (SCC) variant. */
static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
{
	if (qmc_is_qe(qmc))
		return qmc_qe_init_resources(qmc, pdev);

	return qmc_cpm1_init_resources(qmc, pdev);
}
1805
qmc_cpm1_init_scc(struct qmc * qmc)1806 static int qmc_cpm1_init_scc(struct qmc *qmc)
1807 {
1808 u32 val;
1809 int ret;
1810
1811 /* Connect the serial (SCC) to TSA */
1812 ret = tsa_serial_connect(qmc->tsa_serial);
1813 if (ret)
1814 return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
1815
1816 /* Init GMSR_H and GMSR_L registers */
1817 val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
1818 qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
1819
1820 /* enable QMC mode */
1821 qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
1822
1823 /* Disable and clear interrupts */
1824 qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
1825 qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
1826
1827 return 0;
1828 }
1829
/*
 * Initialize the QE UCC for QMC operation: set the UCC in slow mode,
 * connect it to the TSA, issue the QE_PUSHSCHED commands that initialize
 * the Tx/Rx startup addresses, restore RXPTR/TXPTR from the saved
 * RX_S_PTR/TX_S_PTR values and program GUMR for QMC mode.
 * On QE command failure the TSA serial is disconnected before returning.
 * Returns 0 on success or a negative errno.
 */
static int qmc_qe_init_ucc(struct qmc *qmc)
{
	u32 val;
	int ret;

	/* Set the UCC in slow mode */
	qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
		   UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);

	/* Connect the serial (UCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret)
		return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");

	/* Initialize the QMC tx startup addresses */
	if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
			  QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
		dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed");
		ret = -EIO;
		goto err_tsa_serial_disconnect;
	}

	/* Initialize the QMC rx startup addresses */
	if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
			  QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
		dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed");
		ret = -EIO;
		goto err_tsa_serial_disconnect;
	}

	/* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
	 * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during
	 * qmc_setup_tsa() call
	 */
	val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	/* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
	val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
	      SCC_GSMRH_TRX | SCC_GSMRH_TTX;
	qmc_write32(qmc->scc_regs + SCC_GSMRH, val);

	/* enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);

	/* Disable and clear interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);

	return 0;

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}
1887
/* Dispatch controller initialization to the QE (UCC) or CPM1 (SCC) variant. */
static int qmc_init_xcc(struct qmc *qmc)
{
	if (qmc_is_qe(qmc))
		return qmc_qe_init_ucc(qmc);

	return qmc_cpm1_init_scc(qmc);
}
1894
/* Undo qmc_init_xcc(): detach the SCC/UCC serial from the TSA. */
static void qmc_exit_xcc(struct qmc *qmc)
{
	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);
}
1900
qmc_probe(struct platform_device * pdev)1901 static int qmc_probe(struct platform_device *pdev)
1902 {
1903 struct device_node *np = pdev->dev.of_node;
1904 unsigned int nb_chans;
1905 struct qmc *qmc;
1906 int irq;
1907 int ret;
1908
1909 qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
1910 if (!qmc)
1911 return -ENOMEM;
1912
1913 qmc->dev = &pdev->dev;
1914 qmc->data = of_device_get_match_data(&pdev->dev);
1915 if (!qmc->data) {
1916 dev_err(qmc->dev, "Missing match data\n");
1917 return -EINVAL;
1918 }
1919 INIT_LIST_HEAD(&qmc->chan_head);
1920
1921 qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
1922 if (IS_ERR(qmc->tsa_serial)) {
1923 return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
1924 "Failed to get TSA serial\n");
1925 }
1926
1927 ret = qmc_init_resources(qmc, pdev);
1928 if (ret)
1929 return ret;
1930
1931 if (qmc_is_qe(qmc)) {
1932 ret = qmc_qe_soft_qmc_init(qmc, np);
1933 if (ret)
1934 return ret;
1935 }
1936
1937 /* Parse channels informationss */
1938 ret = qmc_of_parse_chans(qmc, np);
1939 if (ret)
1940 return ret;
1941
1942 nb_chans = qmc_nb_chans(qmc);
1943
1944 /*
1945 * Allocate the buffer descriptor table
1946 * 8 rx and 8 tx descriptors per channel
1947 */
1948 qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
1949 qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
1950 &qmc->bd_dma_addr, GFP_KERNEL);
1951 if (!qmc->bd_table) {
1952 dev_err(qmc->dev, "Failed to allocate bd table\n");
1953 return -ENOMEM;
1954 }
1955 memset(qmc->bd_table, 0, qmc->bd_size);
1956
1957 qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);
1958
1959 /* Allocate the interrupt table */
1960 qmc->int_size = QMC_NB_INTS * sizeof(u16);
1961 qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
1962 &qmc->int_dma_addr, GFP_KERNEL);
1963 if (!qmc->int_table) {
1964 dev_err(qmc->dev, "Failed to allocate interrupt table\n");
1965 return -ENOMEM;
1966 }
1967 memset(qmc->int_table, 0, qmc->int_size);
1968
1969 qmc->int_curr = qmc->int_table;
1970 qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
1971 qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);
1972
1973 /* Set MRBLR (valid for HDLC only) max MRU + max CRC */
1974 qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);
1975
1976 qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
1977 qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);
1978
1979 qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
1980 qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
1981
1982 if (qmc_is_qe(qmc)) {
1983 /* Zeroed the reserved area */
1984 memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
1985 QMC_QE_GBL_RSV_B0_SIZE);
1986
1987 qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
1988
1989 /* Init 'framer parameters' area and set the base addresses */
1990 memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
1991 memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
1992 qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
1993 qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
1994 qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
1995 qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
1996 }
1997
1998 ret = qmc_init_tsa(qmc);
1999 if (ret)
2000 return ret;
2001
2002 qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
2003
2004 ret = qmc_setup_chans(qmc);
2005 if (ret)
2006 return ret;
2007
2008 /* Init interrupts table */
2009 ret = qmc_setup_ints(qmc);
2010 if (ret)
2011 return ret;
2012
2013 /* Init SCC (CPM1) or UCC (QE) */
2014 ret = qmc_init_xcc(qmc);
2015 if (ret)
2016 return ret;
2017
2018 /* Set the irq handler */
2019 irq = platform_get_irq(pdev, 0);
2020 if (irq < 0) {
2021 ret = irq;
2022 goto err_exit_xcc;
2023 }
2024 ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
2025 if (ret < 0)
2026 goto err_exit_xcc;
2027
2028 /* Enable interrupts */
2029 qmc_write16(qmc->scc_regs + SCC_SCCM,
2030 SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
2031
2032 ret = qmc_finalize_chans(qmc);
2033 if (ret < 0)
2034 goto err_disable_intr;
2035
2036 /* Enable transmitter and receiver */
2037 qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
2038
2039 platform_set_drvdata(pdev, qmc);
2040
2041 /* Populate channel related devices */
2042 ret = devm_of_platform_populate(qmc->dev);
2043 if (ret)
2044 goto err_disable_txrx;
2045
2046 return 0;
2047
2048 err_disable_txrx:
2049 qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
2050
2051 err_disable_intr:
2052 qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2053
2054 err_exit_xcc:
2055 qmc_exit_xcc(qmc);
2056 return ret;
2057 }
2058
qmc_remove(struct platform_device * pdev)2059 static void qmc_remove(struct platform_device *pdev)
2060 {
2061 struct qmc *qmc = platform_get_drvdata(pdev);
2062
2063 /* Disable transmitter and receiver */
2064 qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
2065
2066 /* Disable interrupts */
2067 qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2068
2069 /* Exit SCC (CPM1) or UCC (QE) */
2070 qmc_exit_xcc(qmc);
2071 }
2072
2073 static const struct qmc_data qmc_data_cpm1 __maybe_unused = {
2074 .version = QMC_CPM1,
2075 .tstate = 0x30000000,
2076 .rstate = 0x31000000,
2077 .zistate = 0x00000100,
2078 .zdstate_hdlc = 0x00000080,
2079 .zdstate_transp = 0x18000080,
2080 .rpack = 0x00000000,
2081 };
2082
/*
 * Per-version controller constants for the QUICC Engine UCC variant.
 * Same fields as qmc_data_cpm1 but with the QE-specific raw state words;
 * values are opaque hardware init constants — presumably from the QE
 * reference documentation (TODO confirm against the manual).
 */
static const struct qmc_data qmc_data_qe __maybe_unused = {
	.version = QMC_QE,
	.tstate = 0x30000000,
	.rstate = 0x30000000,
	.zistate = 0x00000200,
	.zdstate_hdlc = 0x80FFFFE0,	/* initial ZDSTATE for HDLC-mode channels */
	.zdstate_transp = 0x003FFFE2,	/* initial ZDSTATE for transparent-mode channels */
	.rpack = 0x80000000,
};
2092
/*
 * Device-tree match table. Each compatible carries the per-version constant
 * set; entries are compiled in only when the corresponding SoC support is
 * enabled, so the table may be empty but for the sentinel.
 */
static const struct of_device_id qmc_id_table[] = {
#if IS_ENABLED(CONFIG_CPM1)
	{ .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
#endif
#if IS_ENABLED(CONFIG_QUICC_ENGINE)
	{ .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
#endif
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);
2103
/* Platform driver glue; also consulted by qmc_chan_get_from_qmc() to verify
 * that a phandle really points at a QMC node before dereferencing it.
 */
static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove = qmc_remove,
};
module_platform_driver(qmc_driver);
2113
qmc_chan_get_from_qmc(struct device_node * qmc_np,unsigned int chan_index)2114 static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
2115 {
2116 struct platform_device *pdev;
2117 struct qmc_chan *qmc_chan;
2118 struct qmc *qmc;
2119
2120 if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
2121 return ERR_PTR(-EINVAL);
2122
2123 pdev = of_find_device_by_node(qmc_np);
2124 if (!pdev)
2125 return ERR_PTR(-ENODEV);
2126
2127 qmc = platform_get_drvdata(pdev);
2128 if (!qmc) {
2129 platform_device_put(pdev);
2130 return ERR_PTR(-EPROBE_DEFER);
2131 }
2132
2133 if (chan_index >= ARRAY_SIZE(qmc->chans)) {
2134 platform_device_put(pdev);
2135 return ERR_PTR(-EINVAL);
2136 }
2137
2138 qmc_chan = qmc->chans[chan_index];
2139 if (!qmc_chan) {
2140 platform_device_put(pdev);
2141 return ERR_PTR(-ENOENT);
2142 }
2143
2144 return qmc_chan;
2145 }
2146
qmc_chan_count_phandles(struct device_node * np,const char * phandles_name)2147 int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name)
2148 {
2149 int count;
2150
2151 /* phandles are fixed args phandles with one arg */
2152 count = of_count_phandle_with_args(np, phandles_name, NULL);
2153 if (count < 0)
2154 return count;
2155
2156 return count / 2;
2157 }
2158 EXPORT_SYMBOL(qmc_chan_count_phandles);
2159
qmc_chan_get_byphandles_index(struct device_node * np,const char * phandles_name,int index)2160 struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
2161 const char *phandles_name,
2162 int index)
2163 {
2164 struct of_phandle_args out_args;
2165 struct qmc_chan *qmc_chan;
2166 int ret;
2167
2168 ret = of_parse_phandle_with_fixed_args(np, phandles_name, 1, index,
2169 &out_args);
2170 if (ret < 0)
2171 return ERR_PTR(ret);
2172
2173 if (out_args.args_count != 1) {
2174 of_node_put(out_args.np);
2175 return ERR_PTR(-EINVAL);
2176 }
2177
2178 qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
2179 of_node_put(out_args.np);
2180 return qmc_chan;
2181 }
2182 EXPORT_SYMBOL(qmc_chan_get_byphandles_index);
2183
qmc_chan_get_bychild(struct device_node * np)2184 struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
2185 {
2186 struct device_node *qmc_np;
2187 u32 chan_index;
2188 int ret;
2189
2190 qmc_np = np->parent;
2191 ret = of_property_read_u32(np, "reg", &chan_index);
2192 if (ret)
2193 return ERR_PTR(-EINVAL);
2194
2195 return qmc_chan_get_from_qmc(qmc_np, chan_index);
2196 }
2197 EXPORT_SYMBOL(qmc_chan_get_bychild);
2198
/*
 * Release a channel obtained from one of the qmc_chan_get_*() helpers:
 * drops the controller device reference taken at get time.
 */
void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);
2204
/* devres destructor: drop the channel reference stored in the managed slot */
static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **slot = res;

	qmc_chan_put(*slot);
}
2211
devm_qmc_chan_get_byphandles_index(struct device * dev,struct device_node * np,const char * phandles_name,int index)2212 struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
2213 struct device_node *np,
2214 const char *phandles_name,
2215 int index)
2216 {
2217 struct qmc_chan *qmc_chan;
2218 struct qmc_chan **dr;
2219
2220 dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2221 if (!dr)
2222 return ERR_PTR(-ENOMEM);
2223
2224 qmc_chan = qmc_chan_get_byphandles_index(np, phandles_name, index);
2225 if (!IS_ERR(qmc_chan)) {
2226 *dr = qmc_chan;
2227 devres_add(dev, dr);
2228 } else {
2229 devres_free(dr);
2230 }
2231
2232 return qmc_chan;
2233 }
2234 EXPORT_SYMBOL(devm_qmc_chan_get_byphandles_index);
2235
devm_qmc_chan_get_bychild(struct device * dev,struct device_node * np)2236 struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
2237 struct device_node *np)
2238 {
2239 struct qmc_chan *qmc_chan;
2240 struct qmc_chan **dr;
2241
2242 dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2243 if (!dr)
2244 return ERR_PTR(-ENOMEM);
2245
2246 qmc_chan = qmc_chan_get_bychild(np);
2247 if (!IS_ERR(qmc_chan)) {
2248 *dr = qmc_chan;
2249 devres_add(dev, dr);
2250 } else {
2251 devres_free(dr);
2252 }
2253
2254 return qmc_chan;
2255 }
2256 EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
2257
2258 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
2259 MODULE_DESCRIPTION("CPM/QE QMC driver");
2260 MODULE_LICENSE("GPL");
2261