1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * QMC driver
4 *
5 * Copyright 2022 CS GROUP France
6 *
7 * Author: Herve Codina <herve.codina@bootlin.com>
8 */
9
10 #include <soc/fsl/qe/qmc.h>
11 #include <linux/bitfield.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/firmware.h>
14 #include <linux/hdlc.h>
15 #include <linux/interrupt.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/of.h>
19 #include <linux/of_platform.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22 #include <soc/fsl/cpm.h>
23 #include <soc/fsl/qe/ucc_slow.h>
24 #include <soc/fsl/qe/qe.h>
25 #include <sysdev/fsl_soc.h>
26 #include "tsa.h"
27
28 /* SCC general mode register low (32 bits) (GUMR_L in QE) */
29 #define SCC_GSMRL 0x00
30 #define SCC_GSMRL_ENR BIT(5)
31 #define SCC_GSMRL_ENT BIT(4)
32 #define SCC_GSMRL_MODE_MASK GENMASK(3, 0)
33 #define SCC_CPM1_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x0A)
34 #define SCC_QE_GSMRL_MODE_QMC FIELD_PREP_CONST(SCC_GSMRL_MODE_MASK, 0x02)
35
36 /* SCC general mode register high (32 bits) (identical to GUMR_H in QE) */
37 #define SCC_GSMRH 0x04
38 #define SCC_GSMRH_CTSS BIT(7)
39 #define SCC_GSMRH_CDS BIT(8)
40 #define SCC_GSMRH_CTSP BIT(9)
41 #define SCC_GSMRH_CDP BIT(10)
42 #define SCC_GSMRH_TTX BIT(11)
43 #define SCC_GSMRH_TRX BIT(12)
44
45 /* SCC event register (16 bits) (identical to UCCE in QE) */
46 #define SCC_SCCE 0x10
47 #define SCC_SCCE_IQOV BIT(3)
48 #define SCC_SCCE_GINT BIT(2)
49 #define SCC_SCCE_GUN BIT(1)
50 #define SCC_SCCE_GOV BIT(0)
51
52 /* SCC mask register (16 bits) */
53 #define SCC_SCCM 0x14
54
55 /* UCC Extended Mode Register (8 bits, QE only) */
56 #define SCC_QE_UCC_GUEMR 0x90
57
58 /* Multichannel base pointer (32 bits) */
59 #define QMC_GBL_MCBASE 0x00
60 /* Multichannel controller state (16 bits) */
61 #define QMC_GBL_QMCSTATE 0x04
62 /* Maximum receive buffer length (16 bits) */
63 #define QMC_GBL_MRBLR 0x06
64 /* Tx time-slot assignment table pointer (16 bits) */
65 #define QMC_GBL_TX_S_PTR 0x08
66 /* Rx pointer (16 bits) */
67 #define QMC_GBL_RXPTR 0x0A
68 /* Global receive frame threshold (16 bits) */
69 #define QMC_GBL_GRFTHR 0x0C
70 /* Global receive frame count (16 bits) */
71 #define QMC_GBL_GRFCNT 0x0E
72 /* Multichannel interrupt base address (32 bits) */
73 #define QMC_GBL_INTBASE 0x10
74 /* Multichannel interrupt pointer (32 bits) */
75 #define QMC_GBL_INTPTR 0x14
76 /* Rx time-slot assignment table pointer (16 bits) */
77 #define QMC_GBL_RX_S_PTR 0x18
78 /* Tx pointer (16 bits) */
79 #define QMC_GBL_TXPTR 0x1A
80 /* CRC constant (32 bits) */
81 #define QMC_GBL_C_MASK32 0x1C
82 /* Time slot assignment table Rx (32 x 16 bits) */
83 #define QMC_GBL_TSATRX 0x20
84 /* Time slot assignment table Tx (32 x 16 bits) */
85 #define QMC_GBL_TSATTX 0x60
86 /* CRC constant (16 bits) */
87 #define QMC_GBL_C_MASK16 0xA0
88 /* Rx framer base pointer (16 bits, QE only) */
89 #define QMC_QE_GBL_RX_FRM_BASE 0xAC
90 /* Tx framer base pointer (16 bits, QE only) */
91 #define QMC_QE_GBL_TX_FRM_BASE 0xAE
92 /* A reserved area (0xB0 -> 0xC3) that must be initialized to 0 (QE only) */
93 #define QMC_QE_GBL_RSV_B0_START 0xB0
94 #define QMC_QE_GBL_RSV_B0_SIZE 0x14
95 /* QMC Global Channel specific base (32 bits, QE only) */
96 #define QMC_QE_GBL_GCSBASE 0xC4
97
98 /* TSA entry (16-bit entry in TSATRX and TSATTX) */
99 #define QMC_TSA_VALID BIT(15)
100 #define QMC_TSA_WRAP BIT(14)
101 #define QMC_TSA_MASK_MASKH GENMASK(13, 12)
102 #define QMC_TSA_MASK_MASKL GENMASK(5, 0)
103 #define QMC_TSA_MASK_8BIT (FIELD_PREP_CONST(QMC_TSA_MASK_MASKH, 0x3) | \
104 FIELD_PREP_CONST(QMC_TSA_MASK_MASKL, 0x3F))
105 #define QMC_TSA_CHANNEL_MASK GENMASK(11, 6)
106 #define QMC_TSA_CHANNEL(x) FIELD_PREP(QMC_TSA_CHANNEL_MASK, x)
107
108 /* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
109 #define QMC_SPE_TBASE 0x00
110
111 /* Channel mode register (16 bits) */
112 #define QMC_SPE_CHAMR 0x02
113 #define QMC_SPE_CHAMR_MODE_MASK GENMASK(15, 15)
114 #define QMC_SPE_CHAMR_MODE_HDLC FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 1)
115 #define QMC_SPE_CHAMR_MODE_TRANSP (FIELD_PREP_CONST(QMC_SPE_CHAMR_MODE_MASK, 0) | BIT(13))
116 #define QMC_SPE_CHAMR_ENT BIT(12)
117 #define QMC_SPE_CHAMR_POL BIT(8)
118 #define QMC_SPE_CHAMR_HDLC_IDLM BIT(13)
119 #define QMC_SPE_CHAMR_HDLC_CRC BIT(7)
120 #define QMC_SPE_CHAMR_HDLC_NOF_MASK GENMASK(3, 0)
121 #define QMC_SPE_CHAMR_HDLC_NOF(x) FIELD_PREP(QMC_SPE_CHAMR_HDLC_NOF_MASK, x)
122 #define QMC_SPE_CHAMR_TRANSP_RD BIT(14)
123 #define QMC_SPE_CHAMR_TRANSP_SYNC BIT(10)
124
125 /* Tx internal state (32 bits) */
126 #define QMC_SPE_TSTATE 0x04
127 /* Tx buffer descriptor pointer (16 bits) */
128 #define QMC_SPE_TBPTR 0x0C
129 /* Zero-insertion state (32 bits) */
130 #define QMC_SPE_ZISTATE 0x14
131 /* Channel’s interrupt mask flags (16 bits) */
132 #define QMC_SPE_INTMSK 0x1C
133 /* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
134 #define QMC_SPE_RBASE 0x20
135 /* HDLC: Maximum frame length register (16 bits) */
136 #define QMC_SPE_MFLR 0x22
137 /* TRANSPARENT: Transparent maximum receive length (16 bits) */
138 #define QMC_SPE_TMRBLR 0x22
139 /* Rx internal state (32 bits) */
140 #define QMC_SPE_RSTATE 0x24
141 /* Rx buffer descriptor pointer (16 bits) */
142 #define QMC_SPE_RBPTR 0x2C
143 /* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
144 #define QMC_SPE_RPACK 0x30
145 /* Zero deletion state (32 bits) */
146 #define QMC_SPE_ZDSTATE 0x34
147
148 /* Transparent synchronization (16 bits) */
149 #define QMC_SPE_TRNSYNC 0x3C
150 #define QMC_SPE_TRNSYNC_RX_MASK GENMASK(15, 8)
151 #define QMC_SPE_TRNSYNC_RX(x) FIELD_PREP(QMC_SPE_TRNSYNC_RX_MASK, x)
152 #define QMC_SPE_TRNSYNC_TX_MASK GENMASK(7, 0)
153 #define QMC_SPE_TRNSYNC_TX(x) FIELD_PREP(QMC_SPE_TRNSYNC_TX_MASK, x)
154
155 /* Interrupt table entry and interrupt mask bits */
156 #define QMC_INT_V BIT(15)
157 #define QMC_INT_W BIT(14)
158 #define QMC_INT_NID BIT(13)
159 #define QMC_INT_IDL BIT(12)
160 #define QMC_INT_CHANNEL_MASK GENMASK(11, 6)
161 #define QMC_INT_GET_CHANNEL(x) FIELD_GET(QMC_INT_CHANNEL_MASK, x)
162 #define QMC_INT_MRF BIT(5)
163 #define QMC_INT_UN BIT(4)
164 #define QMC_INT_RXF BIT(3)
165 #define QMC_INT_BSY BIT(2)
166 #define QMC_INT_TXB BIT(1)
167 #define QMC_INT_RXB BIT(0)
168
169 /* Buffer descriptor (BD) control/status bits */
170 #define QMC_BD_RX_E BIT(15)
171 #define QMC_BD_RX_W BIT(13)
172 #define QMC_BD_RX_I BIT(12)
173 #define QMC_BD_RX_L BIT(11)
174 #define QMC_BD_RX_F BIT(10)
175 #define QMC_BD_RX_CM BIT(9)
176 #define QMC_BD_RX_UB BIT(7)
177 #define QMC_BD_RX_LG BIT(5)
178 #define QMC_BD_RX_NO BIT(4)
179 #define QMC_BD_RX_AB BIT(3)
180 #define QMC_BD_RX_CR BIT(2)
181
182 #define QMC_BD_TX_R BIT(15)
183 #define QMC_BD_TX_W BIT(13)
184 #define QMC_BD_TX_I BIT(12)
185 #define QMC_BD_TX_L BIT(11)
186 #define QMC_BD_TX_TC BIT(10)
187 #define QMC_BD_TX_CM BIT(9)
188 #define QMC_BD_TX_UB BIT(7)
189 #define QMC_BD_TX_PAD_MASK GENMASK(3, 0)
190 #define QMC_BD_TX_PAD(x) FIELD_PREP(QMC_BD_TX_PAD_MASK, x)
191
192 /* Numbers of BDs and interrupt items */
193 #define QMC_NB_TXBDS 8
194 #define QMC_NB_RXBDS 8
195 #define QMC_NB_INTS 128
196
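/* Per-BD transfer bookkeeping: completion callback and its opaque context */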
197 struct qmc_xfer_desc {
198 union {
199 void (*tx_complete)(void *context);
200 void (*rx_complete)(void *context, size_t length, unsigned int flags);
201 };
202 void *context;
203 };
204
205 struct qmc_chan {
206 struct list_head list;
207 unsigned int id;
208 struct qmc *qmc;
209 void __iomem *s_param;
210 enum qmc_mode mode;
211 spinlock_t ts_lock; /* Protect timeslots */
212 u64 tx_ts_mask_avail;
213 u64 tx_ts_mask;
214 u64 rx_ts_mask_avail;
215 u64 rx_ts_mask;
216 bool is_reverse_data;
217
218 spinlock_t tx_lock; /* Protect Tx related data */
219 cbd_t __iomem *txbds;
220 cbd_t __iomem *txbd_free;
221 cbd_t __iomem *txbd_done;
222 struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
223 u64 nb_tx_underrun;
224 bool is_tx_stopped;
225
226 spinlock_t rx_lock; /* Protect Rx related data */
227 cbd_t __iomem *rxbds;
228 cbd_t __iomem *rxbd_free;
229 cbd_t __iomem *rxbd_done;
230 struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
231 u64 nb_rx_busy;
232 int rx_pending;
233 bool is_rx_halted;
234 bool is_rx_stopped;
235 };
236
237 enum qmc_version {
238 QMC_CPM1,
239 QMC_QE,
240 };
241
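/* Per-variant (CPM1 vs QE) constants: initial values for the channel state words */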
242 struct qmc_data {
243 enum qmc_version version;
244 u32 tstate; /* Initial TSTATE value */
245 u32 rstate; /* Initial RSTATE value */
246 u32 zistate; /* Initial ZISTATE value */
247 u32 zdstate_hdlc; /* Initial ZDSTATE value (HDLC mode) */
248 u32 zdstate_transp; /* Initial ZDSTATE value (Transparent mode) */
249 u32 rpack; /* Initial RPACK value */
250 };
251
252 struct qmc {
253 struct device *dev;
254 const struct qmc_data *data;
255 struct tsa_serial *tsa_serial;
256 void __iomem *scc_regs;
257 void __iomem *scc_pram;
258 void __iomem *dpram;
259 u16 scc_pram_offset;
260 u32 dpram_offset;
261 u32 qe_subblock;
262 cbd_t __iomem *bd_table;
263 dma_addr_t bd_dma_addr;
264 size_t bd_size;
265 u16 __iomem *int_table;
266 u16 __iomem *int_curr;
267 dma_addr_t int_dma_addr;
268 size_t int_size;
269 bool is_tsa_64rxtx;
270 struct list_head chan_head;
271 struct qmc_chan *chans[64];
272 };
273
274 static void qmc_write8(void __iomem *addr, u8 val)
275 {
276 iowrite8(val, addr);
277 }
278
279 static void qmc_write16(void __iomem *addr, u16 val)
280 {
281 iowrite16be(val, addr);
282 }
283
284 static u16 qmc_read16(void __iomem *addr)
285 {
286 return ioread16be(addr);
287 }
288
289 static void qmc_setbits16(void __iomem *addr, u16 set)
290 {
291 qmc_write16(addr, qmc_read16(addr) | set);
292 }
293
294 static void qmc_clrbits16(void __iomem *addr, u16 clr)
295 {
296 qmc_write16(addr, qmc_read16(addr) & ~clr);
297 }
298
299 static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
300 {
301 qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
302 }
303
304 static void qmc_write32(void __iomem *addr, u32 val)
305 {
306 iowrite32be(val, addr);
307 }
308
309 static u32 qmc_read32(void __iomem *addr)
310 {
311 return ioread32be(addr);
312 }
313
314 static void qmc_setbits32(void __iomem *addr, u32 set)
315 {
316 qmc_write32(addr, qmc_read32(addr) | set);
317 }
318
319 static bool qmc_is_qe(const struct qmc *qmc)
320 {
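/*
 * When only one of CONFIG_CPM and CONFIG_QUICC_ENGINE is enabled, the result
 * is known at compile time and the runtime check on the match data is
 * optimized out.
 */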
321 if (IS_ENABLED(CONFIG_QUICC_ENGINE) && IS_ENABLED(CONFIG_CPM))
322 return qmc->data->version == QMC_QE;
323
324 return IS_ENABLED(CONFIG_QUICC_ENGINE);
325 }
326
327 int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
328 {
329 struct tsa_serial_info tsa_info;
330 unsigned long flags;
331 int ret;
332
333 /* Retrieve info from the TSA related serial */
334 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
335 if (ret)
336 return ret;
337
338 spin_lock_irqsave(&chan->ts_lock, flags);
339
340 info->mode = chan->mode;
341 info->rx_fs_rate = tsa_info.rx_fs_rate;
342 info->rx_bit_rate = tsa_info.rx_bit_rate;
343 info->nb_tx_ts = hweight64(chan->tx_ts_mask);
344 info->tx_fs_rate = tsa_info.tx_fs_rate;
345 info->tx_bit_rate = tsa_info.tx_bit_rate;
346 info->nb_rx_ts = hweight64(chan->rx_ts_mask);
347
348 spin_unlock_irqrestore(&chan->ts_lock, flags);
349
350 return 0;
351 }
352 EXPORT_SYMBOL(qmc_chan_get_info);
353
354 int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
355 {
356 unsigned long flags;
357
358 spin_lock_irqsave(&chan->ts_lock, flags);
359
360 ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
361 ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
362 ts_info->rx_ts_mask = chan->rx_ts_mask;
363 ts_info->tx_ts_mask = chan->tx_ts_mask;
364
365 spin_unlock_irqrestore(&chan->ts_lock, flags);
366
367 return 0;
368 }
369 EXPORT_SYMBOL(qmc_chan_get_ts_info);
370
371 int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
372 {
373 unsigned long flags;
374 int ret;
375
376 /* Only a subset of available timeslots is allowed */
377 if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
378 return -EINVAL;
379 if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
380 return -EINVAL;
381
382 /* In case of common rx/tx table, rx/tx masks must be identical */
383 if (chan->qmc->is_tsa_64rxtx) {
384 if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
385 return -EINVAL;
386 }
387
388 spin_lock_irqsave(&chan->ts_lock, flags);
389
390 if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
391 (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
392 dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
393 ret = -EBUSY;
394 } else {
395 chan->tx_ts_mask = ts_info->tx_ts_mask;
396 chan->rx_ts_mask = ts_info->rx_ts_mask;
397 ret = 0;
398 }
399 spin_unlock_irqrestore(&chan->ts_lock, flags);
400
401 return ret;
402 }
403 EXPORT_SYMBOL(qmc_chan_set_ts_info);
404
405 int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
406 {
407 if (param->mode != chan->mode)
408 return -EINVAL;
409
410 switch (param->mode) {
411 case QMC_HDLC:
412 if (param->hdlc.max_rx_buf_size % 4 ||
413 param->hdlc.max_rx_buf_size < 8)
414 return -EINVAL;
415
416 qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
417 param->hdlc.max_rx_buf_size - 8);
418 qmc_write16(chan->s_param + QMC_SPE_MFLR,
419 param->hdlc.max_rx_frame_size);
420 if (param->hdlc.is_crc32) {
421 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
422 QMC_SPE_CHAMR_HDLC_CRC);
423 } else {
424 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
425 QMC_SPE_CHAMR_HDLC_CRC);
426 }
427 break;
428
429 case QMC_TRANSPARENT:
430 qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
431 param->transp.max_rx_buf_size);
432 break;
433
434 default:
435 return -EINVAL;
436 }
437
438 return 0;
439 }
440 EXPORT_SYMBOL(qmc_chan_set_param);
441
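/*
 * Minimal usage sketch for a consumer of the write API (the callback name and
 * error handling are illustrative only, not taken from an existing driver):
 *
 *	static void my_tx_done(void *context)
 *	{
 *		// The DMA buffer can be unmapped or reused here
 *	}
 *
 *	ret = qmc_chan_write_submit(chan, dma_addr, len, my_tx_done, priv);
 *	if (ret == -EBUSY)
 *		// All Tx BDs are in use, retry after a completion
 */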
442 int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
443 void (*complete)(void *context), void *context)
444 {
445 struct qmc_xfer_desc *xfer_desc;
446 unsigned long flags;
447 cbd_t __iomem *bd;
448 u16 ctrl;
449 int ret;
450
451 /*
452 * R bit UB bit
453 * 0 0 : The BD is free
454 * 1 1 : The BD is in use, waiting for transfer
455 * 0 1 : The BD is in use, waiting for completion
456 * 1 0 : Should not happen
457 */
458
459 spin_lock_irqsave(&chan->tx_lock, flags);
460 bd = chan->txbd_free;
461
462 ctrl = qmc_read16(&bd->cbd_sc);
463 if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
464 /* We are full ... */
465 ret = -EBUSY;
466 goto end;
467 }
468
469 qmc_write16(&bd->cbd_datlen, length);
470 qmc_write32(&bd->cbd_bufaddr, addr);
471
472 xfer_desc = &chan->tx_desc[bd - chan->txbds];
473 xfer_desc->tx_complete = complete;
474 xfer_desc->context = context;
475
476 /* Activate the descriptor */
477 ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
478 wmb(); /* Be sure to flush the descriptor before control update */
479 qmc_write16(&bd->cbd_sc, ctrl);
480
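/*
 * Setting POL (polling) makes the controller examine this channel's Tx BDs
 * again so the freshly queued descriptor is picked up.
 */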
481 if (!chan->is_tx_stopped)
482 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
483
484 if (ctrl & QMC_BD_TX_W)
485 chan->txbd_free = chan->txbds;
486 else
487 chan->txbd_free++;
488
489 ret = 0;
490
491 end:
492 spin_unlock_irqrestore(&chan->tx_lock, flags);
493 return ret;
494 }
495 EXPORT_SYMBOL(qmc_chan_write_submit);
496
497 static void qmc_chan_write_done(struct qmc_chan *chan)
498 {
499 struct qmc_xfer_desc *xfer_desc;
500 void (*complete)(void *context);
501 unsigned long flags;
502 void *context;
503 cbd_t __iomem *bd;
504 u16 ctrl;
505
506 /*
507 * R bit UB bit
508 * 0 0 : The BD is free
509 * 1 1 : The BD is in use, waiting for transfer
510 * 0 1 : The BD is in use, waiting for completion
511 * 1 0 : Should not happen
512 */
513
514 spin_lock_irqsave(&chan->tx_lock, flags);
515 bd = chan->txbd_done;
516
517 ctrl = qmc_read16(&bd->cbd_sc);
518 while (!(ctrl & QMC_BD_TX_R)) {
519 if (!(ctrl & QMC_BD_TX_UB))
520 goto end;
521
522 xfer_desc = &chan->tx_desc[bd - chan->txbds];
523 complete = xfer_desc->tx_complete;
524 context = xfer_desc->context;
525 xfer_desc->tx_complete = NULL;
526 xfer_desc->context = NULL;
527
528 qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);
529
530 if (ctrl & QMC_BD_TX_W)
531 chan->txbd_done = chan->txbds;
532 else
533 chan->txbd_done++;
534
535 if (complete) {
536 spin_unlock_irqrestore(&chan->tx_lock, flags);
537 complete(context);
538 spin_lock_irqsave(&chan->tx_lock, flags);
539 }
540
541 bd = chan->txbd_done;
542 ctrl = qmc_read16(&bd->cbd_sc);
543 }
544
545 end:
546 spin_unlock_irqrestore(&chan->tx_lock, flags);
547 }
548
549 int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
550 void (*complete)(void *context, size_t length, unsigned int flags),
551 void *context)
552 {
553 struct qmc_xfer_desc *xfer_desc;
554 unsigned long flags;
555 cbd_t __iomem *bd;
556 u16 ctrl;
557 int ret;
558
559 /*
560 * E bit UB bit
561 * 0 0 : The BD is free
562 * 1 1 : The BD is in use, waiting for transfer
563 * 0 1 : The BD is in use, waiting for completion
564 * 1 0 : Should not happen
565 */
566
567 spin_lock_irqsave(&chan->rx_lock, flags);
568 bd = chan->rxbd_free;
569
570 ctrl = qmc_read16(&bd->cbd_sc);
571 if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
572 /* We are full ... */
573 ret = -EBUSY;
574 goto end;
575 }
576
577 qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
578 qmc_write32(&bd->cbd_bufaddr, addr);
579
580 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
581 xfer_desc->rx_complete = complete;
582 xfer_desc->context = context;
583
584 /* Clear previous status flags */
585 ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
586 QMC_BD_RX_AB | QMC_BD_RX_CR);
587
588 /* Activate the descriptor */
589 ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
590 wmb(); /* Be sure to flush data before descriptor activation */
591 qmc_write16(&bd->cbd_sc, ctrl);
592
593 /* Restart receiver if needed */
594 if (chan->is_rx_halted && !chan->is_rx_stopped) {
595 /* Restart receiver */
596 qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
597 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
598 chan->mode == QMC_TRANSPARENT ?
599 chan->qmc->data->zdstate_transp :
600 chan->qmc->data->zdstate_hdlc);
601 qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
602 chan->is_rx_halted = false;
603 }
604 chan->rx_pending++;
605
606 if (ctrl & QMC_BD_RX_W)
607 chan->rxbd_free = chan->rxbds;
608 else
609 chan->rxbd_free++;
610
611 ret = 0;
612 end:
613 spin_unlock_irqrestore(&chan->rx_lock, flags);
614 return ret;
615 }
616 EXPORT_SYMBOL(qmc_chan_read_submit);
617
618 static void qmc_chan_read_done(struct qmc_chan *chan)
619 {
620 void (*complete)(void *context, size_t size, unsigned int flags);
621 struct qmc_xfer_desc *xfer_desc;
622 unsigned long flags;
623 cbd_t __iomem *bd;
624 void *context;
625 u16 datalen;
626 u16 ctrl;
627
628 /*
629 * E bit UB bit
630 * 0 0 : The BD is free
631 * 1 1 : The BD is in use, waiting for transfer
632 * 0 1 : The BD is in use, waiting for completion
633 * 1 0 : Should not happen
634 */
635
636 spin_lock_irqsave(&chan->rx_lock, flags);
637 bd = chan->rxbd_done;
638
639 ctrl = qmc_read16(&bd->cbd_sc);
640 while (!(ctrl & QMC_BD_RX_E)) {
641 if (!(ctrl & QMC_BD_RX_UB))
642 goto end;
643
644 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
645 complete = xfer_desc->rx_complete;
646 context = xfer_desc->context;
647 xfer_desc->rx_complete = NULL;
648 xfer_desc->context = NULL;
649
650 datalen = qmc_read16(&bd->cbd_datlen);
651 qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);
652
653 if (ctrl & QMC_BD_RX_W)
654 chan->rxbd_done = chan->rxbds;
655 else
656 chan->rxbd_done++;
657
658 chan->rx_pending--;
659
660 if (complete) {
661 spin_unlock_irqrestore(&chan->rx_lock, flags);
662
663 /*
664 * Avoid conversion between internal hardware flags and
665 * the software API flags.
666 * -> Be sure that the software API flags are consistent
667 * with the hardware flags
668 */
669 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
670 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
671 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
672 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
673 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
674 BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);
675
676 complete(context, datalen,
677 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
678 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
679 spin_lock_irqsave(&chan->rx_lock, flags);
680 }
681
682 bd = chan->rxbd_done;
683 ctrl = qmc_read16(&bd->cbd_sc);
684 }
685
686 end:
687 spin_unlock_irqrestore(&chan->rx_lock, flags);
688 }
689
690 static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
691 bool enable)
692 {
693 unsigned int i;
694 u16 curr;
695 u16 val;
696
697 /*
698 * Use a common Tx/Rx table with 64 entries.
699 * The Tx and Rx time-slot masks must be identical.
700 */
701 if (chan->tx_ts_mask != chan->rx_ts_mask) {
702 dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
703 return -EINVAL;
704 }
705
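/* Each entry routes one full 8-bit time slot to this channel */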
706 val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
707
708 /* Check entries based on Rx stuff */
709 for (i = 0; i < info->nb_rx_ts; i++) {
710 if (!(chan->rx_ts_mask & (((u64)1) << i)))
711 continue;
712
713 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
714 if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
715 dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
716 chan->id, i);
717 return -EBUSY;
718 }
719 }
720
721 /* Set entries based on Rx stuff */
722 for (i = 0; i < info->nb_rx_ts; i++) {
723 if (!(chan->rx_ts_mask & (((u64)1) << i)))
724 continue;
725
726 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
727 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
728 }
729
730 return 0;
731 }
732
733 static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
734 bool enable)
735 {
736 unsigned int i;
737 u16 curr;
738 u16 val;
739
740 /* Use a 32-entry Rx table */
741
742 val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
743
744 /* Check entries based on Rx stuff */
745 for (i = 0; i < info->nb_rx_ts; i++) {
746 if (!(chan->rx_ts_mask & (((u64)1) << i)))
747 continue;
748
749 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
750 if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
751 dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
752 chan->id, i);
753 return -EBUSY;
754 }
755 }
756
757 /* Set entries based on Rx stuff */
758 for (i = 0; i < info->nb_rx_ts; i++) {
759 if (!(chan->rx_ts_mask & (((u64)1) << i)))
760 continue;
761
762 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
763 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
764 }
765
766 return 0;
767 }
768
769 static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
770 bool enable)
771 {
772 unsigned int i;
773 u16 curr;
774 u16 val;
775
776 /* Use a 32-entry Tx table */
777
778 val = QMC_TSA_VALID | QMC_TSA_MASK_8BIT | QMC_TSA_CHANNEL(chan->id);
779
780 /* Check entries based on Tx stuff */
781 for (i = 0; i < info->nb_tx_ts; i++) {
782 if (!(chan->tx_ts_mask & (((u64)1) << i)))
783 continue;
784
785 curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
786 if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
787 dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
788 chan->id, i);
789 return -EBUSY;
790 }
791 }
792
793 /* Set entries based on Tx stuff */
794 for (i = 0; i < info->nb_tx_ts; i++) {
795 if (!(chan->tx_ts_mask & (((u64)1) << i)))
796 continue;
797
798 qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
799 (u16)~QMC_TSA_WRAP, enable ? val : 0x0000);
800 }
801
802 return 0;
803 }
804
805 static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
806 {
807 struct tsa_serial_info info;
808 int ret;
809
810 /* Retrieve info from the TSA related serial */
811 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
812 if (ret)
813 return ret;
814
815 /* Setup entries */
816 if (chan->qmc->is_tsa_64rxtx)
817 return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
818
819 return qmc_chan_setup_tsa_32tx(chan, &info, enable);
820 }
821
822 static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
823 {
824 struct tsa_serial_info info;
825 int ret;
826
827 /* Retrieve info from the TSA related serial */
828 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
829 if (ret)
830 return ret;
831
832 /* Setup entries */
833 if (chan->qmc->is_tsa_64rxtx)
834 return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
835
836 return qmc_chan_setup_tsa_32rx(chan, &info, enable);
837 }
838
839 static int qmc_chan_cpm1_command(struct qmc_chan *chan, u8 qmc_opcode)
840 {
841 return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
842 }
843
844 static int qmc_chan_qe_command(struct qmc_chan *chan, u32 cmd)
845 {
846 if (!qe_issue_cmd(cmd, chan->qmc->qe_subblock, chan->id, 0))
847 return -EIO;
848 return 0;
849 }
850
851 static int qmc_chan_stop_rx(struct qmc_chan *chan)
852 {
853 unsigned long flags;
854 int ret;
855
856 spin_lock_irqsave(&chan->rx_lock, flags);
857
858 if (chan->is_rx_stopped) {
859 /* The channel is already stopped -> simply return ok */
860 ret = 0;
861 goto end;
862 }
863
864 /* Send STOP RECEIVE command */
865 ret = qmc_is_qe(chan->qmc) ?
866 qmc_chan_qe_command(chan, QE_QMC_STOP_RX) :
867 qmc_chan_cpm1_command(chan, 0x0);
868 if (ret) {
869 dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
870 chan->id, ret);
871 goto end;
872 }
873
874 chan->is_rx_stopped = true;
875
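/*
 * With a common Rx/Tx TSA table, entries can only be released once both
 * directions are stopped; with split tables the Rx entries can go now.
 */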
876 if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
877 ret = qmc_chan_setup_tsa_rx(chan, false);
878 if (ret) {
879 dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
880 chan->id, ret);
881 goto end;
882 }
883 }
884
885 end:
886 spin_unlock_irqrestore(&chan->rx_lock, flags);
887 return ret;
888 }
889
890 static int qmc_chan_stop_tx(struct qmc_chan *chan)
891 {
892 unsigned long flags;
893 int ret;
894
895 spin_lock_irqsave(&chan->tx_lock, flags);
896
897 if (chan->is_tx_stopped) {
898 /* The channel is already stopped -> simply return ok */
899 ret = 0;
900 goto end;
901 }
902
903 /* Send STOP TRANSMIT command */
904 ret = qmc_is_qe(chan->qmc) ?
905 qmc_chan_qe_command(chan, QE_QMC_STOP_TX) :
906 qmc_chan_cpm1_command(chan, 0x1);
907 if (ret) {
908 dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
909 chan->id, ret);
910 goto end;
911 }
912
913 chan->is_tx_stopped = true;
914
915 if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
916 ret = qmc_chan_setup_tsa_tx(chan, false);
917 if (ret) {
918 dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
919 chan->id, ret);
920 goto end;
921 }
922 }
923
924 end:
925 spin_unlock_irqrestore(&chan->tx_lock, flags);
926 return ret;
927 }
928
929 static int qmc_chan_start_rx(struct qmc_chan *chan);
930
931 int qmc_chan_stop(struct qmc_chan *chan, int direction)
932 {
933 bool is_rx_rollback_needed = false;
934 unsigned long flags;
935 int ret = 0;
936
937 spin_lock_irqsave(&chan->ts_lock, flags);
938
939 if (direction & QMC_CHAN_READ) {
940 is_rx_rollback_needed = !chan->is_rx_stopped;
941 ret = qmc_chan_stop_rx(chan);
942 if (ret)
943 goto end;
944 }
945
946 if (direction & QMC_CHAN_WRITE) {
947 ret = qmc_chan_stop_tx(chan);
948 if (ret) {
949 /* Restart rx if needed */
950 if (is_rx_rollback_needed)
951 qmc_chan_start_rx(chan);
952 goto end;
953 }
954 }
955
956 end:
957 spin_unlock_irqrestore(&chan->ts_lock, flags);
958 return ret;
959 }
960 EXPORT_SYMBOL(qmc_chan_stop);
961
962 static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
963 {
964 struct tsa_serial_info info;
965 unsigned int w_rx, w_tx;
966 u16 first_rx, last_tx;
967 u16 trnsync;
968 int ret;
969
970 /* Retrieve info from the TSA related serial */
971 ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
972 if (ret)
973 return ret;
974
975 w_rx = hweight64(chan->rx_ts_mask);
976 w_tx = hweight64(chan->tx_ts_mask);
977 if (w_rx <= 1 && w_tx <= 1) {
978 dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
979 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
980 return 0;
981 }
982
983 /* Find the first Rx TS allocated to the channel */
984 first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
985
986 /* Find the last Tx TS allocated to the channel */
987 last_tx = fls64(chan->tx_ts_mask);
988
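/*
 * The sync values are the time-slot indexes wrapped on the number of TS
 * assigned to the serial and doubled, i.e. presumably byte offsets into the
 * 16-bit wide TSATRx/TSATTx tables.
 */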
989 trnsync = 0;
990 if (info.nb_rx_ts)
991 trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
992 if (info.nb_tx_ts)
993 trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
994
995 qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
996 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
997
998 dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
999 chan->id, trnsync,
1000 first_rx, info.nb_rx_ts, chan->rx_ts_mask,
1001 last_tx, info.nb_tx_ts, chan->tx_ts_mask);
1002
1003 return 0;
1004 }
1005
1006 static int qmc_chan_start_rx(struct qmc_chan *chan)
1007 {
1008 unsigned long flags;
1009 int ret;
1010
1011 spin_lock_irqsave(&chan->rx_lock, flags);
1012
1013 if (!chan->is_rx_stopped) {
1014 /* The channel is already started -> simply return ok */
1015 ret = 0;
1016 goto end;
1017 }
1018
1019 ret = qmc_chan_setup_tsa_rx(chan, true);
1020 if (ret) {
1021 dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
1022 chan->id, ret);
1023 goto end;
1024 }
1025
1026 if (chan->mode == QMC_TRANSPARENT) {
1027 ret = qmc_setup_chan_trnsync(chan->qmc, chan);
1028 if (ret) {
1029 dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
1030 chan->id, ret);
1031 goto end;
1032 }
1033 }
1034
1035 /* Restart the receiver */
1036 qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
1037 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
1038 chan->mode == QMC_TRANSPARENT ?
1039 chan->qmc->data->zdstate_transp :
1040 chan->qmc->data->zdstate_hdlc);
1041 qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
1042 chan->is_rx_halted = false;
1043
1044 chan->is_rx_stopped = false;
1045
1046 end:
1047 spin_unlock_irqrestore(&chan->rx_lock, flags);
1048 return ret;
1049 }
1050
1051 static int qmc_chan_start_tx(struct qmc_chan *chan)
1052 {
1053 unsigned long flags;
1054 int ret;
1055
1056 spin_lock_irqsave(&chan->tx_lock, flags);
1057
1058 if (!chan->is_tx_stopped) {
1059 /* The channel is already started -> simply return ok */
1060 ret = 0;
1061 goto end;
1062 }
1063
1064 ret = qmc_chan_setup_tsa_tx(chan, true);
1065 if (ret) {
1066 dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
1067 chan->id, ret);
1068 goto end;
1069 }
1070
1071 if (chan->mode == QMC_TRANSPARENT) {
1072 ret = qmc_setup_chan_trnsync(chan->qmc, chan);
1073 if (ret) {
1074 dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
1075 chan->id, ret);
1076 goto end;
1077 }
1078 }
1079
1080 /*
1081 * Enable channel transmitter as it could be disabled if
1082 * qmc_chan_reset() was called.
1083 */
1084 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1085
1086 /* Set the POL bit in the channel mode register */
1087 qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);
1088
1089 chan->is_tx_stopped = false;
1090
1091 end:
1092 spin_unlock_irqrestore(&chan->tx_lock, flags);
1093 return ret;
1094 }
1095
1096 int qmc_chan_start(struct qmc_chan *chan, int direction)
1097 {
1098 bool is_rx_rollback_needed = false;
1099 unsigned long flags;
1100 int ret = 0;
1101
1102 spin_lock_irqsave(&chan->ts_lock, flags);
1103
1104 if (direction & QMC_CHAN_READ) {
1105 is_rx_rollback_needed = chan->is_rx_stopped;
1106 ret = qmc_chan_start_rx(chan);
1107 if (ret)
1108 goto end;
1109 }
1110
1111 if (direction & QMC_CHAN_WRITE) {
1112 ret = qmc_chan_start_tx(chan);
1113 if (ret) {
1114 /* Re-stop Rx if needed */
1115 if (is_rx_rollback_needed)
1116 qmc_chan_stop_rx(chan);
1117 goto end;
1118 }
1119 }
1120
1121 end:
1122 spin_unlock_irqrestore(&chan->ts_lock, flags);
1123 return ret;
1124 }
1125 EXPORT_SYMBOL(qmc_chan_start);
1126
1127 static void qmc_chan_reset_rx(struct qmc_chan *chan)
1128 {
1129 struct qmc_xfer_desc *xfer_desc;
1130 unsigned long flags;
1131 cbd_t __iomem *bd;
1132 u16 ctrl;
1133
1134 spin_lock_irqsave(&chan->rx_lock, flags);
1135 bd = chan->rxbds;
1136 do {
1137 ctrl = qmc_read16(&bd->cbd_sc);
1138 qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));
1139
1140 xfer_desc = &chan->rx_desc[bd - chan->rxbds];
1141 xfer_desc->rx_complete = NULL;
1142 xfer_desc->context = NULL;
1143
1144 bd++;
1145 } while (!(ctrl & QMC_BD_RX_W));
1146
1147 chan->rxbd_free = chan->rxbds;
1148 chan->rxbd_done = chan->rxbds;
1149 qmc_write16(chan->s_param + QMC_SPE_RBPTR,
1150 qmc_read16(chan->s_param + QMC_SPE_RBASE));
1151
1152 chan->rx_pending = 0;
1153
1154 spin_unlock_irqrestore(&chan->rx_lock, flags);
1155 }
1156
1157 static void qmc_chan_reset_tx(struct qmc_chan *chan)
1158 {
1159 struct qmc_xfer_desc *xfer_desc;
1160 unsigned long flags;
1161 cbd_t __iomem *bd;
1162 u16 ctrl;
1163
1164 spin_lock_irqsave(&chan->tx_lock, flags);
1165
1166 /* Disable transmitter. It will be re-enabled by qmc_chan_start() */
1167 qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);
1168
1169 bd = chan->txbds;
1170 do {
1171 ctrl = qmc_read16(&bd->cbd_sc);
1172 qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));
1173
1174 xfer_desc = &chan->tx_desc[bd - chan->txbds];
1175 xfer_desc->tx_complete = NULL;
1176 xfer_desc->context = NULL;
1177
1178 bd++;
1179 } while (!(ctrl & QMC_BD_TX_W));
1180
1181 chan->txbd_free = chan->txbds;
1182 chan->txbd_done = chan->txbds;
1183 qmc_write16(chan->s_param + QMC_SPE_TBPTR,
1184 qmc_read16(chan->s_param + QMC_SPE_TBASE));
1185
1186 /* Reset TSTATE and ZISTATE to their initial value */
1187 qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
1188 qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
1189
1190 spin_unlock_irqrestore(&chan->tx_lock, flags);
1191 }
1192
1193 int qmc_chan_reset(struct qmc_chan *chan, int direction)
1194 {
1195 if (direction & QMC_CHAN_READ)
1196 qmc_chan_reset_rx(chan);
1197
1198 if (direction & QMC_CHAN_WRITE)
1199 qmc_chan_reset_tx(chan);
1200
1201 return 0;
1202 }
1203 EXPORT_SYMBOL(qmc_chan_reset);
1204
1205 static int qmc_check_chans(struct qmc *qmc)
1206 {
1207 struct tsa_serial_info info;
1208 struct qmc_chan *chan;
1209 u64 tx_ts_assigned_mask;
1210 u64 rx_ts_assigned_mask;
1211 int ret;
1212
1213 /* Retrieve info from the TSA related serial */
1214 ret = tsa_serial_get_info(qmc->tsa_serial, &info);
1215 if (ret)
1216 return ret;
1217
1218 if (info.nb_tx_ts > 64 || info.nb_rx_ts > 64) {
1219 dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
1220 return -EINVAL;
1221 }
1222
1223 /*
1224 * If more than 32 TS are assigned to this serial, one common table is
1225 * used for Tx and Rx and so masks must be equal for all channels.
1226 */
1227 if (info.nb_tx_ts > 32 || info.nb_rx_ts > 32) {
1228 if (info.nb_tx_ts != info.nb_rx_ts) {
1229 dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
1230 return -EINVAL;
1231 }
1232 }
1233
1234 tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
1235 rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
1236
1237 list_for_each_entry(chan, &qmc->chan_head, list) {
1238 if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
1239 dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
1240 return -EINVAL;
1241 }
1242
1243 if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
1244 dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
1245 return -EINVAL;
1246 }
1247 }
1248
1249 return 0;
1250 }
1251
1252 static unsigned int qmc_nb_chans(struct qmc *qmc)
1253 {
1254 unsigned int count = 0;
1255 struct qmc_chan *chan;
1256
1257 list_for_each_entry(chan, &qmc->chan_head, list)
1258 count++;
1259
1260 return count;
1261 }
1262
1263 static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
1264 {
1265 struct device_node *chan_np;
1266 struct qmc_chan *chan;
1267 const char *mode;
1268 u32 chan_id;
1269 u64 ts_mask;
1270 int ret;
1271
1272 for_each_available_child_of_node(np, chan_np) {
1273 ret = of_property_read_u32(chan_np, "reg", &chan_id);
1274 if (ret) {
1275 dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
1276 of_node_put(chan_np);
1277 return ret;
1278 }
1279 if (chan_id > 63) {
1280 dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
1281 of_node_put(chan_np);
1282 return -EINVAL;
1283 }
1284
1285 chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
1286 if (!chan) {
1287 of_node_put(chan_np);
1288 return -ENOMEM;
1289 }
1290
1291 chan->id = chan_id;
1292 spin_lock_init(&chan->ts_lock);
1293 spin_lock_init(&chan->rx_lock);
1294 spin_lock_init(&chan->tx_lock);
1295
1296 ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
1297 if (ret) {
1298 dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
1299 chan_np);
1300 of_node_put(chan_np);
1301 return ret;
1302 }
1303 chan->tx_ts_mask_avail = ts_mask;
1304 chan->tx_ts_mask = chan->tx_ts_mask_avail;
1305
1306 ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
1307 if (ret) {
1308 dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
1309 chan_np);
1310 of_node_put(chan_np);
1311 return ret;
1312 }
1313 chan->rx_ts_mask_avail = ts_mask;
1314 chan->rx_ts_mask = chan->rx_ts_mask_avail;
1315
1316 mode = "transparent";
1317 ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
1318 if (ret && ret != -EINVAL) {
1319 dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
1320 chan_np);
1321 of_node_put(chan_np);
1322 return ret;
1323 }
1324 if (!strcmp(mode, "transparent")) {
1325 chan->mode = QMC_TRANSPARENT;
1326 } else if (!strcmp(mode, "hdlc")) {
1327 chan->mode = QMC_HDLC;
1328 } else {
1329 dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
1330 chan_np, mode);
1331 of_node_put(chan_np);
1332 return -EINVAL;
1333 }
1334
1335 chan->is_reverse_data = of_property_read_bool(chan_np,
1336 "fsl,reverse-data");
1337
1338 list_add_tail(&chan->list, &qmc->chan_head);
1339 qmc->chans[chan->id] = chan;
1340 }
1341
1342 return qmc_check_chans(qmc);
1343 }
1344
1345 static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
1346 {
1347 unsigned int i;
1348 u16 val;
1349
1350 /*
1351 * Use a common Tx/Rx table with 64 entries.
1352 * Everything was previously checked: the Tx and Rx parameters are
1353 * identical, so use the Rx parameters to build the table.
1354 */
1355 qmc->is_tsa_64rxtx = true;
1356
1357 /* Invalidate all entries */
1358 for (i = 0; i < 64; i++)
1359 qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1360
1361 /* Set Wrap bit on last entry */
1362 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1363 QMC_TSA_WRAP);
1364
1365 /* Init pointers to the table */
1366 val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1367 qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1368 qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1369 qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1370 qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1371
1372 return 0;
1373 }
1374
1375 static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
1376 {
1377 unsigned int i;
1378 u16 val;
1379
1380 /*
1381 * Use a 32-entry Tx table and a 32-entry Rx table.
1382 * Everything was previously checked.
1383 */
1384 qmc->is_tsa_64rxtx = false;
1385
1386 /* Invalidate all entries */
1387 for (i = 0; i < 32; i++) {
1388 qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
1389 qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
1390 }
1391
1392 /* Set Wrap bit on last entries */
1393 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
1394 QMC_TSA_WRAP);
1395 qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
1396 QMC_TSA_WRAP);
1397
1398 /* Init Rx pointers ... */
1399 val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
1400 qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
1401 qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1402
1403 /* ... and Tx pointers */
1404 val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
1405 qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
1406 qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1407
1408 return 0;
1409 }
1410
1411 static int qmc_init_tsa(struct qmc *qmc)
1412 {
1413 struct tsa_serial_info info;
1414 int ret;
1415
1416 /* Retrieve info from the TSA related serial */
1417 ret = tsa_serial_get_info(qmc->tsa_serial, &info);
1418 if (ret)
1419 return ret;
1420
1421 /*
1422 * Initialize either one common 64-entry table or two 32-entry tables
1423 * (one for Tx and one for Rx) according to the number of assigned TS.
1424 */
1425 return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
1426 qmc_init_tsa_64rxtx(qmc, &info) :
1427 qmc_init_tsa_32rx_32tx(qmc, &info);
1428 }
1429
1430 static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
1431 {
1432 unsigned int i;
1433 cbd_t __iomem *bd;
1434 int ret;
1435 u16 val;
1436
1437 chan->qmc = qmc;
1438
1439 /* Set channel specific parameter base address */
1440 chan->s_param = qmc->dpram + (chan->id * 64);
1441 /* 16 bd per channel (8 rx and 8 tx) */
1442 chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
1443 chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;
1444
1445 chan->txbd_free = chan->txbds;
1446 chan->txbd_done = chan->txbds;
1447 chan->rxbd_free = chan->rxbds;
1448 chan->rxbd_done = chan->rxbds;
1449
1450 /* TBASE and TBPTR */
1451 val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
1452 qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
1453 qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);
1454
1455 /* RBASE and RBPTR */
1456 val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
1457 qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
1458 qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
1459 qmc_write32(chan->s_param + QMC_SPE_TSTATE, chan->qmc->data->tstate);
1460 qmc_write32(chan->s_param + QMC_SPE_RSTATE, chan->qmc->data->rstate);
1461 qmc_write32(chan->s_param + QMC_SPE_ZISTATE, chan->qmc->data->zistate);
1462 qmc_write32(chan->s_param + QMC_SPE_RPACK, chan->qmc->data->rpack);
1463 if (chan->mode == QMC_TRANSPARENT) {
1464 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_transp);
1465 qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
1466 val = QMC_SPE_CHAMR_MODE_TRANSP;
1467 if (chan->is_reverse_data)
1468 val |= QMC_SPE_CHAMR_TRANSP_RD;
1469 qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
1470 ret = qmc_setup_chan_trnsync(qmc, chan);
1471 if (ret)
1472 return ret;
1473 } else {
1474 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, chan->qmc->data->zdstate_hdlc);
1475 qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
1476 qmc_write16(chan->s_param + QMC_SPE_CHAMR,
1477 QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
1478 }
1479
1480 /* Do not enable interrupts now. They will be enabled later */
1481 qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);
1482
1483 /* Init Rx BDs and set Wrap bit on last descriptor */
1484 BUILD_BUG_ON(QMC_NB_RXBDS == 0);
1485 val = QMC_BD_RX_I;
1486 for (i = 0; i < QMC_NB_RXBDS; i++) {
1487 bd = chan->rxbds + i;
1488 qmc_write16(&bd->cbd_sc, val);
1489 }
1490 bd = chan->rxbds + QMC_NB_RXBDS - 1;
1491 qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
1492
1493 /* Init Tx BDs and set Wrap bit on last descriptor */
1494 BUILD_BUG_ON(QMC_NB_TXBDS == 0);
1495 val = QMC_BD_TX_I;
1496 if (chan->mode == QMC_HDLC)
1497 val |= QMC_BD_TX_L | QMC_BD_TX_TC;
1498 for (i = 0; i < QMC_NB_TXBDS; i++) {
1499 bd = chan->txbds + i;
1500 qmc_write16(&bd->cbd_sc, val);
1501 }
1502 bd = chan->txbds + QMC_NB_TXBDS - 1;
1503 qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);
1504
1505 return 0;
1506 }
1507
1508 static int qmc_setup_chans(struct qmc *qmc)
1509 {
1510 struct qmc_chan *chan;
1511 int ret;
1512
1513 list_for_each_entry(chan, &qmc->chan_head, list) {
1514 ret = qmc_setup_chan(qmc, chan);
1515 if (ret)
1516 return ret;
1517 }
1518
1519 return 0;
1520 }
1521
1522 static int qmc_finalize_chans(struct qmc *qmc)
1523 {
1524 struct qmc_chan *chan;
1525 int ret;
1526
1527 list_for_each_entry(chan, &qmc->chan_head, list) {
1528 /* Unmask channel interrupts */
1529 if (chan->mode == QMC_HDLC) {
1530 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1531 QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
1532 QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
1533 QMC_INT_TXB | QMC_INT_RXB);
1534 } else {
1535 qmc_write16(chan->s_param + QMC_SPE_INTMSK,
1536 QMC_INT_UN | QMC_INT_BSY |
1537 QMC_INT_TXB | QMC_INT_RXB);
1538 }
1539
1540 /* Force the channel to stop */
1541 ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
1542 if (ret)
1543 return ret;
1544 }
1545
1546 return 0;
1547 }
1548
1549 static int qmc_setup_ints(struct qmc *qmc)
1550 {
1551 unsigned int i;
1552 u16 __iomem *last;
1553
1554 /* Clear all entries */
1555 for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
1556 qmc_write16(qmc->int_table + i, 0x0000);
1557
1558 /* Set Wrap bit on last entry */
1559 if (qmc->int_size >= sizeof(u16)) {
1560 last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
1561 qmc_write16(last, QMC_INT_W);
1562 }
1563
1564 return 0;
1565 }
1566
1567 static void qmc_irq_gint(struct qmc *qmc)
1568 {
1569 struct qmc_chan *chan;
1570 unsigned int chan_id;
1571 unsigned long flags;
1572 u16 int_entry;
1573
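/*
 * The interrupt table is a circular buffer filled by the controller: walk
 * the valid (V) entries, handle them and clear everything but the Wrap bit.
 */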
1574 int_entry = qmc_read16(qmc->int_curr);
1575 while (int_entry & QMC_INT_V) {
1576 /* Clear all but the Wrap bit */
1577 qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);
1578
1579 chan_id = QMC_INT_GET_CHANNEL(int_entry);
1580 chan = qmc->chans[chan_id];
1581 if (!chan) {
1582 dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
1583 goto int_next;
1584 }
1585
1586 if (int_entry & QMC_INT_TXB)
1587 qmc_chan_write_done(chan);
1588
1589 if (int_entry & QMC_INT_UN) {
1590 dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
1591 int_entry);
1592 chan->nb_tx_underrun++;
1593 }
1594
1595 if (int_entry & QMC_INT_BSY) {
1596 dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
1597 int_entry);
1598 chan->nb_rx_busy++;
1599 /* Restart the receiver if needed */
1600 spin_lock_irqsave(&chan->rx_lock, flags);
1601 if (chan->rx_pending && !chan->is_rx_stopped) {
1602 qmc_write32(chan->s_param + QMC_SPE_RPACK,
1603 chan->qmc->data->rpack);
1604 qmc_write32(chan->s_param + QMC_SPE_ZDSTATE,
1605 chan->mode == QMC_TRANSPARENT ?
1606 chan->qmc->data->zdstate_transp :
1607 chan->qmc->data->zdstate_hdlc);
1608 qmc_write32(chan->s_param + QMC_SPE_RSTATE,
1609 chan->qmc->data->rstate);
1610 chan->is_rx_halted = false;
1611 } else {
1612 chan->is_rx_halted = true;
1613 }
1614 spin_unlock_irqrestore(&chan->rx_lock, flags);
1615 }
1616
1617 if (int_entry & QMC_INT_RXB)
1618 qmc_chan_read_done(chan);
1619
1620 int_next:
1621 if (int_entry & QMC_INT_W)
1622 qmc->int_curr = qmc->int_table;
1623 else
1624 qmc->int_curr++;
1625 int_entry = qmc_read16(qmc->int_curr);
1626 }
1627 }
1628
1629 static irqreturn_t qmc_irq_handler(int irq, void *priv)
1630 {
1631 struct qmc *qmc = (struct qmc *)priv;
1632 u16 scce;
1633
1634 scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
1635 qmc_write16(qmc->scc_regs + SCC_SCCE, scce);
1636
1637 if (unlikely(scce & SCC_SCCE_IQOV))
1638 dev_info(qmc->dev, "IRQ queue overflow\n");
1639
1640 if (unlikely(scce & SCC_SCCE_GUN))
1641 dev_err(qmc->dev, "Global transmitter underrun\n");
1642
1643 if (unlikely(scce & SCC_SCCE_GOV))
1644 dev_err(qmc->dev, "Global receiver overrun\n");
1645
1646 /* normal interrupt */
1647 if (likely(scce & SCC_SCCE_GINT))
1648 qmc_irq_gint(qmc);
1649
1650 return IRQ_HANDLED;
1651 }
1652
1653 static int qmc_qe_soft_qmc_init(struct qmc *qmc, struct device_node *np)
1654 {
1655 struct qe_firmware_info *qe_fw_info;
1656 const struct qe_firmware *qe_fw;
1657 const struct firmware *fw;
1658 const char *filename;
1659 int ret;
1660
1661 ret = of_property_read_string(np, "fsl,soft-qmc", &filename);
1662 switch (ret) {
1663 case 0:
1664 break;
1665 case -EINVAL:
1666 /* fsl,soft-qmc property not set -> Simply do nothing */
1667 return 0;
1668 default:
1669 dev_err(qmc->dev, "%pOF: failed to read fsl,soft-qmc\n",
1670 np);
1671 return ret;
1672 }
1673
1674 qe_fw_info = qe_get_firmware_info();
1675 if (qe_fw_info) {
1676 if (!strstr(qe_fw_info->id, "Soft-QMC")) {
1677 dev_err(qmc->dev, "Another Firmware is already loaded\n");
1678 return -EALREADY;
1679 }
1680 dev_info(qmc->dev, "Firmware already loaded\n");
1681 return 0;
1682 }
1683
1684 dev_info(qmc->dev, "Using firmware %s\n", filename);
1685
1686 ret = request_firmware(&fw, filename, qmc->dev);
1687 if (ret) {
1688 dev_err(qmc->dev, "Failed to request firmware %s\n", filename);
1689 return ret;
1690 }
1691
1692 qe_fw = (const struct qe_firmware *)fw->data;
1693
1694 if (fw->size < sizeof(qe_fw->header) ||
1695 be32_to_cpu(qe_fw->header.length) != fw->size) {
1696 dev_err(qmc->dev, "Invalid firmware %s\n", filename);
1697 ret = -EINVAL;
1698 goto end;
1699 }
1700
1701 ret = qe_upload_firmware(qe_fw);
1702 if (ret) {
1703 dev_err(qmc->dev, "Failed to load firmware %s\n", filename);
1704 goto end;
1705 }
1706
1707 ret = 0;
1708 end:
1709 release_firmware(fw);
1710 return ret;
1711 }
1712
1713 static int qmc_cpm1_init_resources(struct qmc *qmc, struct platform_device *pdev)
1714 {
1715 struct resource *res;
1716
1717 qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
1718 if (IS_ERR(qmc->scc_regs))
1719 return PTR_ERR(qmc->scc_regs);
1720
1721 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
1722 if (!res)
1723 return -EINVAL;
1724 qmc->scc_pram_offset = res->start - get_immrbase();
1725 qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
1726 if (IS_ERR(qmc->scc_pram))
1727 return PTR_ERR(qmc->scc_pram);
1728
1729 qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
1730 if (IS_ERR(qmc->dpram))
1731 return PTR_ERR(qmc->dpram);
1732
1733 return 0;
1734 }
1735
1736 static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
1737 {
1738 struct resource *res;
1739 int ucc_num;
1740 s32 info;
1741
1742 qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "ucc_regs");
1743 if (IS_ERR(qmc->scc_regs))
1744 return PTR_ERR(qmc->scc_regs);
1745
1746 ucc_num = tsa_serial_get_num(qmc->tsa_serial);
1747 if (ucc_num < 0)
1748 return dev_err_probe(qmc->dev, ucc_num, "Failed to get UCC num\n");
1749
1750 qmc->qe_subblock = ucc_slow_get_qe_cr_subblock(ucc_num);
1751 if (qmc->qe_subblock == QE_CR_SUBBLOCK_INVALID) {
1752 dev_err(qmc->dev, "Unsupported ucc num %u\n", ucc_num);
1753 return -EINVAL;
1754 }
1755 /* Allocate the 'Global Multichannel Parameters' and the
1756 * 'Framer parameters' areas. The 'Framer parameters' area
1757 * is located right after the 'Global Multichannel Parameters'.
1758 * The 'Framer parameters' need 1 byte per receive and transmit
1759 * channel. The maximum number of receive or transmit channel
1760 * is 64. So reserve 2 * 64 bytes for the 'Framer parameters'.
1761 */
1762 info = devm_qe_muram_alloc(qmc->dev, UCC_SLOW_PRAM_SIZE + 2 * 64,
1763 ALIGNMENT_OF_UCC_SLOW_PRAM);
1764 if (info < 0)
1765 return info;
1766
1767 if (!qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, qmc->qe_subblock,
1768 QE_CR_PROTOCOL_UNSPECIFIED, info)) {
1769 dev_err(qmc->dev, "QE_ASSIGN_PAGE_TO_DEVICE cmd failed");
1770 return -EIO;
1771 }
1772 qmc->scc_pram = qe_muram_addr(info);
1773 qmc->scc_pram_offset = info;
1774
1775 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpram");
1776 if (!res)
1777 return -EINVAL;
1778 qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
1779 qmc->dpram = devm_ioremap_resource(qmc->dev, res);
1780 if (IS_ERR(qmc->dpram))
1781 return PTR_ERR(qmc->dpram);
1782
1783 return 0;
1784 }
1785
1786 static int qmc_init_resources(struct qmc *qmc, struct platform_device *pdev)
1787 {
1788 return qmc_is_qe(qmc) ?
1789 qmc_qe_init_resources(qmc, pdev) :
1790 qmc_cpm1_init_resources(qmc, pdev);
1791 }
1792
1793 static int qmc_cpm1_init_scc(struct qmc *qmc)
1794 {
1795 u32 val;
1796 int ret;
1797
1798 /* Connect the serial (SCC) to TSA */
1799 ret = tsa_serial_connect(qmc->tsa_serial);
1800 if (ret)
1801 return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
1802
1803 /* Init GSMR_H and GSMR_L registers */
1804 val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP;
1805 qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
1806
1807 /* enable QMC mode */
1808 qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_CPM1_GSMRL_MODE_QMC);
1809
1810 /* Disable and clear interrupts */
1811 qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
1812 qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
1813
1814 return 0;
1815 }
1816
1817 static int qmc_qe_init_ucc(struct qmc *qmc)
1818 {
1819 u32 val;
1820 int ret;
1821
1822 /* Set the UCC in slow mode */
1823 qmc_write8(qmc->scc_regs + SCC_QE_UCC_GUEMR,
1824 UCC_GUEMR_SET_RESERVED3 | UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
1825
1826 /* Connect the serial (UCC) to TSA */
1827 ret = tsa_serial_connect(qmc->tsa_serial);
1828 if (ret)
1829 return dev_err_probe(qmc->dev, ret, "Failed to connect TSA serial\n");
1830
1831 /* Initialize the QMC tx startup addresses */
1832 if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock,
1833 QE_CR_PROTOCOL_UNSPECIFIED, 0x80)) {
1834 dev_err(qmc->dev, "QE_CMD_PUSH_SCHED tx cmd failed");
1835 ret = -EIO;
1836 goto err_tsa_serial_disconnect;
1837 }
1838
1839 /* Initialize the QMC rx startup addresses */
1840 if (!qe_issue_cmd(QE_PUSHSCHED, qmc->qe_subblock | 0x00020000,
1841 QE_CR_PROTOCOL_UNSPECIFIED, 0x82)) {
1842 dev_err(qmc->dev, "QE_CMD_PUSH_SCHED rx cmd failed");
1843 ret = -EIO;
1844 goto err_tsa_serial_disconnect;
1845 }
1846
1847 /* Re-init RXPTR and TXPTR with the content of RX_S_PTR and
1848 * TX_S_PTR (RX_S_PTR and TX_S_PTR are initialized during
1849 * qmc_setup_tsa() call)
1850 */
1851 val = qmc_read16(qmc->scc_pram + QMC_GBL_RX_S_PTR);
1852 qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
1853 val = qmc_read16(qmc->scc_pram + QMC_GBL_TX_S_PTR);
1854 qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);
1855
1856 /* Init GUMR_H and GUMR_L registers (SCC GSMR_H and GSMR_L) */
1857 val = SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP |
1858 SCC_GSMRH_TRX | SCC_GSMRH_TTX;
1859 qmc_write32(qmc->scc_regs + SCC_GSMRH, val);
1860
1861 /* enable QMC mode */
1862 qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_QE_GSMRL_MODE_QMC);
1863
1864 /* Disable and clear interrupts */
1865 qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
1866 qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
1867
1868 return 0;
1869
1870 err_tsa_serial_disconnect:
1871 tsa_serial_disconnect(qmc->tsa_serial);
1872 return ret;
1873 }
1874
1875 static int qmc_init_xcc(struct qmc *qmc)
1876 {
1877 return qmc_is_qe(qmc) ?
1878 qmc_qe_init_ucc(qmc) :
1879 qmc_cpm1_init_scc(qmc);
1880 }
1881
1882 static void qmc_exit_xcc(struct qmc *qmc)
1883 {
1884 /* Disconnect the serial from TSA */
1885 tsa_serial_disconnect(qmc->tsa_serial);
1886 }
1887
1888 static int qmc_probe(struct platform_device *pdev)
1889 {
1890 struct device_node *np = pdev->dev.of_node;
1891 unsigned int nb_chans;
1892 struct qmc *qmc;
1893 int irq;
1894 int ret;
1895
1896 qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
1897 if (!qmc)
1898 return -ENOMEM;
1899
1900 qmc->dev = &pdev->dev;
1901 qmc->data = of_device_get_match_data(&pdev->dev);
1902 if (!qmc->data) {
1903 dev_err(qmc->dev, "Missing match data\n");
1904 return -EINVAL;
1905 }
1906 INIT_LIST_HEAD(&qmc->chan_head);
1907
1908 qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
1909 if (IS_ERR(qmc->tsa_serial)) {
1910 return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
1911 "Failed to get TSA serial\n");
1912 }
1913
1914 ret = qmc_init_resources(qmc, pdev);
1915 if (ret)
1916 return ret;
1917
1918 if (qmc_is_qe(qmc)) {
1919 ret = qmc_qe_soft_qmc_init(qmc, np);
1920 if (ret)
1921 return ret;
1922 }
1923
1924 /* Parse channels information */
1925 ret = qmc_of_parse_chans(qmc, np);
1926 if (ret)
1927 return ret;
1928
1929 nb_chans = qmc_nb_chans(qmc);
1930
1931 /*
1932 * Allocate the buffer descriptor table
1933 * 8 rx and 8 tx descriptors per channel
1934 */
1935 qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
1936 qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
1937 &qmc->bd_dma_addr, GFP_KERNEL);
1938 if (!qmc->bd_table) {
1939 dev_err(qmc->dev, "Failed to allocate bd table\n");
1940 return -ENOMEM;
1941 }
1942 memset(qmc->bd_table, 0, qmc->bd_size);
1943
1944 qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);
1945
1946 /* Allocate the interrupt table */
1947 qmc->int_size = QMC_NB_INTS * sizeof(u16);
1948 qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
1949 &qmc->int_dma_addr, GFP_KERNEL);
1950 if (!qmc->int_table) {
1951 dev_err(qmc->dev, "Failed to allocate interrupt table\n");
1952 return -ENOMEM;
1953 }
1954 memset(qmc->int_table, 0, qmc->int_size);
1955
1956 qmc->int_curr = qmc->int_table;
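/* Both the interrupt queue base and the current interrupt pointer start at the beginning of the table */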
1957 qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
1958 qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);
1959
1960 /* Set MRBLR (valid for HDLC only) max MRU + max CRC */
1961 qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);
1962
1963 qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
1964 qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);
1965
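/* Frame check sequence constants: the expected CRC-32 and CRC-16/CCITT residues of a good HDLC frame */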
1966 qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
1967 qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
1968
1969 if (qmc_is_qe(qmc)) {
1970 /* Zero the reserved area */
1971 memset_io(qmc->scc_pram + QMC_QE_GBL_RSV_B0_START, 0,
1972 QMC_QE_GBL_RSV_B0_SIZE);
1973
1974 qmc_write32(qmc->scc_pram + QMC_QE_GBL_GCSBASE, qmc->dpram_offset);
1975
1976 /* Init 'framer parameters' area and set the base addresses */
1977 memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE, 0x01, 64);
1978 memset_io(qmc->scc_pram + UCC_SLOW_PRAM_SIZE + 64, 0x01, 64);
1979 qmc_write16(qmc->scc_pram + QMC_QE_GBL_RX_FRM_BASE,
1980 qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE);
1981 qmc_write16(qmc->scc_pram + QMC_QE_GBL_TX_FRM_BASE,
1982 qmc->scc_pram_offset + UCC_SLOW_PRAM_SIZE + 64);
1983 }
1984
1985 ret = qmc_init_tsa(qmc);
1986 if (ret)
1987 return ret;
1988
1989 qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);
1990
1991 ret = qmc_setup_chans(qmc);
1992 if (ret)
1993 return ret;
1994
1995 /* Init interrupts table */
1996 ret = qmc_setup_ints(qmc);
1997 if (ret)
1998 return ret;
1999
2000 /* Init SCC (CPM1) or UCC (QE) */
2001 ret = qmc_init_xcc(qmc);
2002 if (ret)
2003 return ret;
2004
2005 /* Set the irq handler */
2006 irq = platform_get_irq(pdev, 0);
2007 if (irq < 0)
2008 goto err_exit_xcc;
2009 ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
2010 if (ret < 0)
2011 goto err_exit_xcc;
2012
2013 /* Enable interrupts */
2014 qmc_write16(qmc->scc_regs + SCC_SCCM,
2015 SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);
2016
2017 ret = qmc_finalize_chans(qmc);
2018 if (ret < 0)
2019 goto err_disable_intr;
2020
2021 /* Enable transmitter and receiver */
2022 qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
2023
2024 platform_set_drvdata(pdev, qmc);
2025
2026 /* Populate channel related devices */
2027 ret = devm_of_platform_populate(qmc->dev);
2028 if (ret)
2029 goto err_disable_txrx;
2030
2031 return 0;
2032
2033 err_disable_txrx:
2034 qmc_write32(qmc->scc_regs + SCC_GSMRL, qmc_read32(qmc->scc_regs + SCC_GSMRL) & ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT));
2035
2036 err_disable_intr:
2037 qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2038
2039 err_exit_xcc:
2040 qmc_exit_xcc(qmc);
2041 return ret;
2042 }
2043
2044 static void qmc_remove(struct platform_device *pdev)
2045 {
2046 struct qmc *qmc = platform_get_drvdata(pdev);
2047
2048 /* Disable transmitter and receiver */
2049 qmc_write32(qmc->scc_regs + SCC_GSMRL, qmc_read32(qmc->scc_regs + SCC_GSMRL) & ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT));
2050
2051 /* Disable interrupts */
2052 qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
2053
2054 /* Exit SCC (CPM1) or UCC (QE) */
2055 qmc_exit_xcc(qmc);
2056 }
2057
2058 static const struct qmc_data qmc_data_cpm1 __maybe_unused = {
2059 .version = QMC_CPM1,
2060 .tstate = 0x30000000,
2061 .rstate = 0x31000000,
2062 .zistate = 0x00000100,
2063 .zdstate_hdlc = 0x00000080,
2064 .zdstate_transp = 0x18000080,
2065 .rpack = 0x00000000,
2066 };
2067
2068 static const struct qmc_data qmc_data_qe __maybe_unused = {
2069 .version = QMC_QE,
2070 .tstate = 0x30000000,
2071 .rstate = 0x30000000,
2072 .zistate = 0x00000200,
2073 .zdstate_hdlc = 0x80FFFFE0,
2074 .zdstate_transp = 0x003FFFE2,
2075 .rpack = 0x80000000,
2076 };
2077
2078 static const struct of_device_id qmc_id_table[] = {
2079 #if IS_ENABLED(CONFIG_CPM1)
2080 { .compatible = "fsl,cpm1-scc-qmc", .data = &qmc_data_cpm1 },
2081 #endif
2082 #if IS_ENABLED(CONFIG_QUICC_ENGINE)
2083 { .compatible = "fsl,qe-ucc-qmc", .data = &qmc_data_qe },
2084 #endif
2085 {} /* sentinel */
2086 };
2087 MODULE_DEVICE_TABLE(of, qmc_id_table);
2088
2089 static struct platform_driver qmc_driver = {
2090 .driver = {
2091 .name = "fsl-qmc",
2092 .of_match_table = of_match_ptr(qmc_id_table),
2093 },
2094 .probe = qmc_probe,
2095 .remove_new = qmc_remove,
2096 };
2097 module_platform_driver(qmc_driver);
2098
2099 static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
2100 {
2101 struct platform_device *pdev;
2102 struct qmc_chan *qmc_chan;
2103 struct qmc *qmc;
2104
2105 if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
2106 return ERR_PTR(-EINVAL);
2107
2108 pdev = of_find_device_by_node(qmc_np);
2109 if (!pdev)
2110 return ERR_PTR(-ENODEV);
2111
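/* drvdata is only set at the end of probe: a NULL value means the controller has not finished probing yet */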
2112 qmc = platform_get_drvdata(pdev);
2113 if (!qmc) {
2114 platform_device_put(pdev);
2115 return ERR_PTR(-EPROBE_DEFER);
2116 }
2117
2118 if (chan_index >= ARRAY_SIZE(qmc->chans)) {
2119 platform_device_put(pdev);
2120 return ERR_PTR(-EINVAL);
2121 }
2122
2123 qmc_chan = qmc->chans[chan_index];
2124 if (!qmc_chan) {
2125 platform_device_put(pdev);
2126 return ERR_PTR(-ENOENT);
2127 }
2128
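/* Keep the reference taken on the controller device by of_find_device_by_node(); it is released in qmc_chan_put() */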
2129 return qmc_chan;
2130 }
2131
2132 int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name)
2133 {
2134 int count;
2135
2136 /* phandles are fixed args phandles with one arg */
2137 count = of_count_phandle_with_args(np, phandles_name, NULL);
2138 if (count < 0)
2139 return count;
2140
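/*
 * With a NULL cells_name, of_count_phandle_with_args() returns the raw number
 * of cells in the property. Each entry here is two cells (phandle + one
 * argument), hence the division by 2.
 */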
2141 return count / 2;
2142 }
2143 EXPORT_SYMBOL(qmc_chan_count_phandles);
2144
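/*
 * Resolve a channel from a fixed-args phandle list where each entry is
 * <&controller chan-number>, e.g. (illustrative only, the property name is
 * defined by the consumer binding):
 *   fsl,qmc-chan = <&qmc 16>;
 */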
2145 struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
2146 const char *phandles_name,
2147 int index)
2148 {
2149 struct of_phandle_args out_args;
2150 struct qmc_chan *qmc_chan;
2151 int ret;
2152
2153 ret = of_parse_phandle_with_fixed_args(np, phandles_name, 1, index,
2154 &out_args);
2155 if (ret < 0)
2156 return ERR_PTR(ret);
2157
2158 if (out_args.args_count != 1) {
2159 of_node_put(out_args.np);
2160 return ERR_PTR(-EINVAL);
2161 }
2162
2163 qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
2164 of_node_put(out_args.np);
2165 return qmc_chan;
2166 }
2167 EXPORT_SYMBOL(qmc_chan_get_byphandles_index);
2168
2169 struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
2170 {
2171 struct device_node *qmc_np;
2172 u32 chan_index;
2173 int ret;
2174
2175 qmc_np = np->parent;
2176 ret = of_property_read_u32(np, "reg", &chan_index);
2177 if (ret)
2178 return ERR_PTR(-EINVAL);
2179
2180 return qmc_chan_get_from_qmc(qmc_np, chan_index);
2181 }
2182 EXPORT_SYMBOL(qmc_chan_get_bychild);
2183
2184 void qmc_chan_put(struct qmc_chan *chan)
2185 {
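/* Drop the controller device reference taken by of_find_device_by_node() when the channel was looked up */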
2186 put_device(chan->qmc->dev);
2187 }
2188 EXPORT_SYMBOL(qmc_chan_put);
2189
2190 static void devm_qmc_chan_release(struct device *dev, void *res)
2191 {
2192 struct qmc_chan **qmc_chan = res;
2193
2194 qmc_chan_put(*qmc_chan);
2195 }
2196
2197 struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
2198 struct device_node *np,
2199 const char *phandles_name,
2200 int index)
2201 {
2202 struct qmc_chan *qmc_chan;
2203 struct qmc_chan **dr;
2204
2205 dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2206 if (!dr)
2207 return ERR_PTR(-ENOMEM);
2208
2209 qmc_chan = qmc_chan_get_byphandles_index(np, phandles_name, index);
2210 if (!IS_ERR(qmc_chan)) {
2211 *dr = qmc_chan;
2212 devres_add(dev, dr);
2213 } else {
2214 devres_free(dr);
2215 }
2216
2217 return qmc_chan;
2218 }
2219 EXPORT_SYMBOL(devm_qmc_chan_get_byphandles_index);
2220
2221 struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
2222 struct device_node *np)
2223 {
2224 struct qmc_chan *qmc_chan;
2225 struct qmc_chan **dr;
2226
2227 dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
2228 if (!dr)
2229 return ERR_PTR(-ENOMEM);
2230
2231 qmc_chan = qmc_chan_get_bychild(np);
2232 if (!IS_ERR(qmc_chan)) {
2233 *dr = qmc_chan;
2234 devres_add(dev, dr);
2235 } else {
2236 devres_free(dr);
2237 }
2238
2239 return qmc_chan;
2240 }
2241 EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
2242
2243 MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
2244 MODULE_DESCRIPTION("CPM/QE QMC driver");
2245 MODULE_LICENSE("GPL");
2246