// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS	(1 << 7)
#define SCC_GSMRH_CDS	(1 << 8)
#define SCC_GSMRH_CTSP	(1 << 9)
#define SCC_GSMRH_CDP	(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV	(1 << 3)
#define SCC_SCCE_GINT	(1 << 2)
#define SCC_SCCE_GUN	(1 << 1)
#define SCC_SCCE_GOV	(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14

/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)
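
/*
 * Worked example (informative, not used by the code below): for channel 3,
 * the driver builds its TSA entry as
 *	QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(3)
 *	= 0x8000 | 0x303F | 0x00C0 = 0xB0FF
 * i.e. a valid entry routing the time slot to channel 3 with all mask bits
 * set. The QMC_TSA_WRAP bit is managed separately to mark the last entry
 * of the table.
 */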

/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_HDLC		(1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT		(1 << 12)
#define QMC_SPE_CHAMR_POL		(1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM		(1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC		(1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF		(0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD		(1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC		0x3C
#define QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length, unsigned int flags);
	};
	void *context;
};

struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	spinlock_t ts_lock; /* Protect timeslots */
	u64 tx_ts_mask_avail;
	u64 tx_ts_mask;
	u64 rx_ts_mask_avail;
	u64 rx_ts_mask;
	bool is_reverse_data;

	spinlock_t tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64 nb_tx_underrun;
	bool is_tx_stopped;

	spinlock_t rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64 nb_rx_busy;
	int rx_pending;
	bool is_rx_halted;
	bool is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	bool is_tsa_64rxtx;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

static void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
{
	qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
}

static void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}

static void qmc_clrbits32(void __iomem *addr, u32 clr)
{
	qmc_write32(addr, qmc_read32(addr) & ~clr);
}

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	unsigned long flags;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->ts_lock, flags);

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->ts_lock, flags);

	ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
	ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
	ts_info->rx_ts_mask = chan->rx_ts_mask;
	ts_info->tx_ts_mask = chan->tx_ts_mask;

	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_ts_info);

int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
{
	unsigned long flags;
	int ret;

	/* Only a subset of the available timeslots is allowed */
	if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
		return -EINVAL;
	if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
		return -EINVAL;

	/* In case of a common Rx/Tx table, the Rx and Tx masks must be identical */
	if (chan->qmc->is_tsa_64rxtx) {
		if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
			return -EINVAL;
	}

	spin_lock_irqsave(&chan->ts_lock, flags);

	if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
	    (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
		dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
		ret = -EBUSY;
	} else {
		chan->tx_ts_mask = ts_info->tx_ts_mask;
		chan->rx_ts_mask = ts_info->rx_ts_mask;
		ret = 0;
	}
	spin_unlock_irqrestore(&chan->ts_lock, flags);

	return ret;
}
EXPORT_SYMBOL(qmc_chan_set_ts_info);
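
/*
 * Usage sketch (illustrative only, not part of the driver): a consumer can
 * shrink a channel to the first timeslot advertised as available. The
 * "chan" variable and error handling are assumptions of the example, and at
 * least one Rx timeslot is assumed to be available.
 *
 *	struct qmc_chan_ts_info ts_info;
 *	int ret;
 *
 *	ret = qmc_chan_get_ts_info(chan, &ts_info);
 *	if (ret)
 *		return ret;
 *	ts_info.rx_ts_mask = BIT_ULL(__ffs64(ts_info.rx_ts_mask_avail));
 *	ts_info.tx_ts_mask = ts_info.rx_ts_mask;
 *	ret = qmc_chan_set_ts_info(chan, &ts_info);
 *	if (ret)
 *		return ret;
 *
 * Keeping the Rx and Tx masks identical is safe even when the controller
 * uses a shared 64-entry Rx/Tx table (is_tsa_64rxtx).
 */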

int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 * 0      0      : The BD is free
	 * 1      1      : The BD is in use, waiting for transfer
	 * 0      1      : The BD is in use, waiting for completion
	 * 1      0      : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);
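
/*
 * Usage sketch (illustrative only): submitting a transmit buffer from a
 * hypothetical consumer. The "dev"/"skb" handling, the DMA mapping and the
 * "my_tx_complete" callback are assumptions of the example; only
 * qmc_chan_write_submit() itself comes from this driver.
 *
 *	static void my_tx_complete(void *context)
 *	{
 *		struct sk_buff *skb = context;
 *
 *		dev_kfree_skb_any(skb);
 *	}
 *
 *	addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	ret = qmc_chan_write_submit(chan, addr, skb->len, my_tx_complete, skb);
 *
 * A -EBUSY return means that all QMC_NB_TXBDS descriptors are in use and the
 * caller should retry once a completion callback has run. Completion
 * callbacks are invoked with the channel Tx lock released (see
 * qmc_chan_write_done() below).
 */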

static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 * 0      0      : The BD is free
	 * 1      1      : The BD is in use, waiting for transfer
	 * 0      1      : The BD is in use, waiting for completion
	 * 1      0      : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length, unsigned int flags),
			 void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 * 0      0      : The BD is free
	 * 1      1      : The BD is in use, waiting for transfer
	 * 0      1      : The BD is in use, waiting for completion
	 * 1      0      : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Clear previous status flags */
	ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
		  QMC_BD_RX_AB | QMC_BD_RX_CR);

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);
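
/*
 * Usage sketch (illustrative only): queueing a receive buffer. "dev", "buf",
 * "BUF_SIZE" and "my_rx_complete" are assumptions of the example. On
 * completion, "length" is the length written back by the QMC and "flags"
 * carries the QMC_RX_FLAG_HDLC_* status bits (see qmc_chan_read_done()).
 *
 *	static void my_rx_complete(void *context, size_t length,
 *				   unsigned int flags)
 *	{
 *		if (flags & QMC_RX_FLAG_HDLC_CRC)
 *			pr_warn("CRC error on received frame\n");
 *	}
 *
 *	addr = dma_map_single(dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	ret = qmc_chan_read_submit(chan, addr, BUF_SIZE, my_rx_complete, NULL);
 */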

static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size, unsigned int flags);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 * 0      0      : The BD is free
	 * 1      1      : The BD is in use, waiting for transfer
	 * 0      1      : The BD is in use, waiting for completion
	 * 1      0      : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);

			/*
			 * Avoid conversion between the internal hardware flags
			 * and the software API flags.
			 * -> Be sure that the software API flags are consistent
			 *    with the hardware flags
			 */
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
			BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);

			complete(context, datalen,
				 ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
					 QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				     bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/*
	 * Use a common Tx/Rx 64 entries table.
	 * The Tx and Rx timeslot masks must be identical.
	 */
	if (chan->tx_ts_mask != chan->rx_ts_mask) {
		dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
		return -EINVAL;
	}

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Rx timeslot mask */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Rx timeslot mask */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Rx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Rx timeslot mask */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Rx timeslot mask */
	for (i = 0; i < info->nb_rx_ts; i++) {
		if (!(chan->rx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
				   bool enable)
{
	unsigned int i;
	u16 curr;
	u16 val;

	/* Use a Tx 32 entries table */

	val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);

	/* Check entries based on the Tx timeslot mask */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
		if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
			dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
				chan->id, i);
			return -EBUSY;
		}
	}

	/* Set entries based on the Tx timeslot mask */
	for (i = 0; i < info->nb_tx_ts; i++) {
		if (!(chan->tx_ts_mask & (((u64)1) << i)))
			continue;

		qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
				 ~QMC_TSA_WRAP, enable ? val : 0x0000);
	}

	return 0;
}

static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	if (chan->qmc->is_tsa_64rxtx)
		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);

	return qmc_chan_setup_tsa_32tx(chan, &info, enable);
}

static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Setup entries */
	if (chan->qmc->is_tsa_64rxtx)
		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);

	return qmc_chan_setup_tsa_32rx(chan, &info, enable);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (chan->is_rx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

	if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
		ret = qmc_chan_setup_tsa_rx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (chan->is_tx_stopped) {
		/* The channel is already stopped -> simply return ok */
		ret = 0;
		goto end;
	}

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

	if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
		ret = qmc_chan_setup_tsa_tx(chan, false);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

static int qmc_chan_start_rx(struct qmc_chan *chan);

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	bool is_rx_rollback_needed = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&chan->ts_lock, flags);

	if (direction & QMC_CHAN_READ) {
		is_rx_rollback_needed = !chan->is_rx_stopped;
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			goto end;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret) {
			/* Restart rx if needed */
			if (is_rx_rollback_needed)
				qmc_chan_start_rx(chan);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->ts_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	unsigned int w_rx, w_tx;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	w_rx = hweight64(chan->rx_ts_mask);
	w_tx = hweight64(chan->tx_ts_mask);
	if (w_rx <= 1 && w_tx <= 1) {
		dev_dbg(qmc->dev, "only one or zero ts -> disable trnsync\n");
		qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);
		return 0;
	}

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_TRANSP_SYNC);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}

static int qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	if (!chan->is_rx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	ret = qmc_chan_setup_tsa_rx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	if (chan->mode == QMC_TRANSPARENT) {
		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	if (!chan->is_tx_stopped) {
		/* The channel is already started -> simply return ok */
		ret = 0;
		goto end;
	}

	ret = qmc_chan_setup_tsa_tx(chan, true);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	if (chan->mode == QMC_TRANSPARENT) {
		ret = qmc_setup_chan_trnsync(chan->qmc, chan);
		if (ret) {
			dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
				chan->id, ret);
			goto end;
		}
	}

	/*
	 * Enable the channel transmitter as it could be disabled if
	 * qmc_chan_reset() was called.
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	bool is_rx_rollback_needed = false;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&chan->ts_lock, flags);

	if (direction & QMC_CHAN_READ) {
		is_rx_rollback_needed = chan->is_rx_stopped;
		ret = qmc_chan_start_rx(chan);
		if (ret)
			goto end;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_start_tx(chan);
		if (ret) {
			/* Stop rx again if needed */
			if (is_rx_rollback_needed)
				qmc_chan_stop_rx(chan);
			goto end;
		}
	}

end:
	spin_unlock_irqrestore(&chan->ts_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
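
/*
 * Usage sketch (illustrative only): a typical consumer sequence built from
 * the exported API. The "chan" variable and the choice of sequence are
 * assumptions of the example.
 *
 *	ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
 *	if (ret)
 *		return ret;
 *	ret = qmc_chan_reset(chan, QMC_CHAN_ALL);
 *	if (ret)
 *		return ret;
 *	ret = qmc_chan_start(chan, QMC_CHAN_ALL);
 *	if (ret)
 *		return ret;
 *
 * Note that qmc_chan_reset() (below) drops all queued buffers without
 * calling their completion callbacks, so any DMA mappings still held by the
 * caller must be cleaned up by the caller itself.
 */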

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable transmitter. It will be re-enabled in qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	struct qmc_chan *chan;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
	}

	return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->ts_lock);
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask_avail = ts_mask;
		chan->tx_ts_mask = chan->tx_ts_mask_avail;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask_avail = ts_mask;
		chan->rx_ts_mask = chan->rx_ts_mask_avail;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}
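
/*
 * Device tree sketch (illustrative only) of a channel child node matching
 * the properties parsed above. The unit address and mask values are made up
 * for the example.
 *
 *	channel@16 {
 *		reg = <16>;
 *		fsl,operational-mode = "hdlc";
 *		fsl,tx-ts-mask = /bits/ 64 <0x00000000000000f0>;
 *		fsl,rx-ts-mask = /bits/ 64 <0x00000000000000f0>;
 *	};
 *
 * "fsl,operational-mode" defaults to "transparent" when absent, and the
 * boolean "fsl,reverse-data" property may be added to reverse the data bits.
 */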

static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64 entries table.
	 * Everything was previously checked: the Tx and Rx parameters are
	 * identical -> use the Rx parameters to build the table.
	 */
	qmc->is_tsa_64rxtx = true;

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set the Wrap bit on the last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	unsigned int i;
	u16 val;

	/*
	 * Use a Tx 32 entries table and a Rx 32 entries table.
	 * Everything was previously checked.
	 */
	qmc->is_tsa_64rxtx = false;

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set the Wrap bit on the last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_init_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Initialize one common 64 entries table or two 32 entries tables
	 * (one for Tx and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_init_tsa_64rxtx(qmc, &info) :
		qmc_init_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set the channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 BDs per channel (8 Rx and 8 Tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set the Wrap bit on the last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set the Wrap bit on the last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Force the channel to be stopped */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set the Wrap bit on the last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}
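
/*
 * Worked example (informative): decoding an interrupt table entry. An entry
 * of 0x880A has QMC_INT_V set (bit 15), QMC_INT_GET_CHANNEL(0x880A) =
 * (0x880A & 0x0FC0) >> 6 = 32, and the status bits QMC_INT_RXF | QMC_INT_TXB
 * (0x000A): channel 32 signals a received frame and a transmitted buffer.
 */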

static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct resource *res;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret) {
		dev_err(qmc->dev, "Failed to connect TSA serial\n");
		return ret;
	}

	/* Parse channels information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		goto err_tsa_serial_disconnect;

	nb_chans = qmc_nb_chans(qmc);

	/* Init GSMR_H and GSMR_L registers */
	qmc_write32(qmc->scc_regs + SCC_GSMRH,
		    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

	/* enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/*
	 * Allocate the buffer descriptor table
	 * 8 rx and 8 tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
					    &qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
					     &qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only): max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_init_tsa(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Init interrupts table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Disable and clear interrupts, set the irq handler */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_tsa_serial_disconnect;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_tsa_serial_disconnect;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable transmitter and receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	/* Populate channel related devices */
	ret = devm_of_platform_populate(qmc->dev);
	if (ret)
		goto err_disable_txrx;

	return 0;

err_disable_txrx:
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}

static void qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable transmitter and receiver */
	qmc_clrbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove_new = qmc_remove,
};
module_platform_driver(qmc_driver);

static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;

	if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
		return ERR_PTR(-EINVAL);

	pdev = of_find_device_by_node(qmc_np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (chan_index >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[chan_index];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}

int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name)
{
	int count;

	/*
	 * Phandles are fixed-args phandles with one argument: each entry
	 * uses two cells (the phandle itself and its argument).
	 */
	count = of_count_phandle_with_args(np, phandles_name, NULL);
	if (count < 0)
		return count;

	return count / 2;
}
EXPORT_SYMBOL(qmc_chan_count_phandles);

struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
					       const char *phandles_name,
					       int index)
{
	struct of_phandle_args out_args;
	struct qmc_chan *qmc_chan;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandles_name, 1, index,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (out_args.args_count != 1) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
	of_node_put(out_args.np);
	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandles_index);

struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
{
	struct device_node *qmc_np;
	u32 chan_index;
	int ret;

	qmc_np = np->parent;
	ret = of_property_read_u32(np, "reg", &chan_index);
	if (ret)
		return ERR_PTR(-EINVAL);

	return qmc_chan_get_from_qmc(qmc_np, chan_index);
}
EXPORT_SYMBOL(qmc_chan_get_bychild);

void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
						    struct device_node *np,
						    const char *phandles_name,
						    int index)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandles_index(np, phandles_name, index);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandles_index);

struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
					   struct device_node *np)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_bychild(np);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
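
/*
 * Usage sketch (illustrative only): a consumer driver grabbing its channel
 * in probe. The "fsl,qmc-chan" property name and the consumer node layout
 * are assumptions of the example; only the getters themselves are part of
 * this driver.
 *
 *	consumer {
 *		fsl,qmc-chan = <&qmc 16>;
 *	};
 *
 *	chan = devm_qmc_chan_get_byphandles_index(dev, dev->of_node,
 *						  "fsl,qmc-chan", 0);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * With the devm_ variants, qmc_chan_put() is called automatically when the
 * consumer device is unbound.
 */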

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");