1 /* Freescale QUICC Engine HDLC Device Driver 2 * 3 * Copyright 2016 Freescale Semiconductor Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License as published by the 7 * Free Software Foundation; either version 2 of the License, or (at your 8 * option) any later version. 9 */ 10 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/hdlc.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/irq.h> 18 #include <linux/kernel.h> 19 #include <linux/module.h> 20 #include <linux/netdevice.h> 21 #include <linux/of_address.h> 22 #include <linux/of_irq.h> 23 #include <linux/of_platform.h> 24 #include <linux/platform_device.h> 25 #include <linux/sched.h> 26 #include <linux/skbuff.h> 27 #include <linux/slab.h> 28 #include <linux/spinlock.h> 29 #include <linux/stddef.h> 30 #include <soc/fsl/qe/qe_tdm.h> 31 #include <uapi/linux/if_arp.h> 32 33 #include "fsl_ucc_hdlc.h" 34 35 #define DRV_DESC "Freescale QE UCC HDLC Driver" 36 #define DRV_NAME "ucc_hdlc" 37 38 #define TDM_PPPOHT_SLIC_MAXIN 39 40 static struct ucc_tdm_info utdm_primary_info = { 41 .uf_info = { 42 .tsa = 0, 43 .cdp = 0, 44 .cds = 1, 45 .ctsp = 1, 46 .ctss = 1, 47 .revd = 0, 48 .urfs = 256, 49 .utfs = 256, 50 .urfet = 128, 51 .urfset = 192, 52 .utfet = 128, 53 .utftt = 0x40, 54 .ufpt = 256, 55 .mode = UCC_FAST_PROTOCOL_MODE_HDLC, 56 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, 57 .tenc = UCC_FAST_TX_ENCODING_NRZ, 58 .renc = UCC_FAST_RX_ENCODING_NRZ, 59 .tcrc = UCC_FAST_16_BIT_CRC, 60 .synl = UCC_FAST_SYNC_LEN_NOT_USED, 61 }, 62 63 .si_info = { 64 #ifdef TDM_PPPOHT_SLIC_MAXIN 65 .simr_rfsd = 1, 66 .simr_tfsd = 2, 67 #else 68 .simr_rfsd = 0, 69 .simr_tfsd = 0, 70 #endif 71 .simr_crt = 0, 72 .simr_sl = 0, 73 .simr_ce = 1, 74 .simr_fe = 1, 75 .simr_gm = 0, 76 }, 77 }; 78 79 static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; 80 81 static int uhdlc_init(struct ucc_hdlc_private *priv) 82 { 83 struct ucc_tdm_info *ut_info; 84 struct ucc_fast_info *uf_info; 85 u32 cecr_subblock; 86 u16 bd_status; 87 int ret, i; 88 void *bd_buffer; 89 dma_addr_t bd_dma_addr; 90 u32 riptr; 91 u32 tiptr; 92 u32 gumr; 93 94 ut_info = priv->ut_info; 95 uf_info = &ut_info->uf_info; 96 97 if (priv->tsa) { 98 uf_info->tsa = 1; 99 uf_info->ctsp = 1; 100 } 101 102 /* This sets HPM register in CMXUCR register which configures a 103 * open drain connected HDLC bus 104 */ 105 if (priv->hdlc_bus) 106 uf_info->brkpt_support = 1; 107 108 uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | 109 UCC_HDLC_UCCE_TXB) << 16); 110 111 ret = ucc_fast_init(uf_info, &priv->uccf); 112 if (ret) { 113 dev_err(priv->dev, "Failed to init uccf."); 114 return ret; 115 } 116 117 priv->uf_regs = priv->uccf->uf_regs; 118 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 119 120 /* Loopback mode */ 121 if (priv->loopback) { 122 dev_info(priv->dev, "Loopback Mode\n"); 123 /* use the same clock when work in loopback */ 124 qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1); 125 126 gumr = ioread32be(&priv->uf_regs->gumr); 127 gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS | 128 UCC_FAST_GUMR_TCI); 129 gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN); 130 iowrite32be(gumr, &priv->uf_regs->gumr); 131 } 132 133 /* Initialize SI */ 134 if (priv->tsa) 135 ucc_tdm_init(priv->utdm, priv->ut_info); 136 137 /* Write to QE CECR, UCCx channel to Stop Transmission */ 138 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 139 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 140 QE_CR_PROTOCOL_UNSPECIFIED, 0); 141 142 /* Set UPSMR normal mode (need fixed)*/ 143 iowrite32be(0, &priv->uf_regs->upsmr); 144 145 /* hdlc_bus mode */ 146 if (priv->hdlc_bus) { 147 u32 upsmr; 148 149 dev_info(priv->dev, "HDLC bus Mode\n"); 150 upsmr = ioread32be(&priv->uf_regs->upsmr); 151 152 /* bus mode and retransmit enable, with collision window 153 * set to 8 bytes 154 */ 155 upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS | 156 UCC_HDLC_UPSMR_CW8; 157 iowrite32be(upsmr, &priv->uf_regs->upsmr); 158 159 /* explicitly disable CDS & CTSP */ 160 gumr = ioread32be(&priv->uf_regs->gumr); 161 gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP); 162 /* set automatic sync to explicitly ignore CD signal */ 163 gumr |= UCC_FAST_GUMR_SYNL_AUTO; 164 iowrite32be(gumr, &priv->uf_regs->gumr); 165 } 166 167 priv->rx_ring_size = RX_BD_RING_LEN; 168 priv->tx_ring_size = TX_BD_RING_LEN; 169 /* Alloc Rx BD */ 170 priv->rx_bd_base = dma_alloc_coherent(priv->dev, 171 RX_BD_RING_LEN * sizeof(struct qe_bd), 172 &priv->dma_rx_bd, GFP_KERNEL); 173 174 if (!priv->rx_bd_base) { 175 dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); 176 ret = -ENOMEM; 177 goto free_uccf; 178 } 179 180 /* Alloc Tx BD */ 181 priv->tx_bd_base = dma_alloc_coherent(priv->dev, 182 TX_BD_RING_LEN * sizeof(struct qe_bd), 183 &priv->dma_tx_bd, GFP_KERNEL); 184 185 if (!priv->tx_bd_base) { 186 dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); 187 ret = -ENOMEM; 188 goto free_rx_bd; 189 } 190 191 /* Alloc parameter ram for ucc hdlc */ 192 priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), 193 ALIGNMENT_OF_UCC_HDLC_PRAM); 194 195 if (priv->ucc_pram_offset < 0) { 196 dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n"); 197 ret = -ENOMEM; 198 goto free_tx_bd; 199 } 200 201 priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff), 202 GFP_KERNEL); 203 if (!priv->rx_skbuff) 204 goto free_ucc_pram; 205 206 priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff), 207 GFP_KERNEL); 208 if (!priv->tx_skbuff) 209 goto free_rx_skbuff; 210 211 priv->skb_curtx = 0; 212 priv->skb_dirtytx = 0; 213 priv->curtx_bd = priv->tx_bd_base; 214 priv->dirty_tx = priv->tx_bd_base; 215 priv->currx_bd = priv->rx_bd_base; 216 priv->currx_bdnum = 0; 217 218 /* init parameter base */ 219 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 220 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 221 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 222 223 priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 224 qe_muram_addr(priv->ucc_pram_offset); 225 226 /* Zero out parameter ram */ 227 memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); 228 229 /* Alloc riptr, tiptr */ 230 riptr = qe_muram_alloc(32, 32); 231 if (riptr < 0) { 232 dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); 233 ret = -ENOMEM; 234 goto free_tx_skbuff; 235 } 236 237 tiptr = qe_muram_alloc(32, 32); 238 if (tiptr < 0) { 239 dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); 240 ret = -ENOMEM; 241 goto free_riptr; 242 } 243 244 /* Set RIPTR, TIPTR */ 245 iowrite16be(riptr, &priv->ucc_pram->riptr); 246 iowrite16be(tiptr, &priv->ucc_pram->tiptr); 247 248 /* Set MRBLR */ 249 iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr); 250 251 /* Set RBASE, TBASE */ 252 iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase); 253 iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase); 254 255 /* Set RSTATE, TSTATE */ 256 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate); 257 iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate); 258 259 /* Set C_MASK, C_PRES for 16bit CRC */ 260 iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask); 261 iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres); 262 263 iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); 264 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr); 265 iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); 266 iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); 267 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); 268 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); 269 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); 270 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 271 272 /* Get BD buffer */ 273 bd_buffer = dma_zalloc_coherent(priv->dev, 274 (RX_BD_RING_LEN + TX_BD_RING_LEN) * 275 MAX_RX_BUF_LENGTH, 276 &bd_dma_addr, GFP_KERNEL); 277 278 if (!bd_buffer) { 279 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 280 ret = -ENOMEM; 281 goto free_tiptr; 282 } 283 284 priv->rx_buffer = bd_buffer; 285 priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 286 287 priv->dma_rx_addr = bd_dma_addr; 288 priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; 289 290 for (i = 0; i < RX_BD_RING_LEN; i++) { 291 if (i < (RX_BD_RING_LEN - 1)) 292 bd_status = R_E_S | R_I_S; 293 else 294 bd_status = R_E_S | R_I_S | R_W_S; 295 296 iowrite16be(bd_status, &priv->rx_bd_base[i].status); 297 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 298 &priv->rx_bd_base[i].buf); 299 } 300 301 for (i = 0; i < TX_BD_RING_LEN; i++) { 302 if (i < (TX_BD_RING_LEN - 1)) 303 bd_status = T_I_S | T_TC_S; 304 else 305 bd_status = T_I_S | T_TC_S | T_W_S; 306 307 iowrite16be(bd_status, &priv->tx_bd_base[i].status); 308 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 309 &priv->tx_bd_base[i].buf); 310 } 311 312 return 0; 313 314 free_tiptr: 315 qe_muram_free(tiptr); 316 free_riptr: 317 qe_muram_free(riptr); 318 free_tx_skbuff: 319 kfree(priv->tx_skbuff); 320 free_rx_skbuff: 321 kfree(priv->rx_skbuff); 322 free_ucc_pram: 323 qe_muram_free(priv->ucc_pram_offset); 324 free_tx_bd: 325 dma_free_coherent(priv->dev, 326 TX_BD_RING_LEN * sizeof(struct qe_bd), 327 priv->tx_bd_base, priv->dma_tx_bd); 328 free_rx_bd: 329 dma_free_coherent(priv->dev, 330 RX_BD_RING_LEN * sizeof(struct qe_bd), 331 priv->rx_bd_base, priv->dma_rx_bd); 332 free_uccf: 333 ucc_fast_free(priv->uccf); 334 335 return ret; 336 } 337 338 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) 339 { 340 hdlc_device *hdlc = dev_to_hdlc(dev); 341 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; 342 struct qe_bd __iomem *bd; 343 u16 bd_status; 344 unsigned long flags; 345 u16 *proto_head; 346 347 switch (dev->type) { 348 case ARPHRD_RAWHDLC: 349 if (skb_headroom(skb) < HDLC_HEAD_LEN) { 350 dev->stats.tx_dropped++; 351 dev_kfree_skb(skb); 352 netdev_err(dev, "No enough space for hdlc head\n"); 353 return -ENOMEM; 354 } 355 356 skb_push(skb, HDLC_HEAD_LEN); 357 358 proto_head = (u16 *)skb->data; 359 *proto_head = htons(DEFAULT_HDLC_HEAD); 360 361 dev->stats.tx_bytes += skb->len; 362 break; 363 364 case ARPHRD_PPP: 365 proto_head = (u16 *)skb->data; 366 if (*proto_head != htons(DEFAULT_PPP_HEAD)) { 367 dev->stats.tx_dropped++; 368 dev_kfree_skb(skb); 369 netdev_err(dev, "Wrong ppp header\n"); 370 return -ENOMEM; 371 } 372 373 dev->stats.tx_bytes += skb->len; 374 break; 375 376 default: 377 dev->stats.tx_dropped++; 378 dev_kfree_skb(skb); 379 return -ENOMEM; 380 } 381 spin_lock_irqsave(&priv->lock, flags); 382 383 /* Start from the next BD that should be filled */ 384 bd = priv->curtx_bd; 385 bd_status = ioread16be(&bd->status); 386 /* Save the skb pointer so we can free it later */ 387 priv->tx_skbuff[priv->skb_curtx] = skb; 388 389 /* Update the current skb pointer (wrapping if this was the last) */ 390 priv->skb_curtx = 391 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 392 393 /* copy skb data to tx buffer for sdma processing */ 394 memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 395 skb->data, skb->len); 396 397 /* set bd status and length */ 398 bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; 399 400 iowrite16be(skb->len, &bd->length); 401 iowrite16be(bd_status, &bd->status); 402 403 /* Move to next BD in the ring */ 404 if (!(bd_status & T_W_S)) 405 bd += 1; 406 else 407 bd = priv->tx_bd_base; 408 409 if (bd == priv->dirty_tx) { 410 if (!netif_queue_stopped(dev)) 411 netif_stop_queue(dev); 412 } 413 414 priv->curtx_bd = bd; 415 416 spin_unlock_irqrestore(&priv->lock, flags); 417 418 return NETDEV_TX_OK; 419 } 420 421 static int hdlc_tx_done(struct ucc_hdlc_private *priv) 422 { 423 /* Start from the next BD that should be filled */ 424 struct net_device *dev = priv->ndev; 425 struct qe_bd *bd; /* BD pointer */ 426 u16 bd_status; 427 428 bd = priv->dirty_tx; 429 bd_status = ioread16be(&bd->status); 430 431 /* Normal processing. */ 432 while ((bd_status & T_R_S) == 0) { 433 struct sk_buff *skb; 434 435 /* BD contains already transmitted buffer. */ 436 /* Handle the transmitted buffer and release */ 437 /* the BD to be used with the current frame */ 438 439 skb = priv->tx_skbuff[priv->skb_dirtytx]; 440 if (!skb) 441 break; 442 dev->stats.tx_packets++; 443 memset(priv->tx_buffer + 444 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 445 0, skb->len); 446 dev_kfree_skb_irq(skb); 447 448 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 449 priv->skb_dirtytx = 450 (priv->skb_dirtytx + 451 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); 452 453 /* We freed a buffer, so now we can restart transmission */ 454 if (netif_queue_stopped(dev)) 455 netif_wake_queue(dev); 456 457 /* Advance the confirmation BD pointer */ 458 if (!(bd_status & T_W_S)) 459 bd += 1; 460 else 461 bd = priv->tx_bd_base; 462 bd_status = ioread16be(&bd->status); 463 } 464 priv->dirty_tx = bd; 465 466 return 0; 467 } 468 469 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) 470 { 471 struct net_device *dev = priv->ndev; 472 struct sk_buff *skb = NULL; 473 hdlc_device *hdlc = dev_to_hdlc(dev); 474 struct qe_bd *bd; 475 u16 bd_status; 476 u16 length, howmany = 0; 477 u8 *bdbuffer; 478 479 bd = priv->currx_bd; 480 bd_status = ioread16be(&bd->status); 481 482 /* while there are received buffers and BD is full (~R_E) */ 483 while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { 484 if (bd_status & R_OV_S) 485 dev->stats.rx_over_errors++; 486 if (bd_status & R_CR_S) { 487 dev->stats.rx_crc_errors++; 488 dev->stats.rx_dropped++; 489 goto recycle; 490 } 491 bdbuffer = priv->rx_buffer + 492 (priv->currx_bdnum * MAX_RX_BUF_LENGTH); 493 length = ioread16be(&bd->length); 494 495 switch (dev->type) { 496 case ARPHRD_RAWHDLC: 497 bdbuffer += HDLC_HEAD_LEN; 498 length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); 499 500 skb = dev_alloc_skb(length); 501 if (!skb) { 502 dev->stats.rx_dropped++; 503 return -ENOMEM; 504 } 505 506 skb_put(skb, length); 507 skb->len = length; 508 skb->dev = dev; 509 memcpy(skb->data, bdbuffer, length); 510 break; 511 512 case ARPHRD_PPP: 513 length -= HDLC_CRC_SIZE; 514 515 skb = dev_alloc_skb(length); 516 if (!skb) { 517 dev->stats.rx_dropped++; 518 return -ENOMEM; 519 } 520 521 skb_put(skb, length); 522 skb->len = length; 523 skb->dev = dev; 524 memcpy(skb->data, bdbuffer, length); 525 break; 526 } 527 528 dev->stats.rx_packets++; 529 dev->stats.rx_bytes += skb->len; 530 howmany++; 531 if (hdlc->proto) 532 skb->protocol = hdlc_type_trans(skb, dev); 533 netif_receive_skb(skb); 534 535 recycle: 536 iowrite16be(bd_status | R_E_S | R_I_S, &bd->status); 537 538 /* update to point at the next bd */ 539 if (bd_status & R_W_S) { 540 priv->currx_bdnum = 0; 541 bd = priv->rx_bd_base; 542 } else { 543 if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) 544 priv->currx_bdnum += 1; 545 else 546 priv->currx_bdnum = RX_BD_RING_LEN - 1; 547 548 bd += 1; 549 } 550 551 bd_status = ioread16be(&bd->status); 552 } 553 554 priv->currx_bd = bd; 555 return howmany; 556 } 557 558 static int ucc_hdlc_poll(struct napi_struct *napi, int budget) 559 { 560 struct ucc_hdlc_private *priv = container_of(napi, 561 struct ucc_hdlc_private, 562 napi); 563 int howmany; 564 565 /* Tx event processing */ 566 spin_lock(&priv->lock); 567 hdlc_tx_done(priv); 568 spin_unlock(&priv->lock); 569 570 howmany = 0; 571 howmany += hdlc_rx_done(priv, budget - howmany); 572 573 if (howmany < budget) { 574 napi_complete_done(napi, howmany); 575 qe_setbits32(priv->uccf->p_uccm, 576 (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); 577 } 578 579 return howmany; 580 } 581 582 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) 583 { 584 struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; 585 struct net_device *dev = priv->ndev; 586 struct ucc_fast_private *uccf; 587 struct ucc_tdm_info *ut_info; 588 u32 ucce; 589 u32 uccm; 590 591 ut_info = priv->ut_info; 592 uccf = priv->uccf; 593 594 ucce = ioread32be(uccf->p_ucce); 595 uccm = ioread32be(uccf->p_uccm); 596 ucce &= uccm; 597 iowrite32be(ucce, uccf->p_ucce); 598 if (!ucce) 599 return IRQ_NONE; 600 601 if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) { 602 if (napi_schedule_prep(&priv->napi)) { 603 uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) 604 << 16); 605 iowrite32be(uccm, uccf->p_uccm); 606 __napi_schedule(&priv->napi); 607 } 608 } 609 610 /* Errors and other events */ 611 if (ucce >> 16 & UCC_HDLC_UCCE_BSY) 612 dev->stats.rx_errors++; 613 if (ucce >> 16 & UCC_HDLC_UCCE_TXE) 614 dev->stats.tx_errors++; 615 616 return IRQ_HANDLED; 617 } 618 619 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 620 { 621 const size_t size = sizeof(te1_settings); 622 te1_settings line; 623 struct ucc_hdlc_private *priv = netdev_priv(dev); 624 625 if (cmd != SIOCWANDEV) 626 return hdlc_ioctl(dev, ifr, cmd); 627 628 switch (ifr->ifr_settings.type) { 629 case IF_GET_IFACE: 630 ifr->ifr_settings.type = IF_IFACE_E1; 631 if (ifr->ifr_settings.size < size) { 632 ifr->ifr_settings.size = size; /* data size wanted */ 633 return -ENOBUFS; 634 } 635 memset(&line, 0, sizeof(line)); 636 line.clock_type = priv->clocking; 637 638 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) 639 return -EFAULT; 640 return 0; 641 642 default: 643 return hdlc_ioctl(dev, ifr, cmd); 644 } 645 } 646 647 static int uhdlc_open(struct net_device *dev) 648 { 649 u32 cecr_subblock; 650 hdlc_device *hdlc = dev_to_hdlc(dev); 651 struct ucc_hdlc_private *priv = hdlc->priv; 652 struct ucc_tdm *utdm = priv->utdm; 653 654 if (priv->hdlc_busy != 1) { 655 if (request_irq(priv->ut_info->uf_info.irq, 656 ucc_hdlc_irq_handler, 0, "hdlc", priv)) 657 return -ENODEV; 658 659 cecr_subblock = ucc_fast_get_qe_cr_subblock( 660 priv->ut_info->uf_info.ucc_num); 661 662 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 663 QE_CR_PROTOCOL_UNSPECIFIED, 0); 664 665 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 666 667 /* Enable the TDM port */ 668 if (priv->tsa) 669 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 670 671 priv->hdlc_busy = 1; 672 netif_device_attach(priv->ndev); 673 napi_enable(&priv->napi); 674 netif_start_queue(dev); 675 hdlc_open(dev); 676 } 677 678 return 0; 679 } 680 681 static void uhdlc_memclean(struct ucc_hdlc_private *priv) 682 { 683 qe_muram_free(priv->ucc_pram->riptr); 684 qe_muram_free(priv->ucc_pram->tiptr); 685 686 if (priv->rx_bd_base) { 687 dma_free_coherent(priv->dev, 688 RX_BD_RING_LEN * sizeof(struct qe_bd), 689 priv->rx_bd_base, priv->dma_rx_bd); 690 691 priv->rx_bd_base = NULL; 692 priv->dma_rx_bd = 0; 693 } 694 695 if (priv->tx_bd_base) { 696 dma_free_coherent(priv->dev, 697 TX_BD_RING_LEN * sizeof(struct qe_bd), 698 priv->tx_bd_base, priv->dma_tx_bd); 699 700 priv->tx_bd_base = NULL; 701 priv->dma_tx_bd = 0; 702 } 703 704 if (priv->ucc_pram) { 705 qe_muram_free(priv->ucc_pram_offset); 706 priv->ucc_pram = NULL; 707 priv->ucc_pram_offset = 0; 708 } 709 710 kfree(priv->rx_skbuff); 711 priv->rx_skbuff = NULL; 712 713 kfree(priv->tx_skbuff); 714 priv->tx_skbuff = NULL; 715 716 if (priv->uf_regs) { 717 iounmap(priv->uf_regs); 718 priv->uf_regs = NULL; 719 } 720 721 if (priv->uccf) { 722 ucc_fast_free(priv->uccf); 723 priv->uccf = NULL; 724 } 725 726 if (priv->rx_buffer) { 727 dma_free_coherent(priv->dev, 728 RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 729 priv->rx_buffer, priv->dma_rx_addr); 730 priv->rx_buffer = NULL; 731 priv->dma_rx_addr = 0; 732 } 733 734 if (priv->tx_buffer) { 735 dma_free_coherent(priv->dev, 736 TX_BD_RING_LEN * MAX_RX_BUF_LENGTH, 737 priv->tx_buffer, priv->dma_tx_addr); 738 priv->tx_buffer = NULL; 739 priv->dma_tx_addr = 0; 740 } 741 } 742 743 static int uhdlc_close(struct net_device *dev) 744 { 745 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 746 struct ucc_tdm *utdm = priv->utdm; 747 u32 cecr_subblock; 748 749 napi_disable(&priv->napi); 750 cecr_subblock = ucc_fast_get_qe_cr_subblock( 751 priv->ut_info->uf_info.ucc_num); 752 753 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, 754 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 755 qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, 756 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 757 758 if (priv->tsa) 759 utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port); 760 761 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 762 763 free_irq(priv->ut_info->uf_info.irq, priv); 764 netif_stop_queue(dev); 765 priv->hdlc_busy = 0; 766 767 return 0; 768 } 769 770 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, 771 unsigned short parity) 772 { 773 struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; 774 775 if (encoding != ENCODING_NRZ && 776 encoding != ENCODING_NRZI) 777 return -EINVAL; 778 779 if (parity != PARITY_NONE && 780 parity != PARITY_CRC32_PR1_CCITT && 781 parity != PARITY_CRC16_PR1_CCITT) 782 return -EINVAL; 783 784 priv->encoding = encoding; 785 priv->parity = parity; 786 787 return 0; 788 } 789 790 #ifdef CONFIG_PM 791 static void store_clk_config(struct ucc_hdlc_private *priv) 792 { 793 struct qe_mux *qe_mux_reg = &qe_immr->qmx; 794 795 /* store si clk */ 796 priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h); 797 priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l); 798 799 /* store si sync */ 800 priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr); 801 802 /* store ucc clk */ 803 memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); 804 } 805 806 static void resume_clk_config(struct ucc_hdlc_private *priv) 807 { 808 struct qe_mux *qe_mux_reg = &qe_immr->qmx; 809 810 memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); 811 812 iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h); 813 iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l); 814 815 iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr); 816 } 817 818 static int uhdlc_suspend(struct device *dev) 819 { 820 struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 821 struct ucc_tdm_info *ut_info; 822 struct ucc_fast __iomem *uf_regs; 823 824 if (!priv) 825 return -EINVAL; 826 827 if (!netif_running(priv->ndev)) 828 return 0; 829 830 netif_device_detach(priv->ndev); 831 napi_disable(&priv->napi); 832 833 ut_info = priv->ut_info; 834 uf_regs = priv->uf_regs; 835 836 /* backup gumr guemr*/ 837 priv->gumr = ioread32be(&uf_regs->gumr); 838 priv->guemr = ioread8(&uf_regs->guemr); 839 840 priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak), 841 GFP_KERNEL); 842 if (!priv->ucc_pram_bak) 843 return -ENOMEM; 844 845 /* backup HDLC parameter */ 846 memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, 847 sizeof(struct ucc_hdlc_param)); 848 849 /* store the clk configuration */ 850 store_clk_config(priv); 851 852 /* save power */ 853 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 854 855 return 0; 856 } 857 858 static int uhdlc_resume(struct device *dev) 859 { 860 struct ucc_hdlc_private *priv = dev_get_drvdata(dev); 861 struct ucc_tdm *utdm; 862 struct ucc_tdm_info *ut_info; 863 struct ucc_fast __iomem *uf_regs; 864 struct ucc_fast_private *uccf; 865 struct ucc_fast_info *uf_info; 866 int ret, i; 867 u32 cecr_subblock; 868 u16 bd_status; 869 870 if (!priv) 871 return -EINVAL; 872 873 if (!netif_running(priv->ndev)) 874 return 0; 875 876 utdm = priv->utdm; 877 ut_info = priv->ut_info; 878 uf_info = &ut_info->uf_info; 879 uf_regs = priv->uf_regs; 880 uccf = priv->uccf; 881 882 /* restore gumr guemr */ 883 iowrite8(priv->guemr, &uf_regs->guemr); 884 iowrite32be(priv->gumr, &uf_regs->gumr); 885 886 /* Set Virtual Fifo registers */ 887 iowrite16be(uf_info->urfs, &uf_regs->urfs); 888 iowrite16be(uf_info->urfet, &uf_regs->urfet); 889 iowrite16be(uf_info->urfset, &uf_regs->urfset); 890 iowrite16be(uf_info->utfs, &uf_regs->utfs); 891 iowrite16be(uf_info->utfet, &uf_regs->utfet); 892 iowrite16be(uf_info->utftt, &uf_regs->utftt); 893 /* utfb, urfb are offsets from MURAM base */ 894 iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); 895 iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); 896 897 /* Rx Tx and sync clock routing */ 898 resume_clk_config(priv); 899 900 iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); 901 iowrite32be(0xffffffff, &uf_regs->ucce); 902 903 ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 904 905 /* rebuild SIRAM */ 906 if (priv->tsa) 907 ucc_tdm_init(priv->utdm, priv->ut_info); 908 909 /* Write to QE CECR, UCCx channel to Stop Transmission */ 910 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 911 ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, 912 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 913 914 /* Set UPSMR normal mode */ 915 iowrite32be(0, &uf_regs->upsmr); 916 917 /* init parameter base */ 918 cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); 919 ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, 920 QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); 921 922 priv->ucc_pram = (struct ucc_hdlc_param __iomem *) 923 qe_muram_addr(priv->ucc_pram_offset); 924 925 /* restore ucc parameter */ 926 memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, 927 sizeof(struct ucc_hdlc_param)); 928 kfree(priv->ucc_pram_bak); 929 930 /* rebuild BD entry */ 931 for (i = 0; i < RX_BD_RING_LEN; i++) { 932 if (i < (RX_BD_RING_LEN - 1)) 933 bd_status = R_E_S | R_I_S; 934 else 935 bd_status = R_E_S | R_I_S | R_W_S; 936 937 iowrite16be(bd_status, &priv->rx_bd_base[i].status); 938 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, 939 &priv->rx_bd_base[i].buf); 940 } 941 942 for (i = 0; i < TX_BD_RING_LEN; i++) { 943 if (i < (TX_BD_RING_LEN - 1)) 944 bd_status = T_I_S | T_TC_S; 945 else 946 bd_status = T_I_S | T_TC_S | T_W_S; 947 948 iowrite16be(bd_status, &priv->tx_bd_base[i].status); 949 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, 950 &priv->tx_bd_base[i].buf); 951 } 952 953 /* if hdlc is busy enable TX and RX */ 954 if (priv->hdlc_busy == 1) { 955 cecr_subblock = ucc_fast_get_qe_cr_subblock( 956 priv->ut_info->uf_info.ucc_num); 957 958 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, 959 (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); 960 961 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); 962 963 /* Enable the TDM port */ 964 if (priv->tsa) 965 utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); 966 } 967 968 napi_enable(&priv->napi); 969 netif_device_attach(priv->ndev); 970 971 return 0; 972 } 973 974 static const struct dev_pm_ops uhdlc_pm_ops = { 975 .suspend = uhdlc_suspend, 976 .resume = uhdlc_resume, 977 .freeze = uhdlc_suspend, 978 .thaw = uhdlc_resume, 979 }; 980 981 #define HDLC_PM_OPS (&uhdlc_pm_ops) 982 983 #else 984 985 #define HDLC_PM_OPS NULL 986 987 #endif 988 static const struct net_device_ops uhdlc_ops = { 989 .ndo_open = uhdlc_open, 990 .ndo_stop = uhdlc_close, 991 .ndo_start_xmit = hdlc_start_xmit, 992 .ndo_do_ioctl = uhdlc_ioctl, 993 }; 994 995 static int ucc_hdlc_probe(struct platform_device *pdev) 996 { 997 struct device_node *np = pdev->dev.of_node; 998 struct ucc_hdlc_private *uhdlc_priv = NULL; 999 struct ucc_tdm_info *ut_info; 1000 struct ucc_tdm *utdm = NULL; 1001 struct resource res; 1002 struct net_device *dev; 1003 hdlc_device *hdlc; 1004 int ucc_num; 1005 const char *sprop; 1006 int ret; 1007 u32 val; 1008 1009 ret = of_property_read_u32_index(np, "cell-index", 0, &val); 1010 if (ret) { 1011 dev_err(&pdev->dev, "Invalid ucc property\n"); 1012 return -ENODEV; 1013 } 1014 1015 ucc_num = val - 1; 1016 if ((ucc_num > 3) || (ucc_num < 0)) { 1017 dev_err(&pdev->dev, ": Invalid UCC num\n"); 1018 return -EINVAL; 1019 } 1020 1021 memcpy(&utdm_info[ucc_num], &utdm_primary_info, 1022 sizeof(utdm_primary_info)); 1023 1024 ut_info = &utdm_info[ucc_num]; 1025 ut_info->uf_info.ucc_num = ucc_num; 1026 1027 sprop = of_get_property(np, "rx-clock-name", NULL); 1028 if (sprop) { 1029 ut_info->uf_info.rx_clock = qe_clock_source(sprop); 1030 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) || 1031 (ut_info->uf_info.rx_clock > QE_CLK24)) { 1032 dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1033 return -EINVAL; 1034 } 1035 } else { 1036 dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); 1037 return -EINVAL; 1038 } 1039 1040 sprop = of_get_property(np, "tx-clock-name", NULL); 1041 if (sprop) { 1042 ut_info->uf_info.tx_clock = qe_clock_source(sprop); 1043 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) || 1044 (ut_info->uf_info.tx_clock > QE_CLK24)) { 1045 dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1046 return -EINVAL; 1047 } 1048 } else { 1049 dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); 1050 return -EINVAL; 1051 } 1052 1053 ret = of_address_to_resource(np, 0, &res); 1054 if (ret) 1055 return -EINVAL; 1056 1057 ut_info->uf_info.regs = res.start; 1058 ut_info->uf_info.irq = irq_of_parse_and_map(np, 0); 1059 1060 uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL); 1061 if (!uhdlc_priv) { 1062 return -ENOMEM; 1063 } 1064 1065 dev_set_drvdata(&pdev->dev, uhdlc_priv); 1066 uhdlc_priv->dev = &pdev->dev; 1067 uhdlc_priv->ut_info = ut_info; 1068 1069 if (of_get_property(np, "fsl,tdm-interface", NULL)) 1070 uhdlc_priv->tsa = 1; 1071 1072 if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) 1073 uhdlc_priv->loopback = 1; 1074 1075 if (of_get_property(np, "fsl,hdlc-bus", NULL)) 1076 uhdlc_priv->hdlc_bus = 1; 1077 1078 if (uhdlc_priv->tsa == 1) { 1079 utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); 1080 if (!utdm) { 1081 ret = -ENOMEM; 1082 dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n"); 1083 goto free_uhdlc_priv; 1084 } 1085 uhdlc_priv->utdm = utdm; 1086 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1087 if (ret) 1088 goto free_utdm; 1089 } 1090 1091 ret = uhdlc_init(uhdlc_priv); 1092 if (ret) { 1093 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1094 goto free_utdm; 1095 } 1096 1097 dev = alloc_hdlcdev(uhdlc_priv); 1098 if (!dev) { 1099 ret = -ENOMEM; 1100 pr_err("ucc_hdlc: unable to allocate memory\n"); 1101 goto undo_uhdlc_init; 1102 } 1103 1104 uhdlc_priv->ndev = dev; 1105 hdlc = dev_to_hdlc(dev); 1106 dev->tx_queue_len = 16; 1107 dev->netdev_ops = &uhdlc_ops; 1108 hdlc->attach = ucc_hdlc_attach; 1109 hdlc->xmit = ucc_hdlc_tx; 1110 netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); 1111 if (register_hdlc_device(dev)) { 1112 ret = -ENOBUFS; 1113 pr_err("ucc_hdlc: unable to register hdlc device\n"); 1114 free_netdev(dev); 1115 goto free_dev; 1116 } 1117 1118 return 0; 1119 1120 free_dev: 1121 free_netdev(dev); 1122 undo_uhdlc_init: 1123 free_utdm: 1124 if (uhdlc_priv->tsa) 1125 kfree(utdm); 1126 free_uhdlc_priv: 1127 kfree(uhdlc_priv); 1128 return ret; 1129 } 1130 1131 static int ucc_hdlc_remove(struct platform_device *pdev) 1132 { 1133 struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); 1134 1135 uhdlc_memclean(priv); 1136 1137 if (priv->utdm->si_regs) { 1138 iounmap(priv->utdm->si_regs); 1139 priv->utdm->si_regs = NULL; 1140 } 1141 1142 if (priv->utdm->siram) { 1143 iounmap(priv->utdm->siram); 1144 priv->utdm->siram = NULL; 1145 } 1146 kfree(priv); 1147 1148 dev_info(&pdev->dev, "UCC based hdlc module removed\n"); 1149 1150 return 0; 1151 } 1152 1153 static const struct of_device_id fsl_ucc_hdlc_of_match[] = { 1154 { 1155 .compatible = "fsl,ucc-hdlc", 1156 }, 1157 {}, 1158 }; 1159 1160 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); 1161 1162 static struct platform_driver ucc_hdlc_driver = { 1163 .probe = ucc_hdlc_probe, 1164 .remove = ucc_hdlc_remove, 1165 .driver = { 1166 .name = DRV_NAME, 1167 .pm = HDLC_PM_OPS, 1168 .of_match_table = fsl_ucc_hdlc_of_match, 1169 }, 1170 }; 1171 1172 module_platform_driver(ucc_hdlc_driver); 1173 MODULE_LICENSE("GPL"); 1174