/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>

#include <linux/vmalloc.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#define RX_RING_SIZE	32
#define TX_RING_SIZE	64

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

/* NAPI function */
static int fs_enet_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int dirtyidx, do_wake, do_restart;
	int tx_left = TX_RING_SIZE;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	/* clear status bits for napi */
	(*fep->ops->napi_clear_event)(dev);

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				dev->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				dev->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				dev->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				dev->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			dev->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* unmap */
		if (fep->mapped_as_page[dirtyidx])
			dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
				       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
		else
			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 CBDR_DATLEN(bdp), DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		if (skb) {
			dev_kfree_skb(skb);
			fep->tx_skbuff[dirtyidx] = NULL;
		}

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (++fep->tx_free == MAX_SKB_FRAGS)
			do_wake = 1;
		tx_left--;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
	       received < budget) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				dev->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				dev->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				dev->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				dev->stats.rx_crc_errors++;

			skbn = fep->rx_skbuff[curidx];
		} else {
			skb = fep->rx_skbuff[curidx];

			/*
			 * Process the incoming frame.
			 */
			dev->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			dev->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					swap(skb, skbn);
					dma_sync_single_for_cpu(fep->dev,
						CBDR_BUFADDR(bdp),
						L1_CACHE_ALIGN(pkt_len),
						DMA_FROM_DEVICE);
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn) {
					dma_addr_t dma;

					skb_align(skbn, ENET_RX_ALIGN);

					dma_unmap_single(fep->dev,
						CBDR_BUFADDR(bdp),
						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						DMA_FROM_DEVICE);

					dma = dma_map_single(fep->dev,
						skbn->data,
						L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						DMA_FROM_DEVICE);
					CBDW_BUFADDR(bdp, dma);
				}
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				dev->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	if (received < budget && tx_left) {
		/* done */
		napi_complete_done(napi, received);
		(*fep->ops->napi_enable)(dev);

		return received;
	}

	return budget;
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi);

			/* NOTE: it is possible for FCCs in NAPI mode */
			/* to submit a spurious interrupt while in poll */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}

	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fill up the remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * MPC5121 FEC requires 4-byte alignment for TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	if (skb_linearize(skb))
		return NULL;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif

static netdev_tx_t
fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	int nr_frags;
	skb_frag_t *frag;
	int len;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	int is_aligned = 1;
	int i;

	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
		is_aligned = 0;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
		frag = skb_shinfo(skb)->frags;
		for (i = 0; i < nr_frags; i++, frag++) {
			if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
				is_aligned = 0;
				break;
			}
		}
	}

	if (!is_aligned) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost the packet due to a memory allocation
			 * error in tx_skb_align_workaround(). Hopefully the
			 * original skb is still valid, so try to transmit it
			 * later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif

	spin_lock(&fep->tx_lock);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock(&fep->tx_lock);

		/*
		 * Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!.\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;

	len = skb->len;
	dev->stats.tx_bytes += len;
	if (nr_frags)
		len -= skb->data_len;
	fep->tx_free -= nr_frags + 1;
	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, len);

	fep->mapped_as_page[curidx] = 0;
	frag = skb_shinfo(skb)->frags;
	while (nr_frags) {
		CBDC_SC(bdp,
			BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
			BD_ENET_TX_TC);
		CBDS_SC(bdp, BD_ENET_TX_READY);

		if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
			bdp++;
			curidx++;
		} else {
			bdp = fep->tx_bd_base;
			curidx = 0;
		}

		len = skb_frag_size(frag);
		CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
						   DMA_TO_DEVICE));
		CBDW_DATLEN(bdp, len);

		fep->tx_skbuff[curidx] = NULL;
		fep->mapped_as_page[curidx] = 1;

		frag++;
		nr_frags--;
	}

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDC_SC(bdp, BD_ENET_TX_STATS);
	CBDS_SC(bdp, sc);

	/* Save skb pointer. */
	fep->tx_skbuff[curidx] = skb;

	/* If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		bdp++;
	else
		bdp = fep->tx_bd_base;
	fep->cur_tx = bdp;

	if (fep->tx_free < MAX_SKB_FRAGS)
		netif_stop_queue(dev);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock(&fep->tx_lock);

	return NETDEV_TX_OK;
}

static void fs_timeout_work(struct work_struct *work)
{
	struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
						   timeout_work);
	struct net_device *dev = fep->ndev;
	unsigned long flags;
	int wake = 0;

	dev->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(dev->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
	}

	phy_start(dev->phydev);
	wake = fep->tx_free >= MAX_SKB_FRAGS &&
	       !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

static void fs_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	schedule_work(&fep->timeout_work);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}


static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;
	phy_interface_t iface;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	iface = fep->fpi->use_rmii ?
		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				iface);
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	return 0;
}

static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_napi */
	fs_init_bds(fep->ndev);

	napi_enable(&fep->napi);

	/* Install our interrupt handler.
	 */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		napi_disable(&fep->napi);
		return err;
	}
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	napi_disable(&fep->napi);
	cancel_work_sync(&fep->timeout_work);
	phy_stop(dev->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(dev->phydev);
	free_irq(fep->interrupt, dev);

	return 0;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->msg_enable = value;
}

static int fs_get_tunable(struct net_device *dev,
			  const struct ethtool_tunable *tuna, void *data)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fpi->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fs_set_tunable(struct net_device *dev,
			  const struct ethtool_tunable *tuna, const void *data)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fpi->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_tunable = fs_get_tunable,
	.set_tunable = fs_set_tunable,
};

/**************************************************************************************/

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
#define IS_FEC(ops) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open = fs_enet_open,
	.ndo_stop = fs_enet_close,
	.ndo_start_xmit = fs_enet_start_xmit,
	.ndo_tx_timeout = fs_timeout,
	.ndo_set_rx_mode = fs_set_multicast_list,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = fs_enet_netpoll,
#endif
};

static int fs_enet_probe(struct platform_device *ofdev)
{
	const struct fs_ops *ops;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	struct clk *clk;
	int err;
	const char *phy_connection_type;
	int privsize, len, ret = -ENODEV;

	ops = device_get_match_data(&ofdev->dev);
	if (!ops)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(ops)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = RX_RING_SIZE;
	fpi->tx_ring = TX_RING_SIZE;
	fpi->rx_copybreak = 240;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
		err = of_phy_register_fixed_link(ofdev->dev.of_node);
		if (err)
			goto out_free_fpi;

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		fpi->phy_node = of_node_get(ofdev->dev.of_node);
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
		phy_connection_type = of_get_property(ofdev->dev.of_node,
						      "phy-connection-type", NULL);
		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
			fpi->use_rmii = 1;
	}

	/* make clock lookup non-fatal (the driver is shared among platforms),
	 * but require enable to succeed when a clock was specified/found,
	 * keep a reference to the clock upon successful acquisition
	 */
	clk = devm_clk_get(&ofdev->dev, "per");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			goto out_deregister_fixed_link;

		fpi->clk_per = clk;
	}

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		     (fpi->rx_ring + fpi->tx_ring) +
		   sizeof(char) * fpi->tx_ring;

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	platform_set_drvdata(ofdev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = ops;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
	fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
				       fpi->tx_ring);

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	of_get_ethdev_address(ofdev->dev.of_node, ndev);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	INIT_WORK(&fep->timeout_work, fs_timeout_work);
	netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
			      fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	netif_carrier_off(ndev);

	ndev->features |= NETIF_F_SG;

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
out_put:
	clk_disable_unprepare(fpi->clk_per);
out_deregister_fixed_link:
	of_node_put(fpi->phy_node);
	if (of_phy_is_fixed_link(ofdev->dev.of_node))
		of_phy_deregister_fixed_link(ofdev->dev.of_node);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static void fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = platform_get_drvdata(ofdev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	clk_disable_unprepare(fep->fpi->clk_per);
	if (of_phy_is_fixed_link(ofdev->dev.of_node))
		of_phy_deregister_fixed_link(ofdev->dev.of_node);
	free_netdev(ndev);
}

static const struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove_new = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);