// SPDX-License-Identifier: GPL-2.0-only
/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>
#include <linux/rtnetlink.h>

#include <linux/vmalloc.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#define RX_RING_SIZE	32
#define TX_RING_SIZE	64

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

static int fs_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return phylink_mii_ioctl(fep->phylink, ifr, cmd);
}

static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

/* NAPI function */
static int fs_enet_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	const struct fs_platform_info *fpi = fep->fpi;
	struct net_device *dev = fep->ndev;
	int curidx, dirtyidx, received = 0;
	int do_wake = 0, do_restart = 0;
	int tx_left = TX_RING_SIZE;
	struct sk_buff *skb, *skbn;
	cbd_t __iomem *bdp;
	u16 pkt_len, sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	/* clear status bits for napi */
	(*fep->ops->napi_clear_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/* Check for errors.
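		 * Any of HB/LC/RL/UN/CSL is counted per cause below; LC, RL
		 * and UN additionally bump tx_errors and request a
		 * transmitter restart once the reclaim loop is done.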
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				dev->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				dev->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				dev->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				dev->stats.tx_errors++;
				do_restart = 1;
			}
		} else {
			dev->stats.tx_packets++;
		}

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* unmap */
		if (fep->mapped_as_page[dirtyidx])
			dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
				       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
		else
			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 CBDR_DATLEN(bdp), DMA_TO_DEVICE);

		/* Free the sk buffer associated with this last transmit. */
		if (skb) {
			dev_kfree_skb(skb);
			fep->tx_skbuff[dirtyidx] = NULL;
		}

		/* Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/* Since we have freed up a buffer, the ring is no longer full.
		 */
		if (++fep->tx_free == MAX_SKB_FRAGS)
			do_wake = 1;
		tx_left--;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

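	/* Receive processing: frames no longer than rx_copybreak are copied
	 * into a small freshly allocated skb so the mapped DMA buffer can
	 * stay in the ring; larger frames are passed up whole and a
	 * replacement buffer is allocated and mapped in their place.
	 */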
	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
	       received < budget) {
		curidx = bdp - fep->rx_bd_base;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/* Check for errors. */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				dev->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				dev->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				dev->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				dev->stats.rx_crc_errors++;

			skbn = fep->rx_skbuff[curidx];
		} else {
			skb = fep->rx_skbuff[curidx];

			/* Process the incoming frame */
			dev->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			dev->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb, skbn->data,
								  pkt_len);
					swap(skb, skbn);
					dma_sync_single_for_cpu(fep->dev,
								CBDR_BUFADDR(bdp),
								L1_CACHE_ALIGN(pkt_len),
								DMA_FROM_DEVICE);
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn) {
					dma_addr_t dma;

					skb_align(skbn, ENET_RX_ALIGN);

					dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
							 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
							 DMA_FROM_DEVICE);

					dma = dma_map_single(fep->dev, skbn->data,
							     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
							     DMA_FROM_DEVICE);
					CBDW_BUFADDR(bdp, dma);
				}
			}

			if (skbn) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				dev->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/* Update BD pointer to next entry */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	if (received < budget && tx_left) {
		/* done */
		napi_complete_done(napi, received);
		(*fep->ops->napi_enable)(dev);

		return received;
	}

	return budget;
}

/* The interrupt handler.
 * This is called from the MPC core interrupt.
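 * Events covered by fep->ev_napi are not acknowledged in the first pass;
 * when one of fep->ev is pending, the NAPI interrupt sources are disabled,
 * the pending NAPI events are cleared and the poll loop above is scheduled
 * to do the actual work.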
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u32 int_events, int_clr_events;
	struct fs_enet_private *fep;
	int nr, napi_ok, handled;

	fep = netdev_priv(dev);

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi);

			/* NOTE: it is possible for FCCs in NAPI mode
			 * to submit a spurious interrupt while in poll
			 */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->tx_bd_base;
	fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/* Initialize the receive buffer descriptors */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (!skb)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data,
						 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
						 DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}

	/* If allocation failed above, fill up the remainder */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/* ...and the same for transmit. */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/* Reset SKB transmit buffers. */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		skb = fep->tx_skbuff[i];
		if (!skb)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/* Reset SKB receive buffers */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = fep->rx_skbuff[i];
		if (!skb)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/* MPC5121 FEC requires 4-byte alignment for TX data buffer!
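 * The helper below linearizes the skb and copies its data into a freshly
 * allocated, 4-byte-aligned buffer, freeing the original skb on success.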
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	if (skb_linearize(skb))
		return NULL;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif

static netdev_tx_t
fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int curidx, nr_frags, len;
	cbd_t __iomem *bdp;
	skb_frag_t *frag;
	u16 sc;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	int i, is_aligned = 1;

	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
		is_aligned = 0;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
		frag = skb_shinfo(skb)->frags;
		for (i = 0; i < nr_frags; i++, frag++) {
			if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
				is_aligned = 0;
				break;
			}
		}
	}

	if (!is_aligned) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/* We have lost the packet due to a memory allocation
			 * error in tx_skb_align_workaround(). Hopefully the
			 * original skb is still valid, so try to transmit it
			 * later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif

	spin_lock(&fep->tx_lock);

	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock(&fep->tx_lock);

		/* Oops. All transmit buffers are full. Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;

	len = skb->len;
	dev->stats.tx_bytes += len;
	if (nr_frags)
		len -= skb->data_len;

	fep->tx_free -= nr_frags + 1;
	/* Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
					 skb->data, len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, len);

	fep->mapped_as_page[curidx] = 0;
	frag = skb_shinfo(skb)->frags;
	while (nr_frags) {
		CBDC_SC(bdp,
			BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
			BD_ENET_TX_TC);
		CBDS_SC(bdp, BD_ENET_TX_READY);

		if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
			bdp++;
			curidx++;
		} else {
			bdp = fep->tx_bd_base;
			curidx = 0;
		}

		len = skb_frag_size(frag);
		CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
						   DMA_TO_DEVICE));
		CBDW_DATLEN(bdp, len);

		fep->tx_skbuff[curidx] = NULL;
		fep->mapped_as_page[curidx] = 1;

		frag++;
		nr_frags--;
	}

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :)
	 */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;

	CBDC_SC(bdp, BD_ENET_TX_STATS);
	CBDS_SC(bdp, sc);

	/* Save skb pointer. */
	fep->tx_skbuff[curidx] = skb;

	/* If this was the last BD in the ring, start at the beginning again.
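	 * cur_tx then points at the first descriptor available for the next
	 * frame.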
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		bdp++;
	else
		bdp = fep->tx_bd_base;

	fep->cur_tx = bdp;

	if (fep->tx_free < MAX_SKB_FRAGS)
		netif_stop_queue(dev);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock(&fep->tx_lock);

	return NETDEV_TX_OK;
}

static void fs_timeout_work(struct work_struct *work)
{
	struct fs_enet_private *fep = container_of(work, struct fs_enet_private,
						   timeout_work);
	struct net_device *dev = fep->ndev;
	unsigned long flags;
	int wake = 0;

	dev->stats.tx_errors++;

	/* In the event a timeout was detected, but the netdev is brought down
	 * shortly after, it no longer makes sense to try to recover from the
	 * timeout. netif_running() will return false when called from the
	 * .ndo_close() callback. Calling the following recovery code while
	 * called from .ndo_close() could deadlock on rtnl.
	 */
	if (!netif_running(dev))
		return;

	rtnl_lock();
	phylink_stop(fep->phylink);
	phylink_start(fep->phylink);
	rtnl_unlock();

	spin_lock_irqsave(&fep->lock, flags);
	wake = fep->tx_free >= MAX_SKB_FRAGS &&
	       !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

static void fs_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	schedule_work(&fep->timeout_work);
}

static void fs_mac_link_up(struct phylink_config *config,
			   struct phy_device *phy,
			   unsigned int mode, phy_interface_t interface,
			   int speed, int duplex,
			   bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);
	fep->ops->restart(ndev, interface, speed, duplex);
	spin_unlock_irqrestore(&fep->lock, flags);
}

static void fs_mac_link_down(struct phylink_config *config,
			     unsigned int mode, phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct fs_enet_private *fep = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);
	fep->ops->stop(ndev);
	spin_unlock_irqrestore(&fep->lock, flags);
}

static void fs_mac_config(struct phylink_config *config, unsigned int mode,
			  const struct phylink_link_state *state)
{
	/* Nothing to do */
}

static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* Initialize fep->cur_rx, etc.; skipping this would cause a crash
	 * in fs_enet_napi.
	 */
	fs_init_bds(fep->ndev);

	napi_enable(&fep->napi);

	/* Install our interrupt handler.
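	 * The IRQ line may be shared (IRQF_SHARED); on failure NAPI is
	 * disabled again and the open is aborted.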
	 */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = phylink_of_phy_connect(fep->phylink, fep->dev->of_node, 0);
	if (err) {
		free_irq(fep->interrupt, dev);
		napi_disable(&fep->napi);
		return err;
	}
	phylink_start(fep->phylink);

	netif_start_queue(dev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&fep->napi);
	cancel_work(&fep->timeout_work);
	phylink_stop(fep->phylink);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);
	phylink_disconnect_phy(fep->phylink);

	/* release any irqs */
	free_irq(fep->interrupt, dev);

	return 0;
}

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->msg_enable = value;
}

static int fs_get_tunable(struct net_device *dev,
			  const struct ethtool_tunable *tuna, void *data)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fpi->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fs_set_tunable(struct net_device *dev,
			  const struct ethtool_tunable *tuna, const void *data)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fpi->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fs_ethtool_set_link_ksettings(struct net_device *dev,
					 const struct ethtool_link_ksettings *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return phylink_ethtool_ksettings_set(fep->phylink, cmd);
}

static int fs_ethtool_get_link_ksettings(struct net_device *dev,
					 struct ethtool_link_ksettings *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return phylink_ethtool_ksettings_get(fep->phylink, cmd);
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = fs_ethtool_get_link_ksettings,
	.set_link_ksettings = fs_ethtool_set_link_ksettings,
	.get_tunable = fs_get_tunable,
	.set_tunable = fs_set_tunable,
};

#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
#define IS_FEC(ops) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open = fs_enet_open,
	.ndo_stop = fs_enet_close,
	.ndo_start_xmit = fs_enet_start_xmit,
	.ndo_tx_timeout = fs_timeout,
	.ndo_set_rx_mode = fs_set_multicast_list,
	.ndo_eth_ioctl = fs_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = fs_enet_netpoll,
#endif
};

static const struct phylink_mac_ops fs_enet_phylink_mac_ops = {
	.mac_config = fs_mac_config,
	.mac_link_down = fs_mac_link_down,
	.mac_link_up = fs_mac_link_up,
};

static int fs_enet_probe(struct platform_device *ofdev)
{
	int privsize, len, ret = -ENODEV;
	struct fs_platform_info *fpi;
	struct fs_enet_private *fep;
	phy_interface_t phy_mode;
	const struct fs_ops *ops;
	struct net_device *ndev;
	struct phylink *phylink;
	const u32 *data;
	struct clk *clk;

	ops = device_get_match_data(&ofdev->dev);
	if (!ops)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(ops)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	ret = of_get_phy_mode(ofdev->dev.of_node, &phy_mode);
	if (ret) {
		/* For compatibility, if the mode isn't specified in DT,
		 * assume MII
		 */
		phy_mode = PHY_INTERFACE_MODE_MII;
	}

	fpi->rx_ring = RX_RING_SIZE;
	fpi->tx_ring = TX_RING_SIZE;
	fpi->rx_copybreak = 240;
	fpi->napi_weight = 17;

	/* Make clock lookup non-fatal (the driver is shared among platforms),
	 * but require enable to succeed when a clock was specified/found;
	 * keep a reference to the clock upon successful acquisition.
	 */
	clk = devm_clk_get_optional_enabled(&ofdev->dev, "per");
	if (IS_ERR(clk))
		goto out_free_fpi;

	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		   (fpi->rx_ring + fpi->tx_ring) +
		   sizeof(char) * fpi->tx_ring;

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_free_fpi;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	platform_set_drvdata(ofdev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = ops;

	fep->phylink_config.dev = &ndev->dev;
	fep->phylink_config.type = PHYLINK_NETDEV;
	fep->phylink_config.mac_capabilities = MAC_10 | MAC_100;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  fep->phylink_config.supported_interfaces);

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec"))
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  fep->phylink_config.supported_interfaces);

	phylink = phylink_create(&fep->phylink_config, dev_fwnode(fep->dev),
				 phy_mode, &fs_enet_phylink_mac_ops);
	if (IS_ERR(phylink)) {
		ret = PTR_ERR(phylink);
		goto out_free_dev;
	}

	fep->phylink = phylink;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_phylink;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
	fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
				       fpi->tx_ring);

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	of_get_ethdev_address(ofdev->dev.of_node, ndev);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	INIT_WORK(&fep->timeout_work, fs_timeout_work);
	netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
			      fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;

	ndev->features |= NETIF_F_SG;

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_phylink:
	phylink_destroy(fep->phylink);
out_free_dev:
	free_netdev(ndev);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static void fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = platform_get_drvdata(ofdev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	phylink_destroy(fep->phylink);
	free_netdev(ndev);
}

static const struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove_new = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);