// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}


/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
			 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		   bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
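	/* the status clear above guarantees the busy-wait below cannot be
	 * satisfied by a previous transfer's completion bit */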
	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_buf[desc_idx]) {
			void *buf;

			if (likely(napi_mode))
				buf = napi_alloc_frag(priv->rx_frag_size);
			else
				buf = netdev_alloc_frag(priv->rx_frag_size);
			if (unlikely(!buf))
				break;
			priv->rx_buf[desc_idx] = buf;
			desc->address = dma_map_single(&priv->pdev->dev,
						       buf + priv->rx_buf_offset,
						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
		}

		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}
	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = timer_container_of(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;
		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			skb = napi_alloc_skb(&priv->napi, len);
			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
		} else {
			dma_unmap_single(kdev, desc->address,
					 priv->rx_buf_size, DMA_FROM_DEVICE);
			priv->rx_buf[desc_idx] = NULL;

			skb = napi_build_skb(buf, priv->rx_frag_size);
			if (unlikely(!skb)) {
				skb_free_frag(buf);
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
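		/* completed skbs are queued locally and handed to the
		 * stack in one batch below via netif_receive_skb_list() */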
		list_add_tail(&skb->list, &rx_list);

	} while (processed < budget);

	netif_receive_skb_list(&rx_list);
	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}


/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
	struct bcm_enet_priv *priv;
	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other fields of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		bytes += skb->len;
		napi_consume_skb(skb, budget);
		released++;
	}

	netdev_completed_queue(dev, released, bytes);

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0, budget);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packets in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);
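
	/* the MIB interrupt stays masked until the deferred handler has
	 * drained the counters and re-enabled it */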
	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full; this should not happen
	 * since we stop the queue before it fills up */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	netdev_sent_queue(dev, skb->len);

	/* kick tx dma */
	if (!netdev_xmit_more() || !priv->tx_desc_count)
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
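
/*
 * note: the two following helpers program the "perfect match" filters:
 * each 48 bit mac address is split across a PML register (low four
 * bytes) and a PMH register (top two bytes plus a "data valid" bit).
 */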
/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	eth_hw_addr_set(dev, addr->sa_data);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;
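
	/* each parameter is compared against its cached value so we only
	 * reprogram the mac and log the link when something changed */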
	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if the remote advertises it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);
"tx" : "off"); 867 } 868 869 static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv) 870 { 871 int i; 872 873 for (i = 0; i < priv->rx_ring_size; i++) { 874 struct bcm_enet_desc *desc; 875 876 if (!priv->rx_buf[i]) 877 continue; 878 879 desc = &priv->rx_desc_cpu[i]; 880 dma_unmap_single(kdev, desc->address, priv->rx_buf_size, 881 DMA_FROM_DEVICE); 882 skb_free_frag(priv->rx_buf[i]); 883 } 884 kfree(priv->rx_buf); 885 } 886 887 /* 888 * open callback, allocate dma rings & buffers and start rx operation 889 */ 890 static int bcm_enet_open(struct net_device *dev) 891 { 892 struct bcm_enet_priv *priv; 893 struct sockaddr addr; 894 struct device *kdev; 895 struct phy_device *phydev; 896 int i, ret; 897 unsigned int size; 898 char phy_id[MII_BUS_ID_SIZE + 3]; 899 void *p; 900 u32 val; 901 902 priv = netdev_priv(dev); 903 kdev = &priv->pdev->dev; 904 905 if (priv->has_phy) { 906 /* connect to PHY */ 907 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 908 priv->mii_bus->id, priv->phy_id); 909 910 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 911 PHY_INTERFACE_MODE_MII); 912 913 if (IS_ERR(phydev)) { 914 dev_err(kdev, "could not attach to PHY\n"); 915 return PTR_ERR(phydev); 916 } 917 918 /* mask with MAC supported features */ 919 phy_support_sym_pause(phydev); 920 phy_set_max_speed(phydev, SPEED_100); 921 phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, 922 priv->pause_auto); 923 924 phy_attached_info(phydev); 925 926 priv->old_link = 0; 927 priv->old_duplex = -1; 928 priv->old_pause = -1; 929 } else { 930 phydev = NULL; 931 } 932 933 /* mask all interrupts and request them */ 934 enet_writel(priv, 0, ENET_IRMASK_REG); 935 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 936 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 937 938 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 939 if (ret) 940 goto out_phy_disconnect; 941 942 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0, 943 dev->name, dev); 944 if (ret) 945 goto out_freeirq; 946 947 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 948 0, dev->name, dev); 949 if (ret) 950 goto out_freeirq_rx; 951 952 /* initialize perfect match registers */ 953 for (i = 0; i < 4; i++) { 954 enet_writel(priv, 0, ENET_PML_REG(i)); 955 enet_writel(priv, 0, ENET_PMH_REG(i)); 956 } 957 958 /* write device mac address */ 959 memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); 960 bcm_enet_set_mac_address(dev, &addr); 961 962 /* allocate rx dma ring */ 963 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 964 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 965 if (!p) { 966 ret = -ENOMEM; 967 goto out_freeirq_tx; 968 } 969 970 priv->rx_desc_alloc_size = size; 971 priv->rx_desc_cpu = p; 972 973 /* allocate tx dma ring */ 974 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 975 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 976 if (!p) { 977 ret = -ENOMEM; 978 goto out_free_rx_ring; 979 } 980 981 priv->tx_desc_alloc_size = size; 982 priv->tx_desc_cpu = p; 983 984 priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size); 985 if (!priv->tx_skb) { 986 ret = -ENOMEM; 987 goto out_free_tx_ring; 988 } 989 990 priv->tx_desc_count = priv->tx_ring_size; 991 priv->tx_dirty_desc = 0; 992 priv->tx_curr_desc = 0; 993 spin_lock_init(&priv->tx_lock); 994 995 /* init & fill rx ring with buffers */ 996 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), 997 GFP_KERNEL); 998 if (!priv->rx_buf) { 999 ret = -ENOMEM; 1000 goto 
	/* init & fill rx ring with buffers */
	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);
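
	/* the IR writes above ack any stale events, so unmasking below
	 * cannot trigger a spurious interrupt */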
	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (phydev)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		u32 val;

		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	timer_delete_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		    offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
		    offsetof(struct net_device_stats, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};


static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	const char *str;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			str = bcm_enet_gstrings_stats[i].stat_string;
			ethtool_puts(&data, str);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
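
	/* each table entry reads either from the generic netdev stats
	 * (mib_reg == -1) or from the private mib counter copy */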
	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {

		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;
		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void
bcm_enet_get_ringparam(struct net_device *dev,
		       struct ethtool_ringparam *ering,
		       struct kernel_ethtool_ringparam *kernel_ering,
		       struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering,
				  struct kernel_ethtool_ringparam *kernel_ering,
				  struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}
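
/*
 * rx buffer sizing example for the default 1500 byte MTU (assuming a
 * dma burst length of 16 words, i.e. priv->dma_maxburst == 16):
 * hw_mtu = 1500 + VLAN_ETH_HLEN (18) = 1518, and
 * rx_buf_size = ALIGN(1518 + ETH_FCS_LEN (4), 16 * 4) = 1536 bytes.
 */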

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account for FCS since
	 * it's appended
	 */
	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_eth_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	int irq, irq_rx, irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	irq = platform_get_irq(pdev, 0);
	irq_rx = platform_get_irq(pdev, 1);
	irq_tx = platform_get_irq(pdev, 2);
	if (irq < 0 || irq_rx < 0 || irq_tx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = irq;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation returns 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
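
		/* with phy_mask set above, registration only probes the
		 * single address the PHY is expected to answer at */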
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock & work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}


/*
 * exit func, stops hardware and unregisters netdevice
 */
static void bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
}

static struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
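	/* no completion flag is polled here; the driver simply waits 50us
	 * for the switch-internal mdio transaction before reading back */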
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = timer_container_of(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
							 port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}
"full" : "half"); 2077 2078 override = ENETSW_PORTOV_ENABLE_MASK | 2079 ENETSW_PORTOV_LINKUP_MASK; 2080 2081 if (speed == 1000) 2082 override |= ENETSW_IMPOV_1000_MASK; 2083 else if (speed == 100) 2084 override |= ENETSW_IMPOV_100_MASK; 2085 if (duplex) 2086 override |= ENETSW_IMPOV_FDX_MASK; 2087 2088 enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i)); 2089 enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i)); 2090 } 2091 2092 priv->swphy_poll.expires = jiffies + HZ; 2093 add_timer(&priv->swphy_poll); 2094 } 2095 2096 /* 2097 * open callback, allocate dma rings & buffers and start rx operation 2098 */ 2099 static int bcm_enetsw_open(struct net_device *dev) 2100 { 2101 struct bcm_enet_priv *priv; 2102 struct device *kdev; 2103 int i, ret; 2104 unsigned int size; 2105 void *p; 2106 u32 val; 2107 2108 priv = netdev_priv(dev); 2109 kdev = &priv->pdev->dev; 2110 2111 /* mask all interrupts and request them */ 2112 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan); 2113 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan); 2114 2115 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 2116 0, dev->name, dev); 2117 if (ret) 2118 goto out_freeirq; 2119 2120 if (priv->irq_tx != -1) { 2121 ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, 2122 0, dev->name, dev); 2123 if (ret) 2124 goto out_freeirq_rx; 2125 } 2126 2127 /* allocate rx dma ring */ 2128 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2129 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2130 if (!p) { 2131 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2132 ret = -ENOMEM; 2133 goto out_freeirq_tx; 2134 } 2135 2136 priv->rx_desc_alloc_size = size; 2137 priv->rx_desc_cpu = p; 2138 2139 /* allocate tx dma ring */ 2140 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2141 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2142 if (!p) { 2143 dev_err(kdev, "cannot allocate tx ring\n"); 2144 ret = -ENOMEM; 2145 goto out_free_rx_ring; 2146 } 2147 2148 priv->tx_desc_alloc_size = size; 2149 priv->tx_desc_cpu = p; 2150 2151 priv->tx_skb = kzalloc_objs(struct sk_buff *, priv->tx_ring_size); 2152 if (!priv->tx_skb) { 2153 dev_err(kdev, "cannot allocate tx skb queue\n"); 2154 ret = -ENOMEM; 2155 goto out_free_tx_ring; 2156 } 2157 2158 priv->tx_desc_count = priv->tx_ring_size; 2159 priv->tx_dirty_desc = 0; 2160 priv->tx_curr_desc = 0; 2161 spin_lock_init(&priv->tx_lock); 2162 2163 /* init & fill rx ring with buffers */ 2164 priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *), 2165 GFP_KERNEL); 2166 if (!priv->rx_buf) { 2167 dev_err(kdev, "cannot allocate rx buffer queue\n"); 2168 ret = -ENOMEM; 2169 goto out_free_tx_skb; 2170 } 2171 2172 priv->rx_desc_count = 0; 2173 priv->rx_dirty_desc = 0; 2174 priv->rx_curr_desc = 0; 2175 2176 /* disable all ports */ 2177 for (i = 0; i < priv->num_ports; i++) { 2178 enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK, 2179 ENETSW_PORTOV_REG(i)); 2180 enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK | 2181 ENETSW_PTCTRL_TXDIS_MASK, 2182 ENETSW_PTCTRL_REG(i)); 2183 2184 priv->sw_port_link[i] = 0; 2185 } 2186 2187 /* reset mib */ 2188 val = enetsw_readb(priv, ENETSW_GMCR_REG); 2189 val |= ENETSW_GMCR_RST_MIB_MASK; 2190 enetsw_writeb(priv, val, ENETSW_GMCR_REG); 2191 mdelay(1); 2192 val &= ~ENETSW_GMCR_RST_MIB_MASK; 2193 enetsw_writeb(priv, val, ENETSW_GMCR_REG); 2194 mdelay(1); 2195 2196 /* force CPU port state */ 2197 val = enetsw_readb(priv, ENETSW_IMPOV_REG); 2198 val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK; 2199 enetsw_writeb(priv, val, 

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev, false)) {
		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);
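
	/*
	 * carrier is turned on unconditionally: this netdevice fronts
	 * the switch CPU port, whose link was forced up above; per-port
	 * physical link state is tracked separately by the swphy poll
	 * timer started at the end of this function.
	 */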

	/* apply override config for bypass_link ports here */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	timer_delete_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	timer_delete_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1, 0);

	/* free the rx buffer ring */
	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	/* reset BQL after forced tx reclaim to prevent kernel panic */
	netdev_reset_queue(dev);

	return 0;
}
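
/*
 * Teardown order in bcm_enetsw_stop() matters: the poll timer and napi
 * are quiesced before DMA is disabled, and netdev_reset_queue() runs
 * only after the forced tx reclaim, so the BQL accounting cannot see
 * completions for packets it no longer tracks.
 */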

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}

static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open		= bcm_enetsw_open,
	.ndo_stop		= bcm_enetsw_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_change_mtu		= bcm_enet_change_mtu,
	.ndo_eth_ioctl		= bcm_enetsw_ioctl,
};
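
/*
 * With no phylib attachment on the switch variant, PHY access from
 * userspace goes through the generic MII ioctls. A hedged sketch of
 * the path a tool such as mii-tool triggers (sockfd being any
 * hypothetical AF_INET socket, ifr filled in with the register to
 * read):
 *
 *	ioctl(sockfd, SIOCGMIIREG, &ifr);
 *
 * lands in bcm_enetsw_ioctl(), is decoded by generic_mii_ioctl(), and
 * ends up in bcmenet_sw_mdio_read() via the wrappers above.
 */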
"tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR }, 2484 { "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC }, 2485 { "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP }, 2486 { "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND }, 2487 { "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE }, 2488 2489 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT }, 2490 { "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST }, 2491 { "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT }, 2492 { "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT }, 2493 { "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE }, 2494 { "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS }, 2495 2496 }; 2497 2498 #define BCM_ENETSW_STATS_LEN \ 2499 (sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats)) 2500 2501 static void bcm_enetsw_get_strings(struct net_device *netdev, 2502 u32 stringset, u8 *data) 2503 { 2504 const char *str; 2505 int i; 2506 2507 switch (stringset) { 2508 case ETH_SS_STATS: 2509 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 2510 str = bcm_enetsw_gstrings_stats[i].stat_string; 2511 ethtool_puts(&data, str); 2512 } 2513 break; 2514 } 2515 } 2516 2517 static int bcm_enetsw_get_sset_count(struct net_device *netdev, 2518 int string_set) 2519 { 2520 switch (string_set) { 2521 case ETH_SS_STATS: 2522 return BCM_ENETSW_STATS_LEN; 2523 default: 2524 return -EINVAL; 2525 } 2526 } 2527 2528 static void bcm_enetsw_get_drvinfo(struct net_device *netdev, 2529 struct ethtool_drvinfo *drvinfo) 2530 { 2531 strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver)); 2532 strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info)); 2533 } 2534 2535 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev, 2536 struct ethtool_stats *stats, 2537 u64 *data) 2538 { 2539 struct bcm_enet_priv *priv; 2540 int i; 2541 2542 priv = netdev_priv(netdev); 2543 2544 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 2545 const struct bcm_enet_stats *s; 2546 u32 lo, hi; 2547 char *p; 2548 int reg; 2549 2550 s = &bcm_enetsw_gstrings_stats[i]; 2551 2552 reg = s->mib_reg; 2553 if (reg == -1) 2554 continue; 2555 2556 lo = enetsw_readl(priv, ENETSW_MIB_REG(reg)); 2557 p = (char *)priv + s->stat_offset; 2558 2559 if (s->sizeof_stat == sizeof(u64)) { 2560 hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1)); 2561 *(u64 *)p = ((u64)hi << 32 | lo); 2562 } else { 2563 *(u32 *)p = lo; 2564 } 2565 } 2566 2567 for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) { 2568 const struct bcm_enet_stats *s; 2569 char *p; 2570 2571 s = &bcm_enetsw_gstrings_stats[i]; 2572 2573 if (s->mib_reg == -1) 2574 p = (char *)&netdev->stats + s->stat_offset; 2575 else 2576 p = (char *)priv + s->stat_offset; 2577 2578 data[i] = (s->sizeof_stat == sizeof(u64)) ? 

static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}

static void
bcm_enetsw_get_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int
bcm_enetsw_set_ringparam(struct net_device *dev,
			 struct ethtool_ringparam *ering,
			 struct kernel_ethtool_ringparam *kernel_ering,
			 struct netlink_ext_ack *extack)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings		= bcm_enetsw_get_strings,
	.get_sset_count		= bcm_enetsw_get_sset_count,
	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
	.get_drvinfo		= bcm_enetsw_get_drvinfo,
	.get_ringparam		= bcm_enetsw_get_ringparam,
	.set_ringparam		= bcm_enetsw_set_ringparam,
};
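
/*
 * Resizing the rings restarts the interface: bcm_enetsw_set_ringparam()
 * stops an already-running device, records the new sizes, and re-opens
 * it, so the new descriptor rings are allocated in bcm_enetsw_open().
 * An illustrative userspace trigger:
 *
 *	ethtool -G eth0 rx 256 tx 128
 */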

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		eth_hw_addr_set(dev, pd->mac_addr);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/* exit func, stops hardware and unregisters netdevice */
static void bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
}

static struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
	},
};

static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

/* entry point */
static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");