Lines Matching +full:revision +full:- +full:id2

1 // SPDX-License-Identifier: GPL-2.0-or-later
6 * engineered documentation written by Carl-Daniel Hailfinger
15 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
50 #include <linux/dma-mapping.h>
73 #define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
483 * - DESC_VER_1: Original
484 * - DESC_VER_2: support for jumbo frames.
485 * - DESC_VER_3: 64-bit format.
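For orientation, the two ring-descriptor layouts that the rest of this listing manipulates look roughly as below. This is a sketch inferred from the field accesses further down (buf/flaglen for the legacy format, bufhigh/buflow/txvlan/flaglen for the 64-bit format); the authoritative definitions live elsewhere in forcedeth.c.

/* Sketch of the two descriptor layouts implied by the accesses below.
 * DESC_VER_1/2 use the small descriptor with a single 32-bit buffer
 * address; DESC_VER_3 uses the extended descriptor with a split 64-bit
 * address plus a VLAN word. All fields are little-endian. */
struct ring_desc {
	__le32 buf;     /* DMA address of the packet buffer */
	__le32 flaglen; /* status/command flags plus buffer length */
};

struct ring_desc_ex {
	__le32 bufhigh; /* upper 32 bits of the buffer DMA address */
	__le32 buflow;  /* lower 32 bits of the buffer DMA address */
	__le32 txvlan;  /* VLAN tag to insert on transmit */
	__le32 flaglen; /* status/command flags plus buffer length */
};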
568 /* MSI/MSI-X defines */
638 u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
659 u64 rx_packets; /* should be ifconfig->rx_packets */
665 u64 tx_packets; /* should be ifconfig->tx_packets */
666 u64 rx_bytes; /* should be ifconfig->rx_bytes + 4*rx_packets */
678 #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
679 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
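The deltas line up with the counter blocks in nv_update_stats() further down:

/* Inferred from nv_update_stats() below: DEV_HAS_STATISTICS_V2 adds six
 * counters over V1 (tx_deferral, tx_packets, rx_bytes, tx_pause, rx_pause,
 * rx_drop_frame) and DEV_HAS_STATISTICS_V3 adds three more (tx_unicast,
 * tx_multicast, tx_broadcast), which is where the "- 6" and "- 3" come from. */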
727 __this_cpu_inc(np->txrx_stats->member)
729 __this_cpu_add(np->txrx_stats->member, (count))
733 * All hardware access under netdev_priv(dev)->lock, except the performance
735 * - rx is (pseudo-) lockless: it relies on the single-threading provided
737 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
738 * needs netdev_priv(dev)->lock :-(
739 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
742 * - updated by nv_do_stats_poll (timer). This is meant to avoid
745 * - updated by nv_get_ethtool_stats + nv_get_stats64
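Putting the rules above together, the hardware-counter readers further down follow roughly this pattern (a minimal sketch distilled from the nv_update_stats()/nv_get_stats64() lines below, not additional driver code):

/* Sketch: hwstats_lock guards the accumulation of the NvReg* counters into
 * np->estats (nv_update_stats() asserts it), so a reader such as
 * nv_get_stats64() takes it bottom-half safe around the refresh and copy. */
spin_lock_bh(&np->hwstats_lock);
nv_update_stats(dev);                  /* fold hardware counters into np->estats */
storage->rx_errors = np->estats.rx_errors_total;
storage->tx_errors = np->estats.tx_errors_total;
spin_unlock_bh(&np->hwstats_lock);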
797 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
818 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
842 /* msi/msi-x fields */
852 /* for different msi-x irq type */
853 char name_rx[IFNAMSIZ + 3]; /* -rx */
854 char name_tx[IFNAMSIZ + 3]; /* -tx */
855 char name_other[IFNAMSIZ + 6]; /* -other */
884 static int poll_interval = -1;
941 return ((struct fe_priv *)netdev_priv(dev))->base; in get_hwbase()
952 return le32_to_cpu(prd->flaglen) in nv_descr_getlength()
958 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; in nv_descr_getlength_ex()
963 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) in nv_optimized()
976 delaymax -= delay; in reg_delay()
1003 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1005 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); in setup_hw_rings()
1008 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1009 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); in setup_hw_rings()
1012 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); in setup_hw_rings()
1013 writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); in setup_hw_rings()
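The address programming above implies the ring memory layout that free_rings() below relies on: one coherent allocation holding the rx ring first and the tx ring immediately after it.

/* Implied layout (from the writel()s above and the dma_free_coherent() size
 * below): a single coherent buffer at np->ring_addr, with the rx ring at the
 * start and the tx ring at ring_addr + rx_ring_size * sizeof(descriptor). */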
1023 if (np->rx_ring.orig) in free_rings()
1024 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1026 (np->rx_ring_size + in free_rings()
1027 np->tx_ring_size), in free_rings()
1028 np->rx_ring.orig, np->ring_addr); in free_rings()
1030 if (np->rx_ring.ex) in free_rings()
1031 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1033 (np->rx_ring_size + in free_rings()
1034 np->tx_ring_size), in free_rings()
1035 np->rx_ring.ex, np->ring_addr); in free_rings()
1037 kfree(np->rx_skb); in free_rings()
1038 kfree(np->tx_skb); in free_rings()
1045 if (!(np->msi_flags & NV_MSI_X_ENABLED) || in using_multi_irqs()
1046 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)) in using_multi_irqs()
1058 if (!np->mac_in_use && in nv_txrx_gate()
1059 (np->driver_data & DEV_HAS_POWER_CNTRL)) { in nv_txrx_gate()
1074 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_enable_irq()
1075 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_enable_irq()
1077 enable_irq(np->pci_dev->irq); in nv_enable_irq()
1079 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_enable_irq()
1080 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_enable_irq()
1081 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_enable_irq()
1090 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_disable_irq()
1091 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_disable_irq()
1093 disable_irq(np->pci_dev->irq); in nv_disable_irq()
1095 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_disable_irq()
1096 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_disable_irq()
1097 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_disable_irq()
1114 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_disable_hw_interrupts()
1117 if (np->msi_flags & NV_MSI_ENABLED) in nv_disable_hw_interrupts()
1123 #define MII_READ (-1)
1151 retval = -1; in mii_rw()
1153 /* it was a write operation - fewer failures are detectable */ in mii_rw()
1156 retval = -1; in mii_rw()
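The calling convention visible throughout the rest of the file is worth spelling out (inferred from the usage below, not a separate helper):

/* Inferred usage pattern: mii_rw(dev, phyaddr, reg, MII_READ) returns the
 * register value (or -1 on timeout), while mii_rw(..., value) performs a
 * write and returns non-zero only on failure; hence the bare
 * "if (mii_rw(...))" error checks in the PHY init helpers below and the
 * "reg = mii_rw(..., MII_READ)" reads. */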
1171 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) in phy_reset()
1172 return -1; in phy_reset()
1180 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_reset()
1183 return -1; in phy_reset()
1205 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) in init_realtek_8211b()
1227 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); in init_realtek_8211c()
1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) in init_realtek_8211c()
1231 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1234 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); in init_realtek_8211c()
1237 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) in init_realtek_8211c()
1240 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1251 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { in init_realtek_8201()
1252 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201()
1255 if (mii_rw(dev, np->phyaddr, in init_realtek_8201()
1268 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1271 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1275 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1278 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1292 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); in init_cicada()
1295 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) in init_cicada()
1297 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in init_cicada()
1299 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) in init_cicada()
1302 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); in init_cicada()
1304 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) in init_cicada()
1314 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1317 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1320 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1322 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1324 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1328 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1330 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1333 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1336 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1340 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1342 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1344 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1346 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1349 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1352 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1354 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1356 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1360 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1362 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1365 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1380 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in phy_init()
1381 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in phy_init()
1383 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { in phy_init()
1385 pci_name(np->pci_dev)); in phy_init()
1389 if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1390 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1391 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1394 pci_name(np->pci_dev)); in phy_init()
1397 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1398 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1401 pci_name(np->pci_dev)); in phy_init()
1404 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1407 pci_name(np->pci_dev)); in phy_init()
1414 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in phy_init()
1418 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { in phy_init()
1420 pci_name(np->pci_dev)); in phy_init()
1428 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in phy_init()
1430 np->gigabit = PHY_GIGABIT; in phy_init()
1431 mii_control_1000 = mii_rw(dev, np->phyaddr, in phy_init()
1439 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { in phy_init()
1441 pci_name(np->pci_dev)); in phy_init()
1445 np->gigabit = 0; in phy_init()
1447 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1450 if (np->phy_oui == PHY_OUI_REALTEK && in phy_init()
1451 np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1452 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1455 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { in phy_init()
1457 pci_name(np->pci_dev)); in phy_init()
1466 pci_name(np->pci_dev)); in phy_init()
1472 if (np->phy_oui == PHY_OUI_CICADA) { in phy_init()
1475 pci_name(np->pci_dev)); in phy_init()
1478 } else if (np->phy_oui == PHY_OUI_VITESSE) { in phy_init()
1481 pci_name(np->pci_dev)); in phy_init()
1484 } else if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1485 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1486 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1490 pci_name(np->pci_dev)); in phy_init()
1493 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1497 pci_name(np->pci_dev)); in phy_init()
1504 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); in phy_init()
1507 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1511 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) in phy_init()
1524 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { in nv_start_rx()
1529 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_start_rx()
1532 if (np->mac_in_use) in nv_start_rx()
1544 if (!np->mac_in_use) in nv_stop_rx()
1555 if (!np->mac_in_use) in nv_stop_rx()
1566 if (np->mac_in_use) in nv_start_tx()
1578 if (!np->mac_in_use) in nv_stop_tx()
1589 if (!np->mac_in_use) in nv_stop_tx()
1611 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1614 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1624 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1644 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1648 /* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1654 lockdep_assert_held(&np->hwstats_lock); in nv_update_stats()
1657 np->estats.tx_bytes += readl(base + NvRegTxCnt); in nv_update_stats()
1658 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); in nv_update_stats()
1659 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); in nv_update_stats()
1660 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); in nv_update_stats()
1661 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); in nv_update_stats()
1662 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); in nv_update_stats()
1663 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); in nv_update_stats()
1664 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); in nv_update_stats()
1665 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); in nv_update_stats()
1666 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); in nv_update_stats()
1667 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); in nv_update_stats()
1668 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); in nv_update_stats()
1669 np->estats.rx_runt += readl(base + NvRegRxRunt); in nv_update_stats()
1670 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); in nv_update_stats()
1671 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); in nv_update_stats()
1672 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); in nv_update_stats()
1673 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); in nv_update_stats()
1674 np->estats.rx_length_error += readl(base + NvRegRxLenErr); in nv_update_stats()
1675 np->estats.rx_unicast += readl(base + NvRegRxUnicast); in nv_update_stats()
1676 np->estats.rx_multicast += readl(base + NvRegRxMulticast); in nv_update_stats()
1677 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); in nv_update_stats()
1678 np->estats.rx_packets = in nv_update_stats()
1679 np->estats.rx_unicast + in nv_update_stats()
1680 np->estats.rx_multicast + in nv_update_stats()
1681 np->estats.rx_broadcast; in nv_update_stats()
1682 np->estats.rx_errors_total = in nv_update_stats()
1683 np->estats.rx_crc_errors + in nv_update_stats()
1684 np->estats.rx_over_errors + in nv_update_stats()
1685 np->estats.rx_frame_error + in nv_update_stats()
1686 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + in nv_update_stats()
1687 np->estats.rx_late_collision + in nv_update_stats()
1688 np->estats.rx_runt + in nv_update_stats()
1689 np->estats.rx_frame_too_long; in nv_update_stats()
1690 np->estats.tx_errors_total = in nv_update_stats()
1691 np->estats.tx_late_collision + in nv_update_stats()
1692 np->estats.tx_fifo_errors + in nv_update_stats()
1693 np->estats.tx_carrier_errors + in nv_update_stats()
1694 np->estats.tx_excess_deferral + in nv_update_stats()
1695 np->estats.tx_retry_error; in nv_update_stats()
1697 if (np->driver_data & DEV_HAS_STATISTICS_V2) { in nv_update_stats()
1698 np->estats.tx_deferral += readl(base + NvRegTxDef); in nv_update_stats()
1699 np->estats.tx_packets += readl(base + NvRegTxFrame); in nv_update_stats()
1700 np->estats.rx_bytes += readl(base + NvRegRxCnt); in nv_update_stats()
1701 np->estats.tx_pause += readl(base + NvRegTxPause); in nv_update_stats()
1702 np->estats.rx_pause += readl(base + NvRegRxPause); in nv_update_stats()
1703 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); in nv_update_stats()
1704 np->estats.rx_errors_total += np->estats.rx_drop_frame; in nv_update_stats()
1707 if (np->driver_data & DEV_HAS_STATISTICS_V3) { in nv_update_stats()
1708 np->estats.tx_unicast += readl(base + NvRegTxUnicast); in nv_update_stats()
1709 np->estats.tx_multicast += readl(base + NvRegTxMulticast); in nv_update_stats()
1710 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); in nv_update_stats()
1717 struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu); in nv_get_stats()
1723 syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp); in nv_get_stats()
1724 rx_packets = src->stat_rx_packets; in nv_get_stats()
1725 rx_bytes = src->stat_rx_bytes; in nv_get_stats()
1726 rx_dropped = src->stat_rx_dropped; in nv_get_stats()
1727 rx_missed_errors = src->stat_rx_missed_errors; in nv_get_stats()
1728 } while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start)); in nv_get_stats()
1730 storage->rx_packets += rx_packets; in nv_get_stats()
1731 storage->rx_bytes += rx_bytes; in nv_get_stats()
1732 storage->rx_dropped += rx_dropped; in nv_get_stats()
1733 storage->rx_missed_errors += rx_missed_errors; in nv_get_stats()
1736 syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp); in nv_get_stats()
1737 tx_packets = src->stat_tx_packets; in nv_get_stats()
1738 tx_bytes = src->stat_tx_bytes; in nv_get_stats()
1739 tx_dropped = src->stat_tx_dropped; in nv_get_stats()
1740 } while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start)); in nv_get_stats()
1742 storage->tx_packets += tx_packets; in nv_get_stats()
1743 storage->tx_bytes += tx_bytes; in nv_get_stats()
1744 storage->tx_dropped += tx_dropped; in nv_get_stats()
1748 * nv_get_stats64: dev->ndo_get_stats64 function
1750 * Called with rcu_read_lock() held -
1755 __acquires(&netdev_priv(dev)->hwstats_lock) in nv_get_stats64()
1756 __releases(&netdev_priv(dev)->hwstats_lock) in nv_get_stats64()
1776 if (np->driver_data & DEV_HAS_STATISTICS_V123) { in nv_get_stats64()
1777 spin_lock_bh(&np->hwstats_lock); in nv_get_stats64()
1782 storage->rx_errors = np->estats.rx_errors_total; in nv_get_stats64()
1783 storage->tx_errors = np->estats.tx_errors_total; in nv_get_stats64()
1786 storage->multicast = np->estats.rx_multicast; in nv_get_stats64()
1789 storage->rx_length_errors = np->estats.rx_length_error; in nv_get_stats64()
1790 storage->rx_over_errors = np->estats.rx_over_errors; in nv_get_stats64()
1791 storage->rx_crc_errors = np->estats.rx_crc_errors; in nv_get_stats64()
1792 storage->rx_frame_errors = np->estats.rx_frame_align_error; in nv_get_stats64()
1793 storage->rx_fifo_errors = np->estats.rx_drop_frame; in nv_get_stats64()
1796 storage->tx_carrier_errors = np->estats.tx_carrier_errors; in nv_get_stats64()
1797 storage->tx_fifo_errors = np->estats.tx_fifo_errors; in nv_get_stats64()
1799 spin_unlock_bh(&np->hwstats_lock); in nv_get_stats64()
1813 less_rx = np->get_rx.orig; in nv_alloc_rx()
1814 if (less_rx-- == np->rx_ring.orig) in nv_alloc_rx()
1815 less_rx = np->last_rx.orig; in nv_alloc_rx()
1817 while (np->put_rx.orig != less_rx) { in nv_alloc_rx()
1818 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx()
1820 np->put_rx_ctx->skb = skb; in nv_alloc_rx()
1821 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx()
1822 skb->data, in nv_alloc_rx()
1825 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx()
1826 np->put_rx_ctx->dma))) { in nv_alloc_rx()
1830 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx()
1831 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); in nv_alloc_rx()
1833 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); in nv_alloc_rx()
1834 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) in nv_alloc_rx()
1835 np->put_rx.orig = np->rx_ring.orig; in nv_alloc_rx()
1836 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx()
1837 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx()
1840 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx()
1842 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx()
1854 less_rx = np->get_rx.ex; in nv_alloc_rx_optimized()
1855 if (less_rx-- == np->rx_ring.ex) in nv_alloc_rx_optimized()
1856 less_rx = np->last_rx.ex; in nv_alloc_rx_optimized()
1858 while (np->put_rx.ex != less_rx) { in nv_alloc_rx_optimized()
1859 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx_optimized()
1861 np->put_rx_ctx->skb = skb; in nv_alloc_rx_optimized()
1862 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1863 skb->data, in nv_alloc_rx_optimized()
1866 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1867 np->put_rx_ctx->dma))) { in nv_alloc_rx_optimized()
1871 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx_optimized()
1872 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1873 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1875 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); in nv_alloc_rx_optimized()
1876 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) in nv_alloc_rx_optimized()
1877 np->put_rx.ex = np->rx_ring.ex; in nv_alloc_rx_optimized()
1878 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx_optimized()
1879 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx_optimized()
1882 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1884 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1897 napi_schedule(&np->napi); in nv_do_rx_refill()
1905 np->get_rx = np->rx_ring; in nv_init_rx()
1906 np->put_rx = np->rx_ring; in nv_init_rx()
1909 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; in nv_init_rx()
1911 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; in nv_init_rx()
1912 np->get_rx_ctx = np->rx_skb; in nv_init_rx()
1913 np->put_rx_ctx = np->rx_skb; in nv_init_rx()
1914 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; in nv_init_rx()
1916 for (i = 0; i < np->rx_ring_size; i++) { in nv_init_rx()
1918 np->rx_ring.orig[i].flaglen = 0; in nv_init_rx()
1919 np->rx_ring.orig[i].buf = 0; in nv_init_rx()
1921 np->rx_ring.ex[i].flaglen = 0; in nv_init_rx()
1922 np->rx_ring.ex[i].txvlan = 0; in nv_init_rx()
1923 np->rx_ring.ex[i].bufhigh = 0; in nv_init_rx()
1924 np->rx_ring.ex[i].buflow = 0; in nv_init_rx()
1926 np->rx_skb[i].skb = NULL; in nv_init_rx()
1927 np->rx_skb[i].dma = 0; in nv_init_rx()
1936 np->get_tx = np->tx_ring; in nv_init_tx()
1937 np->put_tx = np->tx_ring; in nv_init_tx()
1940 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; in nv_init_tx()
1942 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; in nv_init_tx()
1943 np->get_tx_ctx = np->tx_skb; in nv_init_tx()
1944 np->put_tx_ctx = np->tx_skb; in nv_init_tx()
1945 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; in nv_init_tx()
1946 netdev_reset_queue(np->dev); in nv_init_tx()
1947 np->tx_pkts_in_progress = 0; in nv_init_tx()
1948 np->tx_change_owner = NULL; in nv_init_tx()
1949 np->tx_end_flip = NULL; in nv_init_tx()
1950 np->tx_stop = 0; in nv_init_tx()
1952 for (i = 0; i < np->tx_ring_size; i++) { in nv_init_tx()
1954 np->tx_ring.orig[i].flaglen = 0; in nv_init_tx()
1955 np->tx_ring.orig[i].buf = 0; in nv_init_tx()
1957 np->tx_ring.ex[i].flaglen = 0; in nv_init_tx()
1958 np->tx_ring.ex[i].txvlan = 0; in nv_init_tx()
1959 np->tx_ring.ex[i].bufhigh = 0; in nv_init_tx()
1960 np->tx_ring.ex[i].buflow = 0; in nv_init_tx()
1962 np->tx_skb[i].skb = NULL; in nv_init_tx()
1963 np->tx_skb[i].dma = 0; in nv_init_tx()
1964 np->tx_skb[i].dma_len = 0; in nv_init_tx()
1965 np->tx_skb[i].dma_single = 0; in nv_init_tx()
1966 np->tx_skb[i].first_tx_desc = NULL; in nv_init_tx()
1967 np->tx_skb[i].next_tx_ctx = NULL; in nv_init_tx()
1986 if (tx_skb->dma) { in nv_unmap_txskb()
1987 if (tx_skb->dma_single) in nv_unmap_txskb()
1988 dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
1989 tx_skb->dma_len, in nv_unmap_txskb()
1992 dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
1993 tx_skb->dma_len, in nv_unmap_txskb()
1995 tx_skb->dma = 0; in nv_unmap_txskb()
2002 if (tx_skb->skb) { in nv_release_txskb()
2003 dev_kfree_skb_any(tx_skb->skb); in nv_release_txskb()
2004 tx_skb->skb = NULL; in nv_release_txskb()
2015 for (i = 0; i < np->tx_ring_size; i++) { in nv_drain_tx()
2017 np->tx_ring.orig[i].flaglen = 0; in nv_drain_tx()
2018 np->tx_ring.orig[i].buf = 0; in nv_drain_tx()
2020 np->tx_ring.ex[i].flaglen = 0; in nv_drain_tx()
2021 np->tx_ring.ex[i].txvlan = 0; in nv_drain_tx()
2022 np->tx_ring.ex[i].bufhigh = 0; in nv_drain_tx()
2023 np->tx_ring.ex[i].buflow = 0; in nv_drain_tx()
2025 if (nv_release_txskb(np, &np->tx_skb[i])) { in nv_drain_tx()
2026 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_drain_tx()
2028 u64_stats_update_end(&np->swstats_tx_syncp); in nv_drain_tx()
2030 np->tx_skb[i].dma = 0; in nv_drain_tx()
2031 np->tx_skb[i].dma_len = 0; in nv_drain_tx()
2032 np->tx_skb[i].dma_single = 0; in nv_drain_tx()
2033 np->tx_skb[i].first_tx_desc = NULL; in nv_drain_tx()
2034 np->tx_skb[i].next_tx_ctx = NULL; in nv_drain_tx()
2036 np->tx_pkts_in_progress = 0; in nv_drain_tx()
2037 np->tx_change_owner = NULL; in nv_drain_tx()
2038 np->tx_end_flip = NULL; in nv_drain_tx()
2046 for (i = 0; i < np->rx_ring_size; i++) { in nv_drain_rx()
2048 np->rx_ring.orig[i].flaglen = 0; in nv_drain_rx()
2049 np->rx_ring.orig[i].buf = 0; in nv_drain_rx()
2051 np->rx_ring.ex[i].flaglen = 0; in nv_drain_rx()
2052 np->rx_ring.ex[i].txvlan = 0; in nv_drain_rx()
2053 np->rx_ring.ex[i].bufhigh = 0; in nv_drain_rx()
2054 np->rx_ring.ex[i].buflow = 0; in nv_drain_rx()
2057 if (np->rx_skb[i].skb) { in nv_drain_rx()
2058 dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, in nv_drain_rx()
2059 (skb_end_pointer(np->rx_skb[i].skb) - in nv_drain_rx()
2060 np->rx_skb[i].skb->data), in nv_drain_rx()
2062 dev_kfree_skb(np->rx_skb[i].skb); in nv_drain_rx()
2063 np->rx_skb[i].skb = NULL; in nv_drain_rx()
2076 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); in nv_get_empty_tx_slots()
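A worked example of the ring arithmetic above, written as a standalone helper with hypothetical numbers purely for illustration:

/* Illustration only, not driver code: with ring_size = 256 and the put
 * context 10 entries ahead of the get context, (256 + 10) % 256 = 10
 * descriptors are in flight, so 256 - 10 = 246 slots are empty; the
 * "+ ring_size" term keeps the difference non-negative once put has
 * wrapped around behind get. */
static u32 empty_slots_example(u32 ring_size, long put_minus_get)
{
	return ring_size - (u32)((ring_size + put_minus_get) % ring_size);
}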
2091 * Caller has already gained np->lock. in nv_legacybackoff_reseed()
2181 temp |= main_seedset[seedset][i-1] & 0x3ff; in nv_gear_backoff_reseed()
2182 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); in nv_gear_backoff_reseed()
2188 * nv_start_xmit: dev->hard_start_xmit function
2195 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_start_xmit()
2196 unsigned int fragments = skb_shinfo(skb)->nr_frags; in nv_start_xmit()
2201 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit()
2213 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in nv_start_xmit()
2216 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit()
2219 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2223 np->tx_stop = 1; in nv_start_xmit()
2224 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2232 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2234 start_tx = put_tx = np->put_tx.orig; in nv_start_xmit()
2239 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit()
2240 skb->data + offset, bcnt, in nv_start_xmit()
2242 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2243 np->put_tx_ctx->dma))) { in nv_start_xmit()
2244 /* on DMA mapping error - drop the packet */ in nv_start_xmit()
2246 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2248 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2254 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2255 np->put_tx_ctx->dma_single = 1; in nv_start_xmit()
2256 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2257 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); in nv_start_xmit()
2259 tx_flags = np->tx_flags; in nv_start_xmit()
2261 size -= bcnt; in nv_start_xmit()
2262 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2263 put_tx = np->tx_ring.orig; in nv_start_xmit()
2264 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2265 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2270 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in nv_start_xmit()
2276 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2279 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit()
2280 &np->pci_dev->dev, in nv_start_xmit()
2284 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2285 np->put_tx_ctx->dma))) { in nv_start_xmit()
2290 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2291 tmp_tx_ctx = np->tx_skb; in nv_start_xmit()
2292 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit()
2294 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit()
2295 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2297 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2304 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2305 np->put_tx_ctx->dma_single = 0; in nv_start_xmit()
2306 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2307 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); in nv_start_xmit()
2310 frag_size -= bcnt; in nv_start_xmit()
2311 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2312 put_tx = np->tx_ring.orig; in nv_start_xmit()
2313 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2314 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2318 if (unlikely(put_tx == np->tx_ring.orig)) in nv_start_xmit()
2319 prev_tx = np->last_tx.orig; in nv_start_xmit()
2321 prev_tx = put_tx - 1; in nv_start_xmit()
2323 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit()
2324 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit()
2326 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit()
2329 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); in nv_start_xmit()
2332 prev_tx_ctx->skb = skb; in nv_start_xmit()
2335 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); in nv_start_xmit()
2337 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? in nv_start_xmit()
2340 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2343 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); in nv_start_xmit()
2345 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit()
2349 np->put_tx.orig = put_tx; in nv_start_xmit()
2351 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2357 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; in nv_start_xmit()
2370 unsigned int fragments = skb_shinfo(skb)->nr_frags; in nv_start_xmit_optimized()
2375 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit_optimized()
2388 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in nv_start_xmit_optimized()
2391 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); in nv_start_xmit_optimized()
2394 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2398 np->tx_stop = 1; in nv_start_xmit_optimized()
2399 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2408 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2410 start_tx = put_tx = np->put_tx.ex; in nv_start_xmit_optimized()
2411 start_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2416 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit_optimized()
2417 skb->data + offset, bcnt, in nv_start_xmit_optimized()
2419 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2420 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2421 /* on DMA mapping error - drop the packet */ in nv_start_xmit_optimized()
2423 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2425 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2431 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2432 np->put_tx_ctx->dma_single = 1; in nv_start_xmit_optimized()
2433 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2434 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2435 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); in nv_start_xmit_optimized()
2439 size -= bcnt; in nv_start_xmit_optimized()
2440 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2441 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2442 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2443 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2448 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in nv_start_xmit_optimized()
2455 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2456 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit_optimized()
2457 &np->pci_dev->dev, in nv_start_xmit_optimized()
2462 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2463 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2468 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2469 tmp_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2470 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit_optimized()
2472 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit_optimized()
2473 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2475 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2481 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2482 np->put_tx_ctx->dma_single = 0; in nv_start_xmit_optimized()
2483 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2484 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2485 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); in nv_start_xmit_optimized()
2488 frag_size -= bcnt; in nv_start_xmit_optimized()
2489 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2490 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2491 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2492 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2496 if (unlikely(put_tx == np->tx_ring.ex)) in nv_start_xmit_optimized()
2497 prev_tx = np->last_tx.ex; in nv_start_xmit_optimized()
2499 prev_tx = put_tx - 1; in nv_start_xmit_optimized()
2501 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit_optimized()
2502 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit_optimized()
2504 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit_optimized()
2507 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); in nv_start_xmit_optimized()
2510 prev_tx_ctx->skb = skb; in nv_start_xmit_optimized()
2513 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); in nv_start_xmit_optimized()
2515 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ? in nv_start_xmit_optimized()
2520 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | in nv_start_xmit_optimized()
2523 start_tx->txvlan = 0; in nv_start_xmit_optimized()
2525 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2527 if (np->tx_limit) { in nv_start_xmit_optimized()
2533 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { in nv_start_xmit_optimized()
2534 if (!np->tx_change_owner) in nv_start_xmit_optimized()
2535 np->tx_change_owner = start_tx_ctx; in nv_start_xmit_optimized()
2539 start_tx_ctx->first_tx_desc = start_tx; in nv_start_xmit_optimized()
2540 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2541 np->tx_end_flip = np->put_tx_ctx; in nv_start_xmit_optimized()
2543 np->tx_pkts_in_progress++; in nv_start_xmit_optimized()
2548 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); in nv_start_xmit_optimized()
2550 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit_optimized()
2554 np->put_tx.ex = put_tx; in nv_start_xmit_optimized()
2556 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2562 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits; in nv_start_xmit_optimized()
2573 np->tx_pkts_in_progress--; in nv_tx_flip_ownership()
2574 if (np->tx_change_owner) { in nv_tx_flip_ownership()
2575 np->tx_change_owner->first_tx_desc->flaglen |= in nv_tx_flip_ownership()
2577 np->tx_pkts_in_progress++; in nv_tx_flip_ownership()
2579 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; in nv_tx_flip_ownership()
2580 if (np->tx_change_owner == np->tx_end_flip) in nv_tx_flip_ownership()
2581 np->tx_change_owner = NULL; in nv_tx_flip_ownership()
2583 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_tx_flip_ownership()
2590 * Caller must own np->lock.
2597 struct ring_desc *orig_get_tx = np->get_tx.orig; in nv_tx_done()
2600 while ((np->get_tx.orig != np->put_tx.orig) && in nv_tx_done()
2601 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && in nv_tx_done()
2604 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done()
2606 if (np->desc_ver == DESC_VER_1) { in nv_tx_done()
2615 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2617 len = np->get_tx_ctx->skb->len; in nv_tx_done()
2619 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2621 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2622 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2623 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2635 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2637 len = np->get_tx_ctx->skb->len; in nv_tx_done()
2639 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2641 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2642 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2643 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2647 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) in nv_tx_done()
2648 np->get_tx.orig = np->tx_ring.orig; in nv_tx_done()
2649 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done()
2650 np->get_tx_ctx = np->tx_skb; in nv_tx_done()
2653 netdev_completed_queue(np->dev, tx_work, bytes_compl); in nv_tx_done()
2655 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { in nv_tx_done()
2656 np->tx_stop = 0; in nv_tx_done()
2667 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; in nv_tx_done_optimized()
2670 while ((np->get_tx.ex != np->put_tx.ex) && in nv_tx_done_optimized()
2671 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && in nv_tx_done_optimized()
2674 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done_optimized()
2680 if (np->driver_data & DEV_HAS_GEAR_MODE) in nv_tx_done_optimized()
2688 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2690 len = np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2692 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2695 bytes_cleaned += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2696 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done_optimized()
2697 np->get_tx_ctx->skb = NULL; in nv_tx_done_optimized()
2700 if (np->tx_limit) in nv_tx_done_optimized()
2704 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) in nv_tx_done_optimized()
2705 np->get_tx.ex = np->tx_ring.ex; in nv_tx_done_optimized()
2706 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done_optimized()
2707 np->get_tx_ctx = np->tx_skb; in nv_tx_done_optimized()
2710 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); in nv_tx_done_optimized()
2712 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { in nv_tx_done_optimized()
2713 np->tx_stop = 0; in nv_tx_done_optimized()
2720 * nv_tx_timeout: dev->tx_timeout function
2731 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_tx_timeout()
2741 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); in nv_tx_timeout()
2743 for (i = 0; i <= np->register_size; i += 32) { in nv_tx_timeout()
2754 for (i = 0; i < np->tx_ring_size; i += 4) { in nv_tx_timeout()
2760 le32_to_cpu(np->tx_ring.orig[i].buf), in nv_tx_timeout()
2761 le32_to_cpu(np->tx_ring.orig[i].flaglen), in nv_tx_timeout()
2762 le32_to_cpu(np->tx_ring.orig[i+1].buf), in nv_tx_timeout()
2763 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), in nv_tx_timeout()
2764 le32_to_cpu(np->tx_ring.orig[i+2].buf), in nv_tx_timeout()
2765 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), in nv_tx_timeout()
2766 le32_to_cpu(np->tx_ring.orig[i+3].buf), in nv_tx_timeout()
2767 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); in nv_tx_timeout()
2775 le32_to_cpu(np->tx_ring.ex[i].bufhigh), in nv_tx_timeout()
2776 le32_to_cpu(np->tx_ring.ex[i].buflow), in nv_tx_timeout()
2777 le32_to_cpu(np->tx_ring.ex[i].flaglen), in nv_tx_timeout()
2778 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), in nv_tx_timeout()
2779 le32_to_cpu(np->tx_ring.ex[i+1].buflow), in nv_tx_timeout()
2780 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), in nv_tx_timeout()
2781 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), in nv_tx_timeout()
2782 le32_to_cpu(np->tx_ring.ex[i+2].buflow), in nv_tx_timeout()
2783 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), in nv_tx_timeout()
2784 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), in nv_tx_timeout()
2785 le32_to_cpu(np->tx_ring.ex[i+3].buflow), in nv_tx_timeout()
2786 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); in nv_tx_timeout()
2791 spin_lock_irq(&np->lock); in nv_tx_timeout()
2797 saved_tx_limit = np->tx_limit; in nv_tx_timeout()
2798 np->tx_limit = 0; /* prevent giving HW any limited pkts */ in nv_tx_timeout()
2799 np->tx_stop = 0; /* prevent waking tx queue */ in nv_tx_timeout()
2801 nv_tx_done(dev, np->tx_ring_size); in nv_tx_timeout()
2803 nv_tx_done_optimized(dev, np->tx_ring_size); in nv_tx_timeout()
2806 if (np->tx_change_owner) in nv_tx_timeout()
2807 put_tx.ex = np->tx_change_owner->first_tx_desc; in nv_tx_timeout()
2809 put_tx = np->put_tx; in nv_tx_timeout()
2816 np->get_tx = np->put_tx = put_tx; in nv_tx_timeout()
2817 np->tx_limit = saved_tx_limit; in nv_tx_timeout()
2822 spin_unlock_irq(&np->lock); in nv_tx_timeout()
2835 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { in nv_getlen()
2836 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto); in nv_getlen()
2839 protolen = ntohs(((struct ethhdr *)packet)->h_proto); in nv_getlen()
2857 return -1; in nv_getlen()
2862 return -1; in nv_getlen()
2871 u64_stats_update_begin(&np->swstats_rx_syncp); in rx_missing_handler()
2873 u64_stats_update_end(&np->swstats_rx_syncp); in rx_missing_handler()
2885 while ((np->get_rx.orig != np->put_rx.orig) && in nv_rx_process()
2886 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && in nv_rx_process()
2890 * the packet is for us - immediately tear down the pci mapping. in nv_rx_process()
2894 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process()
2895 np->get_rx_ctx->dma_len, in nv_rx_process()
2897 skb = np->get_rx_ctx->skb; in nv_rx_process()
2898 np->get_rx_ctx->skb = NULL; in nv_rx_process()
2901 if (np->desc_ver == DESC_VER_1) { in nv_rx_process()
2906 len = nv_getlen(dev, skb->data, len); in nv_rx_process()
2915 len--; in nv_rx_process()
2933 len = nv_getlen(dev, skb->data, len); in nv_rx_process()
2942 len--; in nv_rx_process()
2952 skb->ip_summed = CHECKSUM_UNNECESSARY; in nv_rx_process()
2958 /* got a valid packet - forward it to the network core */ in nv_rx_process()
2960 skb->protocol = eth_type_trans(skb, dev); in nv_rx_process()
2961 napi_gro_receive(&np->napi, skb); in nv_rx_process()
2962 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2965 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2967 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) in nv_rx_process()
2968 np->get_rx.orig = np->rx_ring.orig; in nv_rx_process()
2969 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process()
2970 np->get_rx_ctx = np->rx_skb; in nv_rx_process()
2987 while ((np->get_rx.ex != np->put_rx.ex) && in nv_rx_process_optimized()
2988 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && in nv_rx_process_optimized()
2992 * the packet is for us - immediately tear down the pci mapping. in nv_rx_process_optimized()
2996 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process_optimized()
2997 np->get_rx_ctx->dma_len, in nv_rx_process_optimized()
2999 skb = np->get_rx_ctx->skb; in nv_rx_process_optimized()
3000 np->get_rx_ctx->skb = NULL; in nv_rx_process_optimized()
3007 len = nv_getlen(dev, skb->data, len); in nv_rx_process_optimized()
3016 len--; in nv_rx_process_optimized()
3027 skb->ip_summed = CHECKSUM_UNNECESSARY; in nv_rx_process_optimized()
3029 /* got a valid packet - forward it to the network core */ in nv_rx_process_optimized()
3031 skb->protocol = eth_type_trans(skb, dev); in nv_rx_process_optimized()
3032 prefetch(skb->data); in nv_rx_process_optimized()
3034 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); in nv_rx_process_optimized()
3041 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && in nv_rx_process_optimized()
3047 napi_gro_receive(&np->napi, skb); in nv_rx_process_optimized()
3048 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process_optimized()
3051 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process_optimized()
3056 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) in nv_rx_process_optimized()
3057 np->get_rx.ex = np->rx_ring.ex; in nv_rx_process_optimized()
3058 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process_optimized()
3059 np->get_rx_ctx = np->rx_skb; in nv_rx_process_optimized()
3071 if (dev->mtu <= ETH_DATA_LEN) in set_bufsize()
3072 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; in set_bufsize()
3074 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; in set_bufsize()
3078 * nv_change_mtu: dev->change_mtu function
3086 old_mtu = dev->mtu; in nv_change_mtu()
3087 WRITE_ONCE(dev->mtu, new_mtu); in nv_change_mtu()
3103 napi_disable(&np->napi); in nv_change_mtu()
3106 spin_lock(&np->lock); in nv_change_mtu()
3115 if (!np->in_shutdown) in nv_change_mtu()
3116 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_change_mtu()
3119 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_change_mtu()
3121 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), in nv_change_mtu()
3124 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_change_mtu()
3129 spin_unlock(&np->lock); in nv_change_mtu()
3132 napi_enable(&np->napi); in nv_change_mtu()
3143 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + in nv_copy_mac_to_hw()
3144 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); in nv_copy_mac_to_hw()
3145 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); in nv_copy_mac_to_hw()
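As a concrete illustration of the packing above (made-up address, not from the driver):

/* Hypothetical example: for dev_addr = 00:11:22:33:44:55 the two statements
 * above produce mac[0] = 0x33221100 and mac[1] = 0x00005544, i.e. byte 0 of
 * the MAC lands in the least significant byte, spread across two 32-bit
 * register writes. */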
3152 * nv_set_mac_address: dev->set_mac_address function
3160 if (!is_valid_ether_addr(macaddr->sa_data)) in nv_set_mac_address()
3161 return -EADDRNOTAVAIL; in nv_set_mac_address()
3164 eth_hw_addr_set(dev, macaddr->sa_data); in nv_set_mac_address()
3169 spin_lock_irq(&np->lock); in nv_set_mac_address()
3179 spin_unlock_irq(&np->lock); in nv_set_mac_address()
3189 * nv_set_multicast: dev->set_multicast function
3203 if (dev->flags & IFF_PROMISC) { in nv_set_multicast()
3208 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { in nv_set_multicast()
3213 if (dev->flags & IFF_ALLMULTI) { in nv_set_multicast()
3219 unsigned char *hw_addr = ha->addr; in nv_set_multicast()
3241 spin_lock_irq(&np->lock); in nv_set_multicast()
3249 spin_unlock_irq(&np->lock); in nv_set_multicast()
3257 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); in nv_update_pause()
3259 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { in nv_update_pause()
3263 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_update_pause()
3268 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { in nv_update_pause()
3272 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) in nv_update_pause()
3274 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { in nv_update_pause()
3281 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_update_pause()
3296 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; in nv_force_linkspeed()
3297 np->duplex = duplex; in nv_force_linkspeed()
3300 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_force_linkspeed()
3302 np->gigabit = PHY_GIGABIT; in nv_force_linkspeed()
3305 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) in nv_force_linkspeed()
3307 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3309 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_force_linkspeed()
3316 if (np->duplex == 0) in nv_force_linkspeed()
3318 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3320 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3326 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3336 if (np->desc_ver == DESC_VER_1) { in nv_force_linkspeed()
3339 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3347 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_force_linkspeed()
3350 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_force_linkspeed()
3355 * nv_update_linkspeed - Setup the MAC according to the link partner
3372 int newls = np->linkspeed; in nv_update_linkspeed()
3373 int newdup = np->duplex; in nv_update_linkspeed()
3384 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_update_linkspeed()
3397 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3398 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3407 if (np->autoneg == 0) { in nv_update_linkspeed()
3408 if (np->fixed_mode & LPA_100FULL) { in nv_update_linkspeed()
3411 } else if (np->fixed_mode & LPA_100HALF) { in nv_update_linkspeed()
3414 } else if (np->fixed_mode & LPA_10FULL) { in nv_update_linkspeed()
3426 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ in nv_update_linkspeed()
3433 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_update_linkspeed()
3434 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); in nv_update_linkspeed()
3437 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3438 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_update_linkspeed()
3439 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); in nv_update_linkspeed()
3469 if (np->duplex == newdup && np->linkspeed == newls) in nv_update_linkspeed()
3472 np->duplex = newdup; in nv_update_linkspeed()
3473 np->linkspeed = newls; in nv_update_linkspeed()
3485 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3488 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || in nv_update_linkspeed()
3489 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) in nv_update_linkspeed()
3491 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3498 if (np->duplex == 0) in nv_update_linkspeed()
3500 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_update_linkspeed()
3502 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3506 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ in nv_update_linkspeed()
3508 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { in nv_update_linkspeed()
3511 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { in nv_update_linkspeed()
3512 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) in nv_update_linkspeed()
3521 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) in nv_update_linkspeed()
3528 if (np->desc_ver == DESC_VER_1) { in nv_update_linkspeed()
3531 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3538 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_update_linkspeed()
3541 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_update_linkspeed()
3546 if (netif_running(dev) && (np->duplex != 0)) { in nv_update_linkspeed()
3547 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { in nv_update_linkspeed()
3555 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3566 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3574 pause_flags = np->pause_flags; in nv_update_linkspeed()
3624 if (np->msi_flags & NV_MSI_ENABLED) { in nv_msi_workaround()
3625 u8 __iomem *base = np->base; in nv_msi_workaround()
3639 np->quiet_count = 0; in nv_change_interrupt_mode()
3640 if (np->irqmask != NVREG_IRQMASK_CPU) { in nv_change_interrupt_mode()
3641 np->irqmask = NVREG_IRQMASK_CPU; in nv_change_interrupt_mode()
3645 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { in nv_change_interrupt_mode()
3646 np->quiet_count++; in nv_change_interrupt_mode()
3650 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { in nv_change_interrupt_mode()
3651 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_change_interrupt_mode()
3666 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq()
3667 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq()
3668 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq()
3670 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq()
3671 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq()
3673 if (!(np->events & np->irqmask)) in nv_nic_irq()
3678 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq()
3683 __napi_schedule(&np->napi); in nv_nic_irq()
3699 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_optimized()
3700 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq_optimized()
3701 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq_optimized()
3703 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3704 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3706 if (!(np->events & np->irqmask)) in nv_nic_irq_optimized()
3711 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq_optimized()
3716 __napi_schedule(&np->napi); in nv_nic_irq_optimized()
3735 if (!(events & np->irqmask)) in nv_nic_irq_tx()
3738 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3740 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3743 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3748 if (!np->in_shutdown) { in nv_nic_irq_tx()
3749 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; in nv_nic_irq_tx()
3750 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_tx()
3752 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3766 struct net_device *dev = np->dev; in nv_napi_poll()
3774 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3775 tx_work += nv_tx_done(dev, np->tx_ring_size); in nv_napi_poll()
3776 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3778 rx_count = nv_rx_process(dev, budget - rx_work); in nv_napi_poll()
3781 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3782 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); in nv_napi_poll()
3783 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3786 budget - rx_work); in nv_napi_poll()
3793 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3794 if (!np->in_shutdown) in nv_napi_poll()
3795 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_napi_poll()
3796 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3801 if (unlikely(np->events & NVREG_IRQ_LINK)) { in nv_napi_poll()
3802 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3804 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3806 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { in nv_napi_poll()
3807 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3809 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3810 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_napi_poll()
3812 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { in nv_napi_poll()
3813 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3814 if (!np->in_shutdown) { in nv_napi_poll()
3815 np->nic_poll_irq = np->irqmask; in nv_napi_poll()
3816 np->recover_error = 1; in nv_napi_poll()
3817 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_napi_poll()
3819 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3825 /* re-enable interrupts in nv_napi_poll()
3829 writel(np->irqmask, base + NvRegIrqMask); in nv_napi_poll()
3847 if (!(events & np->irqmask)) in nv_nic_irq_rx()
3852 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3853 if (!np->in_shutdown) in nv_nic_irq_rx()
3854 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_nic_irq_rx()
3855 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3860 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3865 if (!np->in_shutdown) { in nv_nic_irq_rx()
3866 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; in nv_nic_irq_rx()
3867 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_rx()
3869 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3892 if (!(events & np->irqmask)) in nv_nic_irq_other()
3896 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3898 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3901 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3903 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3905 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { in nv_nic_irq_other()
3906 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3908 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3909 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_nic_irq_other()
3912 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3917 if (!np->in_shutdown) { in nv_nic_irq_other()
3918 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3919 np->recover_error = 1; in nv_nic_irq_other()
3920 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3922 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3926 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3931 if (!np->in_shutdown) { in nv_nic_irq_other()
3932 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3933 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3935 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
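Both nv_nic_irq_rx() and nv_nic_irq_other() above park troublesome interrupt sources in np->nic_poll_irq and arm the np->nic_poll timer rather than looping in hard-IRQ context. The hand-off reduces to this pattern, shown with hypothetical names (a sketch, not the driver's code):

        #include <linux/timer.h>
        #include <linux/spinlock.h>
        #include <linux/jiffies.h>

        struct my_priv {                        /* hypothetical private data */
                spinlock_t lock;
                int in_shutdown;
                u32 poll_pending;
                struct timer_list poll_timer;
        };

        static void my_defer_irq_work(struct my_priv *priv, u32 pending)
        {
                unsigned long flags;

                spin_lock_irqsave(&priv->lock, flags);
                if (!priv->in_shutdown) {
                        priv->poll_pending |= pending;  /* remember what needs service */
                        mod_timer(&priv->poll_timer, jiffies + 1);
                }
                spin_unlock_irqrestore(&priv->lock, flags);
        }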
3953 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_test()
3966 spin_lock(&np->lock); in nv_nic_irq_test()
3967 np->intr_test = 1; in nv_nic_irq_test()
3968 spin_unlock(&np->lock); in nv_nic_irq_test()
4014 if (np->msi_flags & NV_MSI_X_CAPABLE) { in nv_request_irq()
4015 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_request_irq()
4016 np->msi_x_entry[i].entry = i; in nv_request_irq()
4017 ret = pci_enable_msix_range(np->pci_dev, in nv_request_irq()
4018 np->msi_x_entry, in nv_request_irq()
4019 np->msi_flags & NV_MSI_X_VECTORS_MASK, in nv_request_irq()
4020 np->msi_flags & NV_MSI_X_VECTORS_MASK); in nv_request_irq()
4022 np->msi_flags |= NV_MSI_X_ENABLED; in nv_request_irq()
4025 sprintf(np->name_rx, "%s-rx", dev->name); in nv_request_irq()
4026 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, in nv_request_irq()
4027 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); in nv_request_irq()
4032 pci_disable_msix(np->pci_dev); in nv_request_irq()
4033 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4037 sprintf(np->name_tx, "%s-tx", dev->name); in nv_request_irq()
4038 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, in nv_request_irq()
4039 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); in nv_request_irq()
4044 pci_disable_msix(np->pci_dev); in nv_request_irq()
4045 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4049 sprintf(np->name_other, "%s-other", dev->name); in nv_request_irq()
4050 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, in nv_request_irq()
4051 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); in nv_request_irq()
4056 pci_disable_msix(np->pci_dev); in nv_request_irq()
4057 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4068 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, in nv_request_irq()
4069 handler, IRQF_SHARED, dev->name, dev); in nv_request_irq()
4074 pci_disable_msix(np->pci_dev); in nv_request_irq()
4075 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4083 netdev_info(dev, "MSI-X enabled\n"); in nv_request_irq()
4087 if (np->msi_flags & NV_MSI_CAPABLE) { in nv_request_irq()
4088 ret = pci_enable_msi(np->pci_dev); in nv_request_irq()
4090 np->msi_flags |= NV_MSI_ENABLED; in nv_request_irq()
4091 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); in nv_request_irq()
4095 pci_disable_msi(np->pci_dev); in nv_request_irq()
4096 np->msi_flags &= ~NV_MSI_ENABLED; in nv_request_irq()
4110 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) in nv_request_irq()
4115 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); in nv_request_irq()
4117 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); in nv_request_irq()
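nv_request_irq() above tries MSI-X first (one vector each for rx, tx and "other", or a single combined vector), falls back to MSI, and finally to the legacy INTx line, disabling the capability again whenever request_irq() fails. A compressed sketch of that fallback ladder, assuming a single handler and hypothetical names:

        #include <linux/pci.h>
        #include <linux/interrupt.h>

        static int my_setup_irq(struct pci_dev *pdev, struct msix_entry *entries,
                                int nvec, irq_handler_t handler, void *cookie)
        {
                int i, ret;

                for (i = 0; i < nvec; i++)
                        entries[i].entry = i;           /* table slots must be filled first */
                ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
                if (ret > 0) {
                        if (!request_irq(entries[0].vector, handler, IRQF_SHARED,
                                         "my-nic", cookie))
                                return 0;
                        pci_disable_msix(pdev);         /* undo the capability on failure */
                }

                if (!pci_enable_msi(pdev)) {
                        if (!request_irq(pdev->irq, handler, IRQF_SHARED,
                                         "my-nic", cookie))
                                return 0;
                        pci_disable_msi(pdev);
                }

                /* last resort: legacy INTx */
                return request_irq(pdev->irq, handler, IRQF_SHARED, "my-nic", cookie);
        }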
4127 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_free_irq()
4128 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_free_irq()
4129 free_irq(np->msi_x_entry[i].vector, dev); in nv_free_irq()
4130 pci_disable_msix(np->pci_dev); in nv_free_irq()
4131 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_free_irq()
4133 free_irq(np->pci_dev->irq, dev); in nv_free_irq()
4134 if (np->msi_flags & NV_MSI_ENABLED) { in nv_free_irq()
4135 pci_disable_msi(np->pci_dev); in nv_free_irq()
4136 np->msi_flags &= ~NV_MSI_ENABLED; in nv_free_irq()
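nv_free_irq() mirrors that setup in reverse: every requested vector is freed before the MSI-X or MSI capability itself is disabled. Roughly, as a hypothetical helper:

        #include <linux/pci.h>
        #include <linux/interrupt.h>

        static void my_teardown_irq(struct pci_dev *pdev, struct msix_entry *entries,
                                    int nvec, void *cookie, bool msix_on, bool msi_on)
        {
                int i;

                if (msix_on) {
                        for (i = 0; i < nvec; i++)
                                free_irq(entries[i].vector, cookie);
                        pci_disable_msix(pdev);
                } else {
                        free_irq(pdev->irq, cookie);
                        if (msi_on)
                                pci_disable_msi(pdev);
                }
        }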
4144 struct net_device *dev = np->dev; in nv_do_nic_poll()
4157 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_do_nic_poll()
4158 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; in nv_do_nic_poll()
4160 irq = np->pci_dev->irq; in nv_do_nic_poll()
4161 mask = np->irqmask; in nv_do_nic_poll()
4163 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4164 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; in nv_do_nic_poll()
4167 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4168 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; in nv_do_nic_poll()
4171 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4172 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; in nv_do_nic_poll()
4180 if (np->recover_error) { in nv_do_nic_poll()
4181 np->recover_error = 0; in nv_do_nic_poll()
4186 spin_lock(&np->lock); in nv_do_nic_poll()
4189 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_do_nic_poll()
4197 if (!np->in_shutdown) in nv_do_nic_poll()
4198 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_do_nic_poll()
4201 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_do_nic_poll()
4203 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_do_nic_poll()
4206 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_do_nic_poll()
4209 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_do_nic_poll()
4216 spin_unlock(&np->lock); in nv_do_nic_poll()
4226 np->nic_poll_irq = 0; in nv_do_nic_poll()
4232 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4233 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; in nv_do_nic_poll()
4236 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4237 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; in nv_do_nic_poll()
4240 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4241 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; in nv_do_nic_poll()
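nv_do_nic_poll() above selects the vector whose bit is set in np->nic_poll_irq, handles the recover_error case with a full reinit, and then services the pending events from timer context. The essential trick is running the handler with its IRQ line disabled so it cannot race with the real interrupt; in isolation that idea looks like this (hypothetical helper, simplified against the driver's version):

        #include <linux/interrupt.h>

        static void my_nic_poll(unsigned int irq, irq_handler_t handler, void *cookie)
        {
                disable_irq(irq);               /* keep the hard IRQ away while polling */
                handler(irq, cookie);           /* service pending events by hand */
                enable_irq(irq);
        }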
4254 nv_do_nic_poll(&np->nic_poll); in nv_poll_controller()
4259 __acquires(&netdev_priv(dev)->hwstats_lock) in nv_do_stats_poll()
4260 __releases(&netdev_priv(dev)->hwstats_lock) in nv_do_stats_poll()
4263 struct net_device *dev = np->dev; in nv_do_stats_poll()
4267 if (spin_trylock(&np->hwstats_lock)) { in nv_do_stats_poll()
4269 spin_unlock(&np->hwstats_lock); in nv_do_stats_poll()
4272 if (!np->in_shutdown) in nv_do_stats_poll()
4273 mod_timer(&np->stats_poll, in nv_do_stats_poll()
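nv_do_stats_poll() refreshes the hardware counters opportunistically: timer context must not block, so it skips the refresh when hwstats_lock is contended and simply re-arms itself. The same shape, with hypothetical names and an arbitrary interval:

        #include <linux/timer.h>
        #include <linux/spinlock.h>
        #include <linux/jiffies.h>

        struct my_priv {                        /* hypothetical private data */
                spinlock_t stats_lock;
                int in_shutdown;
                struct timer_list stats_timer;
        };

        static void my_stats_poll(struct timer_list *t)
        {
                struct my_priv *priv = from_timer(priv, t, stats_timer);

                if (spin_trylock(&priv->stats_lock)) {
                        /* ... read the hardware counters into the soft copy ... */
                        spin_unlock(&priv->stats_lock);
                }
                if (!priv->in_shutdown)
                        mod_timer(&priv->stats_timer, jiffies + 10 * HZ);
        }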
4280 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in nv_get_drvinfo()
4281 strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version)); in nv_get_drvinfo()
4282 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in nv_get_drvinfo()
4288 wolinfo->supported = WAKE_MAGIC; in nv_get_wol()
4290 spin_lock_irq(&np->lock); in nv_get_wol()
4291 if (np->wolenabled) in nv_get_wol()
4292 wolinfo->wolopts = WAKE_MAGIC; in nv_get_wol()
4293 spin_unlock_irq(&np->lock); in nv_get_wol()
4302 if (wolinfo->wolopts == 0) { in nv_set_wol()
4303 np->wolenabled = 0; in nv_set_wol()
4304 } else if (wolinfo->wolopts & WAKE_MAGIC) { in nv_set_wol()
4305 np->wolenabled = 1; in nv_set_wol()
4309 spin_lock_irq(&np->lock); in nv_set_wol()
4311 spin_unlock_irq(&np->lock); in nv_set_wol()
4313 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); in nv_set_wol()
4324 spin_lock_irq(&np->lock); in nv_get_link_ksettings()
4325 cmd->base.port = PORT_MII; in nv_get_link_ksettings()
4337 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { in nv_get_link_ksettings()
4348 speed = -1; in nv_get_link_ksettings()
4351 cmd->base.duplex = DUPLEX_HALF; in nv_get_link_ksettings()
4352 if (np->duplex) in nv_get_link_ksettings()
4353 cmd->base.duplex = DUPLEX_FULL; in nv_get_link_ksettings()
4356 cmd->base.duplex = DUPLEX_UNKNOWN; in nv_get_link_ksettings()
4358 cmd->base.speed = speed; in nv_get_link_ksettings()
4359 cmd->base.autoneg = np->autoneg; in nv_get_link_ksettings()
4362 if (np->autoneg) { in nv_get_link_ksettings()
4364 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_get_link_ksettings()
4373 if (np->gigabit == PHY_GIGABIT) { in nv_get_link_ksettings()
4374 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_get_link_ksettings()
4383 if (np->gigabit == PHY_GIGABIT) in nv_get_link_ksettings()
4386 cmd->base.phy_address = np->phyaddr; in nv_get_link_ksettings()
4388 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in nv_get_link_ksettings()
4390 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in nv_get_link_ksettings()
4394 spin_unlock_irq(&np->lock); in nv_get_link_ksettings()
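nv_get_link_ksettings() builds its answer from legacy u32 SUPPORTED_*/ADVERTISED_* masks read back over MII, then converts them into the ethtool link-mode bitmaps at the end. The conversion step on its own, as a hypothetical wrapper:

        #include <linux/ethtool.h>

        static void my_fill_link_modes(struct ethtool_link_ksettings *cmd,
                                       u32 supported, u32 advertising)
        {
                ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                        supported);
                ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                        advertising);
        }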
4402 u32 speed = cmd->base.speed; in nv_set_link_ksettings()
4406 cmd->link_modes.advertising); in nv_set_link_ksettings()
4408 if (cmd->base.port != PORT_MII) in nv_set_link_ksettings()
4409 return -EINVAL; in nv_set_link_ksettings()
4410 if (cmd->base.phy_address != np->phyaddr) { in nv_set_link_ksettings()
4413 return -EINVAL; in nv_set_link_ksettings()
4415 if (cmd->base.autoneg == AUTONEG_ENABLE) { in nv_set_link_ksettings()
4420 if (np->gigabit == PHY_GIGABIT) in nv_set_link_ksettings()
4424 return -EINVAL; in nv_set_link_ksettings()
4426 } else if (cmd->base.autoneg == AUTONEG_DISABLE) { in nv_set_link_ksettings()
4428 * forbidden - no one should need that. */ in nv_set_link_ksettings()
4431 return -EINVAL; in nv_set_link_ksettings()
4432 if (cmd->base.duplex != DUPLEX_HALF && in nv_set_link_ksettings()
4433 cmd->base.duplex != DUPLEX_FULL) in nv_set_link_ksettings()
4434 return -EINVAL; in nv_set_link_ksettings()
4436 return -EINVAL; in nv_set_link_ksettings()
4447 spin_lock_irqsave(&np->lock, flags); in nv_set_link_ksettings()
4458 spin_unlock_irqrestore(&np->lock, flags); in nv_set_link_ksettings()
4463 if (cmd->base.autoneg == AUTONEG_ENABLE) { in nv_set_link_ksettings()
4466 np->autoneg = 1; in nv_set_link_ksettings()
4469 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4479 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4481 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_link_ksettings()
4483 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4485 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4486 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4490 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4495 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4496 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_set_link_ksettings()
4502 return -EINVAL; in nv_set_link_ksettings()
4506 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
4511 np->autoneg = 0; in nv_set_link_ksettings()
4513 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4515 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF) in nv_set_link_ksettings()
4517 if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL) in nv_set_link_ksettings()
4519 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF) in nv_set_link_ksettings()
4521 if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL) in nv_set_link_ksettings()
4523 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_link_ksettings()
4524 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4526 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_link_ksettings()
4528 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { in nv_set_link_ksettings()
4530 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_link_ksettings()
4532 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4533 np->fixed_mode = adv; in nv_set_link_ksettings()
4535 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4536 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4538 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4541 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4543 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4545 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4547 if (np->phy_oui == PHY_OUI_MARVELL) { in nv_set_link_ksettings()
4551 return -EINVAL; in nv_set_link_ksettings()
4554 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
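In the AUTONEG_DISABLE branch of nv_set_link_ksettings() the fixed mode ends up as plain BMCR bits: autoneg cleared, BMCR_SPEED100 for 100 Mbit, BMCR_FULLDPLX for full duplex. The bit manipulation in isolation (hypothetical 10/100-only helper):

        #include <linux/types.h>
        #include <linux/mii.h>

        static u16 my_fixed_mode_bmcr(u16 bmcr, bool speed_100, bool full_duplex)
        {
                bmcr &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
                if (speed_100)
                        bmcr |= BMCR_SPEED100;
                if (full_duplex)
                        bmcr |= BMCR_FULLDPLX;
                return bmcr;
        }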
4576 return np->register_size; in nv_get_regs_len()
4586 regs->version = FORCEDETH_REGS_VER; in nv_get_regs()
4587 spin_lock_irq(&np->lock); in nv_get_regs()
4588 for (i = 0; i < np->register_size/sizeof(u32); i++) in nv_get_regs()
4590 spin_unlock_irq(&np->lock); in nv_get_regs()
4598 if (np->autoneg) { in nv_nway_reset()
4606 spin_lock(&np->lock); in nv_nway_reset()
4609 spin_unlock(&np->lock); in nv_nway_reset()
4615 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_nway_reset()
4616 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_nway_reset()
4621 return -EINVAL; in nv_nway_reset()
4625 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_nway_reset()
4634 ret = -EINVAL; in nv_nway_reset()
4647 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4648 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4650 ring->rx_pending = np->rx_ring_size; in nv_get_ringparam()
4651 ring->tx_pending = np->tx_ring_size; in nv_get_ringparam()
4664 if (ring->rx_pending < RX_RING_MIN || in nv_set_ringparam()
4665 ring->tx_pending < TX_RING_MIN || in nv_set_ringparam()
4666 ring->rx_mini_pending != 0 || in nv_set_ringparam()
4667 ring->rx_jumbo_pending != 0 || in nv_set_ringparam()
4668 (np->desc_ver == DESC_VER_1 && in nv_set_ringparam()
4669 (ring->rx_pending > RING_MAX_DESC_VER_1 || in nv_set_ringparam()
4670 ring->tx_pending > RING_MAX_DESC_VER_1)) || in nv_set_ringparam()
4671 (np->desc_ver != DESC_VER_1 && in nv_set_ringparam()
4672 (ring->rx_pending > RING_MAX_DESC_VER_2_3 || in nv_set_ringparam()
4673 ring->tx_pending > RING_MAX_DESC_VER_2_3))) { in nv_set_ringparam()
4674 return -EINVAL; in nv_set_ringparam()
4679 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4681 (ring->rx_pending + in nv_set_ringparam()
4682 ring->tx_pending), in nv_set_ringparam()
4685 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4687 (ring->rx_pending + in nv_set_ringparam()
4688 ring->tx_pending), in nv_set_ringparam()
4691 rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map), in nv_set_ringparam()
4693 tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map), in nv_set_ringparam()
4699 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4701 (ring->rx_pending + in nv_set_ringparam()
4702 ring->tx_pending), in nv_set_ringparam()
4706 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4708 (ring->rx_pending + in nv_set_ringparam()
4709 ring->tx_pending), in nv_set_ringparam()
4720 napi_disable(&np->napi); in nv_set_ringparam()
4723 spin_lock(&np->lock); in nv_set_ringparam()
4734 np->rx_ring_size = ring->rx_pending; in nv_set_ringparam()
4735 np->tx_ring_size = ring->tx_pending; in nv_set_ringparam()
4738 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; in nv_set_ringparam()
4739 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_set_ringparam()
4741 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; in nv_set_ringparam()
4742 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_set_ringparam()
4744 np->rx_skb = (struct nv_skb_map *)rx_skbuff; in nv_set_ringparam()
4745 np->tx_skb = (struct nv_skb_map *)tx_skbuff; in nv_set_ringparam()
4746 np->ring_addr = ring_addr; in nv_set_ringparam()
4748 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); in nv_set_ringparam()
4749 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); in nv_set_ringparam()
4755 if (!np->in_shutdown) in nv_set_ringparam()
4756 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_set_ringparam()
4760 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_set_ringparam()
4762 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_set_ringparam()
4765 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_set_ringparam()
4770 spin_unlock(&np->lock); in nv_set_ringparam()
4773 napi_enable(&np->napi); in nv_set_ringparam()
4778 return -ENOMEM; in nv_set_ringparam()
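nv_set_ringparam() above allocates the new descriptor ring and skb maps before stopping the NIC, so a failed allocation leaves the running rings untouched; only after everything is in hand does it quiesce, swap the pointers and restart. A skeleton of that ordering with hypothetical types and sizes:

        #include <linux/types.h>
        #include <linux/dma-mapping.h>
        #include <linux/slab.h>
        #include <linux/errno.h>

        struct my_desc { __le32 buf, flaglen; };        /* hypothetical descriptor */

        static int my_realloc_ring(struct device *dev, unsigned int count,
                                   struct my_desc **ring, dma_addr_t *ring_dma,
                                   void **skb_map)
        {
                struct my_desc *new_ring;
                void *new_map;
                dma_addr_t new_dma;

                new_ring = dma_alloc_coherent(dev, count * sizeof(*new_ring),
                                              &new_dma, GFP_KERNEL);
                new_map = kmalloc_array(count, sizeof(void *), GFP_KERNEL);
                if (!new_ring || !new_map) {
                        if (new_ring)
                                dma_free_coherent(dev, count * sizeof(*new_ring),
                                                  new_ring, new_dma);
                        kfree(new_map);
                        return -ENOMEM;         /* old rings are still intact */
                }

                /* ... stop the device, drain the old rings, then swap ... */
                *ring = new_ring;
                *ring_dma = new_dma;
                *skb_map = new_map;
                return 0;
        }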
4785 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; in nv_get_pauseparam()
4786 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; in nv_get_pauseparam()
4787 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; in nv_get_pauseparam()
4795 if ((!np->autoneg && np->duplex == 0) || in nv_set_pauseparam()
4796 (np->autoneg && !pause->autoneg && np->duplex == 0)) { in nv_set_pauseparam()
4798 return -EINVAL; in nv_set_pauseparam()
4800 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { in nv_set_pauseparam()
4802 return -EINVAL; in nv_set_pauseparam()
4810 spin_lock(&np->lock); in nv_set_pauseparam()
4813 spin_unlock(&np->lock); in nv_set_pauseparam()
4818 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); in nv_set_pauseparam()
4819 if (pause->rx_pause) in nv_set_pauseparam()
4820 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; in nv_set_pauseparam()
4821 if (pause->tx_pause) in nv_set_pauseparam()
4822 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; in nv_set_pauseparam()
4824 if (np->autoneg && pause->autoneg) { in nv_set_pauseparam()
4825 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; in nv_set_pauseparam()
4827 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_pauseparam()
4829 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pa… in nv_set_pauseparam()
4831 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_pauseparam()
4833 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_pauseparam()
4837 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_pauseparam()
4839 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_pauseparam()
4841 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_pauseparam()
4842 if (pause->rx_pause) in nv_set_pauseparam()
4843 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_pauseparam()
4844 if (pause->tx_pause) in nv_set_pauseparam()
4845 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_pauseparam()
4850 nv_update_pause(dev, np->pause_flags); in nv_set_pauseparam()
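The pause handling in nv_set_link_ksettings() and nv_set_pauseparam() maps the requested rx/tx pause onto the standard MII advertisement bits: rx pause advertises both ADVERTISE_PAUSE_CAP and ADVERTISE_PAUSE_ASYM, tx pause advertises ADVERTISE_PAUSE_ASYM. A standalone version of that mapping (hypothetical helper):

        #include <linux/types.h>
        #include <linux/mii.h>

        static u16 my_pause_adv_bits(bool rx_pause, bool tx_pause)
        {
                u16 adv = 0;

                if (rx_pause)           /* rx pause needs both bits advertised */
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
                if (tx_pause)
                        adv |= ADVERTISE_PAUSE_ASYM;
                return adv;
        }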
4867 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4868 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_loopback()
4871 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4878 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); in nv_set_loopback()
4881 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4885 /* Force 1000 Mbps full-duplex */ in nv_set_loopback()
4891 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4897 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4903 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4908 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4910 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4929 spin_lock_irq(&np->lock); in nv_vlan_mode()
4932 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4934 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4937 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4939 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4941 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_vlan_mode()
4943 spin_unlock_irq(&np->lock); in nv_vlan_mode()
4950 netdev_features_t changed = dev->features ^ features; in nv_set_features()
4960 spin_lock_irq(&np->lock); in nv_set_features()
4963 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4965 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4968 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_set_features()
4970 spin_unlock_irq(&np->lock); in nv_set_features()
4985 if (np->driver_data & DEV_HAS_TEST_EXTENDED) in nv_get_sset_count()
4990 if (np->driver_data & DEV_HAS_STATISTICS_V3) in nv_get_sset_count()
4992 else if (np->driver_data & DEV_HAS_STATISTICS_V2) in nv_get_sset_count()
4994 else if (np->driver_data & DEV_HAS_STATISTICS_V1) in nv_get_sset_count()
4999 return -EOPNOTSUPP; in nv_get_sset_count()
5005 __acquires(&netdev_priv(dev)->hwstats_lock) in nv_get_ethtool_stats()
5006 __releases(&netdev_priv(dev)->hwstats_lock) in nv_get_ethtool_stats()
5010 spin_lock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
5012 memcpy(buffer, &np->estats, in nv_get_ethtool_stats()
5014 spin_unlock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
5022 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
5023 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
5075 np->intr_test = 0; in nv_interrupt_test()
5078 save_msi_flags = np->msi_flags; in nv_interrupt_test()
5079 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; in nv_interrupt_test()
5080 np->msi_flags |= 0x001; /* setup 1 vector */ in nv_interrupt_test()
5093 spin_lock_irq(&np->lock); in nv_interrupt_test()
5096 testcnt = np->intr_test; in nv_interrupt_test()
5101 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_interrupt_test()
5106 spin_unlock_irq(&np->lock); in nv_interrupt_test()
5110 np->msi_flags = save_msi_flags; in nv_interrupt_test()
5129 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_loopback_test()
5154 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_loopback_test()
5156 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_loopback_test()
5170 test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, in nv_loopback_test()
5173 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_loopback_test()
5183 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); in nv_loopback_test()
5184 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5186 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); in nv_loopback_test()
5187 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); in nv_loopback_test()
5188 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5190 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_loopback_test()
5197 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); in nv_loopback_test()
5198 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); in nv_loopback_test()
5201 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); in nv_loopback_test()
5202 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); in nv_loopback_test()
5207 } else if (np->desc_ver == DESC_VER_1) { in nv_loopback_test()
5219 rx_skb = np->rx_skb[0].skb; in nv_loopback_test()
5221 if (rx_skb->data[i] != (u8)(i & 0xff)) { in nv_loopback_test()
5229 dma_unmap_single(&np->pci_dev->dev, test_dma_addr, in nv_loopback_test()
5230 (skb_end_pointer(tx_skb) - tx_skb->data), in nv_loopback_test()
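nv_loopback_test() pushes a single skb through the MAC in loopback mode; the DMA discipline around it is the usual map, check for mapping failure, and unmap when done. Reduced to a sketch with hypothetical helpers (direction chosen here for a transmit buffer, not copied from the driver):

        #include <linux/dma-mapping.h>
        #include <linux/skbuff.h>
        #include <linux/errno.h>

        static int my_map_test_buf(struct device *dev, struct sk_buff *skb,
                                   unsigned int len, dma_addr_t *addr)
        {
                *addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, *addr))
                        return -ENOMEM;         /* never hand a bad address to the NIC */
                return 0;
        }

        static void my_unmap_test_buf(struct device *dev, dma_addr_t addr,
                                      unsigned int len)
        {
                dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
        }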
5259 test->flags |= ETH_TEST_FL_FAILED; in nv_self_test()
5263 if (test->flags & ETH_TEST_FL_OFFLINE) { in nv_self_test()
5266 napi_disable(&np->napi); in nv_self_test()
5269 spin_lock_irq(&np->lock); in nv_self_test()
5270 nv_disable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5271 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_self_test()
5280 spin_unlock_irq(&np->lock); in nv_self_test()
5286 test->flags |= ETH_TEST_FL_FAILED; in nv_self_test()
5292 test->flags |= ETH_TEST_FL_FAILED; in nv_self_test()
5301 test->flags |= ETH_TEST_FL_FAILED; in nv_self_test()
5309 if (!np->in_shutdown) in nv_self_test()
5310 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_self_test()
5313 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_self_test()
5315 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_self_test()
5318 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_self_test()
5323 napi_enable(&np->napi); in nv_self_test()
5324 nv_enable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5389 np->mgmt_sema = 1; in nv_mgmt_acquire_sema()
5404 if (np->driver_data & DEV_HAS_MGMT_UNIT) { in nv_mgmt_release_sema()
5405 if (np->mgmt_sema) { in nv_mgmt_release_sema()
5438 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; in nv_mgmt_get_version()
5452 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_open()
5453 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); in nv_open()
5457 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_open()
5470 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) in nv_open()
5482 np->in_shutdown = 0; in nv_open()
5486 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_open()
5489 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_open()
5490 if (np->desc_ver == DESC_VER_1) in nv_open()
5494 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5495 writel(np->vlanctl_bits, base + NvRegVlanControl); in nv_open()
5497 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5511 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_open()
5517 if (np->desc_ver == DESC_VER_1) { in nv_open()
5520 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { in nv_open()
5530 if (poll_interval == -1) { in nv_open()
5538 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, in nv_open()
5542 if (np->wolenabled) in nv_open()
5553 nv_disable_hw_interrupts(dev, np->irqmask); in nv_open()
5563 nv_enable_hw_interrupts(dev, np->irqmask); in nv_open()
5566 spin_lock_irq(&np->lock); in nv_open()
5580 np->linkspeed = 0; in nv_open()
5584 napi_enable_locked(&np->napi); in nv_open()
5593 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_open()
5596 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) in nv_open()
5597 mod_timer(&np->stats_poll, in nv_open()
5600 spin_unlock_irq(&np->lock); in nv_open()
5606 if (dev->features & NETIF_F_LOOPBACK) in nv_open()
5607 nv_set_loopback(dev, dev->features); in nv_open()
5620 spin_lock_irq(&np->lock); in nv_close()
5621 np->in_shutdown = 1; in nv_close()
5622 spin_unlock_irq(&np->lock); in nv_close()
5623 napi_disable(&np->napi); in nv_close()
5624 synchronize_irq(np->pci_dev->irq); in nv_close()
5626 del_timer_sync(&np->oom_kick); in nv_close()
5627 del_timer_sync(&np->nic_poll); in nv_close()
5628 del_timer_sync(&np->stats_poll); in nv_close()
5631 spin_lock_irq(&np->lock); in nv_close()
5638 nv_disable_hw_interrupts(dev, np->irqmask); in nv_close()
5641 spin_unlock_irq(&np->lock); in nv_close()
5647 if (np->wolenabled || !phy_power_down) { in nv_close()
5653 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_close()
5654 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); in nv_close()
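nv_close() quiesces the device in dependency order: flag shutdown under the lock (so timers stop re-arming), disable NAPI, synchronize against the interrupt, then kill the timers before touching the hardware. The same ordering, condensed into a hypothetical helper:

        #include <linux/netdevice.h>
        #include <linux/interrupt.h>
        #include <linux/timer.h>
        #include <linux/spinlock.h>

        struct my_priv {                        /* hypothetical private data */
                spinlock_t lock;
                int in_shutdown;
                struct napi_struct napi;
                unsigned int irq;
                struct timer_list refill_timer, poll_timer, stats_timer;
        };

        static void my_quiesce(struct my_priv *priv)
        {
                spin_lock_irq(&priv->lock);
                priv->in_shutdown = 1;          /* timers check this before re-arming */
                spin_unlock_irq(&priv->lock);

                napi_disable(&priv->napi);
                synchronize_irq(priv->irq);

                del_timer_sync(&priv->refill_timer);
                del_timer_sync(&priv->poll_timer);
                del_timer_sync(&priv->stats_timer);
        }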
5715 err = -ENOMEM; in nv_probe()
5720 np->dev = dev; in nv_probe()
5721 np->pci_dev = pci_dev; in nv_probe()
5722 spin_lock_init(&np->lock); in nv_probe()
5723 spin_lock_init(&np->hwstats_lock); in nv_probe()
5724 SET_NETDEV_DEV(dev, &pci_dev->dev); in nv_probe()
5725 u64_stats_init(&np->swstats_rx_syncp); in nv_probe()
5726 u64_stats_init(&np->swstats_tx_syncp); in nv_probe()
5727 np->txrx_stats = alloc_percpu(struct nv_txrx_stats); in nv_probe()
5728 if (!np->txrx_stats) { in nv_probe()
5729 pr_err("np->txrx_stats, alloc memory error.\n"); in nv_probe()
5730 err = -ENOMEM; in nv_probe()
5734 timer_setup(&np->oom_kick, nv_do_rx_refill, 0); in nv_probe()
5735 timer_setup(&np->nic_poll, nv_do_nic_poll, 0); in nv_probe()
5736 timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE); in nv_probe()
5748 …if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HA… in nv_probe()
5749 np->register_size = NV_PCI_REGSZ_VER3; in nv_probe()
5750 else if (id->driver_data & DEV_HAS_STATISTICS_V1) in nv_probe()
5751 np->register_size = NV_PCI_REGSZ_VER2; in nv_probe()
5753 np->register_size = NV_PCI_REGSZ_VER1; in nv_probe()
5755 err = -EINVAL; in nv_probe()
5759 pci_resource_len(pci_dev, i) >= np->register_size) { in nv_probe()
5765 dev_info(&pci_dev->dev, "Couldn't find register window\n"); in nv_probe()
5770 np->driver_data = id->driver_data; in nv_probe()
5772 np->device_id = id->device; in nv_probe()
5775 if (id->driver_data & DEV_HAS_HIGH_DMA) { in nv_probe()
5776 /* packet format 3: supports 40-bit addressing */ in nv_probe()
5777 np->desc_ver = DESC_VER_3; in nv_probe()
5778 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; in nv_probe()
5780 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39))) in nv_probe()
5781 dev_info(&pci_dev->dev, in nv_probe()
5782 "64-bit DMA failed, using 32-bit addressing\n"); in nv_probe()
5784 dev->features |= NETIF_F_HIGHDMA; in nv_probe()
5786 } else if (id->driver_data & DEV_HAS_LARGEDESC) { in nv_probe()
5788 np->desc_ver = DESC_VER_2; in nv_probe()
5789 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; in nv_probe()
5792 np->desc_ver = DESC_VER_1; in nv_probe()
5793 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; in nv_probe()
5796 np->pkt_limit = NV_PKTLIMIT_1; in nv_probe()
5797 if (id->driver_data & DEV_HAS_LARGEDESC) in nv_probe()
5798 np->pkt_limit = NV_PKTLIMIT_2; in nv_probe()
5800 if (id->driver_data & DEV_HAS_CHECKSUM) { in nv_probe()
5801 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_probe()
5802 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | in nv_probe()
5806 np->vlanctl_bits = 0; in nv_probe()
5807 if (id->driver_data & DEV_HAS_VLAN) { in nv_probe()
5808 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; in nv_probe()
5809 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | in nv_probe()
5813 dev->features |= dev->hw_features; in nv_probe()
5816 dev->hw_features |= NETIF_F_LOOPBACK; in nv_probe()
5818 /* MTU range: 64 - 1500 or 9100 */ in nv_probe()
5819 dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN; in nv_probe()
5820 dev->max_mtu = np->pkt_limit; in nv_probe()
5822 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; in nv_probe()
5823 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || in nv_probe()
5824 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || in nv_probe()
5825 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { in nv_probe()
5826 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; in nv_probe()
5829 err = -ENOMEM; in nv_probe()
5830 np->base = ioremap(addr, np->register_size); in nv_probe()
5831 if (!np->base) in nv_probe()
5834 np->rx_ring_size = RX_RING_DEFAULT; in nv_probe()
5835 np->tx_ring_size = TX_RING_DEFAULT; in nv_probe()
5838 np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5840 (np->rx_ring_size + in nv_probe()
5841 np->tx_ring_size), in nv_probe()
5842 &np->ring_addr, in nv_probe()
5844 if (!np->rx_ring.orig) in nv_probe()
5846 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_probe()
5848 np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5850 (np->rx_ring_size + in nv_probe()
5851 np->tx_ring_size), in nv_probe()
5852 &np->ring_addr, GFP_KERNEL); in nv_probe()
5853 if (!np->rx_ring.ex) in nv_probe()
5855 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_probe()
5857 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5858 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5859 if (!np->rx_skb || !np->tx_skb) in nv_probe()
5863 dev->netdev_ops = &nv_netdev_ops; in nv_probe()
5865 dev->netdev_ops = &nv_netdev_ops_optimized; in nv_probe()
5867 netif_napi_add(dev, &np->napi, nv_napi_poll); in nv_probe()
5868 dev->ethtool_ops = &ops; in nv_probe()
5869 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; in nv_probe()
5875 np->orig_mac[0] = readl(base + NvRegMacAddrA); in nv_probe()
5876 np->orig_mac[1] = readl(base + NvRegMacAddrB); in nv_probe()
5880 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) { in nv_probe()
5882 mac[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5883 mac[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5884 mac[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5885 mac[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5886 mac[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5887 mac[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5890 mac[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5891 mac[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5892 mac[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5893 mac[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5894 mac[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5895 mac[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5901 np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) + in nv_probe()
5903 np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8); in nv_probe()
5906 mac[0] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5907 mac[1] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5908 mac[2] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5909 mac[3] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5910 mac[4] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5911 mac[5] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5913 dev_dbg(&pci_dev->dev, in nv_probe()
5925 dev_err(&pci_dev->dev, in nv_probe()
5926 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n", in nv_probe()
5929 dev_err(&pci_dev->dev, in nv_probe()
5930 "Using random MAC address: %pM\n", dev->dev_addr); in nv_probe()
5938 np->wolenabled = 0; in nv_probe()
5939 device_set_wakeup_enable(&pci_dev->dev, false); in nv_probe()
5941 if (id->driver_data & DEV_HAS_POWER_CNTRL) { in nv_probe()
5946 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) && in nv_probe()
5947 pci_dev->revision >= 0xA3) in nv_probe()
5952 if (np->desc_ver == DESC_VER_1) in nv_probe()
5953 np->tx_flags = NV_TX_VALID; in nv_probe()
5955 np->tx_flags = NV_TX2_VALID; in nv_probe()
5957 np->msi_flags = 0; in nv_probe()
5958 if ((id->driver_data & DEV_HAS_MSI) && msi) in nv_probe()
5959 np->msi_flags |= NV_MSI_CAPABLE; in nv_probe()
5961 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { in nv_probe()
5966 np->msi_flags |= NV_MSI_X_CAPABLE; in nv_probe()
5971 np->irqmask = NVREG_IRQMASK_CPU; in nv_probe()
5972 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5973 np->msi_flags |= 0x0001; in nv_probe()
5975 !(id->driver_data & DEV_NEED_TIMERIRQ)) { in nv_probe()
5977 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5979 np->msi_flags &= ~NV_MSI_X_CAPABLE; in nv_probe()
5982 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5983 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5984 np->msi_flags |= 0x0003; in nv_probe()
5987 if (id->driver_data & DEV_NEED_TIMERIRQ) in nv_probe()
5988 np->irqmask |= NVREG_IRQ_TIMER; in nv_probe()
5989 if (id->driver_data & DEV_NEED_LINKTIMER) { in nv_probe()
5990 np->need_linktimer = 1; in nv_probe()
5991 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_probe()
5993 np->need_linktimer = 0; in nv_probe()
5997 if (id->driver_data & DEV_NEED_TX_LIMIT) { in nv_probe()
5998 np->tx_limit = 1; in nv_probe()
5999 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) && in nv_probe()
6000 pci_dev->revision >= 0xA2) in nv_probe()
6001 np->tx_limit = 0; in nv_probe()
6014 if (id->driver_data & DEV_HAS_MGMT_UNIT) { in nv_probe()
6020 np->mac_in_use = 1; in nv_probe()
6021 if (np->mgmt_version > 0) in nv_probe()
6022 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; in nv_probe()
6024 if (np->mac_in_use && in nv_probe()
6037 int id1, id2; in nv_probe() local
6040 spin_lock_irq(&np->lock); in nv_probe()
6042 spin_unlock_irq(&np->lock); in nv_probe()
6045 spin_lock_irq(&np->lock); in nv_probe()
6046 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ); in nv_probe()
6047 spin_unlock_irq(&np->lock); in nv_probe()
6048 if (id2 < 0 || id2 == 0xffff) in nv_probe()
6051 np->phy_model = id2 & PHYID2_MODEL_MASK; in nv_probe()
6053 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; in nv_probe()
6054 np->phyaddr = phyaddr; in nv_probe()
6055 np->phy_oui = id1 | id2; in nv_probe()
6058 if (np->phy_oui == PHY_OUI_REALTEK2) in nv_probe()
6059 np->phy_oui = PHY_OUI_REALTEK; in nv_probe()
6060 /* Setup phy revision for Realtek */ in nv_probe()
6061 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) in nv_probe()
6062 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; in nv_probe()
6067 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n"); in nv_probe()
6076 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_probe()
6078 np->gigabit = PHY_GIGABIT; in nv_probe()
6082 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; in nv_probe()
6083 np->duplex = 0; in nv_probe()
6084 np->autoneg = 1; in nv_probe()
6088 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err); in nv_probe()
6102 if (id->driver_data & DEV_HAS_VLAN) in nv_probe()
6103 nv_vlan_mode(dev, dev->features); in nv_probe()
6105 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", in nv_probe()
6106 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); in nv_probe()
6108 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", in nv_probe()
6109 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", in nv_probe()
6110 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? in nv_probe()
6112 dev->features & (NETIF_F_HW_VLAN_CTAG_RX | in nv_probe()
6115 dev->features & (NETIF_F_LOOPBACK) ? in nv_probe()
6117 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", in nv_probe()
6118 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", in nv_probe()
6119 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", in nv_probe()
6120 np->gigabit == PHY_GIGABIT ? "gbit " : "", in nv_probe()
6121 np->need_linktimer ? "lnktim " : "", in nv_probe()
6122 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", in nv_probe()
6123 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", in nv_probe()
6124 np->desc_ver); in nv_probe()
6141 free_percpu(np->txrx_stats); in nv_probe()
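nv_probe() above selects DESC_VER_3 for DEV_HAS_HIGH_DMA parts and asks for a 39-bit DMA mask, quietly staying on 32-bit addressing when the platform refuses. That probe-time decision stated on its own (hypothetical helper; the default 32-bit mask set up by the PCI core remains in effect on failure):

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        static bool my_try_high_dma(struct pci_dev *pdev)
        {
                /* 0 means the mask was accepted and buffers above 4GB are usable */
                return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)) == 0;
        }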
6153 if (np->phy_oui == PHY_OUI_REALTEK && in nv_restore_phy()
6154 np->phy_model == PHY_MODEL_REALTEK_8201 && in nv_restore_phy()
6156 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); in nv_restore_phy()
6157 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); in nv_restore_phy()
6160 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); in nv_restore_phy()
6161 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); in nv_restore_phy()
6164 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_restore_phy()
6166 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); in nv_restore_phy()
6176 /* special op: write back the misordered MAC address - otherwise in nv_restore_mac_addr()
6179 writel(np->orig_mac[0], base + NvRegMacAddrA); in nv_restore_mac_addr()
6180 writel(np->orig_mac[1], base + NvRegMacAddrB); in nv_restore_mac_addr()
6190 free_percpu(np->txrx_stats); in nv_remove()
6223 /* save non-pci configuration space */ in nv_suspend()
6224 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_suspend()
6225 np->saved_config_space[i] = readl(base + i*sizeof(u32)); in nv_suspend()
6238 /* restore non-pci configuration space */ in nv_resume()
6239 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_resume()
6240 writel(np->saved_config_space[i], base+i*sizeof(u32)); in nv_resume()
6242 if (np->driver_data & DEV_NEED_MSI_FIX) in nv_resume()
6286 pci_wake_from_d3(pdev, np->wolenabled); in nv_shutdown()