Lines matching "tx-termination-fix"

1 // SPDX-License-Identifier: GPL-2.0-only
7 * of the original driver such as link fail-over and link management because
19 #include <linux/dma-mapping.h>
54 #define RX_MAX_PENDING (RX_LE_SIZE/6 - 2)
70 #define RING_NEXT(x, s) (((x)+1) & ((s)-1))
77 static int debug = -1; /* defaults above */
85 static int disable_msi = -1;
94 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
95 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
96 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
97 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
98 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
99 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
100 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
169 dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name); in gm_phy_write()
170 return -ETIMEDOUT; in gm_phy_write()
173 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); in gm_phy_write()
174 return -EIO; in gm_phy_write()
197 dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name); in __gm_phy_read()
198 return -ETIMEDOUT; in __gm_phy_read()
200 dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name); in __gm_phy_read()
201 return -EIO; in __gm_phy_read()
221 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) in sky2_power_on()
230 if (hw->flags & SKY2_HW_ADV_POWER_CTL) { in sky2_power_on()
249 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ in sky2_power_on()
263 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) in sky2_power_aux()
274 pci_pme_capable(hw->pdev, PCI_D3cold)) in sky2_power_aux()
327 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); in sky2_phy_init()
330 if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && in sky2_phy_init()
331 !(hw->flags & SKY2_HW_NEWER_PHY)) { in sky2_phy_init()
339 if (hw->chip_id == CHIP_ID_YUKON_EC) in sky2_phy_init()
351 if (!(hw->flags & SKY2_HW_GIGABIT)) { in sky2_phy_init()
355 if (hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_phy_init()
356 hw->chip_rev == CHIP_REV_YU_FE2_A0) { in sky2_phy_init()
372 if ( (sky2->flags & SKY2_FLAG_AUTO_SPEED) && in sky2_phy_init()
373 (hw->flags & SKY2_HW_NEWER_PHY)) { in sky2_phy_init()
389 if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) { in sky2_phy_init()
392 /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */ in sky2_phy_init()
399 if (hw->pmd_type == 'P') { in sky2_phy_init()
403 /* for SFP-module set SIGDET polarity to low */ in sky2_phy_init()
417 if (sky2->flags & SKY2_FLAG_AUTO_SPEED) { in sky2_phy_init()
419 if (sky2->advertising & ADVERTISED_1000baseT_Full) in sky2_phy_init()
421 if (sky2->advertising & ADVERTISED_1000baseT_Half) in sky2_phy_init()
423 if (sky2->advertising & ADVERTISED_100baseT_Full) in sky2_phy_init()
425 if (sky2->advertising & ADVERTISED_100baseT_Half) in sky2_phy_init()
427 if (sky2->advertising & ADVERTISED_10baseT_Full) in sky2_phy_init()
429 if (sky2->advertising & ADVERTISED_10baseT_Half) in sky2_phy_init()
433 if (sky2->advertising & ADVERTISED_1000baseT_Full) in sky2_phy_init()
435 if (sky2->advertising & ADVERTISED_1000baseT_Half) in sky2_phy_init()
439 /* Restart Auto-negotiation */ in sky2_phy_init()
448 switch (sky2->speed) { in sky2_phy_init()
459 if (sky2->duplex == DUPLEX_FULL) { in sky2_phy_init()
462 } else if (sky2->speed < SPEED_1000) in sky2_phy_init()
463 sky2->flow_mode = FC_NONE; in sky2_phy_init()
466 if (sky2->flags & SKY2_FLAG_AUTO_PAUSE) { in sky2_phy_init()
468 adv |= copper_fc_adv[sky2->flow_mode]; in sky2_phy_init()
470 adv |= fiber_fc_adv[sky2->flow_mode]; in sky2_phy_init()
473 reg |= gm_fc_disable[sky2->flow_mode]; in sky2_phy_init()
476 if (sky2->flow_mode & FC_RX) in sky2_phy_init()
484 if (hw->flags & SKY2_HW_GIGABIT) in sky2_phy_init()
494 switch (hw->chip_id) { in sky2_phy_init()
517 /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */ in sky2_phy_init()
574 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ in sky2_phy_init()
581 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) { in sky2_phy_init()
585 /* increase differential signal amplitude in 10BASE-T */ in sky2_phy_init()
589 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { in sky2_phy_init()
590 /* fix for IEEE A/B Symmetry failure in 1000BASE-T */ in sky2_phy_init()
597 } else if (hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_phy_init()
598 hw->chip_rev == CHIP_REV_YU_FE2_A0) { in sky2_phy_init()
602 } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { in sky2_phy_init()
606 /* apply RDAC termination workaround */ in sky2_phy_init()
612 } else if (hw->chip_id != CHIP_ID_YUKON_EX && in sky2_phy_init()
613 hw->chip_id < CHIP_ID_YUKON_SUPR) { in sky2_phy_init()
614 /* no effect on Yukon-XL */ in sky2_phy_init()
617 if (!(sky2->flags & SKY2_FLAG_AUTO_SPEED) || in sky2_phy_init()
618 sky2->speed == SPEED_100) { in sky2_phy_init()
626 } else if (hw->chip_id == CHIP_ID_YUKON_PRM && in sky2_phy_init()
677 /* Enable 10Base-Te (EEE) */ in sky2_phy_init()
678 if (hw->chip_id >= CHIP_ID_YUKON_PRM) { in sky2_phy_init()
685 /* Enable phy interrupt on auto-negotiation complete (or link up) */ in sky2_phy_init()
686 if (sky2->flags & SKY2_FLAG_AUTO_SPEED) in sky2_phy_init()
703 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1) in sky2_phy_power_up()
710 if (hw->chip_id == CHIP_ID_YUKON_FE) in sky2_phy_power_up()
712 else if (hw->flags & SKY2_HW_ADV_POWER_CTL) in sky2_phy_power_up()
727 if (hw->flags & SKY2_HW_NEWER_PHY) { in sky2_phy_power_down()
746 if (hw->chip_id != CHIP_ID_YUKON_EC) { in sky2_phy_power_down()
747 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { in sky2_phy_power_down()
776 reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE); in sky2_set_ipg()
778 if (sky2->speed > SPEED_100) in sky2_set_ipg()
782 gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg); in sky2_set_ipg()
785 /* Enable Rx/Tx */
788 struct sky2_hw *hw = sky2->hw; in sky2_enable_rx_tx()
789 unsigned port = sky2->port; in sky2_enable_rx_tx()
800 spin_lock_bh(&sky2->phy_lock); in sky2_phy_reinit()
801 sky2_phy_init(sky2->hw, sky2->port); in sky2_phy_reinit()
803 spin_unlock_bh(&sky2->phy_lock); in sky2_phy_reinit()
809 struct sky2_hw *hw = sky2->hw; in sky2_wol_init()
810 unsigned port = sky2->port; in sky2_wol_init()
822 * sky2_reset will re-enable on resume in sky2_wol_init()
824 save_mode = sky2->flow_mode; in sky2_wol_init()
825 ctrl = sky2->advertising; in sky2_wol_init()
827 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); in sky2_wol_init()
828 sky2->flow_mode = FC_NONE; in sky2_wol_init()
830 spin_lock_bh(&sky2->phy_lock); in sky2_wol_init()
833 spin_unlock_bh(&sky2->phy_lock); in sky2_wol_init()
835 sky2->flow_mode = save_mode; in sky2_wol_init()
836 sky2->advertising = ctrl; in sky2_wol_init()
844 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), in sky2_wol_init()
845 sky2->netdev->dev_addr, ETH_ALEN); in sky2_wol_init()
850 if (sky2->wol & WAKE_PHY) in sky2_wol_init()
855 if (sky2->wol & WAKE_MAGIC) in sky2_wol_init()
866 /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ in sky2_wol_init()
880 struct net_device *dev = hw->dev[port]; in sky2_set_tx_stfwd()
882 if ( (hw->chip_id == CHIP_ID_YUKON_EX && in sky2_set_tx_stfwd()
883 hw->chip_rev != CHIP_REV_YU_EX_A0) || in sky2_set_tx_stfwd()
884 hw->chip_id >= CHIP_ID_YUKON_FE_P) { in sky2_set_tx_stfwd()
885 /* Yukon-Extreme B0 and further Extreme devices */ in sky2_set_tx_stfwd()
887 } else if (dev->mtu > ETH_DATA_LEN) { in sky2_set_tx_stfwd()
888 /* set Tx GMAC FIFO Almost Empty Threshold */ in sky2_set_tx_stfwd()
899 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); in sky2_mac_init()
903 const u8 *addr = hw->dev[port]->dev_addr; in sky2_mac_init()
910 if (hw->chip_id == CHIP_ID_YUKON_XL && in sky2_mac_init()
911 hw->chip_rev == CHIP_REV_YU_XL_A0 && in sky2_mac_init()
913 /* WA DEV_472 -- looks like crossed wires on port 2 */ in sky2_mac_init()
929 spin_lock_bh(&sky2->phy_lock); in sky2_mac_init()
932 spin_unlock_bh(&sky2->phy_lock); in sky2_mac_init()
963 if (hw->dev[port]->mtu > ETH_DATA_LEN) in sky2_mac_init()
966 if (hw->chip_id == CHIP_ID_YUKON_EC_U && in sky2_mac_init()
967 hw->chip_rev == CHIP_REV_YU_EC_U_B1) in sky2_mac_init()
986 if (hw->chip_id == CHIP_ID_YUKON_EX || in sky2_mac_init()
987 hw->chip_id == CHIP_ID_YUKON_FE_P) in sky2_mac_init()
992 if (hw->chip_id == CHIP_ID_YUKON_XL) { in sky2_mac_init()
993 /* Hardware errata - clear flush mask */ in sky2_mac_init()
1003 if (hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_mac_init()
1004 hw->chip_rev == CHIP_REV_YU_FE2_A0) in sky2_mac_init()
1008 /* Configure Tx MAC FIFO */ in sky2_mac_init()
1013 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { in sky2_mac_init()
1015 if (hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_mac_init()
1016 hw->chip_rev == CHIP_REV_YU_FE2_A0) in sky2_mac_init()
1026 if (hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_mac_init()
1027 hw->chip_rev == CHIP_REV_YU_FE2_A0) { in sky2_mac_init()
1043 end = start + space - 1; in sky2_ramset()
1052 u32 tp = space - space/4; in sky2_ramset()
1061 tp = space - 8192/8; in sky2_ramset()
1065 /* Enable store & forward on Tx queues because in sky2_ramset()
1066 * Tx FIFO is only 1K on Yukon in sky2_ramset()
1102 struct sky2_tx_le *le = sky2->tx_le + *slot; in get_tx_le()
1104 *slot = RING_NEXT(*slot, sky2->tx_ring_size); in get_tx_le()
1105 le->ctrl = 0; in get_tx_le()
1113 sky2->tx_prod = sky2->tx_cons = 0; in tx_init()
1114 sky2->tx_tcpsum = 0; in tx_init()
1115 sky2->tx_last_mss = 0; in tx_init()
1116 netdev_reset_queue(sky2->netdev); in tx_init()
1118 le = get_tx_le(sky2, &sky2->tx_prod); in tx_init()
1119 le->addr = 0; in tx_init()
1120 le->opcode = OP_ADDR64 | HW_OWNER; in tx_init()
1121 sky2->tx_last_upper = 0; in tx_init()
1135 struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put; in sky2_next_rx()
1136 sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE); in sky2_next_rx()
1137 le->ctrl = 0; in sky2_next_rx()
1146 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); in sky2_get_rx_threshold()
1149 return (size - 8) / sizeof(u32); in sky2_get_rx_threshold()
1158 size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8); in sky2_get_rx_data_size()
1160 sky2->rx_nfrags = size >> PAGE_SHIFT; in sky2_get_rx_data_size()
1161 BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr)); in sky2_get_rx_data_size()
1164 size -= sky2->rx_nfrags << PAGE_SHIFT; in sky2_get_rx_data_size()
1183 le->addr = cpu_to_le32(upper_32_bits(map)); in sky2_rx_add()
1184 le->opcode = OP_ADDR64 | HW_OWNER; in sky2_rx_add()
1188 le->addr = cpu_to_le32(lower_32_bits(map)); in sky2_rx_add()
1189 le->length = cpu_to_le16(len); in sky2_rx_add()
1190 le->opcode = op | HW_OWNER; in sky2_rx_add()
1199 sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size); in sky2_rx_submit()
1201 for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++) in sky2_rx_submit()
1202 sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE); in sky2_rx_submit()
1209 struct sk_buff *skb = re->skb; in sky2_rx_map_skb()
1212 re->data_addr = dma_map_single(&pdev->dev, skb->data, size, in sky2_rx_map_skb()
1214 if (dma_mapping_error(&pdev->dev, re->data_addr)) in sky2_rx_map_skb()
1219 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in sky2_rx_map_skb()
1220 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in sky2_rx_map_skb()
1222 re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, in sky2_rx_map_skb()
1226 if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) in sky2_rx_map_skb()
1232 while (--i >= 0) { in sky2_rx_map_skb()
1233 dma_unmap_page(&pdev->dev, re->frag_addr[i], in sky2_rx_map_skb()
1234 skb_frag_size(&skb_shinfo(skb)->frags[i]), in sky2_rx_map_skb()
1238 dma_unmap_single(&pdev->dev, re->data_addr, in sky2_rx_map_skb()
1243 dev_warn(&pdev->dev, "%s: rx mapping error\n", in sky2_rx_map_skb()
1244 skb->dev->name); in sky2_rx_map_skb()
1245 return -EIO; in sky2_rx_map_skb()
1250 struct sk_buff *skb = re->skb; in sky2_rx_unmap_skb()
1253 dma_unmap_single(&pdev->dev, re->data_addr, in sky2_rx_unmap_skb()
1256 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in sky2_rx_unmap_skb()
1257 dma_unmap_page(&pdev->dev, re->frag_addr[i], in sky2_rx_unmap_skb()
1258 skb_frag_size(&skb_shinfo(skb)->frags[i]), in sky2_rx_unmap_skb()
1270 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); in rx_set_checksum()
1271 le->ctrl = 0; in rx_set_checksum()
1272 le->opcode = OP_TCPSTART | HW_OWNER; in rx_set_checksum()
1274 sky2_write32(sky2->hw, in rx_set_checksum()
1275 Q_ADDR(rxqaddr[sky2->port], Q_CSR), in rx_set_checksum()
1276 (sky2->netdev->features & NETIF_F_RXCSUM) in rx_set_checksum()
1284 struct sky2_hw *hw = sky2->hw; in rx_set_rss()
1288 if (hw->flags & SKY2_HW_NEW_LE) { in rx_set_rss()
1290 sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL); in rx_set_rss()
1299 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4), in rx_set_rss()
1303 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), in rx_set_rss()
1306 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), in rx_set_rss()
1309 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), in rx_set_rss()
1314 * The RX Stop command will not work for Yukon-2 if the BMU does not
1325 struct sky2_hw *hw = sky2->hw; in sky2_rx_stop()
1326 unsigned rxq = rxqaddr[sky2->port]; in sky2_rx_stop()
1337 netdev_warn(sky2->netdev, "receiver stop failed\n"); in sky2_rx_stop()
1350 if (sky2->rx_le) in sky2_rx_clean()
1351 memset(sky2->rx_le, 0, RX_LE_BYTES); in sky2_rx_clean()
1353 for (i = 0; i < sky2->rx_pending; i++) { in sky2_rx_clean()
1354 struct rx_ring_info *re = sky2->rx_ring + i; in sky2_rx_clean()
1356 if (re->skb) { in sky2_rx_clean()
1357 sky2_rx_unmap_skb(sky2->hw->pdev, re); in sky2_rx_clean()
1358 kfree_skb(re->skb); in sky2_rx_clean()
1359 re->skb = NULL; in sky2_rx_clean()
1369 struct sky2_hw *hw = sky2->hw; in sky2_ioctl()
1370 int err = -EOPNOTSUPP; in sky2_ioctl()
1373 return -ENODEV; /* Phy still in reset */ in sky2_ioctl()
1377 data->phy_id = PHY_ADDR_MARV; in sky2_ioctl()
1383 spin_lock_bh(&sky2->phy_lock); in sky2_ioctl()
1384 err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); in sky2_ioctl()
1385 spin_unlock_bh(&sky2->phy_lock); in sky2_ioctl()
1387 data->val_out = val; in sky2_ioctl()
1392 spin_lock_bh(&sky2->phy_lock); in sky2_ioctl()
1393 err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, in sky2_ioctl()
1394 data->val_in); in sky2_ioctl()
1395 spin_unlock_bh(&sky2->phy_lock); in sky2_ioctl()
1406 struct sky2_hw *hw = sky2->hw; in sky2_vlan_mode()
1407 u16 port = sky2->port; in sky2_vlan_mode()
1420 dev->vlan_features |= SKY2_VLAN_OFFLOADS; in sky2_vlan_mode()
1426 dev->vlan_features &= ~SKY2_VLAN_OFFLOADS; in sky2_vlan_mode()
1433 return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2; in sky2_rx_pad()
1438 * make the skb non-linear with a fragment list of pages.
1445 skb = __netdev_alloc_skb(sky2->netdev, in sky2_rx_alloc()
1446 sky2->rx_data_size + sky2_rx_pad(sky2->hw), in sky2_rx_alloc()
1451 if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) { in sky2_rx_alloc()
1459 start = PTR_ALIGN(skb->data, 8); in sky2_rx_alloc()
1460 skb_reserve(skb, start - skb->data); in sky2_rx_alloc()
1464 for (i = 0; i < sky2->rx_nfrags; i++) { in sky2_rx_alloc()
1481 sky2_put_idx(sky2->hw, rxq, sky2->rx_put); in sky2_rx_update()
1486 struct sky2_hw *hw = sky2->hw; in sky2_alloc_rx_skbs()
1489 sky2->rx_data_size = sky2_get_rx_data_size(sky2); in sky2_alloc_rx_skbs()
1492 for (i = 0; i < sky2->rx_pending; i++) { in sky2_alloc_rx_skbs()
1493 struct rx_ring_info *re = sky2->rx_ring + i; in sky2_alloc_rx_skbs()
1495 re->skb = sky2_rx_alloc(sky2, GFP_KERNEL); in sky2_alloc_rx_skbs()
1496 if (!re->skb) in sky2_alloc_rx_skbs()
1497 return -ENOMEM; in sky2_alloc_rx_skbs()
1499 if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) { in sky2_alloc_rx_skbs()
1500 dev_kfree_skb(re->skb); in sky2_alloc_rx_skbs()
1501 re->skb = NULL; in sky2_alloc_rx_skbs()
1502 return -ENOMEM; in sky2_alloc_rx_skbs()
1519 struct sky2_hw *hw = sky2->hw; in sky2_rx_start()
1521 unsigned rxq = rxqaddr[sky2->port]; in sky2_rx_start()
1524 sky2->rx_put = sky2->rx_next = 0; in sky2_rx_start()
1528 if (pci_is_pcie(hw->pdev)) in sky2_rx_start()
1534 if (hw->chip_id == CHIP_ID_YUKON_EC_U && in sky2_rx_start()
1535 hw->chip_rev > CHIP_REV_YU_EC_U_A0) in sky2_rx_start()
1538 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); in sky2_rx_start()
1540 if (!(hw->flags & SKY2_HW_NEW_LE)) in sky2_rx_start()
1543 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) in sky2_rx_start()
1544 rx_set_rss(sky2->netdev, sky2->netdev->features); in sky2_rx_start()
1547 for (i = 0; i < sky2->rx_pending; i++) { in sky2_rx_start()
1548 re = sky2->rx_ring + i; in sky2_rx_start()
1560 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF); in sky2_rx_start()
1562 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh); in sky2_rx_start()
1563 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON); in sky2_rx_start()
1569 if (hw->chip_id == CHIP_ID_YUKON_EX || in sky2_rx_start()
1570 hw->chip_id == CHIP_ID_YUKON_SUPR) { in sky2_rx_start()
1578 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF); in sky2_rx_start()
1581 if (hw->chip_id >= CHIP_ID_YUKON_SUPR) { in sky2_rx_start()
1582 /* Enable RX Home Address & Routing Header checksum fix */ in sky2_rx_start()
1583 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL), in sky2_rx_start()
1586 /* Enable TX Home Address & Routing Header checksum fix */ in sky2_rx_start()
1587 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST), in sky2_rx_start()
1594 struct sky2_hw *hw = sky2->hw; in sky2_alloc_buffers()
1597 sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev, in sky2_alloc_buffers()
1598 sky2->tx_ring_size * sizeof(struct sky2_tx_le), in sky2_alloc_buffers()
1599 &sky2->tx_le_map, GFP_KERNEL); in sky2_alloc_buffers()
1600 if (!sky2->tx_le) in sky2_alloc_buffers()
1603 sky2->tx_ring = kcalloc(sky2->tx_ring_size, sizeof(struct tx_ring_info), in sky2_alloc_buffers()
1605 if (!sky2->tx_ring) in sky2_alloc_buffers()
1608 sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES, in sky2_alloc_buffers()
1609 &sky2->rx_le_map, GFP_KERNEL); in sky2_alloc_buffers()
1610 if (!sky2->rx_le) in sky2_alloc_buffers()
1613 sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info), in sky2_alloc_buffers()
1615 if (!sky2->rx_ring) in sky2_alloc_buffers()
1620 return -ENOMEM; in sky2_alloc_buffers()
1625 struct sky2_hw *hw = sky2->hw; in sky2_free_buffers()
1629 if (sky2->rx_le) { in sky2_free_buffers()
1630 dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le, in sky2_free_buffers()
1631 sky2->rx_le_map); in sky2_free_buffers()
1632 sky2->rx_le = NULL; in sky2_free_buffers()
1634 if (sky2->tx_le) { in sky2_free_buffers()
1635 dma_free_coherent(&hw->pdev->dev, in sky2_free_buffers()
1636 sky2->tx_ring_size * sizeof(struct sky2_tx_le), in sky2_free_buffers()
1637 sky2->tx_le, sky2->tx_le_map); in sky2_free_buffers()
1638 sky2->tx_le = NULL; in sky2_free_buffers()
1640 kfree(sky2->tx_ring); in sky2_free_buffers()
1641 kfree(sky2->rx_ring); in sky2_free_buffers()
1643 sky2->tx_ring = NULL; in sky2_free_buffers()
1644 sky2->rx_ring = NULL; in sky2_free_buffers()
1649 struct sky2_hw *hw = sky2->hw; in sky2_hw_up()
1650 unsigned port = sky2->port; in sky2_hw_up()
1653 struct net_device *otherdev = hw->dev[sky2->port^1]; in sky2_hw_up()
1658 * On dual port PCI-X card, there is a problem where status in sky2_hw_up()
1662 (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { in sky2_hw_up()
1677 netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize); in sky2_hw_up()
1681 rxspace = 8 + (2*(ramsize - 16))/3; in sky2_hw_up()
1684 sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); in sky2_hw_up()
1694 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0) in sky2_hw_up()
1698 if (hw->chip_id == CHIP_ID_YUKON_EC_U && in sky2_hw_up()
1699 hw->chip_rev == CHIP_REV_YU_EC_U_A0) in sky2_hw_up()
1702 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, in sky2_hw_up()
1703 sky2->tx_ring_size - 1); in sky2_hw_up()
1705 sky2_vlan_mode(sky2->netdev, sky2->netdev->features); in sky2_hw_up()
1706 netdev_update_features(sky2->netdev); in sky2_hw_up()
1714 struct pci_dev *pdev = hw->pdev; in sky2_setup_irq()
1717 err = request_irq(pdev->irq, sky2_intr, in sky2_setup_irq()
1718 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, in sky2_setup_irq()
1721 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); in sky2_setup_irq()
1723 hw->flags |= SKY2_HW_IRQ_SETUP; in sky2_setup_irq()
1725 napi_enable(&hw->napi); in sky2_setup_irq()
1738 struct sky2_hw *hw = sky2->hw; in sky2_open()
1739 unsigned port = sky2->port; in sky2_open()
1750 if (hw->ports == 1 && (err = sky2_setup_irq(hw, dev->name))) in sky2_open()
1758 if (hw->chip_id == CHIP_ID_YUKON_OPT || in sky2_open()
1759 hw->chip_id == CHIP_ID_YUKON_PRM || in sky2_open()
1760 hw->chip_id == CHIP_ID_YUKON_OP_2) in sky2_open()
1779 return (sky2->tx_prod - sky2->tx_cons) & (sky2->tx_ring_size - 1); in tx_inuse()
1782 /* Number of list elements available for next tx */
1785 return sky2->tx_pending - tx_inuse(sky2); in tx_avail()
1793 count = (skb_shinfo(skb)->nr_frags + 1) in tx_le_req()
1801 if (skb->ip_summed == CHECKSUM_PARTIAL) in tx_le_req()
1809 if (re->flags & TX_MAP_SINGLE) in sky2_tx_unmap()
1810 dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr), in sky2_tx_unmap()
1812 else if (re->flags & TX_MAP_PAGE) in sky2_tx_unmap()
1813 dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr), in sky2_tx_unmap()
1815 re->flags = 0; in sky2_tx_unmap()
1828 struct sky2_hw *hw = sky2->hw; in sky2_xmit_frame()
1842 mapping = dma_map_single(&hw->pdev->dev, skb->data, len, in sky2_xmit_frame()
1845 if (dma_mapping_error(&hw->pdev->dev, mapping)) in sky2_xmit_frame()
1848 slot = sky2->tx_prod; in sky2_xmit_frame()
1850 "tx queued, slot %u, len %d\n", slot, skb->len); in sky2_xmit_frame()
1854 if (upper != sky2->tx_last_upper) { in sky2_xmit_frame()
1856 le->addr = cpu_to_le32(upper); in sky2_xmit_frame()
1857 sky2->tx_last_upper = upper; in sky2_xmit_frame()
1858 le->opcode = OP_ADDR64 | HW_OWNER; in sky2_xmit_frame()
1862 mss = skb_shinfo(skb)->gso_size; in sky2_xmit_frame()
1865 if (!(hw->flags & SKY2_HW_NEW_LE)) in sky2_xmit_frame()
1868 if (mss != sky2->tx_last_mss) { in sky2_xmit_frame()
1870 le->addr = cpu_to_le32(mss); in sky2_xmit_frame()
1872 if (hw->flags & SKY2_HW_NEW_LE) in sky2_xmit_frame()
1873 le->opcode = OP_MSS | HW_OWNER; in sky2_xmit_frame()
1875 le->opcode = OP_LRGLEN | HW_OWNER; in sky2_xmit_frame()
1876 sky2->tx_last_mss = mss; in sky2_xmit_frame()
1886 le->addr = 0; in sky2_xmit_frame()
1887 le->opcode = OP_VLAN|HW_OWNER; in sky2_xmit_frame()
1889 le->opcode |= OP_VLAN; in sky2_xmit_frame()
1890 le->length = cpu_to_be16(skb_vlan_tag_get(skb)); in sky2_xmit_frame()
1895 if (skb->ip_summed == CHECKSUM_PARTIAL) { in sky2_xmit_frame()
1897 if (hw->flags & SKY2_HW_AUTO_TX_SUM) in sky2_xmit_frame()
1904 tcpsum |= offset + skb->csum_offset; /* sum write */ in sky2_xmit_frame()
1907 if (ip_hdr(skb)->protocol == IPPROTO_UDP) in sky2_xmit_frame()
1910 if (tcpsum != sky2->tx_tcpsum) { in sky2_xmit_frame()
1911 sky2->tx_tcpsum = tcpsum; in sky2_xmit_frame()
1914 le->addr = cpu_to_le32(tcpsum); in sky2_xmit_frame()
1915 le->length = 0; /* initial checksum value */ in sky2_xmit_frame()
1916 le->ctrl = 1; /* one packet */ in sky2_xmit_frame()
1917 le->opcode = OP_TCPLISW | HW_OWNER; in sky2_xmit_frame()
1922 re = sky2->tx_ring + slot; in sky2_xmit_frame()
1923 re->flags = TX_MAP_SINGLE; in sky2_xmit_frame()
1928 le->addr = cpu_to_le32(lower_32_bits(mapping)); in sky2_xmit_frame()
1929 le->length = cpu_to_le16(len); in sky2_xmit_frame()
1930 le->ctrl = ctrl; in sky2_xmit_frame()
1931 le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER); in sky2_xmit_frame()
1934 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in sky2_xmit_frame()
1935 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in sky2_xmit_frame()
1937 mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, in sky2_xmit_frame()
1940 if (dma_mapping_error(&hw->pdev->dev, mapping)) in sky2_xmit_frame()
1944 if (upper != sky2->tx_last_upper) { in sky2_xmit_frame()
1946 le->addr = cpu_to_le32(upper); in sky2_xmit_frame()
1947 sky2->tx_last_upper = upper; in sky2_xmit_frame()
1948 le->opcode = OP_ADDR64 | HW_OWNER; in sky2_xmit_frame()
1951 re = sky2->tx_ring + slot; in sky2_xmit_frame()
1952 re->flags = TX_MAP_PAGE; in sky2_xmit_frame()
1957 le->addr = cpu_to_le32(lower_32_bits(mapping)); in sky2_xmit_frame()
1958 le->length = cpu_to_le16(skb_frag_size(frag)); in sky2_xmit_frame()
1959 le->ctrl = ctrl; in sky2_xmit_frame()
1960 le->opcode = OP_BUFFER | HW_OWNER; in sky2_xmit_frame()
1963 re->skb = skb; in sky2_xmit_frame()
1964 le->ctrl |= EOP; in sky2_xmit_frame()
1966 sky2->tx_prod = slot; in sky2_xmit_frame()
1971 netdev_sent_queue(dev, skb->len); in sky2_xmit_frame()
1972 sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); in sky2_xmit_frame()
1977 for (i = sky2->tx_prod; i != slot; i = RING_NEXT(i, sky2->tx_ring_size)) { in sky2_xmit_frame()
1978 re = sky2->tx_ring + i; in sky2_xmit_frame()
1980 sky2_tx_unmap(hw->pdev, re); in sky2_xmit_frame()
1985 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); in sky2_xmit_frame()
1994 * 1. The hardware will tell us about partial completion of multi-part
2002 struct net_device *dev = sky2->netdev; in sky2_tx_complete()
2006 BUG_ON(done >= sky2->tx_ring_size); in sky2_tx_complete()
2008 for (idx = sky2->tx_cons; idx != done; in sky2_tx_complete()
2009 idx = RING_NEXT(idx, sky2->tx_ring_size)) { in sky2_tx_complete()
2010 struct tx_ring_info *re = sky2->tx_ring + idx; in sky2_tx_complete()
2011 struct sk_buff *skb = re->skb; in sky2_tx_complete()
2013 sky2_tx_unmap(sky2->hw->pdev, re); in sky2_tx_complete()
2017 "tx done %u\n", idx); in sky2_tx_complete()
2020 bytes_compl += skb->len; in sky2_tx_complete()
2022 re->skb = NULL; in sky2_tx_complete()
2025 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); in sky2_tx_complete()
2029 sky2->tx_cons = idx; in sky2_tx_complete()
2034 u64_stats_update_begin(&sky2->tx_stats.syncp); in sky2_tx_complete()
2035 sky2->tx_stats.packets += pkts_compl; in sky2_tx_complete()
2036 sky2->tx_stats.bytes += bytes_compl; in sky2_tx_complete()
2037 u64_stats_update_end(&sky2->tx_stats.syncp); in sky2_tx_complete()
2046 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ in sky2_tx_reset()
2050 /* Reset the PCI FIFO of the async Tx queue */ in sky2_tx_reset()
2054 /* Reset the Tx prefetch units */ in sky2_tx_reset()
2066 struct sky2_hw *hw = sky2->hw; in sky2_hw_down()
2067 unsigned port = sky2->port; in sky2_hw_down()
2087 if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && in sky2_hw_down()
2088 port == 0 && hw->dev[1] && netif_running(hw->dev[1]))) in sky2_hw_down()
2101 spin_lock_bh(&sky2->phy_lock); in sky2_hw_down()
2103 spin_unlock_bh(&sky2->phy_lock); in sky2_hw_down()
2108 sky2_tx_complete(sky2, sky2->tx_prod); in sky2_hw_down()
2115 struct sky2_hw *hw = sky2->hw; in sky2_close()
2118 if (!sky2->tx_le) in sky2_close()
2123 if (hw->ports == 1) { in sky2_close()
2127 napi_disable(&hw->napi); in sky2_close()
2128 free_irq(hw->pdev->irq, hw); in sky2_close()
2129 hw->flags &= ~SKY2_HW_IRQ_SETUP; in sky2_close()
2135 imask &= ~portirq_msk[sky2->port]; in sky2_close()
2139 synchronize_irq(hw->pdev->irq); in sky2_close()
2140 napi_synchronize(&hw->napi); in sky2_close()
2152 if (hw->flags & SKY2_HW_FIBRE_PHY) in sky2_phy_speed()
2155 if (!(hw->flags & SKY2_HW_GIGABIT)) { in sky2_phy_speed()
2174 struct sky2_hw *hw = sky2->hw; in sky2_link_up()
2175 unsigned port = sky2->port; in sky2_link_up()
2178 [FC_TX] = "tx", in sky2_link_up()
2189 netif_carrier_on(sky2->netdev); in sky2_link_up()
2191 mod_timer(&hw->watchdog_timer, jiffies + 1); in sky2_link_up()
2197 netif_info(sky2, link, sky2->netdev, in sky2_link_up()
2199 sky2->speed, in sky2_link_up()
2200 sky2->duplex == DUPLEX_FULL ? "full" : "half", in sky2_link_up()
2201 fc_name[sky2->flow_status]); in sky2_link_up()
2206 struct sky2_hw *hw = sky2->hw; in sky2_link_down()
2207 unsigned port = sky2->port; in sky2_link_down()
2216 netif_carrier_off(sky2->netdev); in sky2_link_down()
2221 netif_info(sky2, link, sky2->netdev, "Link is down\n"); in sky2_link_down()
2226 static enum flow_control sky2_flow(int rx, int tx) in sky2_flow() argument
2229 return tx ? FC_BOTH : FC_RX; in sky2_flow()
2231 return tx ? FC_TX : FC_NONE; in sky2_flow()
2236 struct sky2_hw *hw = sky2->hw; in sky2_autoneg_done()
2237 unsigned port = sky2->port; in sky2_autoneg_done()
2243 netdev_err(sky2->netdev, "remote fault\n"); in sky2_autoneg_done()
2244 return -1; in sky2_autoneg_done()
2248 netdev_err(sky2->netdev, "speed/duplex mismatch\n"); in sky2_autoneg_done()
2249 return -1; in sky2_autoneg_done()
2252 sky2->speed = sky2_phy_speed(hw, aux); in sky2_autoneg_done()
2253 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; in sky2_autoneg_done()
2258 if (hw->flags & SKY2_HW_FIBRE_PHY) { in sky2_autoneg_done()
2273 sky2->flow_status = FC_NONE; in sky2_autoneg_done()
2276 sky2->flow_status = FC_BOTH; in sky2_autoneg_done()
2278 sky2->flow_status = FC_RX; in sky2_autoneg_done()
2281 sky2->flow_status = FC_TX; in sky2_autoneg_done()
2284 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 && in sky2_autoneg_done()
2285 !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)) in sky2_autoneg_done()
2286 sky2->flow_status = FC_NONE; in sky2_autoneg_done()
2288 if (sky2->flow_status & FC_TX) in sky2_autoneg_done()
2299 struct net_device *dev = hw->dev[port]; in sky2_phy_intr()
2306 spin_lock(&sky2->phy_lock); in sky2_phy_intr()
2310 netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n", in sky2_phy_intr()
2321 sky2->speed = sky2_phy_speed(hw, phystat); in sky2_phy_intr()
2324 sky2->duplex = in sky2_phy_intr()
2334 spin_unlock(&sky2->phy_lock); in sky2_phy_intr()
2337 /* Special quick link interrupt (Yukon-2 Optima only) */
2340 struct sky2_port *sky2 = netdev_priv(hw->dev[0]); in sky2_qlink_intr()
2359 * and tx queue is full (stopped).
2364 struct sky2_hw *hw = sky2->hw; in sky2_tx_timeout()
2366 netif_err(sky2, timer, dev, "tx timeout\n"); in sky2_tx_timeout()
2369 sky2->tx_cons, sky2->tx_prod, in sky2_tx_timeout()
2370 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX), in sky2_tx_timeout()
2371 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE))); in sky2_tx_timeout()
2374 schedule_work(&hw->restart_work); in sky2_tx_timeout()
2380 struct sky2_hw *hw = sky2->hw; in sky2_change_mtu()
2381 unsigned port = sky2->port; in sky2_change_mtu()
2387 WRITE_ONCE(dev->mtu, new_mtu); in sky2_change_mtu()
2396 netif_trans_update(dev); /* prevent tx timeout */ in sky2_change_mtu()
2397 napi_disable(&hw->napi); in sky2_change_mtu()
2400 synchronize_irq(hw->pdev->irq); in sky2_change_mtu()
2402 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) in sky2_change_mtu()
2410 WRITE_ONCE(dev->mtu, new_mtu); in sky2_change_mtu()
2414 if (sky2->speed > SPEED_100) in sky2_change_mtu()
2419 if (dev->mtu > ETH_DATA_LEN) in sky2_change_mtu()
2434 napi_enable(&hw->napi); in sky2_change_mtu()
2452 if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32))) in needs_copy()
2465 skb = netdev_alloc_skb_ip_align(sky2->netdev, length); in receive_copy()
2467 dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr, in receive_copy()
2469 skb_copy_from_linear_data(re->skb, skb->data, length); in receive_copy()
2470 skb->ip_summed = re->skb->ip_summed; in receive_copy()
2471 skb->csum = re->skb->csum; in receive_copy()
2472 skb_copy_hash(skb, re->skb); in receive_copy()
2473 __vlan_hwaccel_copy_tag(skb, re->skb); in receive_copy()
2475 dma_sync_single_for_device(&sky2->hw->pdev->dev, in receive_copy()
2476 re->data_addr, length, in receive_copy()
2478 __vlan_hwaccel_clear_tag(re->skb); in receive_copy()
2479 skb_clear_hash(re->skb); in receive_copy()
2480 re->skb->ip_summed = CHECKSUM_NONE; in receive_copy()
2495 skb->tail += size; in skb_put_frags()
2496 skb->len += size; in skb_put_frags()
2497 length -= size; in skb_put_frags()
2499 num_frags = skb_shinfo(skb)->nr_frags; in skb_put_frags()
2501 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_put_frags()
2506 --skb_shinfo(skb)->nr_frags; in skb_put_frags()
2511 skb->data_len += size; in skb_put_frags()
2512 skb->truesize += PAGE_SIZE; in skb_put_frags()
2513 skb->len += size; in skb_put_frags()
2514 length -= size; in skb_put_frags()
2519 /* Normal packet - take skb from ring element and put in a new one */
2526 unsigned hdr_space = sky2->rx_data_size; in receive_new()
2532 if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space)) in receive_new()
2535 skb = re->skb; in receive_new()
2536 sky2_rx_unmap_skb(sky2->hw->pdev, re); in receive_new()
2537 prefetch(skb->data); in receive_new()
2540 if (skb_shinfo(skb)->nr_frags) in receive_new()
2560 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; in sky2_receive()
2566 sky2->rx_next, status, length); in sky2_receive()
2568 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; in sky2_receive()
2569 prefetch(sky2->rx_ring + sky2->rx_next); in sky2_receive()
2571 if (skb_vlan_tag_present(re->skb)) in sky2_receive()
2572 count -= VLAN_HLEN; /* Account for vlan tag */ in sky2_receive()
2578 if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && in sky2_receive()
2579 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 && in sky2_receive()
2599 dev->stats.rx_dropped += (skb == NULL); in sky2_receive()
2607 ++dev->stats.rx_errors; in sky2_receive()
2633 if (skb->ip_summed == CHECKSUM_NONE) in sky2_skb_rx()
2636 napi_gro_receive(&sky2->hw->napi, skb); in sky2_skb_rx()
2642 struct net_device *dev = hw->dev[port]; in sky2_rx_done()
2648 u64_stats_update_begin(&sky2->rx_stats.syncp); in sky2_rx_done()
2649 sky2->rx_stats.packets += packets; in sky2_rx_done()
2650 sky2->rx_stats.bytes += bytes; in sky2_rx_done()
2651 u64_stats_update_end(&sky2->rx_stats.syncp); in sky2_rx_done()
2653 sky2->last_rx = jiffies; in sky2_rx_done()
2660 BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE); in sky2_rx_checksum()
2668 struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb; in sky2_rx_checksum()
2669 skb->ip_summed = CHECKSUM_COMPLETE; in sky2_rx_checksum()
2670 skb->csum = le16_to_cpu(status); in sky2_rx_checksum()
2672 dev_notice(&sky2->hw->pdev->dev, in sky2_rx_checksum()
2674 sky2->netdev->name, status); in sky2_rx_checksum()
2680 sky2->netdev->features &= ~NETIF_F_RXCSUM; in sky2_rx_checksum()
2681 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), in sky2_rx_checksum()
2690 skb = sky2->rx_ring[sky2->rx_next].skb; in sky2_rx_tag()
2698 skb = sky2->rx_ring[sky2->rx_next].skb; in sky2_rx_hash()
2715 struct sky2_status_le *le = hw->st_le + hw->st_idx; in sky2_status_intr()
2721 u8 opcode = le->opcode; in sky2_status_intr()
2726 hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size); in sky2_status_intr()
2728 port = le->css & CSS_LINK_BIT; in sky2_status_intr()
2729 dev = hw->dev[port]; in sky2_status_intr()
2731 length = le16_to_cpu(le->length); in sky2_status_intr()
2732 status = le32_to_cpu(le->status); in sky2_status_intr()
2734 le->opcode = 0; in sky2_status_intr()
2745 if (hw->flags & SKY2_HW_NEW_LE) { in sky2_status_intr()
2746 if ((dev->features & NETIF_F_RXCSUM) && in sky2_status_intr()
2747 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) && in sky2_status_intr()
2748 (le->css & CSS_TCPUDPCSOK)) in sky2_status_intr()
2749 skb->ip_summed = CHECKSUM_UNNECESSARY; in sky2_status_intr()
2751 skb->ip_summed = CHECKSUM_NONE; in sky2_status_intr()
2754 skb->protocol = eth_type_trans(skb, dev); in sky2_status_intr()
2770 if (likely(dev->features & NETIF_F_RXCSUM)) in sky2_status_intr()
2779 /* TX index reports status for both ports */ in sky2_status_intr()
2780 sky2_tx_done(hw->dev[0], status & 0xfff); in sky2_status_intr()
2781 if (hw->dev[1]) in sky2_status_intr()
2782 sky2_tx_done(hw->dev[1], in sky2_status_intr()
2791 } while (hw->st_idx != idx); in sky2_status_intr()
2805 struct net_device *dev = hw->dev[port]; in sky2_hw_error()
2845 struct pci_dev *pdev = hw->pdev; in sky2_hw_intr()
2860 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", in sky2_hw_intr()
2869 /* PCI-Express uncorrectable Error occurred */ in sky2_hw_intr()
2877 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); in sky2_hw_intr()
2892 struct net_device *dev = hw->dev[port]; in sky2_mac_intr()
2905 ++dev->stats.rx_fifo_errors; in sky2_mac_intr()
2910 ++dev->stats.tx_fifo_errors; in sky2_mac_intr()
2918 struct net_device *dev = hw->dev[port]; in sky2_le_error()
2921 dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n", in sky2_le_error()
2922 dev->name, (unsigned) q, (unsigned) idx, in sky2_le_error()
2931 struct sky2_hw *hw = sky2->hw; in sky2_rx_hung()
2932 unsigned port = sky2->port; in sky2_rx_hung()
2940 if (sky2->check.last == sky2->last_rx && in sky2_rx_hung()
2941 ((mac_rp == sky2->check.mac_rp && in sky2_rx_hung()
2942 mac_lev != 0 && mac_lev >= sky2->check.mac_lev) || in sky2_rx_hung()
2944 (fifo_rp == sky2->check.fifo_rp && in sky2_rx_hung()
2945 fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) { in sky2_rx_hung()
2952 sky2->check.last = sky2->last_rx; in sky2_rx_hung()
2953 sky2->check.mac_rp = mac_rp; in sky2_rx_hung()
2954 sky2->check.mac_lev = mac_lev; in sky2_rx_hung()
2955 sky2->check.fifo_rp = fifo_rp; in sky2_rx_hung()
2956 sky2->check.fifo_lev = fifo_lev; in sky2_rx_hung()
2967 napi_schedule(&hw->napi); in sky2_watchdog()
2971 for (i = 0; i < hw->ports; i++) { in sky2_watchdog()
2972 struct net_device *dev = hw->dev[i]; in sky2_watchdog()
2978 if ((hw->flags & SKY2_HW_RAM_BUFFER) && in sky2_watchdog()
2981 schedule_work(&hw->restart_work); in sky2_watchdog()
2990 mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); in sky2_watchdog()
2997 dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status); in sky2_err_intr()
3040 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { in sky2_poll()
3041 work_done += sky2_status_intr(hw, work_limit - work_done, idx); in sky2_poll()
3066 prefetch(&hw->st_le[hw->st_idx]); in sky2_intr()
3068 napi_schedule(&hw->napi); in sky2_intr()
3078 napi_schedule(&sky2->hw->napi); in sky2_netpoll()
3085 switch (hw->chip_id) { in sky2_mhz()
3130 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); in sky2_init()
3131 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; in sky2_init()
3133 switch (hw->chip_id) { in sky2_init()
3135 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; in sky2_init()
3136 if (hw->chip_rev < CHIP_REV_YU_XL_A2) in sky2_init()
3137 hw->flags |= SKY2_HW_RSS_BROKEN; in sky2_init()
3141 hw->flags = SKY2_HW_GIGABIT in sky2_init()
3147 hw->flags = SKY2_HW_GIGABIT in sky2_init()
3154 if (hw->chip_rev != CHIP_REV_YU_EX_B0) in sky2_init()
3155 hw->flags |= SKY2_HW_AUTO_TX_SUM; in sky2_init()
3160 if (hw->chip_rev == CHIP_REV_YU_EC_A1) { in sky2_init()
3161 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); in sky2_init()
3162 return -EOPNOTSUPP; in sky2_init()
3164 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN; in sky2_init()
3168 hw->flags = SKY2_HW_RSS_BROKEN; in sky2_init()
3172 hw->flags = SKY2_HW_NEWER_PHY in sky2_init()
3178 if (hw->chip_rev == CHIP_REV_YU_FE2_A0) in sky2_init()
3179 hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM; in sky2_init()
3183 hw->flags = SKY2_HW_GIGABIT in sky2_init()
3189 if (hw->chip_rev == CHIP_REV_YU_SU_A0) in sky2_init()
3190 hw->flags |= SKY2_HW_RSS_CHKSUM; in sky2_init()
3194 hw->flags = SKY2_HW_GIGABIT in sky2_init()
3201 hw->flags = SKY2_HW_GIGABIT in sky2_init()
3207 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", in sky2_init()
3208 hw->chip_id); in sky2_init()
3209 return -EOPNOTSUPP; in sky2_init()
3212 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); in sky2_init()
3213 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') in sky2_init()
3214 hw->flags |= SKY2_HW_FIBRE_PHY; in sky2_init()
3216 hw->ports = 1; in sky2_init()
3220 ++hw->ports; in sky2_init()
3224 hw->flags |= SKY2_HW_RAM_BUFFER; in sky2_init()
3231 struct pci_dev *pdev = hw->pdev; in sky2_reset()
3237 if (hw->chip_id == CHIP_ID_YUKON_EX in sky2_reset()
3238 || hw->chip_id == CHIP_ID_YUKON_SUPR) { in sky2_reset()
3245 * - ASF firmware may malfunction in sky2_reset()
3246 * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks in sky2_reset()
3275 dev_info(&pdev->dev, "ignoring stuck error report bit\n"); in sky2_reset()
3283 for (i = 0; i < hw->ports; i++) { in sky2_reset()
3287 if (hw->chip_id == CHIP_ID_YUKON_EX || in sky2_reset()
3288 hw->chip_id == CHIP_ID_YUKON_SUPR) in sky2_reset()
3295 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) { in sky2_reset()
3300 if (hw->chip_id == CHIP_ID_YUKON_OPT || in sky2_reset()
3301 hw->chip_id == CHIP_ID_YUKON_PRM || in sky2_reset()
3302 hw->chip_id == CHIP_ID_YUKON_OP_2) { in sky2_reset()
3305 if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) { in sky2_reset()
3306 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */ in sky2_reset()
3312 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ in sky2_reset()
3330 sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL, in sky2_reset()
3333 if (hw->chip_id == CHIP_ID_YUKON_PRM && in sky2_reset()
3334 hw->chip_rev == CHIP_REV_YU_PRM_A0) { in sky2_reset()
3346 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */ in sky2_reset()
3364 /* enable the Tx Arbiters */ in sky2_reset()
3365 for (i = 0; i < hw->ports; i++) in sky2_reset()
3369 for (i = 0; i < hw->ports; i++) { in sky2_reset()
3388 for (i = 0; i < hw->ports; i++) in sky2_reset()
3391 memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le)); in sky2_reset()
3392 hw->st_idx = 0; in sky2_reset()
3397 sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma); in sky2_reset()
3398 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); in sky2_reset()
3401 sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1); in sky2_reset()
3406 /* set Status-FIFO ISR watermark */ in sky2_reset()
3407 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) in sky2_reset()
3461 if (hw->flags & SKY2_HW_IRQ_SETUP) { in sky2_all_down()
3465 synchronize_irq(hw->pdev->irq); in sky2_all_down()
3466 napi_disable(&hw->napi); in sky2_all_down()
3469 for (i = 0; i < hw->ports; i++) { in sky2_all_down()
3470 struct net_device *dev = hw->dev[i]; in sky2_all_down()
3487 for (i = 0; i < hw->ports; i++) { in sky2_all_up()
3488 struct net_device *dev = hw->dev[i]; in sky2_all_up()
3500 if (hw->flags & SKY2_HW_IRQ_SETUP) { in sky2_all_up()
3504 napi_enable(&hw->napi); in sky2_all_up()
3530 wol->supported = sky2_wol_supported(sky2->hw); in sky2_get_wol()
3531 wol->wolopts = sky2->wol; in sky2_get_wol()
3537 struct sky2_hw *hw = sky2->hw; in sky2_set_wol()
3541 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || in sky2_set_wol()
3542 !device_can_wakeup(&hw->pdev->dev)) in sky2_set_wol()
3543 return -EOPNOTSUPP; in sky2_set_wol()
3545 sky2->wol = wol->wolopts; in sky2_set_wol()
3547 for (i = 0; i < hw->ports; i++) { in sky2_set_wol()
3548 struct net_device *dev = hw->dev[i]; in sky2_set_wol()
3551 if (sky2->wol) in sky2_set_wol()
3554 device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup); in sky2_set_wol()
3567 if (hw->flags & SKY2_HW_GIGABIT) in sky2_supported_modes()
3580 struct sky2_hw *hw = sky2->hw; in sky2_get_link_ksettings()
3584 cmd->base.phy_address = PHY_ADDR_MARV; in sky2_get_link_ksettings()
3586 cmd->base.port = PORT_TP; in sky2_get_link_ksettings()
3587 cmd->base.speed = sky2->speed; in sky2_get_link_ksettings()
3590 cmd->base.speed = SPEED_1000; in sky2_get_link_ksettings()
3591 cmd->base.port = PORT_FIBRE; in sky2_get_link_ksettings()
3595 advertising = sky2->advertising; in sky2_get_link_ksettings()
3596 cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED) in sky2_get_link_ksettings()
3598 cmd->base.duplex = sky2->duplex; in sky2_get_link_ksettings()
3600 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in sky2_get_link_ksettings()
3602 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in sky2_get_link_ksettings()
3612 const struct sky2_hw *hw = sky2->hw; in sky2_set_link_ksettings()
3617 cmd->link_modes.advertising); in sky2_set_link_ksettings()
3619 if (cmd->base.autoneg == AUTONEG_ENABLE) { in sky2_set_link_ksettings()
3621 return -EINVAL; in sky2_set_link_ksettings()
3624 sky2->advertising = new_advertising | in sky2_set_link_ksettings()
3628 sky2->advertising = new_advertising | in sky2_set_link_ksettings()
3632 sky2->flags |= SKY2_FLAG_AUTO_SPEED; in sky2_set_link_ksettings()
3633 sky2->duplex = -1; in sky2_set_link_ksettings()
3634 sky2->speed = -1; in sky2_set_link_ksettings()
3637 u32 speed = cmd->base.speed; in sky2_set_link_ksettings()
3641 if (cmd->base.duplex == DUPLEX_FULL) in sky2_set_link_ksettings()
3643 else if (cmd->base.duplex == DUPLEX_HALF) in sky2_set_link_ksettings()
3646 return -EINVAL; in sky2_set_link_ksettings()
3649 if (cmd->base.duplex == DUPLEX_FULL) in sky2_set_link_ksettings()
3651 else if (cmd->base.duplex == DUPLEX_HALF) in sky2_set_link_ksettings()
3654 return -EINVAL; in sky2_set_link_ksettings()
3658 if (cmd->base.duplex == DUPLEX_FULL) in sky2_set_link_ksettings()
3660 else if (cmd->base.duplex == DUPLEX_HALF) in sky2_set_link_ksettings()
3663 return -EINVAL; in sky2_set_link_ksettings()
3666 return -EINVAL; in sky2_set_link_ksettings()
3670 return -EINVAL; in sky2_set_link_ksettings()
3672 sky2->speed = speed; in sky2_set_link_ksettings()
3673 sky2->duplex = cmd->base.duplex; in sky2_set_link_ksettings()
3674 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; in sky2_set_link_ksettings()
3690 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in sky2_get_drvinfo()
3691 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in sky2_get_drvinfo()
3692 strscpy(info->bus_info, pci_name(sky2->hw->pdev), in sky2_get_drvinfo()
3693 sizeof(info->bus_info)); in sky2_get_drvinfo()
3743 return sky2->msg_enable; in sky2_get_msglevel()
3750 if (!netif_running(dev) || !(sky2->flags & SKY2_FLAG_AUTO_SPEED)) in sky2_nway_reset()
3751 return -EINVAL; in sky2_nway_reset()
3761 struct sky2_hw *hw = sky2->hw; in sky2_phy_stats()
3762 unsigned port = sky2->port; in sky2_phy_stats()
3775 sky2->msg_enable = value; in sky2_set_msglevel()
3784 return -EOPNOTSUPP; in sky2_get_sset_count()
3812 struct sky2_hw *hw = sky2->hw; in sky2_set_mac_address()
3813 unsigned port = sky2->port; in sky2_set_mac_address()
3816 if (!is_valid_ether_addr(addr->sa_data)) in sky2_set_mac_address()
3817 return -EADDRNOTAVAIL; in sky2_set_mac_address()
3819 eth_hw_addr_set(dev, addr->sa_data); in sky2_set_mac_address()
3820 memcpy_toio(hw->regs + B2_MAC_1 + port * 8, in sky2_set_mac_address()
3821 dev->dev_addr, ETH_ALEN); in sky2_set_mac_address()
3822 memcpy_toio(hw->regs + B2_MAC_2 + port * 8, in sky2_set_mac_address()
3823 dev->dev_addr, ETH_ALEN); in sky2_set_mac_address()
3826 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); in sky2_set_mac_address()
3829 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); in sky2_set_mac_address()
3845 struct sky2_hw *hw = sky2->hw; in sky2_set_multicast()
3846 unsigned port = sky2->port; in sky2_set_multicast()
3853 rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH); in sky2_set_multicast()
3859 if (dev->flags & IFF_PROMISC) /* promiscuous */ in sky2_set_multicast()
3861 else if (dev->flags & IFF_ALLMULTI) in sky2_set_multicast()
3872 sky2_add_filter(filter, ha->addr); in sky2_set_multicast()
3891 struct sky2_hw *hw = sky2->hw; in sky2_get_stats()
3892 unsigned port = sky2->port; in sky2_get_stats()
3897 start = u64_stats_fetch_begin(&sky2->rx_stats.syncp); in sky2_get_stats()
3898 _bytes = sky2->rx_stats.bytes; in sky2_get_stats()
3899 _packets = sky2->rx_stats.packets; in sky2_get_stats()
3900 } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start)); in sky2_get_stats()
3902 stats->rx_packets = _packets; in sky2_get_stats()
3903 stats->rx_bytes = _bytes; in sky2_get_stats()
3906 start = u64_stats_fetch_begin(&sky2->tx_stats.syncp); in sky2_get_stats()
3907 _bytes = sky2->tx_stats.bytes; in sky2_get_stats()
3908 _packets = sky2->tx_stats.packets; in sky2_get_stats()
3909 } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start)); in sky2_get_stats()
3911 stats->tx_packets = _packets; in sky2_get_stats()
3912 stats->tx_bytes = _bytes; in sky2_get_stats()
3914 stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK) in sky2_get_stats()
3917 stats->collisions = get_stats32(hw, port, GM_TXF_COL); in sky2_get_stats()
3919 stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR); in sky2_get_stats()
3920 stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR); in sky2_get_stats()
3921 stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT) in sky2_get_stats()
3923 stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV); in sky2_get_stats()
3925 stats->rx_dropped = dev->stats.rx_dropped; in sky2_get_stats()
3926 stats->rx_fifo_errors = dev->stats.rx_fifo_errors; in sky2_get_stats()
3927 stats->tx_fifo_errors = dev->stats.tx_fifo_errors; in sky2_get_stats()
3935 struct sky2_hw *hw = sky2->hw; in sky2_led()
3936 unsigned port = sky2->port; in sky2_led()
3938 spin_lock_bh(&sky2->phy_lock); in sky2_led()
3939 if (hw->chip_id == CHIP_ID_YUKON_EC_U || in sky2_led()
3940 hw->chip_id == CHIP_ID_YUKON_EX || in sky2_led()
3941 hw->chip_id == CHIP_ID_YUKON_SUPR) { in sky2_led()
3986 spin_unlock_bh(&sky2->phy_lock); in sky2_led()
4017 switch (sky2->flow_mode) { in sky2_get_pauseparam()
4019 ecmd->tx_pause = ecmd->rx_pause = 0; in sky2_get_pauseparam()
4022 ecmd->tx_pause = 1, ecmd->rx_pause = 0; in sky2_get_pauseparam()
4025 ecmd->tx_pause = 0, ecmd->rx_pause = 1; in sky2_get_pauseparam()
4028 ecmd->tx_pause = ecmd->rx_pause = 1; in sky2_get_pauseparam()
4031 ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_PAUSE) in sky2_get_pauseparam()
4040 if (ecmd->autoneg == AUTONEG_ENABLE) in sky2_set_pauseparam()
4041 sky2->flags |= SKY2_FLAG_AUTO_PAUSE; in sky2_set_pauseparam()
4043 sky2->flags &= ~SKY2_FLAG_AUTO_PAUSE; in sky2_set_pauseparam()
4045 sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause); in sky2_set_pauseparam()
4059 struct sky2_hw *hw = sky2->hw; in sky2_get_coalesce()
4062 ecmd->tx_coalesce_usecs = 0; in sky2_get_coalesce()
4065 ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks); in sky2_get_coalesce()
4067 ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH); in sky2_get_coalesce()
4070 ecmd->rx_coalesce_usecs = 0; in sky2_get_coalesce()
4073 ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks); in sky2_get_coalesce()
4075 ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM); in sky2_get_coalesce()
4078 ecmd->rx_coalesce_usecs_irq = 0; in sky2_get_coalesce()
4081 ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks); in sky2_get_coalesce()
4084 ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM); in sky2_get_coalesce()
4096 struct sky2_hw *hw = sky2->hw; in sky2_set_coalesce()
4099 if (ecmd->tx_coalesce_usecs > tmax || in sky2_set_coalesce()
4100 ecmd->rx_coalesce_usecs > tmax || in sky2_set_coalesce()
4101 ecmd->rx_coalesce_usecs_irq > tmax) in sky2_set_coalesce()
4102 return -EINVAL; in sky2_set_coalesce()
4104 if (ecmd->tx_max_coalesced_frames >= sky2->tx_ring_size-1) in sky2_set_coalesce()
4105 return -EINVAL; in sky2_set_coalesce()
4106 if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) in sky2_set_coalesce()
4107 return -EINVAL; in sky2_set_coalesce()
4108 if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING) in sky2_set_coalesce()
4109 return -EINVAL; in sky2_set_coalesce()
4111 if (ecmd->tx_coalesce_usecs == 0) in sky2_set_coalesce()
4115 sky2_us2clk(hw, ecmd->tx_coalesce_usecs)); in sky2_set_coalesce()
4118 sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames); in sky2_set_coalesce()
4120 if (ecmd->rx_coalesce_usecs == 0) in sky2_set_coalesce()
4124 sky2_us2clk(hw, ecmd->rx_coalesce_usecs)); in sky2_set_coalesce()
4127 sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames); in sky2_set_coalesce()
4129 if (ecmd->rx_coalesce_usecs_irq == 0) in sky2_set_coalesce()
4133 sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq)); in sky2_set_coalesce()
4136 sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq); in sky2_set_coalesce()
4157 ering->rx_max_pending = RX_MAX_PENDING; in sky2_get_ringparam()
4158 ering->tx_max_pending = TX_MAX_PENDING; in sky2_get_ringparam()
4160 ering->rx_pending = sky2->rx_pending; in sky2_get_ringparam()
4161 ering->tx_pending = sky2->tx_pending; in sky2_get_ringparam()
4171 if (ering->rx_pending > RX_MAX_PENDING || in sky2_set_ringparam()
4172 ering->rx_pending < 8 || in sky2_set_ringparam()
4173 ering->tx_pending < TX_MIN_PENDING || in sky2_set_ringparam()
4174 ering->tx_pending > TX_MAX_PENDING) in sky2_set_ringparam()
4175 return -EINVAL; in sky2_set_ringparam()
4179 sky2->rx_pending = ering->rx_pending; in sky2_set_ringparam()
4180 sky2->tx_pending = ering->tx_pending; in sky2_set_ringparam()
4181 sky2->tx_ring_size = roundup_ring_size(sky2->tx_pending); in sky2_set_ringparam()
4199 case 5: /* Tx Arbiter 2 */ in sky2_reg_access_ok()
4203 case 22 ... 23: /* Tx Ram Buffer 2 */ in sky2_reg_access_ok()
4205 case 27: /* Tx MAC Fifo 2 */ in sky2_reg_access_ok()
4210 return hw->ports > 1; in sky2_reg_access_ok()
4214 case 4: /* Tx Arbiter 1 */ in sky2_reg_access_ok()
4219 case 20 ... 21: /* Tx Ram Buffer 1 */ in sky2_reg_access_ok()
4221 case 26: /* Tx MAC Fifo 1 */ in sky2_reg_access_ok()
4243 const void __iomem *io = sky2->hw->regs; in sky2_get_regs()
4246 regs->version = 1; in sky2_get_regs()
4251 memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10); in sky2_get_regs()
4252 else if (sky2_reg_access_ok(sky2->hw, b)) in sky2_get_regs()
4265 struct sky2_hw *hw = sky2->hw; in sky2_get_eeprom_len()
4278 eeprom->magic = SKY2_EEPROM_MAGIC; in sky2_get_eeprom()
4279 rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, in sky2_get_eeprom()
4284 eeprom->len = rc; in sky2_get_eeprom()
4295 if (eeprom->magic != SKY2_EEPROM_MAGIC) in sky2_set_eeprom()
4296 return -EINVAL; in sky2_set_eeprom()
4298 rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len, in sky2_set_eeprom()
4308 const struct sky2_hw *hw = sky2->hw; in sky2_fix_features()
4313 if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) { in sky2_fix_features()
4321 (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) { in sky2_fix_features()
4332 netdev_features_t changed = dev->features ^ features; in sky2_set_features()
4335 !(sky2->hw->flags & SKY2_HW_NEW_LE)) { in sky2_set_features()
4336 sky2_write32(sky2->hw, in sky2_set_features()
4337 Q_ADDR(rxqaddr[sky2->port], Q_CSR), in sky2_set_features()
4388 struct net_device *dev = seq->private; in sky2_debug_show()
4390 struct sky2_hw *hw = sky2->hw; in sky2_debug_show()
4391 unsigned port = sky2->port; in sky2_debug_show()
4405 napi_disable(&hw->napi); in sky2_debug_show()
4408 seq_printf(seq, "Status ring %u\n", hw->st_size); in sky2_debug_show()
4409 if (hw->st_idx == last) in sky2_debug_show()
4413 for (idx = hw->st_idx; idx != last && idx < hw->st_size; in sky2_debug_show()
4414 idx = RING_NEXT(idx, hw->st_size)) { in sky2_debug_show()
4415 const struct sky2_status_le *le = hw->st_le + idx; in sky2_debug_show()
4417 idx, le->opcode, le->length, le->status); in sky2_debug_show()
4422 seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n", in sky2_debug_show()
4423 sky2->tx_cons, sky2->tx_prod, in sky2_debug_show()
4427 /* Dump contents of tx ring */ in sky2_debug_show()
4429 for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < sky2->tx_ring_size; in sky2_debug_show()
4430 idx = RING_NEXT(idx, sky2->tx_ring_size)) { in sky2_debug_show()
4431 const struct sky2_tx_le *le = sky2->tx_le + idx; in sky2_debug_show()
4432 u32 a = le32_to_cpu(le->addr); in sky2_debug_show()
4438 switch (le->opcode & ~HW_OWNER) { in sky2_debug_show()
4446 seq_printf(seq, " vlan=%d", be16_to_cpu(le->length)); in sky2_debug_show()
4452 seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length)); in sky2_debug_show()
4455 seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length)); in sky2_debug_show()
4458 seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length)); in sky2_debug_show()
4461 seq_printf(seq, " op=%#x,%#x(%d)", le->opcode, in sky2_debug_show()
4462 a, le16_to_cpu(le->length)); in sky2_debug_show()
4465 if (le->ctrl & EOP) { in sky2_debug_show()
4477 napi_enable(&hw->napi); in sky2_debug_show()
4492 if (dev->netdev_ops->ndo_open != sky2_open || !sky2_debug) in sky2_device_event()
4497 if (sky2->debugfs) { in sky2_device_event()
4498 sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, in sky2_device_event()
4499 sky2_debug, dev->name); in sky2_device_event()
4504 if (sky2->debugfs) { in sky2_device_event()
4506 debugfs_remove(sky2->debugfs); in sky2_device_event()
4507 sky2->debugfs = NULL; in sky2_device_event()
4512 sky2->debugfs = debugfs_create_file(dev->name, 0444, in sky2_device_event()
4515 if (IS_ERR(sky2->debugfs)) in sky2_device_event()
4516 sky2->debugfs = NULL; in sky2_device_event()
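These fragments keep a per-netdevice debugfs file in step with the device from a netdevice notifier: rename it when the interface is renamed, remove it when it goes away, create it when it comes up. A minimal sketch of that lifecycle; the root dentry, the show routine, and the specific notifier events are assumptions for illustration, not taken from the listing:

#include <linux/debugfs.h>
#include <linux/netdevice.h>
#include <linux/seq_file.h>

static struct dentry *demo_debug_root;	/* created at module init (assumed) */

static int demo_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;

	seq_printf(seq, "%s: example debugfs dump\n", dev->name);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

/* Create, rename or remove dev's debugfs file as its state changes. */
static void demo_debug_event(struct net_device *dev, struct dentry **entry,
			     unsigned long event)
{
	switch (event) {
	case NETDEV_CHANGENAME:
		if (*entry)
			*entry = debugfs_rename(demo_debug_root, *entry,
						demo_debug_root, dev->name);
		break;
	case NETDEV_GOING_DOWN:
		debugfs_remove(*entry);
		*entry = NULL;
		break;
	case NETDEV_UP:
		*entry = debugfs_create_file(dev->name, 0444, demo_debug_root,
					     dev, &demo_fops);
		break;
	}
}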
4601 SET_NETDEV_DEV(dev, &hw->pdev->dev); in sky2_init_netdev()
4602 dev->irq = hw->pdev->irq; in sky2_init_netdev()
4603 dev->ethtool_ops = &sky2_ethtool_ops; in sky2_init_netdev()
4604 dev->watchdog_timeo = TX_WATCHDOG; in sky2_init_netdev()
4605 dev->netdev_ops = &sky2_netdev_ops[port]; in sky2_init_netdev()
4608 sky2->netdev = dev; in sky2_init_netdev()
4609 sky2->hw = hw; in sky2_init_netdev()
4610 sky2->msg_enable = netif_msg_init(debug, default_msg); in sky2_init_netdev()
4612 u64_stats_init(&sky2->tx_stats.syncp); in sky2_init_netdev()
4613 u64_stats_init(&sky2->rx_stats.syncp); in sky2_init_netdev()
4616 sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE; in sky2_init_netdev()
4617 if (hw->chip_id != CHIP_ID_YUKON_XL) in sky2_init_netdev()
4618 dev->hw_features |= NETIF_F_RXCSUM; in sky2_init_netdev()
4620 sky2->flow_mode = FC_BOTH; in sky2_init_netdev()
4622 sky2->duplex = -1; in sky2_init_netdev()
4623 sky2->speed = -1; in sky2_init_netdev()
4624 sky2->advertising = sky2_supported_modes(hw); in sky2_init_netdev()
4625 sky2->wol = wol; in sky2_init_netdev()
4627 spin_lock_init(&sky2->phy_lock); in sky2_init_netdev()
4629 sky2->tx_pending = TX_DEF_PENDING; in sky2_init_netdev()
4630 sky2->tx_ring_size = roundup_ring_size(TX_DEF_PENDING); in sky2_init_netdev()
4631 sky2->rx_pending = RX_DEF_PENDING; in sky2_init_netdev()
4633 hw->dev[port] = dev; in sky2_init_netdev()
4635 sky2->port = port; in sky2_init_netdev()
4637 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO; in sky2_init_netdev()
4640 dev->features |= NETIF_F_HIGHDMA; in sky2_init_netdev()
4643 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) in sky2_init_netdev()
4644 dev->hw_features |= NETIF_F_RXHASH; in sky2_init_netdev()
4646 if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) { in sky2_init_netdev()
4647 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | in sky2_init_netdev()
4649 dev->vlan_features |= SKY2_VLAN_OFFLOADS; in sky2_init_netdev()
4652 dev->features |= dev->hw_features; in sky2_init_netdev()
4654 /* MTU range: 60 - 1500 or 9000 */ in sky2_init_netdev()
4655 dev->min_mtu = ETH_ZLEN; in sky2_init_netdev()
4656 if (hw->chip_id == CHIP_ID_YUKON_FE || in sky2_init_netdev()
4657 hw->chip_id == CHIP_ID_YUKON_FE_P) in sky2_init_netdev()
4658 dev->max_mtu = ETH_DATA_LEN; in sky2_init_netdev()
4660 dev->max_mtu = ETH_JUMBO_MTU; in sky2_init_netdev()
4666 ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev); in sky2_init_netdev()
4670 memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); in sky2_init_netdev()
4675 if (!is_valid_ether_addr(dev->dev_addr)) { in sky2_init_netdev()
4678 dev_warn(&hw->pdev->dev, "Invalid MAC address, defaulting to random\n"); in sky2_init_netdev()
4680 memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN); in sky2_init_netdev()
4682 dev_warn(&hw->pdev->dev, "Failed to set MAC address.\n"); in sky2_init_netdev()
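The tail of sky2_init_netdev() selects a MAC address: the device-tree node first, then the adapter's register block, and finally a warning with a generated address when neither yields a valid one. A minimal sketch of that fallback chain; the random-address step is an assumption based on the warning text above, and the register offset is left to the caller:

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of_net.h>

/* Pick a MAC: DT property, then device registers, then a random address. */
static void demo_pick_mac(struct net_device *dev, struct device_node *np,
			  void __iomem *mac_regs)
{
	u8 addr[ETH_ALEN];

	if (!of_get_ethdev_address(np, dev))
		return;				/* DT supplied a valid address */

	memcpy_fromio(addr, mac_regs, ETH_ALEN);	/* read from hardware */
	eth_hw_addr_set(dev, addr);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_warn(dev, "invalid MAC address, using a random one\n");
	}
}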
4692 netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr); in sky2_show_addr()
4705 hw->flags |= SKY2_HW_USE_MSI; in sky2_test_intr()
4706 wake_up(&hw->msi_wait); in sky2_test_intr()
4717 struct pci_dev *pdev = hw->pdev; in sky2_test_msi()
4720 init_waitqueue_head(&hw->msi_wait); in sky2_test_msi()
4722 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); in sky2_test_msi()
4724 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); in sky2_test_msi()
4733 wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10); in sky2_test_msi()
4735 if (!(hw->flags & SKY2_HW_USE_MSI)) { in sky2_test_msi()
4737 dev_info(&pdev->dev, "No interrupt generated using MSI, " in sky2_test_msi()
4740 err = -EOPNOTSUPP; in sky2_test_msi()
4747 free_irq(pdev->irq, hw); in sky2_test_msi()
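sky2_test_msi() confirms that an MSI actually reaches the CPU before committing to it: register the handler, provoke a test interrupt, and wait briefly for the handler to set a flag. A condensed sketch of the pattern; here the flag and waitqueue live in a local struct rather than in the hw state, and the hardware trigger is a hypothetical callback:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/wait.h>

struct demo_msi_test {
	wait_queue_head_t wait;
	bool seen;
};

static irqreturn_t demo_test_intr(int irq, void *data)
{
	struct demo_msi_test *t = data;

	t->seen = true;
	wake_up(&t->wait);
	return IRQ_HANDLED;
}

/* Returns 0 if the MSI fired, -EOPNOTSUPP if it did not arrive in time. */
static int demo_test_msi(struct pci_dev *pdev, void (*fire_test_irq)(void))
{
	struct demo_msi_test t = { .seen = false };
	int err;

	init_waitqueue_head(&t.wait);

	err = request_irq(pdev->irq, demo_test_intr, 0, "msi-test", &t);
	if (err)
		return err;

	fire_test_irq();	/* hypothetical: device-specific software IRQ */
	wait_event_timeout(t.wait, t.seen, HZ / 10);

	free_irq(pdev->irq, &t);
	return t.seen ? 0 : -EOPNOTSUPP;
}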
4771 snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]); in sky2_name()
4786 .ident = "Gateway P-79",
4789 DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
4826 dev_err(&pdev->dev, "cannot enable PCI device\n"); in sky2_probe()
4837 dev_err(&pdev->dev, "PCI read config failed\n"); in sky2_probe()
4842 dev_err(&pdev->dev, "PCI configuration read error\n"); in sky2_probe()
4843 err = -EIO; in sky2_probe()
4849 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); in sky2_probe()
4856 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { in sky2_probe()
4858 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); in sky2_probe()
4860 dev_err(&pdev->dev, "unable to obtain 64 bit DMA " in sky2_probe()
4865 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); in sky2_probe()
4867 dev_err(&pdev->dev, "no usable DMA configuration\n"); in sky2_probe()
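The probe path above asks for a 64-bit DMA mask when the platform can honour it and falls back to 32-bit otherwise. A minimal sketch of that negotiation using the combined helper dma_set_mask_and_coherent(); the using_64bit flag mirrors the condition sky2 later uses to set NETIF_F_HIGHDMA:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer 64-bit DMA, fall back to 32-bit; fail probe if neither works. */
static int demo_set_dma_mask(struct pci_dev *pdev, bool *using_64bit)
{
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_64bit = true;
		return 0;
	}

	*using_64bit = false;
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		return -EIO;
	}
	return 0;
}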
4880 dev_err(&pdev->dev, "PCI write config failed\n"); in sky2_probe()
4885 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; in sky2_probe()
4887 err = -ENOMEM; in sky2_probe()
4894 hw->pdev = pdev; in sky2_probe()
4895 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); in sky2_probe()
4897 hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000); in sky2_probe()
4898 if (!hw->regs) { in sky2_probe()
4899 dev_err(&pdev->dev, "cannot map device registers\n"); in sky2_probe()
4908 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); in sky2_probe()
4909 hw->st_le = dma_alloc_coherent(&pdev->dev, in sky2_probe()
4910 hw->st_size * sizeof(struct sky2_status_le), in sky2_probe()
4911 &hw->st_dma, GFP_KERNEL); in sky2_probe()
4912 if (!hw->st_le) { in sky2_probe()
4913 err = -ENOMEM; in sky2_probe()
4917 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", in sky2_probe()
4918 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); in sky2_probe()
4924 err = -ENOMEM; in sky2_probe()
4928 if (disable_msi == -1) in sky2_probe()
4935 if (err != -EOPNOTSUPP) in sky2_probe()
4940 netif_napi_add(dev, &hw->napi, sky2_poll); in sky2_probe()
4944 dev_err(&pdev->dev, "cannot register net device\n"); in sky2_probe()
4952 if (hw->ports > 1) { in sky2_probe()
4955 err = -ENOMEM; in sky2_probe()
4961 dev_err(&pdev->dev, "cannot register second net device\n"); in sky2_probe()
4965 err = sky2_setup_irq(hw, hw->irq_name); in sky2_probe()
4972 timer_setup(&hw->watchdog_timer, sky2_watchdog, 0); in sky2_probe()
4973 INIT_WORK(&hw->restart_work, sky2_restart); in sky2_probe()
4976 pdev->d3hot_delay = 300; in sky2_probe()
4987 if (hw->flags & SKY2_HW_USE_MSI) in sky2_probe()
4991 dma_free_coherent(&pdev->dev, in sky2_probe()
4992 hw->st_size * sizeof(struct sky2_status_le), in sky2_probe()
4993 hw->st_le, hw->st_dma); in sky2_probe()
4997 iounmap(hw->regs); in sky2_probe()
5016 timer_shutdown_sync(&hw->watchdog_timer); in sky2_remove()
5017 cancel_work_sync(&hw->restart_work); in sky2_remove()
5019 for (i = hw->ports-1; i >= 0; --i) in sky2_remove()
5020 unregister_netdev(hw->dev[i]); in sky2_remove()
5030 if (hw->ports > 1) { in sky2_remove()
5031 napi_disable(&hw->napi); in sky2_remove()
5032 free_irq(pdev->irq, hw); in sky2_remove()
5035 if (hw->flags & SKY2_HW_USE_MSI) in sky2_remove()
5037 dma_free_coherent(&pdev->dev, in sky2_remove()
5038 hw->st_size * sizeof(struct sky2_status_le), in sky2_remove()
5039 hw->st_le, hw->st_dma); in sky2_remove()
5043 for (i = hw->ports-1; i >= 0; --i) in sky2_remove()
5044 free_netdev(hw->dev[i]); in sky2_remove()
5046 iounmap(hw->regs); in sky2_remove()
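sky2_probe() allocates the status list-element ring with dma_alloc_coherent() and both the probe error path and sky2_remove() release it with a dma_free_coherent() of the same size. A minimal sketch of that alloc/free pairing; the struct and entry size are illustrative, not the driver's:

#include <linux/dma-mapping.h>

struct demo_ring {
	void *va;		/* CPU virtual address of the ring */
	dma_addr_t dma;		/* bus address programmed into the device */
	size_t bytes;
};

static int demo_ring_alloc(struct device *dev, struct demo_ring *r,
			   unsigned int entries, size_t entry_size)
{
	r->bytes = entries * entry_size;
	r->va = dma_alloc_coherent(dev, r->bytes, &r->dma, GFP_KERNEL);
	return r->va ? 0 : -ENOMEM;
}

static void demo_ring_free(struct device *dev, struct demo_ring *r)
{
	dma_free_coherent(dev, r->bytes, r->va, r->dma);
	r->va = NULL;
}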
5058 del_timer_sync(&hw->watchdog_timer); in sky2_suspend()
5059 cancel_work_sync(&hw->restart_work); in sky2_suspend()
5064 for (i = 0; i < hw->ports; i++) { in sky2_suspend()
5065 struct net_device *dev = hw->dev[i]; in sky2_suspend()
5068 if (sky2->wol) in sky2_suspend()
5088 /* Re-enable all clocks */ in sky2_resume()
5091 dev_err(&pdev->dev, "PCI write config failed\n"); in sky2_resume()
5103 dev_err(&pdev->dev, "resume failed (%d)\n", err); in sky2_resume()
5121 for (port = 0; port < hw->ports; port++) { in sky2_shutdown()
5122 struct net_device *ndev = hw->dev[port]; in sky2_shutdown()
5131 sky2_suspend(&pdev->dev); in sky2_shutdown()
5132 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); in sky2_shutdown()
5163 MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");