Lines Matching +full:pci +full:- +full:ep
3 Written/copyright 1997-2001 by Donald Becker.
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
22 [this link no longer provides anything useful -jgarzik]
24 ---------------------------------------------------------------------
32 /* The user-configurable values.
37 /* Used to pass the full-duplex flag, etc. */
39 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
42 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
52 There are no ill effects from too-large receive rings. */
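
A minimal sketch of the copy-only-tiny-frames scheme named in the comment above, assuming ordinary kernel receive-path context: frames shorter than rx_copybreak are copied into a freshly allocated, minimally-sized skb so the full-size ring buffer can stay mapped, while larger frames hand the ring buffer itself up the stack. The helper name is illustrative and DMA sync/unmap of the ring buffer is elided; the driver's own epic_rx(), further down in this listing, follows the same shape.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_copybreak(struct net_device *dev,
					    struct sk_buff *ring_skb,
					    unsigned int pkt_len,
					    unsigned int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;		/* ring buffer stays in the ring */
	}
	skb_put(ring_skb, pkt_len);	/* pass the ring buffer itself up */
	return ring_skb;
}
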
68 #define RX_FIFO_THRESH 1 /* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */
77 #include <linux/pci.h>
106 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
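
A small sketch of how one per-card "options" word described above would decode, assuming only what the parameter help states (bits 0-3: media type, bit 4: full duplex); it mirrors the dev->mem_start handling visible later in epic_init_one(). The helper name is illustrative.

/* Illustrative decode of one "options"/dev->mem_start word. */
static void example_decode_option(int option, int *media, int *full_duplex)
{
	*media       = option & 0x0f;		/* bits 0-3: forced media type */
	*full_duplex = (option & 16) ? 1 : 0;	/* bit 4: force full duplex    */
}
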
117 single-chip Ethernet controllers for PCI. This chip is used on
120 II. Board-specific settings
122 PCI bus devices are configured by the system at boot time, so no jumpers
124 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
183 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
228 * really ARE host-endian; it's not a misannotation. We tell
229 * the card to byteswap them internally on big-endian hosts -
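
A hedged illustration of the annotation fragment above: because the chip is configured to byteswap descriptor words itself on big-endian machines (see the "Tell the chip to byteswap descriptors on big-endian hosts" line in epic_open() below), descriptor fields can be plain host-endian u32s and the CPU never applies cpu_to_le32()/le32_to_cpu(). The struct and the EXAMPLE_DESC_OWN constant are stand-ins for this sketch, not the driver's definitions.

#include <linux/types.h>

#define EXAMPLE_DESC_OWN 0x8000		/* stand-in for the driver's DescOwn bit */

/* Deliberately u32, not __le32: the hardware swaps on big-endian hosts. */
struct example_rx_desc {
	u32 rxstatus;
	u32 buflength;
	u32 bufaddr;
	u32 next;
};

static void example_hand_to_hardware(struct example_rx_desc *desc)
{
	desc->rxstatus = EXAMPLE_DESC_OWN;	/* no cpu_to_le32() needed */
}
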
255 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
257 /* The addresses of receive-in-place skbuffs. */
274 struct pci_dev *pci_dev; /* PCI bus location. */
286 unsigned int default_port:4; /* Last dev->if_port value. */
322 static int card_idx = -1; in epic_init_one()
324 int chip_idx = (int) ent->driver_data; in epic_init_one()
326 struct epic_private *ep; in epic_init_one() local
344 dev_err(&pdev->dev, "no PCI region space\n"); in epic_init_one()
345 ret = -ENODEV; in epic_init_one()
355 ret = -ENOMEM; in epic_init_one()
357 dev = alloc_etherdev(sizeof (*ep)); in epic_init_one()
361 SET_NETDEV_DEV(dev, &pdev->dev); in epic_init_one()
365 dev_err(&pdev->dev, "ioremap failed\n"); in epic_init_one()
370 ep = netdev_priv(dev); in epic_init_one()
371 ep->ioaddr = ioaddr; in epic_init_one()
372 ep->mii.dev = dev; in epic_init_one()
373 ep->mii.mdio_read = mdio_read; in epic_init_one()
374 ep->mii.mdio_write = mdio_write; in epic_init_one()
375 ep->mii.phy_id_mask = 0x1f; in epic_init_one()
376 ep->mii.reg_num_mask = 0x1f; in epic_init_one()
378 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, in epic_init_one()
382 ep->tx_ring = ring_space; in epic_init_one()
383 ep->tx_ring_dma = ring_dma; in epic_init_one()
385 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, in epic_init_one()
389 ep->rx_ring = ring_space; in epic_init_one()
390 ep->rx_ring_dma = ring_dma; in epic_init_one()
392 if (dev->mem_start) { in epic_init_one()
393 option = dev->mem_start; in epic_init_one()
394 duplex = (dev->mem_start & 16) ? 1 : 0; in epic_init_one()
402 spin_lock_init(&ep->lock); in epic_init_one()
403 spin_lock_init(&ep->napi_lock); in epic_init_one()
405 /* Bring the chip out of low-power mode. */ in epic_init_one()
409 for (i = 16; i > 0; i--) in epic_init_one()
424 dev_dbg(&pdev->dev, "EEPROM contents:\n"); in epic_init_one()
426 pr_cont(" %4.4x%s", read_eeprom(ep, i), in epic_init_one()
430 ep->pci_dev = pdev; in epic_init_one()
431 ep->chip_id = chip_idx; in epic_init_one()
432 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; in epic_init_one()
433 ep->irq_mask = in epic_init_one()
434 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) in epic_init_one()
442 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { in epic_init_one()
445 ep->phys[phy_idx++] = phy; in epic_init_one()
446 dev_info(&pdev->dev, in epic_init_one()
452 ep->mii_phy_cnt = phy_idx; in epic_init_one()
454 phy = ep->phys[0]; in epic_init_one()
455 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); in epic_init_one()
456 dev_info(&pdev->dev, in epic_init_one()
459 ep->mii.advertising, mdio_read(dev, phy, 5)); in epic_init_one()
460 } else if ( ! (ep->chip_flags & NO_MII)) { in epic_init_one()
461 dev_warn(&pdev->dev, in epic_init_one()
464 ep->phys[0] = 3; in epic_init_one()
466 ep->mii.phy_id = ep->phys[0]; in epic_init_one()
469 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ in epic_init_one()
470 if (ep->chip_flags & MII_PWRDWN) in epic_init_one()
476 ep->mii.force_media = ep->mii.full_duplex = 1; in epic_init_one()
477 dev_info(&pdev->dev, "Forced full duplex requested.\n"); in epic_init_one()
479 dev->if_port = ep->default_port = option; in epic_init_one()
481 /* The Epic-specific entries in the device structure. */ in epic_init_one()
482 dev->netdev_ops = &epic_netdev_ops; in epic_init_one()
483 dev->ethtool_ops = &netdev_ethtool_ops; in epic_init_one()
484 dev->watchdog_timeo = TX_TIMEOUT; in epic_init_one()
485 netif_napi_add(dev, &ep->napi, epic_poll); in epic_init_one()
493 (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq, in epic_init_one()
494 dev->dev_addr); in epic_init_one()
500 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_init_one()
501 ep->rx_ring_dma); in epic_init_one()
503 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_init_one()
504 ep->tx_ring_dma); in epic_init_one()
528 This serves to flush the operation to the PCI bus.
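
A minimal sketch of the read-back idiom that sentence refers to, assuming a memory-mapped register window like the driver's: MMIO writes may be posted (buffered) by PCI bridges, and reading any register of the same device forces the posted write out to the bus before the CPU continues. The register offset and value are illustrative.

#include <linux/io.h>

static inline void example_flush_posted_write(void __iomem *ioaddr)
{
	iowrite32(0x1, ioaddr + 0x00);	/* MMIO write: may be posted        */
	(void)ioread32(ioaddr + 0x00);	/* read back: flushes it to the bus */
}
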
533 /* The EEPROM commands include the always-set leading bit. */
539 static void epic_disable_int(struct net_device *dev, struct epic_private *ep) in epic_disable_int() argument
541 void __iomem *ioaddr = ep->ioaddr; in epic_disable_int()
554 struct epic_private *ep) in epic_napi_irq_off() argument
556 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_off()
558 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); in epic_napi_irq_off()
563 struct epic_private *ep) in epic_napi_irq_on() argument
565 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_on()
568 ew32(INTMASK, ep->irq_mask | EpicNapiEvent); in epic_napi_irq_on()
571 static int read_eeprom(struct epic_private *ep, int location) in read_eeprom() argument
573 void __iomem *ioaddr = ep->ioaddr; in read_eeprom()
583 for (i = 12; i >= 0; i--) { in read_eeprom()
592 for (i = 16; i > 0; i--) { in read_eeprom()
609 struct epic_private *ep = netdev_priv(dev); in mdio_read() local
610 void __iomem *ioaddr = ep->ioaddr; in mdio_read()
616 for (i = 400; i > 0; i--) { in mdio_read()
633 struct epic_private *ep = netdev_priv(dev); in mdio_write() local
634 void __iomem *ioaddr = ep->ioaddr; in mdio_write()
639 for (i = 10000; i > 0; i--) { in mdio_write()
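
A hedged sketch of the bounded-poll pattern both MII loops above follow: start a management-frame transaction, then spin a limited number of iterations waiting for the controller's busy/ready flag rather than waiting forever. EXAMPLE_MII_CTRL and EXAMPLE_MII_BUSY are hypothetical names for this sketch, not the EPIC's actual register map.

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_MII_CTRL 0x30		/* hypothetical register offset */
#define EXAMPLE_MII_BUSY 0x01		/* hypothetical busy bit        */

static int example_mdio_wait(void __iomem *ioaddr)
{
	int i;

	for (i = 400; i > 0; i--) {	/* bounded, like mdio_read() above */
		if (!(ioread32(ioaddr + EXAMPLE_MII_CTRL) & EXAMPLE_MII_BUSY))
			return 0;	/* transaction completed */
		udelay(1);
	}
	return -ETIMEDOUT;		/* give up rather than hang */
}
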
649 struct epic_private *ep = netdev_priv(dev); in epic_open() local
650 void __iomem *ioaddr = ep->ioaddr; in epic_open()
651 const int irq = ep->pci_dev->irq; in epic_open()
657 napi_enable(&ep->napi); in epic_open()
658 rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev); in epic_open()
660 napi_disable(&ep->napi); in epic_open()
668 for (i = 16; i > 0; i--) in epic_open()
671 /* Pull the chip out of low-power mode, enable interrupts, and set for in epic_open()
672 PCI read multiple. The MIIcfg setting and strange write order are in epic_open()
677 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12); in epic_open()
679 if (ep->chip_flags & MII_PWRDWN) in epic_open()
682 /* Tell the chip to byteswap descriptors on big-endian hosts */ in epic_open()
693 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ in epic_open()
696 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); in epic_open()
698 ep->tx_threshold = TX_FIFO_THRESH; in epic_open()
699 ew32(TxThresh, ep->tx_threshold); in epic_open()
701 if (media2miictl[dev->if_port & 15]) { in epic_open()
702 if (ep->mii_phy_cnt) in epic_open()
703 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); in epic_open()
704 if (dev->if_port == 1) { in epic_open()
707 mdio_read(dev, ep->phys[0], MII_BMSR)); in epic_open()
710 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); in epic_open()
713 ep->mii.full_duplex = 1; in epic_open()
715 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in epic_open()
717 netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n", in epic_open()
718 ep->mii.full_duplex ? "full" in epic_open()
720 ep->phys[0], mii_lpa); in epic_open()
724 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_open()
725 ew32(PRxCDAR, ep->rx_ring_dma); in epic_open()
726 ew32(PTxCDAR, ep->tx_ring_dma); in epic_open()
736 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_open()
740 netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n", in epic_open()
742 ep->mii.full_duplex ? "full" : "half"); in epic_open()
747 timer_setup(&ep->timer, epic_timer, 0); in epic_open()
748 ep->timer.expires = jiffies + 3*HZ; in epic_open()
749 add_timer(&ep->timer); in epic_open()
754 /* Reset the chip to recover from a PCI transaction error.
758 struct net_device_stats *stats = &dev->stats; in epic_pause()
759 struct epic_private *ep = netdev_priv(dev); in epic_pause() local
760 void __iomem *ioaddr = ep->ioaddr; in epic_pause()
771 stats->rx_missed_errors += er8(MPCNT); in epic_pause()
772 stats->rx_frame_errors += er8(ALICNT); in epic_pause()
773 stats->rx_crc_errors += er8(CRCCNT); in epic_pause()
782 struct epic_private *ep = netdev_priv(dev); in epic_restart() local
783 void __iomem *ioaddr = ep->ioaddr; in epic_restart()
790 ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); in epic_restart()
794 for (i = 16; i > 0; i--) in epic_restart()
802 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12); in epic_restart()
803 if (ep->chip_flags & MII_PWRDWN) in epic_restart()
807 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i])); in epic_restart()
809 ep->tx_threshold = TX_FIFO_THRESH; in epic_restart()
810 ew32(TxThresh, ep->tx_threshold); in epic_restart()
811 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_restart()
812 ew32(PRxCDAR, ep->rx_ring_dma + in epic_restart()
813 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); in epic_restart()
814 ew32(PTxCDAR, ep->tx_ring_dma + in epic_restart()
815 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); in epic_restart()
823 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_restart()
832 struct epic_private *ep = netdev_priv(dev); in check_media() local
833 void __iomem *ioaddr = ep->ioaddr; in check_media()
834 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; in check_media()
835 int negotiated = mii_lpa & ep->mii.advertising; in check_media()
838 if (ep->mii.force_media) in check_media()
842 if (ep->mii.full_duplex != duplex) { in check_media()
843 ep->mii.full_duplex = duplex; in check_media()
844 netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n", in check_media()
845 ep->mii.full_duplex ? "full" : "half", in check_media()
846 ep->phys[0], mii_lpa); in check_media()
847 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); in check_media()
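
A hedged sketch of resolving duplex from the intersection computed above (mii_lpa & advertising), using the generic mii_nway_result() helper from <linux/mii.h>; whether check_media() uses that helper or open-codes the equivalent test is not visible in the matched lines.

#include <linux/types.h>
#include <linux/mii.h>

static bool example_negotiated_full_duplex(unsigned int negotiated)
{
	unsigned int best = mii_nway_result(negotiated);

	/* Of the commonly advertised modes, only these two are full duplex. */
	return best == LPA_100FULL || best == LPA_10FULL;
}
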
853 struct epic_private *ep = from_timer(ep, t, timer); in epic_timer() local
854 struct net_device *dev = ep->mii.dev; in epic_timer()
855 void __iomem *ioaddr = ep->ioaddr; in epic_timer()
867 ep->timer.expires = jiffies + next_tick; in epic_timer()
868 add_timer(&ep->timer); in epic_timer()
873 struct epic_private *ep = netdev_priv(dev); in epic_tx_timeout() local
874 void __iomem *ioaddr = ep->ioaddr; in epic_tx_timeout()
881 ep->dirty_tx, ep->cur_tx); in epic_tx_timeout()
885 dev->stats.tx_fifo_errors++; in epic_tx_timeout()
893 dev->stats.tx_errors++; in epic_tx_timeout()
894 if (!ep->tx_full) in epic_tx_timeout()
901 struct epic_private *ep = netdev_priv(dev); in epic_init_ring() local
904 ep->tx_full = 0; in epic_init_ring()
905 ep->dirty_tx = ep->cur_tx = 0; in epic_init_ring()
906 ep->cur_rx = ep->dirty_rx = 0; in epic_init_ring()
907 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in epic_init_ring()
911 ep->rx_ring[i].rxstatus = 0; in epic_init_ring()
912 ep->rx_ring[i].buflength = ep->rx_buf_sz; in epic_init_ring()
913 ep->rx_ring[i].next = ep->rx_ring_dma + in epic_init_ring()
915 ep->rx_skbuff[i] = NULL; in epic_init_ring()
918 ep->rx_ring[i-1].next = ep->rx_ring_dma; in epic_init_ring()
922 struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_init_ring()
923 ep->rx_skbuff[i] = skb; in epic_init_ring()
927 ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_init_ring()
928 skb->data, in epic_init_ring()
929 ep->rx_buf_sz, in epic_init_ring()
931 ep->rx_ring[i].rxstatus = DescOwn; in epic_init_ring()
933 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in epic_init_ring()
938 ep->tx_skbuff[i] = NULL; in epic_init_ring()
939 ep->tx_ring[i].txstatus = 0x0000; in epic_init_ring()
940 ep->tx_ring[i].next = ep->tx_ring_dma + in epic_init_ring()
943 ep->tx_ring[i-1].next = ep->tx_ring_dma; in epic_init_ring()
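
A short sketch of the free-running index bookkeeping set up above, under the usual convention for these drivers: cur_* and dirty_* are monotonically increasing unsigned counters, the ring slot is the counter modulo the ring size, and the number of outstanding entries is simply their difference, which stays correct across unsigned wraparound (this is also why dirty_rx may start at i - RX_RING_SIZE when not every buffer could be allocated). Names and the ring size are illustrative.

#define EXAMPLE_RING_SIZE 32			/* illustrative ring size */

static unsigned int example_ring_slot(unsigned int counter)
{
	return counter % EXAMPLE_RING_SIZE;	/* slot within the ring */
}

static unsigned int example_outstanding(unsigned int cur, unsigned int dirty)
{
	return cur - dirty;	/* well-defined even after either counter wraps */
}
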
948 struct epic_private *ep = netdev_priv(dev); in epic_start_xmit() local
949 void __iomem *ioaddr = ep->ioaddr; in epic_start_xmit()
961 spin_lock_irqsave(&ep->lock, flags); in epic_start_xmit()
962 free_count = ep->cur_tx - ep->dirty_tx; in epic_start_xmit()
963 entry = ep->cur_tx % TX_RING_SIZE; in epic_start_xmit()
965 ep->tx_skbuff[entry] = skb; in epic_start_xmit()
966 ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_start_xmit()
967 skb->data, skb->len, in epic_start_xmit()
972 ctrl_word = 0x140000; /* Tx-done intr. */ in epic_start_xmit()
973 } else if (free_count < TX_QUEUE_LEN - 1) { in epic_start_xmit()
974 ctrl_word = 0x100000; /* No Tx-done intr. */ in epic_start_xmit()
977 ctrl_word = 0x140000; /* Tx-done intr. */ in epic_start_xmit()
978 ep->tx_full = 1; in epic_start_xmit()
980 ep->tx_ring[entry].buflength = ctrl_word | skb->len; in epic_start_xmit()
981 ep->tx_ring[entry].txstatus = in epic_start_xmit()
982 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16) in epic_start_xmit()
985 ep->cur_tx++; in epic_start_xmit()
986 if (ep->tx_full) in epic_start_xmit()
989 spin_unlock_irqrestore(&ep->lock, flags); in epic_start_xmit()
995 skb->len, entry, ctrl_word, er32(TxSTAT)); in epic_start_xmit()
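
A hedged sketch of the Tx-completion interrupt mitigation visible in the ctrl_word choices above: an interrupt is requested when the queue is lightly used or nearly full, and suppressed in between so completions are batched. The 0x140000/0x100000 control words and the "free_count < TX_QUEUE_LEN - 1" test come from the lines above; the first branch's condition is not among the matched lines, so the TX_QUEUE_LEN/2 test and the queue-length value here are assumptions.

#include <linux/types.h>

#define EXAMPLE_TX_QUEUE_LEN 240	/* stand-in for the driver's TX_QUEUE_LEN */

static u32 example_tx_ctrl_word(unsigned int free_count, int *queue_full)
{
	*queue_full = 0;
	if (free_count < EXAMPLE_TX_QUEUE_LEN / 2)	/* assumed condition */
		return 0x140000;	/* lightly loaded: ask for a Tx-done intr */
	if (free_count < EXAMPLE_TX_QUEUE_LEN - 1)
		return 0x100000;	/* busy: no interrupt, batch completions  */
	*queue_full = 1;		/* nearly full: throttle the queue        */
	return 0x140000;		/* and ask for an interrupt               */
}
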
1000 static void epic_tx_error(struct net_device *dev, struct epic_private *ep, in epic_tx_error() argument
1003 struct net_device_stats *stats = &dev->stats; in epic_tx_error()
1011 stats->tx_errors++; in epic_tx_error()
1013 stats->tx_aborted_errors++; in epic_tx_error()
1015 stats->tx_carrier_errors++; in epic_tx_error()
1017 stats->tx_window_errors++; in epic_tx_error()
1019 stats->tx_fifo_errors++; in epic_tx_error()
1022 static void epic_tx(struct net_device *dev, struct epic_private *ep) in epic_tx() argument
1030 cur_tx = ep->cur_tx; in epic_tx()
1031 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { in epic_tx()
1034 int txstatus = ep->tx_ring[entry].txstatus; in epic_tx()
1040 dev->stats.collisions += (txstatus >> 8) & 15; in epic_tx()
1041 dev->stats.tx_packets++; in epic_tx()
1042 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; in epic_tx()
1044 epic_tx_error(dev, ep, txstatus); in epic_tx()
1047 skb = ep->tx_skbuff[entry]; in epic_tx()
1048 dma_unmap_single(&ep->pci_dev->dev, in epic_tx()
1049 ep->tx_ring[entry].bufaddr, skb->len, in epic_tx()
1052 ep->tx_skbuff[entry] = NULL; in epic_tx()
1056 if (cur_tx - dirty_tx > TX_RING_SIZE) { in epic_tx()
1057 netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n", in epic_tx()
1058 dirty_tx, cur_tx, ep->tx_full); in epic_tx()
1062 ep->dirty_tx = dirty_tx; in epic_tx()
1063 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { in epic_tx()
1065 ep->tx_full = 0; in epic_tx()
1075 struct epic_private *ep = netdev_priv(dev); in epic_interrupt() local
1076 void __iomem *ioaddr = ep->ioaddr; in epic_interrupt()
1095 spin_lock(&ep->napi_lock); in epic_interrupt()
1096 if (napi_schedule_prep(&ep->napi)) { in epic_interrupt()
1097 epic_napi_irq_off(dev, ep); in epic_interrupt()
1098 __napi_schedule(&ep->napi); in epic_interrupt()
1100 spin_unlock(&ep->napi_lock); in epic_interrupt()
1106 struct net_device_stats *stats = &dev->stats; in epic_interrupt()
1112 stats->rx_missed_errors += er8(MPCNT); in epic_interrupt()
1113 stats->rx_frame_errors += er8(ALICNT); in epic_interrupt()
1114 stats->rx_crc_errors += er8(CRCCNT); in epic_interrupt()
1117 stats->tx_fifo_errors++; in epic_interrupt()
1118 ew32(TxThresh, ep->tx_threshold += 128); in epic_interrupt()
1123 netdev_err(dev, "PCI Bus Error! status %4.4x.\n", in epic_interrupt()
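
A hedged sketch of the adaptive transmit threshold seen in the TxThresh update above: each Tx FIFO underrun raises the store-and-forward threshold by 128 bytes so the MAC buffers more of a frame before it starts transmitting. The cap is an assumption for the sketch; the matched lines do not show how (or whether) the driver bounds the value before writing it to TxThresh.

#define EXAMPLE_TX_FIFO_BYTES 2048	/* assumed FIFO size for this sketch */

static unsigned int example_on_tx_underrun(unsigned int tx_threshold)
{
	if (tx_threshold + 128 <= EXAMPLE_TX_FIFO_BYTES)
		tx_threshold += 128;	/* start transmitting later next time  */
	return tx_threshold;		/* caller writes this back to TxThresh */
}
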
1143 struct epic_private *ep = netdev_priv(dev); in epic_rx() local
1144 int entry = ep->cur_rx % RX_RING_SIZE; in epic_rx()
1145 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; in epic_rx()
1150 ep->rx_ring[entry].rxstatus); in epic_rx()
1156 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { in epic_rx()
1157 int status = ep->rx_ring[entry].rxstatus; in epic_rx()
1162 if (--rx_work_limit < 0) in epic_rx()
1171 dev->stats.rx_length_errors++; in epic_rx()
1174 dev->stats.rx_errors++; in epic_rx()
1176 /* Malloc up new buffer, compatible with net-2e. */ in epic_rx()
1178 short pkt_len = (status >> 16) - 4; in epic_rx()
1181 if (pkt_len > PKT_BUF_SZ - 4) { in epic_rx()
1187 to a minimally-sized skbuff. */ in epic_rx()
1191 dma_sync_single_for_cpu(&ep->pci_dev->dev, in epic_rx()
1192 ep->rx_ring[entry].bufaddr, in epic_rx()
1193 ep->rx_buf_sz, in epic_rx()
1195 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); in epic_rx()
1197 dma_sync_single_for_device(&ep->pci_dev->dev, in epic_rx()
1198 ep->rx_ring[entry].bufaddr, in epic_rx()
1199 ep->rx_buf_sz, in epic_rx()
1202 dma_unmap_single(&ep->pci_dev->dev, in epic_rx()
1203 ep->rx_ring[entry].bufaddr, in epic_rx()
1204 ep->rx_buf_sz, in epic_rx()
1206 skb_put(skb = ep->rx_skbuff[entry], pkt_len); in epic_rx()
1207 ep->rx_skbuff[entry] = NULL; in epic_rx()
1209 skb->protocol = eth_type_trans(skb, dev); in epic_rx()
1211 dev->stats.rx_packets++; in epic_rx()
1212 dev->stats.rx_bytes += pkt_len; in epic_rx()
1215 entry = (++ep->cur_rx) % RX_RING_SIZE; in epic_rx()
1219 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { in epic_rx()
1220 entry = ep->dirty_rx % RX_RING_SIZE; in epic_rx()
1221 if (ep->rx_skbuff[entry] == NULL) { in epic_rx()
1223 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_rx()
1227 ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_rx()
1228 skb->data, in epic_rx()
1229 ep->rx_buf_sz, in epic_rx()
1234 ep->rx_ring[entry].rxstatus = DescOwn; in epic_rx()
1239 static void epic_rx_err(struct net_device *dev, struct epic_private *ep) in epic_rx_err() argument
1241 void __iomem *ioaddr = ep->ioaddr; in epic_rx_err()
1249 dev->stats.rx_errors++; in epic_rx_err()
1256 struct epic_private *ep = container_of(napi, struct epic_private, napi); in epic_poll() local
1257 struct net_device *dev = ep->mii.dev; in epic_poll()
1258 void __iomem *ioaddr = ep->ioaddr; in epic_poll()
1261 epic_tx(dev, ep); in epic_poll()
1265 epic_rx_err(dev, ep); in epic_poll()
1270 spin_lock_irqsave(&ep->napi_lock, flags); in epic_poll()
1273 epic_napi_irq_on(dev, ep); in epic_poll()
1274 spin_unlock_irqrestore(&ep->napi_lock, flags); in epic_poll()
1282 struct epic_private *ep = netdev_priv(dev); in epic_close() local
1283 struct pci_dev *pdev = ep->pci_dev; in epic_close()
1284 void __iomem *ioaddr = ep->ioaddr; in epic_close()
1289 napi_disable(&ep->napi); in epic_close()
1295 del_timer_sync(&ep->timer); in epic_close()
1297 epic_disable_int(dev, ep); in epic_close()
1299 free_irq(pdev->irq, dev); in epic_close()
1305 skb = ep->rx_skbuff[i]; in epic_close()
1306 ep->rx_skbuff[i] = NULL; in epic_close()
1307 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ in epic_close()
1308 ep->rx_ring[i].buflength = 0; in epic_close()
1310 dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr, in epic_close()
1311 ep->rx_buf_sz, DMA_FROM_DEVICE); in epic_close()
1314 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ in epic_close()
1317 skb = ep->tx_skbuff[i]; in epic_close()
1318 ep->tx_skbuff[i] = NULL; in epic_close()
1321 dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len, in epic_close()
1326 /* Green! Leave the chip in low-power mode. */ in epic_close()
1334 struct epic_private *ep = netdev_priv(dev); in epic_get_stats() local
1335 void __iomem *ioaddr = ep->ioaddr; in epic_get_stats()
1338 struct net_device_stats *stats = &dev->stats; in epic_get_stats()
1340 stats->rx_missed_errors += er8(MPCNT); in epic_get_stats()
1341 stats->rx_frame_errors += er8(ALICNT); in epic_get_stats()
1342 stats->rx_crc_errors += er8(CRCCNT); in epic_get_stats()
1345 return &dev->stats; in epic_get_stats()
1350 new frame, not around filling ep->setup_frame. This is non-deterministic
1351 when re-entered but still correct. */
1355 struct epic_private *ep = netdev_priv(dev); in set_rx_mode() local
1356 void __iomem *ioaddr = ep->ioaddr; in set_rx_mode()
1360 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ in set_rx_mode()
1364 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { in set_rx_mode()
1367 /* Too many to filter perfectly -- accept all multicasts. */ in set_rx_mode()
1379 ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; in set_rx_mode()
1384 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { in set_rx_mode()
1387 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); in set_rx_mode()
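
A minimal sketch of the 64-bin multicast hash setup shown above, assuming the driver's four 16-bit filter words: each multicast address is CRC-32 hashed little-endian (the ether_crc_le(...) & 0x3f line above), the 6-bit result selects one of 64 filter bits, and the table is then compared and written out as in the memcmp/memcpy lines. The bit-to-word mapping used here is one plausible layout, not confirmed by the matched lines, and writing the table to the chip is elided.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void example_build_mc_filter(struct net_device *dev, u16 mc_filter[4])
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 4 * sizeof(u16));
	netdev_for_each_mc_addr(ha, dev) {
		unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;

		mc_filter[bit >> 4] |= 1 << (bit & 15);	/* assumed mapping */
	}
}
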
1395 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in netdev_get_drvinfo()
1396 strscpy(info->version, DRV_VERSION, sizeof(info->version)); in netdev_get_drvinfo()
1397 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in netdev_get_drvinfo()
1405 spin_lock_irq(&np->lock); in netdev_get_link_ksettings()
1406 mii_ethtool_get_link_ksettings(&np->mii, cmd); in netdev_get_link_ksettings()
1407 spin_unlock_irq(&np->lock); in netdev_get_link_ksettings()
1418 spin_lock_irq(&np->lock); in netdev_set_link_ksettings()
1419 rc = mii_ethtool_set_link_ksettings(&np->mii, cmd); in netdev_set_link_ksettings()
1420 spin_unlock_irq(&np->lock); in netdev_set_link_ksettings()
1428 return mii_nway_restart(&np->mii); in netdev_nway_reset()
1434 return mii_link_ok(&np->mii); in netdev_get_link()
1449 struct epic_private *ep = netdev_priv(dev); in ethtool_begin() local
1450 void __iomem *ioaddr = ep->ioaddr; in ethtool_begin()
1452 if (ep->ethtool_ops_nesting == U32_MAX) in ethtool_begin()
1453 return -EBUSY; in ethtool_begin()
1454 /* power-up, if interface is down */ in ethtool_begin()
1455 if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) { in ethtool_begin()
1464 struct epic_private *ep = netdev_priv(dev); in ethtool_complete() local
1465 void __iomem *ioaddr = ep->ioaddr; in ethtool_complete()
1467 /* power-down, if interface is down */ in ethtool_complete()
1468 if (!--ep->ethtool_ops_nesting && !netif_running(dev)) { in ethtool_complete()
1489 void __iomem *ioaddr = np->ioaddr; in netdev_ioctl()
1493 /* power-up, if interface is down */ in netdev_ioctl()
1499 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ in netdev_ioctl()
1500 spin_lock_irq(&np->lock); in netdev_ioctl()
1501 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL); in netdev_ioctl()
1502 spin_unlock_irq(&np->lock); in netdev_ioctl()
1504 /* power-down, if interface is down */ in netdev_ioctl()
1516 struct epic_private *ep = netdev_priv(dev); in epic_remove_one() local
1519 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_remove_one()
1520 ep->tx_ring_dma); in epic_remove_one()
1521 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_remove_one()
1522 ep->rx_ring_dma); in epic_remove_one()
1523 pci_iounmap(pdev, ep->ioaddr); in epic_remove_one()
1527 /* pci_power_off(pdev, -1); */ in epic_remove_one()
1533 struct epic_private *ep = netdev_priv(dev); in epic_suspend() local
1534 void __iomem *ioaddr = ep->ioaddr; in epic_suspend()
1539 /* Put the chip into low-power mode. */ in epic_suspend()
1541 /* pci_power_off(pdev, -1); */ in epic_suspend()