Lines matching "full:ep" (identifier cross-reference hits for the local variable ep in the SMSC EPIC/100 Ethernet driver, epic100.c)

326 struct epic_private *ep; in epic_init_one() local
357 dev = alloc_etherdev(sizeof (*ep)); in epic_init_one()
370 ep = netdev_priv(dev); in epic_init_one()
371 ep->ioaddr = ioaddr; in epic_init_one()
372 ep->mii.dev = dev; in epic_init_one()
373 ep->mii.mdio_read = mdio_read; in epic_init_one()
374 ep->mii.mdio_write = mdio_write; in epic_init_one()
375 ep->mii.phy_id_mask = 0x1f; in epic_init_one()
376 ep->mii.reg_num_mask = 0x1f; in epic_init_one()
382 ep->tx_ring = ring_space; in epic_init_one()
383 ep->tx_ring_dma = ring_dma; in epic_init_one()
389 ep->rx_ring = ring_space; in epic_init_one()
390 ep->rx_ring_dma = ring_dma; in epic_init_one()
402 spin_lock_init(&ep->lock); in epic_init_one()
403 spin_lock_init(&ep->napi_lock); in epic_init_one()
426 pr_cont(" %4.4x%s", read_eeprom(ep, i), in epic_init_one()
430 ep->pci_dev = pdev; in epic_init_one()
431 ep->chip_id = chip_idx; in epic_init_one()
432 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags; in epic_init_one()
433 ep->irq_mask = in epic_init_one()
434 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) in epic_init_one()
442 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) { in epic_init_one()
445 ep->phys[phy_idx++] = phy; in epic_init_one()
452 ep->mii_phy_cnt = phy_idx; in epic_init_one()
454 phy = ep->phys[0]; in epic_init_one()
455 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); in epic_init_one()
459 ep->mii.advertising, mdio_read(dev, phy, 5)); in epic_init_one()
460 } else if ( ! (ep->chip_flags & NO_MII)) { in epic_init_one()
464 ep->phys[0] = 3; in epic_init_one()
466 ep->mii.phy_id = ep->phys[0]; in epic_init_one()
470 if (ep->chip_flags & MII_PWRDWN) in epic_init_one()
476 ep->mii.force_media = ep->mii.full_duplex = 1; in epic_init_one()
479 dev->if_port = ep->default_port = option; in epic_init_one()
485 netif_napi_add(dev, &ep->napi, epic_poll); in epic_init_one()
500 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_init_one()
501 ep->rx_ring_dma); in epic_init_one()
503 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_init_one()
504 ep->tx_ring_dma); in epic_init_one()
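The epic_init_one() hits above trace the usual probe-time sequence: alloc_etherdev() sized for the private struct, netdev_priv() to reach it, coherent DMA allocations for the Tx/Rx descriptor rings, and netif_napi_add() for the poll routine, with dma_free_coherent() on the error path. A minimal sketch of that pattern, assuming a simplified my_priv/my_desc layout and illustrative ring sizes rather than the driver's real ones:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    struct my_desc { u32 status, ctrl, bufaddr, next; };	/* illustrative only */

    struct my_priv {
    	struct my_desc *tx_ring, *rx_ring;
    	dma_addr_t tx_ring_dma, rx_ring_dma;
    	struct napi_struct napi;
    };

    /* Hypothetical helper showing the alloc_etherdev()/dma_alloc_coherent() pattern. */
    static struct net_device *my_probe_alloc(struct pci_dev *pdev, int tx_n, int rx_n,
    					     int (*poll)(struct napi_struct *, int))
    {
    	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
    	struct my_priv *mp;

    	if (!dev)
    		return NULL;
    	mp = netdev_priv(dev);		/* private area is zeroed by alloc_etherdev() */

    	mp->tx_ring = dma_alloc_coherent(&pdev->dev, tx_n * sizeof(*mp->tx_ring),
    					 &mp->tx_ring_dma, GFP_KERNEL);
    	mp->rx_ring = dma_alloc_coherent(&pdev->dev, rx_n * sizeof(*mp->rx_ring),
    					 &mp->rx_ring_dma, GFP_KERNEL);
    	if (!mp->tx_ring || !mp->rx_ring)
    		goto err;

    	netif_napi_add(dev, &mp->napi, poll);	/* three-argument form, as in the listing */
    	return dev;
    err:
    	if (mp->tx_ring)
    		dma_free_coherent(&pdev->dev, tx_n * sizeof(*mp->tx_ring),
    				  mp->tx_ring, mp->tx_ring_dma);
    	if (mp->rx_ring)
    		dma_free_coherent(&pdev->dev, rx_n * sizeof(*mp->rx_ring),
    				  mp->rx_ring, mp->rx_ring_dma);
    	free_netdev(dev);
    	return NULL;
    }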
539 static void epic_disable_int(struct net_device *dev, struct epic_private *ep) in epic_disable_int() argument
541 void __iomem *ioaddr = ep->ioaddr; in epic_disable_int()
554 struct epic_private *ep) in epic_napi_irq_off() argument
556 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_off()
558 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent); in epic_napi_irq_off()
563 struct epic_private *ep) in epic_napi_irq_on() argument
565 void __iomem *ioaddr = ep->ioaddr; in epic_napi_irq_on()
568 ew32(INTMASK, ep->irq_mask | EpicNapiEvent); in epic_napi_irq_on()
571 static int read_eeprom(struct epic_private *ep, int location) in read_eeprom() argument
573 void __iomem *ioaddr = ep->ioaddr; in read_eeprom()
609 struct epic_private *ep = netdev_priv(dev); in mdio_read() local
610 void __iomem *ioaddr = ep->ioaddr; in mdio_read()
633 struct epic_private *ep = netdev_priv(dev); in mdio_write() local
634 void __iomem *ioaddr = ep->ioaddr; in mdio_write()
649 struct epic_private *ep = netdev_priv(dev); in epic_open() local
650 void __iomem *ioaddr = ep->ioaddr; in epic_open()
651 const int irq = ep->pci_dev->irq; in epic_open()
657 napi_enable(&ep->napi); in epic_open()
660 napi_disable(&ep->napi); in epic_open()
679 if (ep->chip_flags & MII_PWRDWN) in epic_open()
698 ep->tx_threshold = TX_FIFO_THRESH; in epic_open()
699 ew32(TxThresh, ep->tx_threshold); in epic_open()
702 if (ep->mii_phy_cnt) in epic_open()
703 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]); in epic_open()
707 mdio_read(dev, ep->phys[0], MII_BMSR)); in epic_open()
710 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA); in epic_open()
713 ep->mii.full_duplex = 1; in epic_open()
715 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in epic_open()
718 ep->mii.full_duplex ? "full" in epic_open()
720 ep->phys[0], mii_lpa); in epic_open()
724 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_open()
725 ew32(PRxCDAR, ep->rx_ring_dma); in epic_open()
726 ew32(PTxCDAR, ep->tx_ring_dma); in epic_open()
736 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_open()
742 ep->mii.full_duplex ? "full" : "half"); in epic_open()
747 timer_setup(&ep->timer, epic_timer, 0); in epic_open()
748 ep->timer.expires = jiffies + 3*HZ; in epic_open()
749 add_timer(&ep->timer); in epic_open()
759 struct epic_private *ep = netdev_priv(dev); in epic_pause() local
760 void __iomem *ioaddr = ep->ioaddr; in epic_pause()
782 struct epic_private *ep = netdev_priv(dev); in epic_restart() local
783 void __iomem *ioaddr = ep->ioaddr; in epic_restart()
790 ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); in epic_restart()
803 if (ep->chip_flags & MII_PWRDWN) in epic_restart()
809 ep->tx_threshold = TX_FIFO_THRESH; in epic_restart()
810 ew32(TxThresh, ep->tx_threshold); in epic_restart()
811 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79); in epic_restart()
812 ew32(PRxCDAR, ep->rx_ring_dma + in epic_restart()
813 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc)); in epic_restart()
814 ew32(PTxCDAR, ep->tx_ring_dma + in epic_restart()
815 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); in epic_restart()
823 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) | in epic_restart()
832 struct epic_private *ep = netdev_priv(dev); in check_media() local
833 void __iomem *ioaddr = ep->ioaddr; in check_media()
834 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; in check_media()
835 int negotiated = mii_lpa & ep->mii.advertising; in check_media()
838 if (ep->mii.force_media) in check_media()
842 if (ep->mii.full_duplex != duplex) { in check_media()
843 ep->mii.full_duplex = duplex; in check_media()
845 ep->mii.full_duplex ? "full" : "half", in check_media()
846 ep->phys[0], mii_lpa); in check_media()
847 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79); in check_media()
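check_media() resolves duplex from the link partner ability word ANDed with the locally advertised abilities (mii_lpa & ep->mii.advertising). The driver open-codes the bit tests; a hedged equivalent using the generic mii_nway_result() helper from <linux/mii.h>, shown only for illustration:

    #include <linux/mii.h>

    /* Illustrative: full duplex if the strongest common mode is a *_FULL one. */
    static bool negotiated_full_duplex(u16 advertising, u16 lpa)
    {
    	unsigned int best = mii_nway_result(lpa & advertising);

    	return best == LPA_100FULL || best == LPA_10FULL;
    }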
853 struct epic_private *ep = from_timer(ep, t, timer); in epic_timer() local
854 struct net_device *dev = ep->mii.dev; in epic_timer()
855 void __iomem *ioaddr = ep->ioaddr; in epic_timer()
867 ep->timer.expires = jiffies + next_tick; in epic_timer()
868 add_timer(&ep->timer); in epic_timer()
873 struct epic_private *ep = netdev_priv(dev); in epic_tx_timeout() local
874 void __iomem *ioaddr = ep->ioaddr; in epic_tx_timeout()
881 ep->dirty_tx, ep->cur_tx); in epic_tx_timeout()
894 if (!ep->tx_full) in epic_tx_timeout()
901 struct epic_private *ep = netdev_priv(dev); in epic_init_ring() local
904 ep->tx_full = 0; in epic_init_ring()
905 ep->dirty_tx = ep->cur_tx = 0; in epic_init_ring()
906 ep->cur_rx = ep->dirty_rx = 0; in epic_init_ring()
907 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in epic_init_ring()
911 ep->rx_ring[i].rxstatus = 0; in epic_init_ring()
912 ep->rx_ring[i].buflength = ep->rx_buf_sz; in epic_init_ring()
913 ep->rx_ring[i].next = ep->rx_ring_dma + in epic_init_ring()
915 ep->rx_skbuff[i] = NULL; in epic_init_ring()
918 ep->rx_ring[i-1].next = ep->rx_ring_dma; in epic_init_ring()
922 struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_init_ring()
923 ep->rx_skbuff[i] = skb; in epic_init_ring()
927 ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_init_ring()
929 ep->rx_buf_sz, in epic_init_ring()
931 ep->rx_ring[i].rxstatus = DescOwn; in epic_init_ring()
933 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in epic_init_ring()
938 ep->tx_skbuff[i] = NULL; in epic_init_ring()
939 ep->tx_ring[i].txstatus = 0x0000; in epic_init_ring()
940 ep->tx_ring[i].next = ep->tx_ring_dma + in epic_init_ring()
943 ep->tx_ring[i-1].next = ep->tx_ring_dma; in epic_init_ring()
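The epic_init_ring() hits show the two-pass descriptor setup: link the descriptors into a circular list (the last .next pointing back to rx_ring_dma), then attach an skb to each Rx slot, map it with dma_map_single(), and hand ownership to the NIC via DescOwn. A minimal Rx-side sketch, assuming an illustrative my_rx_desc layout and a hypothetical DESC_OWN bit rather than the chip's real descriptor format:

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    #define MY_RX_N   32		/* illustrative ring size */
    #define DESC_OWN  0x8000	/* hypothetical "owned by NIC" status bit */

    struct my_rx_desc { u32 status, buflen, bufaddr, next; };	/* not the real layout */

    struct my_priv {
    	struct pci_dev *pdev;
    	struct my_rx_desc *rx_ring;	/* from dma_alloc_coherent() */
    	dma_addr_t rx_ring_dma;
    	struct sk_buff *rx_skbuff[MY_RX_N];
    	unsigned int rx_buf_sz;
    };

    /* Chain the descriptors into a circle, then give each slot a mapped skb. */
    static int my_init_rx_ring(struct net_device *dev, struct my_priv *mp)
    {
    	int i;

    	for (i = 0; i < MY_RX_N; i++) {
    		struct sk_buff *skb = netdev_alloc_skb(dev, mp->rx_buf_sz + 2);
    		dma_addr_t map;

    		if (!skb)
    			return -ENOMEM;
    		map = dma_map_single(&mp->pdev->dev, skb->data, mp->rx_buf_sz,
    				     DMA_FROM_DEVICE);
    		if (dma_mapping_error(&mp->pdev->dev, map)) {
    			dev_kfree_skb(skb);
    			return -ENOMEM;
    		}
    		mp->rx_skbuff[i] = skb;
    		mp->rx_ring[i].buflen = mp->rx_buf_sz;
    		mp->rx_ring[i].bufaddr = (u32)map;	/* 32-bit DMA device assumed */
    		/* last descriptor's .next wraps back to the first */
    		mp->rx_ring[i].next = mp->rx_ring_dma +
    				      ((i + 1) % MY_RX_N) * sizeof(struct my_rx_desc);
    		mp->rx_ring[i].status = DESC_OWN;	/* NIC now owns this slot */
    	}
    	return 0;
    }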
948 struct epic_private *ep = netdev_priv(dev); in epic_start_xmit() local
949 void __iomem *ioaddr = ep->ioaddr; in epic_start_xmit()
961 spin_lock_irqsave(&ep->lock, flags); in epic_start_xmit()
962 free_count = ep->cur_tx - ep->dirty_tx; in epic_start_xmit()
963 entry = ep->cur_tx % TX_RING_SIZE; in epic_start_xmit()
965 ep->tx_skbuff[entry] = skb; in epic_start_xmit()
966 ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_start_xmit()
978 ep->tx_full = 1; in epic_start_xmit()
980 ep->tx_ring[entry].buflength = ctrl_word | skb->len; in epic_start_xmit()
981 ep->tx_ring[entry].txstatus = in epic_start_xmit()
985 ep->cur_tx++; in epic_start_xmit()
986 if (ep->tx_full) in epic_start_xmit()
989 spin_unlock_irqrestore(&ep->lock, flags); in epic_start_xmit()
1000 static void epic_tx_error(struct net_device *dev, struct epic_private *ep, in epic_tx_error() argument
1022 static void epic_tx(struct net_device *dev, struct epic_private *ep) in epic_tx() argument
1030 cur_tx = ep->cur_tx; in epic_tx()
1031 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) { in epic_tx()
1034 int txstatus = ep->tx_ring[entry].txstatus; in epic_tx()
1042 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len; in epic_tx()
1044 epic_tx_error(dev, ep, txstatus); in epic_tx()
1047 skb = ep->tx_skbuff[entry]; in epic_tx()
1048 dma_unmap_single(&ep->pci_dev->dev, in epic_tx()
1049 ep->tx_ring[entry].bufaddr, skb->len, in epic_tx()
1052 ep->tx_skbuff[entry] = NULL; in epic_tx()
1058 dirty_tx, cur_tx, ep->tx_full); in epic_tx()
1062 ep->dirty_tx = dirty_tx; in epic_tx()
1063 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) { in epic_tx()
1065 ep->tx_full = 0; in epic_tx()
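epic_tx() is the completion side: walk from dirty_tx toward cur_tx, stop at the first descriptor the NIC still owns, unmap and free each finished skb, and clear tx_full once enough slots are free again. A hedged sketch of that reclaim loop, with netif_wake_queue() standing in for the driver's tx_full bookkeeping and an illustrative slack of four slots mirroring the TX_QUEUE_LEN - 4 test:

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    #define MY_TX_N   32		/* illustrative ring size */
    #define DESC_OWN  0x8000	/* hypothetical "owned by NIC" status bit */

    struct my_tx_desc { u32 status, buflen, bufaddr, next; };	/* not the real layout */

    struct my_priv {
    	struct pci_dev *pdev;
    	struct my_tx_desc *tx_ring;
    	struct sk_buff *tx_skbuff[MY_TX_N];
    	dma_addr_t tx_mapping[MY_TX_N];
    	unsigned int cur_tx, dirty_tx;
    };

    /* Free every Tx slot the NIC has finished with, then wake the queue if it was full. */
    static void my_tx_reclaim(struct net_device *dev, struct my_priv *mp)
    {
    	unsigned int dirty;

    	for (dirty = mp->dirty_tx; mp->cur_tx - dirty > 0; dirty++) {
    		unsigned int entry = dirty % MY_TX_N;
    		struct sk_buff *skb = mp->tx_skbuff[entry];

    		if (mp->tx_ring[entry].status & DESC_OWN)
    			break;				/* still queued in hardware */

    		dev->stats.tx_packets++;
    		dev->stats.tx_bytes += skb->len;
    		dma_unmap_single(&mp->pdev->dev, mp->tx_mapping[entry],
    				 skb->len, DMA_TO_DEVICE);
    		dev_kfree_skb_irq(skb);			/* runs in softirq context */
    		mp->tx_skbuff[entry] = NULL;
    	}
    	mp->dirty_tx = dirty;

    	/* Leave a few slots of slack before restarting the stack's queue. */
    	if (netif_queue_stopped(dev) && mp->cur_tx - dirty < MY_TX_N - 4)
    		netif_wake_queue(dev);
    }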
1075 struct epic_private *ep = netdev_priv(dev); in epic_interrupt() local
1076 void __iomem *ioaddr = ep->ioaddr; in epic_interrupt()
1095 spin_lock(&ep->napi_lock); in epic_interrupt()
1096 if (napi_schedule_prep(&ep->napi)) { in epic_interrupt()
1097 epic_napi_irq_off(dev, ep); in epic_interrupt()
1098 __napi_schedule(&ep->napi); in epic_interrupt()
1100 spin_unlock(&ep->napi_lock); in epic_interrupt()
1118 ew32(TxThresh, ep->tx_threshold += 128); in epic_interrupt()
1143 struct epic_private *ep = netdev_priv(dev); in epic_rx() local
1144 int entry = ep->cur_rx % RX_RING_SIZE; in epic_rx()
1145 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; in epic_rx()
1150 ep->rx_ring[entry].rxstatus); in epic_rx()
1156 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) { in epic_rx()
1157 int status = ep->rx_ring[entry].rxstatus; in epic_rx()
1191 dma_sync_single_for_cpu(&ep->pci_dev->dev, in epic_rx()
1192 ep->rx_ring[entry].bufaddr, in epic_rx()
1193 ep->rx_buf_sz, in epic_rx()
1195 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); in epic_rx()
1197 dma_sync_single_for_device(&ep->pci_dev->dev, in epic_rx()
1198 ep->rx_ring[entry].bufaddr, in epic_rx()
1199 ep->rx_buf_sz, in epic_rx()
1202 dma_unmap_single(&ep->pci_dev->dev, in epic_rx()
1203 ep->rx_ring[entry].bufaddr, in epic_rx()
1204 ep->rx_buf_sz, in epic_rx()
1206 skb_put(skb = ep->rx_skbuff[entry], pkt_len); in epic_rx()
1207 ep->rx_skbuff[entry] = NULL; in epic_rx()
1215 entry = (++ep->cur_rx) % RX_RING_SIZE; in epic_rx()
1219 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) { in epic_rx()
1220 entry = ep->dirty_rx % RX_RING_SIZE; in epic_rx()
1221 if (ep->rx_skbuff[entry] == NULL) { in epic_rx()
1223 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2); in epic_rx()
1227 ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev, in epic_rx()
1229 ep->rx_buf_sz, in epic_rx()
1234 ep->rx_ring[entry].rxstatus = DescOwn; in epic_rx()
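epic_rx() follows the rx_copybreak strategy visible above: small frames are copied out of the still-mapped buffer (dma_sync_single_for_cpu(), skb_copy_to_linear_data(), dma_sync_single_for_device()), larger ones are unmapped and handed up directly, and the refill loop re-arms emptied slots with DescOwn. A hedged sketch of just the copy-versus-unmap decision, with MY_COPYBREAK and the my_priv fields as illustrative stand-ins:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    #define MY_RX_N      32
    #define MY_COPYBREAK 200	/* illustrative threshold, not the driver's rx_copybreak */

    struct my_priv {
    	struct pci_dev *pdev;
    	struct sk_buff *rx_skbuff[MY_RX_N];
    	dma_addr_t rx_mapping[MY_RX_N];
    	unsigned int rx_buf_sz;
    };

    /* Hand one received frame to the stack, copying small ones out of the DMA buffer. */
    static void my_rx_one(struct net_device *dev, struct my_priv *mp,
    		      unsigned int entry, unsigned int pkt_len)
    {
    	struct sk_buff *skb;

    	if (pkt_len < MY_COPYBREAK &&
    	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
    		skb_reserve(skb, 2);	/* align the IP header */
    		dma_sync_single_for_cpu(&mp->pdev->dev, mp->rx_mapping[entry],
    					mp->rx_buf_sz, DMA_FROM_DEVICE);
    		skb_copy_to_linear_data(skb, mp->rx_skbuff[entry]->data, pkt_len);
    		skb_put(skb, pkt_len);
    		dma_sync_single_for_device(&mp->pdev->dev, mp->rx_mapping[entry],
    					   mp->rx_buf_sz, DMA_FROM_DEVICE);
    		/* the original buffer stays mapped in the ring for reuse */
    	} else {
    		dma_unmap_single(&mp->pdev->dev, mp->rx_mapping[entry],
    				 mp->rx_buf_sz, DMA_FROM_DEVICE);
    		skb = mp->rx_skbuff[entry];
    		skb_put(skb, pkt_len);
    		mp->rx_skbuff[entry] = NULL;	/* the refill loop replaces it */
    	}
    	skb->protocol = eth_type_trans(skb, dev);
    	netif_receive_skb(skb);
    	dev->stats.rx_packets++;
    	dev->stats.rx_bytes += pkt_len;
    }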
1239 static void epic_rx_err(struct net_device *dev, struct epic_private *ep) in epic_rx_err() argument
1241 void __iomem *ioaddr = ep->ioaddr; in epic_rx_err()
1256 struct epic_private *ep = container_of(napi, struct epic_private, napi); in epic_poll() local
1257 struct net_device *dev = ep->mii.dev; in epic_poll()
1258 void __iomem *ioaddr = ep->ioaddr; in epic_poll()
1261 epic_tx(dev, ep); in epic_poll()
1265 epic_rx_err(dev, ep); in epic_poll()
1270 spin_lock_irqsave(&ep->napi_lock, flags); in epic_poll()
1273 epic_napi_irq_on(dev, ep); in epic_poll()
1274 spin_unlock_irqrestore(&ep->napi_lock, flags); in epic_poll()
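Taken together, the epic_interrupt() and epic_poll() hits show the interrupt/NAPI handshake: the hard IRQ masks the Rx/Tx events it hands to NAPI (epic_napi_irq_off()) and schedules the poll under napi_lock, and the poll routine re-enables them (epic_napi_irq_on()) only after completing NAPI. A combined sketch under an invented register layout; MY_INTSTAT/MY_INTMASK/MY_NAPI_EVENTS are placeholders and my_rx()/my_tx_reclaim() are stubs:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_INTSTAT	0x00		/* hypothetical register offsets */
    #define MY_INTMASK	0x08
    #define MY_NAPI_EVENTS	0x0003		/* hypothetical Rx/Tx-done bits */

    struct my_priv {
    	void __iomem *ioaddr;
    	u32 irq_mask;
    	spinlock_t napi_lock;
    	struct napi_struct napi;
    };

    static int my_rx(struct my_priv *mp, int budget) { return 0; }	/* stub: Rx work */
    static void my_tx_reclaim(struct my_priv *mp) { }		/* stub: Tx reclaim */

    static irqreturn_t my_interrupt(int irq, void *dev_instance)
    {
    	struct net_device *dev = dev_instance;
    	struct my_priv *mp = netdev_priv(dev);
    	u32 status = readl(mp->ioaddr + MY_INTSTAT);

    	if (!(status & mp->irq_mask))
    		return IRQ_NONE;		/* not our interrupt */

    	if (status & MY_NAPI_EVENTS) {
    		spin_lock(&mp->napi_lock);
    		if (napi_schedule_prep(&mp->napi)) {
    			/* mask only the events the poll loop will service */
    			writel(mp->irq_mask & ~MY_NAPI_EVENTS,
    			       mp->ioaddr + MY_INTMASK);
    			__napi_schedule(&mp->napi);
    		}
    		spin_unlock(&mp->napi_lock);
    	}
    	return IRQ_HANDLED;
    }

    static int my_poll(struct napi_struct *napi, int budget)
    {
    	struct my_priv *mp = container_of(napi, struct my_priv, napi);
    	int work_done;

    	my_tx_reclaim(mp);
    	work_done = my_rx(mp, budget);

    	if (work_done < budget && napi_complete_done(napi, work_done)) {
    		unsigned long flags;

    		/* re-enable the NAPI-serviced events only once polling is done */
    		spin_lock_irqsave(&mp->napi_lock, flags);
    		writel(mp->irq_mask | MY_NAPI_EVENTS, mp->ioaddr + MY_INTMASK);
    		spin_unlock_irqrestore(&mp->napi_lock, flags);
    	}
    	return work_done;
    }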
1282 struct epic_private *ep = netdev_priv(dev); in epic_close() local
1283 struct pci_dev *pdev = ep->pci_dev; in epic_close()
1284 void __iomem *ioaddr = ep->ioaddr; in epic_close()
1289 napi_disable(&ep->napi); in epic_close()
1295 del_timer_sync(&ep->timer); in epic_close()
1297 epic_disable_int(dev, ep); in epic_close()
1305 skb = ep->rx_skbuff[i]; in epic_close()
1306 ep->rx_skbuff[i] = NULL; in epic_close()
1307 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ in epic_close()
1308 ep->rx_ring[i].buflength = 0; in epic_close()
1310 dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr, in epic_close()
1311 ep->rx_buf_sz, DMA_FROM_DEVICE); in epic_close()
1314 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */ in epic_close()
1317 skb = ep->tx_skbuff[i]; in epic_close()
1318 ep->tx_skbuff[i] = NULL; in epic_close()
1321 dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len, in epic_close()
1334 struct epic_private *ep = netdev_priv(dev); in epic_get_stats() local
1335 void __iomem *ioaddr = ep->ioaddr; in epic_get_stats()
1350 new frame, not around filling ep->setup_frame. This is non-deterministic
1355 struct epic_private *ep = netdev_priv(dev); in set_rx_mode() local
1356 void __iomem *ioaddr = ep->ioaddr; in set_rx_mode()
1384 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { in set_rx_mode()
1387 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); in set_rx_mode()
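set_rx_mode() builds a 64-bit multicast hash filter and rewrites the hardware MC registers only when the result differs from the cached ep->mc_filter. A hedged sketch of the usual CRC-based fill; the low-6-bit selection is the common convention and is assumed here rather than taken from the chip documentation:

    #include <linux/crc32.h>	/* ether_crc_le() */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    /* Illustrative multicast hash fill: one bit per group address in a 64-bit table. */
    static void my_build_mc_filter(struct net_device *dev, u16 mc_filter[4])
    {
    	struct netdev_hw_addr *ha;

    	memset(mc_filter, 0, 4 * sizeof(mc_filter[0]));
    	netdev_for_each_mc_addr(ha, dev) {
    		/* low 6 CRC bits select one of 64 filter bits (assumed mapping) */
    		unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;

    		mc_filter[bit >> 4] |= 1 << (bit & 0xf);
    	}
    }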
1449 struct epic_private *ep = netdev_priv(dev); in ethtool_begin() local
1450 void __iomem *ioaddr = ep->ioaddr; in ethtool_begin()
1452 if (ep->ethtool_ops_nesting == U32_MAX) in ethtool_begin()
1455 if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) { in ethtool_begin()
1464 struct epic_private *ep = netdev_priv(dev); in ethtool_complete() local
1465 void __iomem *ioaddr = ep->ioaddr; in ethtool_complete()
1468 if (!--ep->ethtool_ops_nesting && !netif_running(dev)) { in ethtool_complete()
1516 struct epic_private *ep = netdev_priv(dev); in epic_remove_one() local
1519 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring, in epic_remove_one()
1520 ep->tx_ring_dma); in epic_remove_one()
1521 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring, in epic_remove_one()
1522 ep->rx_ring_dma); in epic_remove_one()
1523 pci_iounmap(pdev, ep->ioaddr); in epic_remove_one()
1533 struct epic_private *ep = netdev_priv(dev); in epic_suspend() local
1534 void __iomem *ioaddr = ep->ioaddr; in epic_suspend()