Lines matching full:np in drivers/net/ethernet/dlink/sundance.c
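Every match below is the driver-private pointer np, i.e. the netdev_priv() area that alloc_etherdev() reserves alongside the struct net_device (see the sundance_probe1() lines at 519, 536 and 537). The fragment below is a minimal sketch of that pattern for orientation, condensed from those lines as they would sit inside a probe routine; the error handling shown is illustrative, not taken verbatim from the driver.

	/* Sketch: alloc_etherdev() allocates the net_device plus sizeof(*np)
	 * bytes of private storage; netdev_priv() returns that private area.
	 */
	struct net_device *dev = alloc_etherdev(sizeof(struct netdev_private));
	struct netdev_private *np;

	if (!dev)
		return -ENOMEM;
	np = netdev_priv(dev);	/* private area placed right after the net_device */
	np->ndev = dev;		/* back-pointer used by the timer/tasklet callbacks */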
448 struct netdev_private *np = netdev_priv(dev); in sundance_reset() local
449 void __iomem *ioaddr = np->base + ASICCtrl; in sundance_reset()
468 struct netdev_private *np = netdev_priv(dev); in sundance_poll_controller() local
470 disable_irq(np->pci_dev->irq); in sundance_poll_controller()
471 intr_handler(np->pci_dev->irq, dev); in sundance_poll_controller()
472 enable_irq(np->pci_dev->irq); in sundance_poll_controller()
496 struct netdev_private *np; in sundance_probe1() local
519 dev = alloc_etherdev(sizeof(*np)); in sundance_probe1()
536 np = netdev_priv(dev); in sundance_probe1()
537 np->ndev = dev; in sundance_probe1()
538 np->base = ioaddr; in sundance_probe1()
539 np->pci_dev = pdev; in sundance_probe1()
540 np->chip_id = chip_idx; in sundance_probe1()
541 np->msg_enable = (1 << debug) - 1; in sundance_probe1()
542 spin_lock_init(&np->lock); in sundance_probe1()
543 spin_lock_init(&np->statlock); in sundance_probe1()
544 tasklet_setup(&np->rx_tasklet, rx_poll); in sundance_probe1()
545 tasklet_setup(&np->tx_tasklet, tx_poll); in sundance_probe1()
551 np->tx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
552 np->tx_ring_dma = ring_dma; in sundance_probe1()
558 np->rx_ring = (struct netdev_desc *)ring_space; in sundance_probe1()
559 np->rx_ring_dma = ring_dma; in sundance_probe1()
561 np->mii_if.dev = dev; in sundance_probe1()
562 np->mii_if.mdio_read = mdio_read; in sundance_probe1()
563 np->mii_if.mdio_write = mdio_write; in sundance_probe1()
564 np->mii_if.phy_id_mask = 0x1f; in sundance_probe1()
565 np->mii_if.reg_num_mask = 0x1f; in sundance_probe1()
586 np->phys[0] = 1; /* Default setting */ in sundance_probe1()
587 np->mii_preamble_required++; in sundance_probe1()
593 if (sundance_pci_tbl[np->chip_id].device == 0x0200) { in sundance_probe1()
604 np->phys[phy_idx++] = phyx; in sundance_probe1()
605 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); in sundance_probe1()
607 np->mii_preamble_required++; in sundance_probe1()
610 dev->name, phyx, mii_status, np->mii_if.advertising); in sundance_probe1()
613 np->mii_preamble_required--; in sundance_probe1()
621 np->mii_if.phy_id = np->phys[0]; in sundance_probe1()
624 np->an_enable = 1; in sundance_probe1()
627 np->an_enable = 0; in sundance_probe1()
630 np->speed = 100; in sundance_probe1()
631 np->mii_if.full_duplex = 1; in sundance_probe1()
634 np->speed = 100; in sundance_probe1()
635 np->mii_if.full_duplex = 0; in sundance_probe1()
638 np->speed = 10; in sundance_probe1()
639 np->mii_if.full_duplex = 1; in sundance_probe1()
642 np->speed = 10; in sundance_probe1()
643 np->mii_if.full_duplex = 0; in sundance_probe1()
645 np->an_enable = 1; in sundance_probe1()
649 np->flowctrl = 1; in sundance_probe1()
655 if (np->an_enable) { in sundance_probe1()
656 np->speed = 100; in sundance_probe1()
657 np->mii_if.full_duplex = 1; in sundance_probe1()
658 np->an_enable = 0; in sundance_probe1()
662 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); in sundance_probe1()
665 if (np->flowctrl) in sundance_probe1()
666 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); in sundance_probe1()
667 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); in sundance_probe1()
669 if (!np->an_enable) { in sundance_probe1()
671 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; in sundance_probe1()
672 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; in sundance_probe1()
673 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); in sundance_probe1()
675 np->speed, np->mii_if.full_duplex ? "Full" : "Half"); in sundance_probe1()
681 if (netif_msg_hw(np)) in sundance_probe1()
684 if (netif_msg_hw(np)) in sundance_probe1()
694 np->rx_ring, np->rx_ring_dma); in sundance_probe1()
697 np->tx_ring, np->tx_ring_dma); in sundance_probe1()
763 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
764 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_read()
768 if (np->mii_preamble_required) in mdio_read()
793 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
794 void __iomem *mdio_addr = np->base + MIICtrl; in mdio_write()
798 if (np->mii_preamble_required) in mdio_write()
823 struct netdev_private *np; in mdio_wait_link() local
825 np = netdev_priv(dev); in mdio_wait_link()
826 phy_id = np->phys[0]; in mdio_wait_link()
839 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
840 void __iomem *ioaddr = np->base; in netdev_open()
841 const int irq = np->pci_dev->irq; in netdev_open()
851 if (netif_msg_ifup(np)) in netdev_open()
856 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); in netdev_open()
872 dev->if_port = np->default_port; in netdev_open()
874 spin_lock_init(&np->mcastlock); in netdev_open()
883 if (np->pci_dev->revision >= 0x14) in netdev_open()
887 spin_lock_irqsave(&np->lock, flags); in netdev_open()
889 spin_unlock_irqrestore(&np->lock, flags); in netdev_open()
895 np->wol_enabled = 0; in netdev_open()
897 if (netif_msg_ifup(np)) in netdev_open()
905 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
906 np->timer.expires = jiffies + 3*HZ; in netdev_open()
907 add_timer(&np->timer); in netdev_open()
917 struct netdev_private *np = netdev_priv(dev); in check_duplex() local
918 void __iomem *ioaddr = np->base; in check_duplex()
919 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in check_duplex()
920 int negotiated = mii_lpa & np->mii_if.advertising; in check_duplex()
924 if (!np->an_enable || mii_lpa == 0xffff) { in check_duplex()
925 if (np->mii_if.full_duplex) in check_duplex()
933 if (np->mii_if.full_duplex != duplex) { in check_duplex()
934 np->mii_if.full_duplex = duplex; in check_duplex()
935 if (netif_msg_link(np)) in check_duplex()
938 duplex ? "full" : "half", np->phys[0], negotiated); in check_duplex()
945 struct netdev_private *np = timer_container_of(np, t, timer); in netdev_timer() local
946 struct net_device *dev = np->mii_if.dev; in netdev_timer()
947 void __iomem *ioaddr = np->base; in netdev_timer()
950 if (netif_msg_timer(np)) { in netdev_timer()
957 np->timer.expires = jiffies + next_tick; in netdev_timer()
958 add_timer(&np->timer); in netdev_timer()
963 struct netdev_private *np = netdev_priv(dev); in tx_timeout() local
964 void __iomem *ioaddr = np->base; in tx_timeout()
968 tasklet_disable_in_atomic(&np->tx_tasklet); in tx_timeout()
979 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), in tx_timeout()
980 le32_to_cpu(np->tx_ring[i].next_desc), in tx_timeout()
981 le32_to_cpu(np->tx_ring[i].status), in tx_timeout()
982 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, in tx_timeout()
983 le32_to_cpu(np->tx_ring[i].frag.addr), in tx_timeout()
984 le32_to_cpu(np->tx_ring[i].frag.length)); in tx_timeout()
987 ioread32(np->base + TxListPtr), in tx_timeout()
990 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
991 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
992 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); in tx_timeout()
993 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); in tx_timeout()
995 spin_lock_irqsave(&np->lock, flag); in tx_timeout()
999 spin_unlock_irqrestore(&np->lock, flag); in tx_timeout()
1005 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in tx_timeout()
1009 tasklet_enable(&np->tx_tasklet); in tx_timeout()
1016 struct netdev_private *np = netdev_priv(dev); in init_ring() local
1019 np->cur_rx = np->cur_tx = 0; in init_ring()
1020 np->dirty_rx = np->dirty_tx = 0; in init_ring()
1021 np->cur_task = 0; in init_ring()
1023 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); in init_ring()
1027 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + in init_ring()
1028 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); in init_ring()
1029 np->rx_ring[i].status = 0; in init_ring()
1030 np->rx_ring[i].frag.length = 0; in init_ring()
1031 np->rx_skbuff[i] = NULL; in init_ring()
1039 netdev_alloc_skb(dev, np->rx_buf_sz + 2); in init_ring()
1040 np->rx_skbuff[i] = skb; in init_ring()
1044 addr = dma_map_single(&np->pci_dev->dev, skb->data, in init_ring()
1045 np->rx_buf_sz, DMA_FROM_DEVICE); in init_ring()
1046 if (dma_mapping_error(&np->pci_dev->dev, addr)) { in init_ring()
1048 np->rx_skbuff[i] = NULL; in init_ring()
1051 np->rx_ring[i].frag.addr = cpu_to_le32(addr); in init_ring()
1052 np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); in init_ring()
1054 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_ring()
1057 np->tx_skbuff[i] = NULL; in init_ring()
1058 np->tx_ring[i].status = 0; in init_ring()
1064 struct netdev_private *np = from_tasklet(np, t, tx_tasklet); in tx_poll() local
1065 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
1067 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; in tx_poll()
1070 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { in tx_poll()
1071 int entry = np->cur_task % TX_RING_SIZE; in tx_poll()
1072 txdesc = &np->tx_ring[entry]; in tx_poll()
1073 if (np->last_tx) { in tx_poll()
1074 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + in tx_poll()
1077 np->last_tx = txdesc; in tx_poll()
1082 if (ioread32 (np->base + TxListPtr) == 0) in tx_poll()
1083 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), in tx_poll()
1084 np->base + TxListPtr); in tx_poll()
1090 struct netdev_private *np = netdev_priv(dev); in start_tx() local
1096 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1097 np->tx_skbuff[entry] = skb; in start_tx()
1098 txdesc = &np->tx_ring[entry]; in start_tx()
1100 addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len, in start_tx()
1102 if (dma_mapping_error(&np->pci_dev->dev, addr)) in start_tx()
1111 np->cur_tx++; in start_tx()
1114 tasklet_schedule(&np->tx_tasklet); in start_tx()
1117 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && in start_tx()
1123 if (netif_msg_tx_queued(np)) { in start_tx()
1126 dev->name, np->cur_tx, entry); in start_tx()
1132 np->tx_skbuff[entry] = NULL; in start_tx()
1141 struct netdev_private *np = netdev_priv(dev); in reset_tx() local
1142 void __iomem *ioaddr = np->base; in reset_tx()
1152 np->tx_ring[i].next_desc = 0; in reset_tx()
1154 skb = np->tx_skbuff[i]; in reset_tx()
1156 dma_unmap_single(&np->pci_dev->dev, in reset_tx()
1157 le32_to_cpu(np->tx_ring[i].frag.addr), in reset_tx()
1160 np->tx_skbuff[i] = NULL; in reset_tx()
1164 np->cur_tx = np->dirty_tx = 0; in reset_tx()
1165 np->cur_task = 0; in reset_tx()
1167 np->last_tx = NULL; in reset_tx()
1179 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
1180 void __iomem *ioaddr = np->base; in intr_handler()
1191 if (netif_msg_intr(np)) in intr_handler()
1203 if (np->budget < 0) in intr_handler()
1204 np->budget = RX_BUDGET; in intr_handler()
1205 tasklet_schedule(&np->rx_tasklet); in intr_handler()
1210 if (netif_msg_tx_done(np)) in intr_handler()
1215 if (netif_msg_tx_err(np)) in intr_handler()
1259 if (np->pci_dev->revision >= 0x14) { in intr_handler()
1260 spin_lock(&np->lock); in intr_handler()
1261 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1262 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1266 np->tx_ring[entry].status) >> 2) & 0xff; in intr_handler()
1268 !(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1274 skb = np->tx_skbuff[entry]; in intr_handler()
1276 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1277 le32_to_cpu(np->tx_ring[entry].frag.addr), in intr_handler()
1279 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1280 np->tx_skbuff[entry] = NULL; in intr_handler()
1281 np->tx_ring[entry].frag.addr = 0; in intr_handler()
1282 np->tx_ring[entry].frag.length = 0; in intr_handler()
1284 spin_unlock(&np->lock); in intr_handler()
1286 spin_lock(&np->lock); in intr_handler()
1287 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in intr_handler()
1288 int entry = np->dirty_tx % TX_RING_SIZE; in intr_handler()
1290 if (!(le32_to_cpu(np->tx_ring[entry].status) in intr_handler()
1293 skb = np->tx_skbuff[entry]; in intr_handler()
1295 dma_unmap_single(&np->pci_dev->dev, in intr_handler()
1296 le32_to_cpu(np->tx_ring[entry].frag.addr), in intr_handler()
1298 dev_consume_skb_irq(np->tx_skbuff[entry]); in intr_handler()
1299 np->tx_skbuff[entry] = NULL; in intr_handler()
1300 np->tx_ring[entry].frag.addr = 0; in intr_handler()
1301 np->tx_ring[entry].frag.length = 0; in intr_handler()
1303 spin_unlock(&np->lock); in intr_handler()
1307 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { in intr_handler()
1315 if (netif_msg_intr(np)) in intr_handler()
1323 struct netdev_private *np = from_tasklet(np, t, rx_tasklet); in rx_poll() local
1324 struct net_device *dev = np->ndev; in rx_poll()
1325 int entry = np->cur_rx % RX_RING_SIZE; in rx_poll()
1326 int boguscnt = np->budget; in rx_poll()
1327 void __iomem *ioaddr = np->base; in rx_poll()
1332 struct netdev_desc *desc = &(np->rx_ring[entry]); in rx_poll()
1342 if (netif_msg_rx_status(np)) in rx_poll()
1347 if (netif_msg_rx_err(np)) in rx_poll()
1367 if (netif_msg_rx_status(np)) in rx_poll()
1377 dma_sync_single_for_cpu(&np->pci_dev->dev, in rx_poll()
1379 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1380 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); in rx_poll()
1381 dma_sync_single_for_device(&np->pci_dev->dev, in rx_poll()
1383 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1386 dma_unmap_single(&np->pci_dev->dev, in rx_poll()
1388 np->rx_buf_sz, DMA_FROM_DEVICE); in rx_poll()
1389 skb_put(skb = np->rx_skbuff[entry], pkt_len); in rx_poll()
1390 np->rx_skbuff[entry] = NULL; in rx_poll()
1399 np->cur_rx = entry; in rx_poll()
1401 np->budget -= received; in rx_poll()
1406 np->cur_rx = entry; in rx_poll()
1410 np->budget -= received; in rx_poll()
1411 if (np->budget <= 0) in rx_poll()
1412 np->budget = RX_BUDGET; in rx_poll()
1413 tasklet_schedule(&np->rx_tasklet); in rx_poll()
1418 struct netdev_private *np = netdev_priv(dev); in refill_rx() local
1422 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; in refill_rx()
1423 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { in refill_rx()
1427 entry = np->dirty_rx % RX_RING_SIZE; in refill_rx()
1428 if (np->rx_skbuff[entry] == NULL) { in refill_rx()
1429 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); in refill_rx()
1430 np->rx_skbuff[entry] = skb; in refill_rx()
1434 addr = dma_map_single(&np->pci_dev->dev, skb->data, in refill_rx()
1435 np->rx_buf_sz, DMA_FROM_DEVICE); in refill_rx()
1436 if (dma_mapping_error(&np->pci_dev->dev, addr)) { in refill_rx()
1438 np->rx_skbuff[entry] = NULL; in refill_rx()
1442 np->rx_ring[entry].frag.addr = cpu_to_le32(addr); in refill_rx()
1445 np->rx_ring[entry].frag.length = in refill_rx()
1446 cpu_to_le32(np->rx_buf_sz | LastFrag); in refill_rx()
1447 np->rx_ring[entry].status = 0; in refill_rx()
1452 struct netdev_private *np = netdev_priv(dev); in netdev_error() local
1453 void __iomem *ioaddr = np->base; in netdev_error()
1460 if (np->an_enable) { in netdev_error()
1461 mii_advertise = mdio_read(dev, np->phys[0], in netdev_error()
1463 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); in netdev_error()
1468 np->speed = 100; in netdev_error()
1471 np->speed = 100; in netdev_error()
1474 np->speed = 10; in netdev_error()
1477 np->speed = 10; in netdev_error()
1483 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); in netdev_error()
1485 np->speed = speed; in netdev_error()
1493 if (np->flowctrl && np->mii_if.full_duplex) { in netdev_error()
1517 struct netdev_private *np = netdev_priv(dev); in get_stats() local
1518 void __iomem *ioaddr = np->base; in get_stats()
1522 spin_lock_irqsave(&np->statlock, flags); in get_stats()
1530 np->xstats.tx_multiple_collisions += mult_coll; in get_stats()
1532 np->xstats.tx_single_collisions += single_coll; in get_stats()
1534 np->xstats.tx_late_collisions += late_coll; in get_stats()
1539 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); in get_stats()
1540 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); in get_stats()
1541 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); in get_stats()
1542 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); in get_stats()
1543 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); in get_stats()
1544 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); in get_stats()
1545 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); in get_stats()
1552 spin_unlock_irqrestore(&np->statlock, flags); in get_stats()
1559 struct netdev_private *np = netdev_priv(dev); in set_rx_mode() local
1560 void __iomem *ioaddr = np->base; in set_rx_mode()
1590 if (np->mii_if.full_duplex && np->flowctrl) in set_rx_mode()
1600 struct netdev_private *np = netdev_priv(dev); in __set_mac_addr() local
1604 iowrite16(addr16, np->base + StationAddr); in __set_mac_addr()
1606 iowrite16(addr16, np->base + StationAddr+2); in __set_mac_addr()
1608 iowrite16(addr16, np->base + StationAddr+4); in __set_mac_addr()
1649 struct netdev_private *np = netdev_priv(dev); in get_drvinfo() local
1651 strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in get_drvinfo()
1657 struct netdev_private *np = netdev_priv(dev); in get_link_ksettings() local
1658 spin_lock_irq(&np->lock); in get_link_ksettings()
1659 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); in get_link_ksettings()
1660 spin_unlock_irq(&np->lock); in get_link_ksettings()
1667 struct netdev_private *np = netdev_priv(dev); in set_link_ksettings() local
1669 spin_lock_irq(&np->lock); in set_link_ksettings()
1670 res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); in set_link_ksettings()
1671 spin_unlock_irq(&np->lock); in set_link_ksettings()
1677 struct netdev_private *np = netdev_priv(dev); in nway_reset() local
1678 return mii_nway_restart(&np->mii_if); in nway_reset()
1683 struct netdev_private *np = netdev_priv(dev); in get_link() local
1684 return mii_link_ok(&np->mii_if); in get_link()
1689 struct netdev_private *np = netdev_priv(dev); in get_msglevel() local
1690 return np->msg_enable; in get_msglevel()
1695 struct netdev_private *np = netdev_priv(dev); in set_msglevel() local
1696 np->msg_enable = val; in set_msglevel()
1719 struct netdev_private *np = netdev_priv(dev); in get_ethtool_stats() local
1723 data[i++] = np->xstats.tx_multiple_collisions; in get_ethtool_stats()
1724 data[i++] = np->xstats.tx_single_collisions; in get_ethtool_stats()
1725 data[i++] = np->xstats.tx_late_collisions; in get_ethtool_stats()
1726 data[i++] = np->xstats.tx_deferred; in get_ethtool_stats()
1727 data[i++] = np->xstats.tx_deferred_excessive; in get_ethtool_stats()
1728 data[i++] = np->xstats.tx_aborted; in get_ethtool_stats()
1729 data[i++] = np->xstats.tx_bcasts; in get_ethtool_stats()
1730 data[i++] = np->xstats.rx_bcasts; in get_ethtool_stats()
1731 data[i++] = np->xstats.tx_mcasts; in get_ethtool_stats()
1732 data[i++] = np->xstats.rx_mcasts; in get_ethtool_stats()
1740 struct netdev_private *np = netdev_priv(dev); in sundance_get_wol() local
1741 void __iomem *ioaddr = np->base; in sundance_get_wol()
1747 if (!np->wol_enabled) in sundance_get_wol()
1760 struct netdev_private *np = netdev_priv(dev); in sundance_set_wol() local
1761 void __iomem *ioaddr = np->base; in sundance_set_wol()
1764 if (!device_can_wakeup(&np->pci_dev->dev)) in sundance_set_wol()
1767 np->wol_enabled = !!(wol->wolopts); in sundance_set_wol()
1772 if (np->wol_enabled) { in sundance_set_wol()
1780 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled); in sundance_set_wol()
1807 struct netdev_private *np = netdev_priv(dev); in netdev_ioctl() local
1813 spin_lock_irq(&np->lock); in netdev_ioctl()
1814 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
1815 spin_unlock_irq(&np->lock); in netdev_ioctl()
1822 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
1823 void __iomem *ioaddr = np->base; in netdev_close()
1828 tasklet_kill(&np->rx_tasklet); in netdev_close()
1829 tasklet_kill(&np->tx_tasklet); in netdev_close()
1830 np->cur_tx = 0; in netdev_close()
1831 np->dirty_tx = 0; in netdev_close()
1832 np->cur_task = 0; in netdev_close()
1833 np->last_tx = NULL; in netdev_close()
1837 if (netif_msg_ifdown(np)) { in netdev_close()
1843 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); in netdev_close()
1871 if (netif_msg_hw(np)) { in netdev_close()
1873 (int)(np->tx_ring_dma)); in netdev_close()
1876 i, np->tx_ring[i].status, np->tx_ring[i].frag.addr, in netdev_close()
1877 np->tx_ring[i].frag.length); in netdev_close()
1879 (int)(np->rx_ring_dma)); in netdev_close()
1882 i, np->rx_ring[i].status, np->rx_ring[i].frag.addr, in netdev_close()
1883 np->rx_ring[i].frag.length); in netdev_close()
1888 free_irq(np->pci_dev->irq, dev); in netdev_close()
1890 timer_delete_sync(&np->timer); in netdev_close()
1894 np->rx_ring[i].status = 0; in netdev_close()
1895 skb = np->rx_skbuff[i]; in netdev_close()
1897 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1898 le32_to_cpu(np->rx_ring[i].frag.addr), in netdev_close()
1899 np->rx_buf_sz, DMA_FROM_DEVICE); in netdev_close()
1901 np->rx_skbuff[i] = NULL; in netdev_close()
1903 np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */ in netdev_close()
1906 np->tx_ring[i].next_desc = 0; in netdev_close()
1907 skb = np->tx_skbuff[i]; in netdev_close()
1909 dma_unmap_single(&np->pci_dev->dev, in netdev_close()
1910 le32_to_cpu(np->tx_ring[i].frag.addr), in netdev_close()
1913 np->tx_skbuff[i] = NULL; in netdev_close()
1925 struct netdev_private *np = netdev_priv(dev); in sundance_remove1() local
1928 np->rx_ring, np->rx_ring_dma); in sundance_remove1()
1930 np->tx_ring, np->tx_ring_dma); in sundance_remove1()
1931 pci_iounmap(pdev, np->base); in sundance_remove1()
1940 struct netdev_private *np = netdev_priv(dev); in sundance_suspend() local
1941 void __iomem *ioaddr = np->base; in sundance_suspend()
1949 if (np->wol_enabled) { in sundance_suspend()
1954 device_set_wakeup_enable(dev_d, np->wol_enabled); in sundance_suspend()