yellowfin.c: source lines matching "enable", "full", and "lpa" (excerpts)

/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */

	Written 1997-2001 by Donald Becker.

	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
	[link no longer provides useful info -jgarzik]
/* The user-configurable values. ... */

/* System-wide count of bogus-rx frames. */

#elif defined(YF_NEW)			/* A future perfect board :->. */

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. ... */

static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* ... There are no ill effects from too-large receive rings. */
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
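The options[] and full_duplex[] arrays above take one value per board, indexed
in probe order. As a usage sketch (values illustrative, not from the source),
loading the module with

	modprobe yellowfin full_duplex=1 rx_copybreak=200

would force full duplex on the first board and copy received frames of up to
200 bytes into freshly allocated skbuffs.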
	... Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
	Symbios 53C885E dual function chip.

II. Board-specific settings

IIIa. Ring buffers

	... a descriptor scheme similar to that used by the EEPro100 and
	Tulip. This driver uses two statically allocated fixed-size descriptor lists
	formed into rings by a branch from the final descriptor to the beginning of
	the list. The driver allocates full frame size skbuffs for the Rx ring
	buffers at open() time and passes the skb->data field to the Yellowfin
	as receive data buffers.

	The RX_COPYBREAK value is chosen to trade off the memory wasted by
	using a full-sized skbuff for small frames vs. the copying costs of larger
	frames. For small frames the copying cost is negligible (esp. considering
	that we are pre-loading the cache with immediately useful header
	information). For large frames the copying cost is non-trivial, and the
	larger copy might flush the cache of useful data.
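A condensed sketch of the resulting receive-path decision (same variable names
as the yellowfin_rx() excerpt further below; allocation failure and ring
refill elided):

	if (pkt_len > rx_copybreak) {
		/* Pass the full-sized ring buffer up the stack and let the
		   refill loop allocate a replacement later. */
	} else {
		struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);
		if (skb) {
			skb_reserve(skb, 2);	/* 16-byte align the IP header */
			skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
			skb_put(skb, pkt_len);
		}
	}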
IIId. Synchronization

	The driver runs as two independent, single-threaded flows of control. One
	is the send-packet routine, which enforces single-threaded use by the
	dev->tbusy flag. The other thread is the interrupt handler, which is single
	threaded by the hardware and other software.

	The send packet thread has partial control over the Tx ring and 'dev->tbusy'
	flag. It sets tbusy whenever it queues a Tx packet; if the next slot is
	free it clears tbusy when finished, otherwise it sets the 'yp->tx_full' flag.

	The interrupt handler has exclusive control over the Rx ring and records
	stats from the Tx ring. After reaping the stats, it marks the Tx queue entries
	empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
	clears both the tx_full and tbusy flags; the handshake is sketched below.
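Condensed from the yellowfin_start_xmit() and yellowfin_interrupt() excerpts
later in this listing, the 'tx_full' handshake amounts to:

	/* Producer side (start_xmit): stop the queue when the ring fills. */
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue(dev);
	else
		yp->tx_full = 1;

	/* Consumer side (interrupt handler): wake it once slots free up. */
	if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
		yp->tx_full = 0;
		netif_wake_queue(dev);
	}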
	Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.

	Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
	Data Manual
232 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
329 unsigned int full_duplex:1; /* Full-duplex operation requested. */
332 unsigned int default_port:4; /* Last dev->if_port value. */
In yellowfin_init_one():

	int chip_idx = ent->driver_data;
	...
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	spin_lock_init(&np->lock);

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = drv_flags;
	np->base = ioaddr;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	np->tx_status = ring_space;
	np->tx_status_dma = ring_dma;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The 'options' parameter may force full duplex and a media type: */
	np->full_duplex = 1;
	np->default_port = option & 15;
	if (np->default_port)
		np->medialock = 1;
	/* ...and the per-board full_duplex[] parameter may force it too: */
	np->full_duplex = 1;
	if (np->full_duplex)
		np->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->mtu = mtu;

	... dev->dev_addr, irq);

	if (np->drv_flags & HasMII) {
		...
		np->phys[phy_idx++] = phy;
		np->advertising = mdio_read(ioaddr, phy, 4);
		... phy, mii_status, np->advertising);
		...
		np->mii_cnt = phy_idx;
	}

	/* Error path: free the coherent rings in reverse order. */
	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
			  np->tx_status_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
	...
	return -ENODEV;
In read_eeprom():

	/* Busy-wait for the EEPROM to finish, bounded by bogus_cnt. */
	while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
In mdio_read():

	for (i = 10000; i >= 0; i--)

In mdio_write():

	for (i = 10000; i >= 0; i--)
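Both loops are the bounded-poll idiom: kick off an MII transaction, then spin
until the status register's busy bit clears. A condensed sketch of the read
side (register names follow the driver's register enum; the surrounding code
is not part of this excerpt):

	iowrite16((phy_id << 8) + location, ioaddr + MII_Addr);
	iowrite16(1, ioaddr + MII_Cmd);			/* start the read */
	for (i = 10000; i >= 0; i--)
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;				/* busy bit cleared */
	return ioread16(ioaddr + MII_Rd_Data);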
In yellowfin_open():

	const int irq = yp->pci_dev->irq;
	void __iomem *ioaddr = yp->base;

	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);

	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

	iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Enable automatic generation of flow control frames, period 0xffff. */

	yp->tx_threshold = 32;
	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);

	if (dev->if_port == 0)
		dev->if_port = yp->default_port;

	if (yp->drv_flags & IsGigabit) {
		/* We are always in full-duplex mode with gigabit! */
		yp->full_duplex = 1;
		...
	}
	...
	iowrite16(0x0018, ioaddr + FrameGap0);	/* 0060/4060 for non-MII 10baseT */
	...
	iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(0x0000, ioaddr + EventStatus);	/* Clear non-interrupting events */

	timer_setup(&yp->timer, yellowfin_timer, 0);
	yp->timer.expires = jiffies + 3*HZ;
	add_timer(&yp->timer);
In yellowfin_timer():

	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
	void __iomem *ioaddr = yp->base;

	if (yp->mii_cnt) {
		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
		int negotiated = lpa & yp->advertising;

		... yp->phys[0], bmsr, lpa);

		/* Re-resolve duplex from the intersection of what both ends
		   advertise, unless it has been forced: */
		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
	}

	yp->timer.expires = jiffies + next_tick;
	add_timer(&yp->timer);
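The mii_duplex() helper used above comes from <linux/mii.h>; roughly
(paraphrased from the kernel header, not from this excerpt):

	/* Returns nonzero for full duplex: either forced, or negotiated via
	   the common subset of our and the link partner's abilities. */
	static inline unsigned int mii_duplex(unsigned int duplex_lock,
					      unsigned int negotiated)
	{
		if (duplex_lock)
			return 1;
		if (mii_nway_result(negotiated) & (LPA_10FULL | LPA_100FULL))
			return 1;
		return 0;
	}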
In yellowfin_tx_timeout():

	void __iomem *ioaddr = yp->base;

	... yp->cur_tx, yp->dirty_tx, ...

	/* Dump both rings for debugging. */
	pr_warn(" Rx ring %p: ", yp->rx_ring);
	pr_cont(" %08x", yp->rx_ring[i].result_status);
	pr_warn(" Tx ring %p: ", yp->tx_ring);
	... yp->tx_status[i].tx_errs,
	    yp->tx_ring[i].result_status);

	dev->if_port = 0;

	/* Wake the potentially-idle transmit channel. */
	iowrite32(0x10001000, yp->base + TxCtrl);
	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_wake_queue(dev);

	dev->stats.tx_errors++;
In yellowfin_init_ring():

	yp->tx_full = 0;
	yp->cur_rx = yp->cur_tx = 0;
	yp->dirty_tx = 0;

	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Chain each Rx descriptor to the next; the branch from the final
	   entry back to the start closes the ring. */
	yp->rx_ring[i].dbdma_cmd =
		cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
	yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
		((i + 1) % RX_RING_SIZE) * sizeof(struct yellowfin_desc));

	struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
	yp->rx_skbuff[i] = skb;
	...
	yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
							 skb->data,
							 yp->rx_buf_sz,
							 DMA_FROM_DEVICE));

	/* Allocation failure: unwind the buffers allocated so far. */
	dev_kfree_skb(yp->rx_skbuff[j]);
	return -ENOMEM;

	yp->rx_ring[i - 1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	/* Zero when the ring is fully populated: nothing to refill yet. */
	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	yp->tx_skbuff[i] = NULL;
	yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
		((i + 1) % TX_RING_SIZE) * sizeof(struct yellowfin_desc));
	/* Wrap the ring: branch back unconditionally from the last entry. */
	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);

	/* With Tx status reporting, descriptors are used in pairs. */
	yp->tx_skbuff[i] = NULL;
	yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
		(j + 1) * sizeof(struct yellowfin_desc));
	if (yp->flags & FullTxStatus) {
		yp->tx_ring[j].dbdma_cmd =
			cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
		yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
		yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
			i * sizeof(struct tx_status_words));
	} else {
		/* Symbios chips write only the tx_errs word. */
		yp->tx_ring[j].dbdma_cmd =
			cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
		yp->tx_ring[j].request_cnt = 2;
		yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
			i * sizeof(struct tx_status_words) +
			&(yp->tx_status[0].tx_errs) -
			&(yp->tx_status[0]));
	}
	yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
		((j + 1) % (2 * TX_RING_SIZE)) * sizeof(struct yellowfin_desc));
	...
	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);

	yp->tx_tail_desc = &yp->tx_status[0];
In yellowfin_start_xmit():

	int len = skb->len;
	...
	entry = yp->cur_tx % TX_RING_SIZE;

	/* Fix GX chipset errata: pad the DMA length when the buffer would
	   end on or just short of a 32-byte cache-line boundary. */
	int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
	...
	len = skb->len + 32 - cacheline_end + 1;
	...
	yp->tx_skbuff[entry] = NULL;	/* padding-failure unwind */

	yp->tx_skbuff[entry] = skb;

	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
							     skb->data,
							     len, DMA_TO_DEVICE));
	yp->tx_ring[entry].result_status = 0;
	if (entry >= TX_RING_SIZE - 1) {
		/* New stop command. */
		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[TX_RING_SIZE - 1].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_ALWAYS | len);
	} else {
		yp->tx_ring[entry + 1].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->tx_ring[entry].dbdma_cmd =
			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
	}
	yp->cur_tx++;

	/* With Tx status reporting, even/odd descriptor pairs are used: */
	yp->tx_ring[entry<<1].request_cnt = len;
	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
								skb->data,
								len, DMA_TO_DEVICE));
	/* The input_last (status-write) command is constant, but we must
	   rewrite the subsequent 'stop' command. */
	yp->cur_tx++;
	{
		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
	}
	/* Final step -- overwrite the old 'stop' command. */
	yp->tx_ring[entry<<1].dbdma_cmd = ...;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	iowrite32(0x10001000, yp->base + TxCtrl);

	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
		netif_start_queue(dev);
	else
		yp->tx_full = 1;

	... yp->cur_tx, entry);
In yellowfin_interrupt():

	ioaddr = yp->base;

	spin_lock(&yp->lock);

	for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
		int entry = yp->dirty_tx % TX_RING_SIZE;

		if (yp->tx_ring[entry].result_status == 0)
			break;			/* not transmitted yet */
		skb = yp->tx_skbuff[entry];
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dma_unmap_single(&yp->pci_dev->dev,
				 le32_to_cpu(yp->tx_ring[entry].addr),
				 skb->len, DMA_TO_DEVICE);
		yp->tx_skbuff[entry] = NULL;
	}
	if (yp->tx_full &&
	    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
		/* The ring is no longer full, clear tbusy. */
		yp->tx_full = 0;
		netif_wake_queue(dev);
	}

	if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
		unsigned dirty_tx = yp->dirty_tx;

		for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
		     dirty_tx++) {
			...
			u16 tx_errs = yp->tx_status[entry].tx_errs;
			...
				   yp->tx_status[entry].tx_cnt,
				   yp->tx_status[entry].tx_errs,
				   yp->tx_status[entry].total_tx_cnt,
				   yp->tx_status[entry].paused);
			...
			skb = yp->tx_skbuff[entry];
			...
				/* There was a major error; log it. */
				dev->stats.tx_errors++;
				if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
				if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
				if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
				if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
			...
				dev->stats.tx_bytes += skb->len;
				dev->stats.collisions += tx_errs & 15;
				dev->stats.tx_packets++;
			...
			dma_unmap_single(&yp->pci_dev->dev,
					 yp->tx_ring[entry << 1].addr,
					 skb->len, DMA_TO_DEVICE);
			yp->tx_skbuff[entry] = NULL;
			/* Mark the status entry as empty. */
			yp->tx_status[entry].tx_errs = 0;
		}

		if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
			netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
				   dirty_tx, yp->cur_tx, yp->tx_full);
			dirty_tx += TX_RING_SIZE;
		}

		if (yp->tx_full &&
		    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}

		yp->dirty_tx = dirty_tx;
		yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
	}

	/* Too much work in one interrupt: bail out (max_interrupt_work). */
	if (--boguscnt < 0) {
		...
	}

	spin_unlock(&yp->lock);
In yellowfin_rx():

	int entry = yp->cur_rx % RX_RING_SIZE;
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	... entry, yp->rx_ring[entry].result_status);
	... entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
	    yp->rx_ring[entry].result_status);

	while (...) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];

		if (!desc->result_status)
			break;			/* nothing received yet */
		dma_sync_single_for_cpu(&yp->pci_dev->dev,
					le32_to_cpu(desc->addr),
					yp->rx_buf_sz, DMA_FROM_DEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->data;
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			     le32_to_cpu(desc->result_status)) & 0xffff;
		/* The chip appends a little-endian status word to the data. */
		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));

		if (--boguscnt < 0)
			break;

		...
			dev->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
			dev->stats.rx_errors++;
			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
			if (frame_status < 0) dev->stats.rx_dropped++;
		} else if (!(yp->drv_flags & IsGigabit) &&
			   ((buf_addr[data_size - 1] & 0x85) ||
			    buf_addr[data_size - 2] & 0xC0)) {
			u8 status1 = buf_addr[data_size - 2];
			u8 status2 = buf_addr[data_size - 1];
			dev->stats.rx_errors++;
			if (status1 & 0xC0) dev->stats.rx_length_errors++;
			if (status2 & 0x03) dev->stats.rx_frame_errors++;
			if (status2 & 0x04) dev->stats.rx_crc_errors++;
			if (status2 & 0x80) dev->stats.rx_dropped++;
		} else if ((yp->flags & HasMACAddrBug) &&
			   !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
							 entry * sizeof(struct yellowfin_desc)),
					     dev->dev_addr) &&
			   !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
							 entry * sizeof(struct yellowfin_desc)),
					     ...)) {
			...
		} else {
			/* Trim the trailing status bytes; the two chip
			   families append different amounts. */
			int pkt_len = data_size -
				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
			...
			/* Pass up the whole ring buffer for large frames... */
			dma_unmap_single(&yp->pci_dev->dev,
					 le32_to_cpu(yp->rx_ring[entry].addr),
					 yp->rx_buf_sz, DMA_FROM_DEVICE);
			yp->rx_skbuff[entry] = NULL;
			...
			/* ...or copy small frames into a fresh skb. */
			skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
			dma_sync_single_for_device(&yp->pci_dev->dev,
						   le32_to_cpu(desc->addr),
						   yp->rx_buf_sz,
						   DMA_FROM_DEVICE);
			...
			skb->protocol = eth_type_trans(skb, dev);
			...
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++yp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
			...
			yp->rx_skbuff[entry] = skb;
			...
			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
									     skb->data,
									     yp->rx_buf_sz,
									     DMA_FROM_DEVICE));
		}
		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
		if (entry != 0)
			yp->rx_ring[entry - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		else
			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
					    | yp->rx_buf_sz);
	}
In yellowfin_error():

	dev->stats.tx_errors++;
	...
	dev->stats.rx_errors++;
In yellowfin_close():

	void __iomem *ioaddr = yp->base;

	... yp->cur_tx, yp->dirty_tx,
	    yp->cur_rx, yp->dirty_rx);

	timer_delete(&yp->timer);

	/* Dump the Tx ring, status blocks, and Rx ring for debugging. */
	... (unsigned long long)yp->tx_ring_dma);
	... ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
	    i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
	    yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
	printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
	... i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
	    yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);

	... (unsigned long long)yp->rx_ring_dma);
	... ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
	    i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
	    yp->rx_ring[i].result_status);

	if (get_unaligned((u8 *)yp->rx_ring[i].addr) != 0x69) {
		... get_unaligned(((u16 *)yp->rx_ring[i].addr) + j));
	}

	free_irq(yp->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue and poison the descriptors. */
	yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
	yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
	if (yp->rx_skbuff[i]) {
		dev_kfree_skb(yp->rx_skbuff[i]);
	}
	yp->rx_skbuff[i] = NULL;

	dev_kfree_skb(yp->tx_skbuff[i]);
	yp->tx_skbuff[i] = NULL;
In set_rx_mode():

	void __iomem *ioaddr = yp->base;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		...
	} else if (... ||
		   (dev->flags & IFF_ALLMULTI)) {
		...
	}
	...
	/* Due to a bug in early chip versions, the filter bit must be set for
	   several truncated lengths of each multicast address, not only the
	   full six bytes: */
	if (yp->drv_flags & HasMulticastBug) {
		bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
		...
		bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
		...
		bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
		...
	}
	bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
	...
	} else {				/* Normal, unicast/broadcast-only mode. */
In yellowfin_get_drvinfo():

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
In netdev_ioctl():

	void __iomem *ioaddr = np->base;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		fallthrough;
	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f,
					  data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (data->phy_id == np->phys[0]) {
			u16 value = data->val_in;

			switch (data->reg_num) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->medialock = (value & 0x9000) ? 0 : 1;
				if (np->medialock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
		}
		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f,
			   data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
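For completeness, a small userspace sketch (not from the driver; the interface
name is illustrative) exercising the SIOCGMIIPHY/SIOCGMIIREG path this handler
serves:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		/* The kernel overlays mii_ioctl_data on the ifreq union. */
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */

		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
			mii->reg_num = MII_BMSR;		/* basic status register */
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR = 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}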
In yellowfin_remove_one():

	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
			  np->tx_status_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);

	pci_iounmap(pdev, np->base);