/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.

	[link no longer provides useful info -jgarzik]
*/
/* If using the broken firmware, data must be padded to the next 32-bit boundary. */

/* Define this if using the driver with the zero-copy patch. */
/* The user-configurable values. */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast). */

/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Always force copying of packets, as the starfire doesn't allow for
 * misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * Copying also avoids the unaligned-access penalty on the architectures
 * that take one. -Ion
 */
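/*
 * A minimal, kernel-style sketch of the copy-break scheme described above
 * (illustrative, not this driver's literal rx path; assumes
 * <linux/netdevice.h>, <linux/skbuff.h> and <linux/dma-mapping.h>):
 * frames shorter than the rx_copybreak module parameter are copied into a
 * freshly allocated, right-sized skbuff so the large ring buffer and its
 * DMA mapping can be reused; longer frames are unmapped and handed up as-is.
 */
static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
					   struct device *d,
					   struct sk_buff *ring_skb,
					   dma_addr_t mapping,
					   unsigned int pkt_len,
					   unsigned int buf_sz)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak) {
		/* Small frame: copy, keep the ring buffer mapped for reuse. */
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		if (!skb)
			return NULL;
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		dma_sync_single_for_cpu(d, mapping, pkt_len, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		dma_sync_single_for_device(d, mapping, pkt_len, DMA_FROM_DEVICE);
	} else {
		/* Large frame: pass the ring buffer itself up the stack. */
		dma_unmap_single(d, mapping, buf_sz, DMA_FROM_DEVICE);
		skb = ring_skb;
	}
	skb_put(skb, pkt_len);
	return skb;
}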
/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 1… */

/* All queues must be aligned on a 256-byte boundary */

/* 64-bit dma_addr_t */
#else	/* 32-bit dma_addr_t */

#define skb_num_frags(skb)	(skb_shinfo(skb)->nr_frags + 1)
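/*
 * A sketch of the conditional that the 64-bit/32-bit dma_addr_t lines above
 * bracket (the driver's real condition and type names may differ;
 * cpu_to_dma()/dma_to_cpu() do appear in the code below): descriptor
 * address fields are declared wide or narrow to match the DMA addressing
 * mode, with matching endianness converters.
 */
#if defined(CONFIG_64BIT)		/* 64-bit dma_addr_t */
typedef __le64 netdrv_addr_t;
#define cpu_to_dma(x)	cpu_to_le64(x)
#define dma_to_cpu(x)	le64_to_cpu(x)
#else					/* 32-bit dma_addr_t */
typedef __le32 netdrv_addr_t;
#define cpu_to_dma(x)	cpu_to_le32(x)
#define dma_to_cpu(x)	le32_to_cpu(x)
#endif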
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
driver does not currently utilize the hardware two-level priority queue. When
modifying the RX/TX_RING_SIZE pay close attention to page sizes and the
ring-empty warning in the interrupt handler.

IIIb/c. Transmit/Receive Structure

For transmit, this driver relies on automatic minimum-length padding. It
does not use the completion queue consumer index, but instead checks for
non-zero status entries.

For receive, the driver likewise does not use the completion queue consumer
index, but instead checks for non-zero status entries.

Copied frames are put into a new skbuff at an offset of "+2", thus 16-byte
aligning the IP header.
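/*
 * The "+2" offset mentioned above, as a self-contained helper (the helper
 * name is illustrative): reserving two bytes ahead of the 14-byte Ethernet
 * header lands the IP header on a 16-byte boundary.
 */
static struct sk_buff *alloc_aligned_rx_skb(struct net_device *dev,
					    unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + 2);

	if (skb)
		skb_reserve(skb, 2);	/* 14 (ETH_HLEN) + 2 = 16 */
	return skb;
}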
IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.
IVb. References

The Adaptec Starfire manuals, available only from Adaptec.

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)
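/*
 * One way to do the software padding the second erratum calls for. This
 * driver predates the helper used here; eth_skb_pad() is the generic kernel
 * routine that zero-pads a frame to the 60-byte Ethernet minimum (ETH_ZLEN)
 * so the hardware never transmits uninitialized trailing bytes. It frees
 * the skb and returns nonzero on failure, so the caller must drop the
 * packet on error. Assumes <linux/etherdevice.h>.
 */
static int tx_pad_sketch(struct sk_buff *skb)
{
	/* Zero-fill up to ETH_ZLEN in software instead of letting the
	 * chip pad with whatever follows the buffer in memory. */
	return eth_skb_pad(skb);
}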
Unlike software-only systems, device drivers interact with complex hardware.

/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */

/* The addresses of rx/tx-in-place skbuffs. */
/* netdev_vlan_rx_add_vid() */
	spin_lock(&np->lock);
	printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_bit(vid, np->active_vlans);
	spin_unlock(&np->lock);

/* netdev_vlan_rx_kill_vid() */
	spin_lock(&np->lock);
	printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	clear_bit(vid, np->active_vlans);
	spin_unlock(&np->lock);
/* starfire_init_one() */
	struct device *d = &pdev->dev;
	int i, irq, chip_idx = ent->driver_data;

		return -EIO;
		return -ENODEV;
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	irq = pdev->irq;

	/* enable MWI -- it vastly improves Rx performance on sparc64 */

		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->features |= NETIF_F_HIGHDMA;

		addr[i] = readb(base + EEPROMCtrl + 20 - i);

	while (--boguscnt > 0) {
		printk("%s: chipset reset never completed!\n", dev->name);

	np->dev = dev;
	np->base = base;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

		np->speed100 = 1;

	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		np->intr_timer_ctrl |= SmallFrameBypass;
		np->intr_timer_ctrl |= SmallFrame64;
		np->intr_timer_ctrl |= SmallFrame128;
		np->intr_timer_ctrl |= SmallFrame256;
		np->intr_timer_ctrl |= SmallFrame512;

	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;

	netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);

		dev->mtu = mtu;

	       dev->name, netdrv_tbl[chip_idx].name, base,
	       dev->dev_addr, irq);

		while (--boguscnt > 0)
			printk("%s: PHY#%d reset never completed!\n", dev->name, phy);

			np->phys[phy_idx++] = phy;
			np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			       dev->name, phy, mii_status, np->mii_if.advertising);
			/* there can be only one PHY on-board */

	np->phy_cnt = phy_idx;
	if (np->phy_cnt > 0)
		np->mii_if.phy_id = np->phys[0];
		memset(&np->mii_if, 0, sizeof(np->mii_if));

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");

	return -ENODEV;
/* mdio_read() */
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	/* ??? Should we add a busy-wait here? */
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);

/* mdio_write() */
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	/* The busy-wait will occur before a read. */
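/*
 * The shape of the mdio_read() polling loop above, spelled out. The top two
 * bits of the MII register are polled until the chip marks the read result
 * valid; the exact bit semantics are inferred here from the mask, not from
 * the Starfire manual, and the helper name is illustrative.
 */
static int mdio_read_sketch(void __iomem *mdio_addr)
{
	int boguscnt = 1000;
	u32 result;

	do {
		result = readl(mdio_addr);
	} while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;		/* timed out; the driver returns 0 here too */
	return result & 0xffff;		/* low 16 bits hold the register value */
}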
/* netdev_open() */
	void __iomem *ioaddr = np->base;
	const int irq = np->pci_dev->irq;

	retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
		       dev->name, irq);

	if (!np->queue_mem) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
		np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
						   np->queue_mem_size,
						   &np->queue_mem_dma, GFP_ATOMIC);
		if (np->queue_mem == NULL) {
			return -ENOMEM;

		np->tx_done_q = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
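/*
 * What the size computations above do, in isolation: round each queue's byte
 * size up to the 256-byte QUEUE_ALIGN boundary, then carve all four queues
 * out of one coherent allocation so a single dma_alloc_coherent()/
 * dma_free_coherent() pair covers them. The round-up is the classic
 * (n + a - 1) / a * a, shown here as a standalone C demonstration:
 */
#include <stdio.h>

#define QUEUE_ALIGN 256

static unsigned long queue_align(unsigned long n)
{
	return (n + QUEUE_ALIGN - 1) / QUEUE_ALIGN * QUEUE_ALIGN;
}

int main(void)
{
	/* e.g. a 1032-byte ring rounds up to 1280 = 5 * 256 */
	printf("%lu\n", queue_align(1032));
	return 0;
}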
	writel((np->rx_buf_sz << RxBufferLenShift) |

	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel((np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
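/*
 * Why "(x >> 16) >> 16" above instead of "x >> 32": when dma_addr_t is only
 * 32 bits wide, shifting a 32-bit value by 32 is undefined behavior in C,
 * while two 16-bit shifts are well defined and simply yield 0 (the correct
 * high half of a 32-bit address). A standalone demonstration:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dma32 = 0x12345678u;

	/* (dma32 >> 32) would be undefined; the split shift is safe: */
	printf("high half = %u\n", (unsigned)((dma32 >> 16) >> 16)); /* 0 */
	return 0;
}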
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |

	printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
	writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);

	const __be16 *eaddrs = (const __be16 *)dev->dev_addr;

	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	napi_enable(&np->napi);

	printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);

	retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
	if (fw_rx->size % 4) {
		       fw_rx->size, FIRMWARE_RX);
		retval = -EINVAL;
	retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
	if (fw_tx->size % 4) {
		       fw_tx->size, FIRMWARE_TX);
		retval = -EINVAL;
	fw_rx_data = (const __be32 *)&fw_rx->data[0];
	fw_tx_data = (const __be32 *)&fw_tx->data[0];
	rx_size = fw_rx->size / 4;
	tx_size = fw_tx->size / 4;
		       dev->name);
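/*
 * The firmware-loading pattern used above, reduced to its skeleton (a sketch,
 * not the driver's exact error paths; assumes <linux/firmware.h>): fetch the
 * image, sanity-check that its size is a whole number of 32-bit words as the
 * checks above do, and release it when done. FIRMWARE_RX is the driver's
 * name for the rx image.
 */
static int load_fw_sketch(struct device *d)
{
	const struct firmware *fw;
	int retval;

	retval = request_firmware(&fw, FIRMWARE_RX, d);
	if (retval)
		return retval;
	if (fw->size % 4) {
		/* partial words can't be written to instruction RAM */
		retval = -EINVAL;
	}
	/* ...on success, copy fw->data into the chip's instruction space... */
	release_firmware(fw);
	return retval;
}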
/* check_duplex() */
	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);

	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		printk("%s: MII reset failed!\n", dev->name);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		if (np->speed100)
		if (np->mii_if.full_duplex)
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
		mdio_write(dev, np->phys[0], MII_BMCR, reg0);
/* tx_timeout() */
	void __iomem *ioaddr = np->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	dev->stats.tx_errors++;
/* init_ring() */
	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
	np->rx_info[i].skb = skb;
	np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
						skb->data,
						np->rx_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
		np->rx_info[i].skb = NULL;
	np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);

	writew(i - 1, np->base + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	np->rx_ring[i].rxaddr = 0;
	np->rx_info[i].skb = NULL;
	np->rx_info[i].mapping = 0;

	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	np->rx_done_q[i].status = 0;
	np->tx_done_q[i].status = 0;

	memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
/* start_tx() */
	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))

	prev_tx = np->cur_tx;
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_info[entry].skb = skb;

	if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
	if (np->reap_tx) {
		np->reap_tx = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dev->stats.tx_compressed++;

	np->tx_info[entry].mapping =
		dma_map_single(&np->pci_dev->dev, skb->data,

	const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];

	np->tx_info[entry].mapping =
		dma_map_single(&np->pci_dev->dev,

	if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
		dev->stats.tx_dropped++;

	np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
	np->tx_ring[entry].status = cpu_to_le32(status);

	       dev->name, np->cur_tx, np->dirty_tx,

	np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
	np->cur_tx += np->tx_info[entry].used_slots;

	np->tx_info[entry].used_slots = 1;
	np->cur_tx += np->tx_info[entry].used_slots;

	if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
		np->reap_tx = 1;

	/* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. - Jes */
	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);

	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)

	np->tx_info[entry].skb = NULL;
	dma_unmap_single(&np->pci_dev->dev,
			 np->tx_info[entry].mapping,
	np->tx_info[entry].mapping = 0;
	entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
	dma_unmap_single(&np->pci_dev->dev,
			 np->tx_info[entry].mapping,
			 skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),

	np->cur_tx = prev_tx;
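/*
 * The ring accounting used above, in miniature: cur_tx and dirty_tx are
 * free-running counters, so their unsigned difference is the number of
 * occupied slots even across wraparound (this works because TX_RING_SIZE
 * is a power of two, so 2^32 is a multiple of it), and the entry index is
 * the counter taken modulo the ring size. Standalone demonstration:
 */
#include <stdio.h>

#define TX_RING_SIZE 32

int main(void)
{
	unsigned int cur_tx = 0xFFFFFFFEu;	/* about to wrap */
	unsigned int dirty_tx = 0xFFFFFFF0u;

	unsigned int in_flight = cur_tx - dirty_tx;	/* 14 slots used */
	printf("occupied=%u entry=%u room=%s\n",
	       in_flight, cur_tx % TX_RING_SIZE,
	       in_flight + 4 > TX_RING_SIZE ? "no" : "yes");
	return 0;
}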
/* intr_handler() */
	void __iomem *ioaddr = np->base;

		       dev->name, intr_status);

	if (intr_status == 0 || intr_status == (u32) -1)

	if (likely(napi_schedule_prep(&np->napi))) {
		__napi_schedule(&np->napi);

		       dev->name);

	/* Scavenge the skbuff list based on the Tx-done queue. */

		       dev->name, consumer);

	while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
		       dev->name, np->dirty_tx, np->tx_done, tx_status);
		dev->stats.tx_packets++;

		struct sk_buff *skb = np->tx_info[entry].skb;
		np->tx_info[entry].skb = NULL;
		dma_unmap_single(&np->pci_dev->dev,
				 np->tx_info[entry].mapping,
		np->tx_info[entry].mapping = 0;
		np->dirty_tx += np->tx_info[entry].used_slots;
		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->tx_info[entry].mapping,
					 skb_frag_size(&skb_shinfo(skb)->frags[i]),
			np->dirty_tx++;

		np->tx_done_q[np->tx_done].status = 0;
		np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;

	writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);

	if (netif_queue_stopped(dev) &&
	    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {

	if (--boguscnt < 0) {
		       dev->name, intr_status);

		       dev->name, (int) readl(ioaddr + IntrStatus));
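/*
 * The Tx-done scavenge loop above, reduced to its skeleton (the structure
 * below is illustrative, not the driver's actual types; DONE_Q_SIZE is the
 * driver's completion-queue length): the chip appends nonzero status words
 * to a completion ring in host memory, and the driver consumes entries until
 * it hits a zero word, zeroing each entry behind it and then reporting its
 * position back to the hardware.
 */
struct done_ring_sketch {
	__le32 status[DONE_Q_SIZE];	/* written by the chip, zeroed by us */
	unsigned int tail;		/* next entry to look at */
};

static void scavenge_sketch(struct done_ring_sketch *q)
{
	u32 status;

	while ((status = le32_to_cpu(q->status[q->tail])) != 0) {
		/* ...unmap and free the skb that 'status' identifies... */
		q->status[q->tail] = 0;			/* hand entry back */
		q->tail = (q->tail + 1) % DONE_Q_SIZE;
	}
	/* A writew of q->tail (as done above with CompletionQConsumerIdx)
	 * would then tell the chip the new consumer position. */
}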
/* __netdev_rx() */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);

			dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;

		(*quota)--;

		/* Check if the packet is long enough to accept without copying
		   to a minimally-sized skbuff. */
			dma_sync_single_for_cpu(&np->pci_dev->dev,
						np->rx_info[entry].mapping,
			skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
			dma_sync_single_for_device(&np->pci_dev->dev,
						   np->rx_info[entry].mapping,
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_info[entry].mapping,
					 np->rx_buf_sz, DMA_FROM_DEVICE);
			skb = np->rx_info[entry].skb;
			np->rx_info[entry].skb = NULL;
			np->rx_info[entry].mapping = 0;

			skb->data, skb->data + 6,
			skb->data[12], skb->data[13]);

		skb->protocol = eth_type_trans(skb, dev);

		printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));

		if (le16_to_cpu(desc->status2) & 0x0100) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			dev->stats.rx_compressed++;
		}
		/*
		 * Maybe I missed something -- corrections are welcome.
		 * Until then, the printk stays. :-) -Ion
		 */
		else if (le16_to_cpu(desc->status2) & 0x0040) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = le16_to_cpu(desc->csum);
			printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
		}

		if (le16_to_cpu(desc->status2) & 0x0200) {
			u16 vlid = le16_to_cpu(desc->vlanid);

		dev->stats.rx_packets++;

		np->cur_rx++;
		desc->status = 0;
		np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;

	writew(np->rx_done, np->base + CompletionQConsumerIdx);

		       retcode, np->rx_done, desc_status);
/* netdev_poll() */
	struct net_device *dev = np->dev;
	void __iomem *ioaddr = np->base;

			budget - quota);

	return budget - quota;
/* refill_rx_ring() */
	int entry = -1;

	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_info[entry].skb == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_info[entry].skb = skb;
			np->rx_info[entry].mapping =
				dma_map_single(&np->pci_dev->dev, skb->data,
					       np->rx_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
				np->rx_info[entry].skb = NULL;
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		if (entry == RX_RING_SIZE - 1)
			np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);

	writew(entry, np->base + RxDescQIdx);
/* netdev_media_change() */
	void __iomem *ioaddr = np->base;

	/* reset status first */
	mdio_read(dev, np->phys[0], MII_BMCR);
	mdio_read(dev, np->phys[0], MII_BMSR);

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
	reg1 = mdio_read(dev, np->phys[0], MII_BMSR);

		reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
		reg5 = mdio_read(dev, np->phys[0], MII_LPA);
		if (reg4 & reg5 & ADVERTISE_100FULL) {
			np->speed100 = 1;
			np->mii_if.full_duplex = 1;
		} else if (reg4 & reg5 & ADVERTISE_100HALF) {
			np->speed100 = 1;
			np->mii_if.full_duplex = 0;
		} else if (reg4 & reg5 & ADVERTISE_10FULL) {
			np->speed100 = 0;
			np->mii_if.full_duplex = 1;
		} else {
			np->speed100 = 0;
			np->mii_if.full_duplex = 0;
		}

		if (reg0 & BMCR_SPEED100)
			np->speed100 = 1;
		else
			np->speed100 = 0;
		if (reg0 & BMCR_FULLDPLX)
			np->mii_if.full_duplex = 1;
		else
			np->mii_if.full_duplex = 0;

		printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");

		new_tx_mode = np->tx_mode & ~FullDuplex;	/* duplex setting */
		if (np->mii_if.full_duplex)
			new_tx_mode |= FullDuplex;
		if (np->tx_mode != new_tx_mode) {
			np->tx_mode = new_tx_mode;
			writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
			writel(np->tx_mode, ioaddr + TxMode);
		}

		new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
		if (np->speed100)
		if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
			np->intr_timer_ctrl = new_intr_timer_ctrl;

		printk(KERN_DEBUG "%s: Link is down\n", dev->name);
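/*
 * How the speed/duplex resolution above works: the common denominator of
 * what we advertised (MII_ADVERTISE) and what the link partner offered
 * (MII_LPA) is taken, and the highest-priority mode wins. A standalone
 * rendition using the standard MII advertisement bit masks:
 */
#include <stdio.h>

#define ADVERTISE_100FULL 0x0100
#define ADVERTISE_100HALF 0x0080
#define ADVERTISE_10FULL  0x0040
#define ADVERTISE_10HALF  0x0020

int main(void)
{
	unsigned int reg4 = 0x01e1;	/* we advertise 10/100, half/full */
	unsigned int reg5 = 0x0081;	/* partner offers 100HALF only */
	unsigned int common = reg4 & reg5;
	int speed100, full_duplex;

	if (common & ADVERTISE_100FULL) {
		speed100 = 1; full_duplex = 1;
	} else if (common & ADVERTISE_100HALF) {
		speed100 = 1; full_duplex = 0;
	} else if (common & ADVERTISE_10FULL) {
		speed100 = 0; full_duplex = 1;
	} else {
		speed100 = 0; full_duplex = 0;
	}
	printf("Link: %sMbit %s-duplex\n",
	       speed100 ? "100" : "10", full_duplex ? "full" : "half");
	return 0;
}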
/* netdev_error() */
	if (np->tx_threshold <= PKT_BUF_SZ / 16) {
		writel(++np->tx_threshold, np->base + TxThreshold);
		       dev->name, np->tx_threshold * 16);
	} else
		printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);

		dev->stats.rx_fifo_errors++;
	dev->stats.rx_errors++;

	dev->stats.tx_fifo_errors++;
	dev->stats.tx_errors++;

		       dev->name, intr_status);
/* get_stats() */
	void __iomem *ioaddr = np->base;

	dev->stats.tx_bytes = readl(ioaddr + 0x57010);
	dev->stats.rx_bytes = readl(ioaddr + 0x57044);
	dev->stats.tx_packets = readl(ioaddr + 0x57000);
	dev->stats.tx_aborted_errors =
	dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
	dev->stats.collisions =

	dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);

	dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
	dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
	dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
	dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);

	return &dev->stats;
/* set_vlan_mode() */
	void __iomem *filter_addr = np->base + HashTable + 8;

	for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
/* set_rx_mode() */
	void __iomem *ioaddr = np->base;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {

		eaddrs = (__be16 *) ha->addr;

	eaddrs = (const __be16 *)dev->dev_addr;

		int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;

	eaddrs = (const __be16 *)dev->dev_addr;
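/*
 * The hash-filter computation above, runnable standalone: ether_crc_le() is
 * the little-endian CRC-32 of the 6-byte MAC address, and ">> 23" keeps its
 * top 9 bits, selecting one of 512 hash-table bits (presumably 32 16-bit
 * filter registers on this chip). The bitwise CRC below should match the
 * kernel's ether_crc_le() (crc32_le with initial value ~0) for this use.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_le(int length, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;

	while (length--) {
		unsigned char byte = *data++;
		for (int bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int bit_nr = ether_crc_le(6, mcast) >> 23;	/* 0..511 */

	/* bit_nr / 16 would pick the filter register, bit_nr % 16 the bit */
	printf("bit %d -> register %d, bit %d\n",
	       bit_nr, bit_nr / 16, bit_nr % 16);
	return 0;
}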
/* check_if_running() */
		return -EINVAL;

/* get_drvinfo() */
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));

/* get_link_ksettings() */
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

/* set_link_ksettings() */
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

/* nway_reset() */
	return mii_nway_restart(&np->mii_if);

/* get_link() */
	return mii_link_ok(&np->mii_if);
/* netdev_ioctl() */
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
/* netdev_close() */
	void __iomem *ioaddr = np->base;

	napi_disable(&np->napi);

		       dev->name, (int) readl(ioaddr + IntrStatus));
		       dev->name, np->cur_tx, np->dirty_tx,
		       np->cur_rx, np->dirty_rx);

		       (long long) np->tx_ring_dma);
		printk(KERN_DEBUG "  #%d desc. %#8.8x %#llx -> %#8.8x.\n",
		       i, le32_to_cpu(np->tx_ring[i].status),
		       (long long) dma_to_cpu(np->tx_ring[i].addr),
		       le32_to_cpu(np->tx_done_q[i].status));
		printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
		       (long long) np->rx_ring_dma, np->rx_done_q);
		if (np->rx_done_q)
			printk(KERN_DEBUG "  #%d desc. %#llx -> %#8.8x\n",
			       i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));

	free_irq(np->pci_dev->irq, dev);

	np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0);	/* An invalid address. */
	if (np->rx_info[i].skb != NULL) {
		dma_unmap_single(&np->pci_dev->dev,
				 np->rx_info[i].mapping,
				 np->rx_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(np->rx_info[i].skb);
	np->rx_info[i].skb = NULL;
	np->rx_info[i].mapping = 0;

	struct sk_buff *skb = np->tx_info[i].skb;
		dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
	np->tx_info[i].mapping = 0;
	np->tx_info[i].skb = NULL;
/* starfire_remove_one() */
	if (np->queue_mem)
		dma_free_coherent(&pdev->dev, np->queue_mem_size,
				  np->queue_mem, np->queue_mem_dma);

	/* XXX: add wakeup code -- requires firmware for MagicPacket */

	iounmap(np->base);