Lines Matching +full:wire +full:-sized (SGI IOC3 Ethernet driver, ioc3-eth.c)

1 // SPDX-License-Identifier: GPL-2.0
8 * o IOC3 ASIC specification 4.51, 1996-04-18
19 * o For the 2.5 branch kill the mii-tool ioctls.
22 #define IOC3_NAME "ioc3-eth"
23 #define IOC3_VERSION "2.6.3-4"
44 #include <linux/dma-mapping.h>
46 #include <linux/nvmem-consumer.h>
61 #define RX_RING_MASK (RX_RING_ENTRIES - 1)
66 #define TX_RING_MASK (TX_RING_ENTRIES - 1)
120 return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL); in aligned_rx_skb_addr()
130 new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC); in ioc3_alloc_skb()
132 return -ENOMEM; in ioc3_alloc_skb()
135 offset = aligned_rx_skb_addr((unsigned long)new_skb->data); in ioc3_alloc_skb()
139 d = dma_map_single(ip->dma_dev, new_skb->data, in ioc3_alloc_skb()
142 if (dma_mapping_error(ip->dma_dev, d)) { in ioc3_alloc_skb()
144 return -ENOMEM; in ioc3_alloc_skb()
147 *rxb = (struct ioc3_erxbuf *)new_skb->data; in ioc3_alloc_skb()
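
The receive path over-allocates each skb by IOC3_DMA_XFER_LEN - 1 bytes and then skips (~addr + 1) & (IOC3_DMA_XFER_LEN - 1) bytes so the buffer starts on a DMA transfer boundary. A minimal user-space sketch of that arithmetic, assuming a power-of-two transfer length (the 128 used here is only an illustration, not taken from the listing):

#include <stdio.h>

/* Illustrative stand-in for the driver's IOC3_DMA_XFER_LEN (power of two). */
#define XFER_LEN 128UL

/* Bytes to skip so that addr becomes XFER_LEN-aligned; mirrors
 * aligned_rx_skb_addr(): (~addr + 1) is -addr, masked to the low bits.
 */
static unsigned long align_offset(unsigned long addr)
{
	return (~addr + 1) & (XFER_LEN - 1UL);
}

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x1001, 0x107f, 0x1080 };

	for (int i = 0; i < 4; i++)
		printf("addr 0x%lx -> skip %lu bytes\n",
		       addrs[i], align_offset(addrs[i]));
	return 0;	/* 0x1000 -> 0, 0x1001 -> 127, 0x107f -> 1, 0x1080 -> 0 */
}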
184 * now check for one wire family code 09, 89 and 91 in ioc3eth_nvmem_match()
186 if (memcmp(name + prefix_len, "09-", 3) == 0) in ioc3eth_nvmem_match()
188 if (memcmp(name + prefix_len, "89-", 3) == 0) in ioc3eth_nvmem_match()
190 if (memcmp(name + prefix_len, "91-", 3) == 0) in ioc3eth_nvmem_match()
204 snprintf(prefix, sizeof(prefix), "ioc3-%012llx-", in ioc3eth_get_mac_addr()
205 res->start & ~0xffff); in ioc3eth_get_mac_addr()
219 return -EINVAL; in ioc3eth_get_mac_addr()
222 mac_addr[i] = prom[10 - i]; in ioc3eth_get_mac_addr()
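
ioc3eth_get_mac_addr() pulls the station address out of the one-wire PROM in reverse byte order (mac_addr[i] = prom[10 - i]). A small sketch of that shuffle, using a made-up PROM image purely for illustration:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 16-byte PROM image; only bytes 5..10 matter here. */
	unsigned char prom[16] = {
		[5] = 0xee, [6] = 0xdd, [7] = 0xcc,
		[8] = 0xbb, [9] = 0xaa, [10] = 0x08,
	};
	unsigned char mac[6];

	/* Mirrors the driver's loop: mac_addr[i] = prom[10 - i]. */
	for (int i = 0; i < 6; i++)
		mac[i] = prom[10 - i];

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 08:aa:bb:cc:dd:ee */
}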
231 writel((dev->dev_addr[5] << 8) | in __ioc3_set_mac_address()
232 dev->dev_addr[4], in __ioc3_set_mac_address()
233 &ip->regs->emar_h); in __ioc3_set_mac_address()
234 writel((dev->dev_addr[3] << 24) | in __ioc3_set_mac_address()
235 (dev->dev_addr[2] << 16) | in __ioc3_set_mac_address()
236 (dev->dev_addr[1] << 8) | in __ioc3_set_mac_address()
237 dev->dev_addr[0], in __ioc3_set_mac_address()
238 &ip->regs->emar_l); in __ioc3_set_mac_address()
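
__ioc3_set_mac_address() splits the six-byte address across two registers: bytes 4 and 5 go into EMAR_H and bytes 0 to 3 into EMAR_L, least significant byte first. The same packing, with plain variables standing in for the writel() targets:

#include <stdio.h>

int main(void)
{
	const unsigned char addr[6] = { 0x08, 0x00, 0x69, 0x12, 0x34, 0x56 };
	unsigned int emar_h, emar_l;

	/* Same packing as __ioc3_set_mac_address(). */
	emar_h = (addr[5] << 8) | addr[4];
	emar_l = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];

	printf("emar_h = 0x%04x\n", emar_h);	/* 0x5634 */
	printf("emar_l = 0x%08x\n", emar_l);	/* 0x12690008 */
	return 0;
}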
246 eth_hw_addr_set(dev, sa->sa_data); in ioc3_set_mac_address()
248 spin_lock_irq(&ip->ioc3_lock); in ioc3_set_mac_address()
250 spin_unlock_irq(&ip->ioc3_lock); in ioc3_set_mac_address()
261 struct ioc3_ethregs *regs = ip->regs; in ioc3_mdio_read()
263 while (readl(&regs->micr) & MICR_BUSY) in ioc3_mdio_read()
266 &regs->micr); in ioc3_mdio_read()
267 while (readl(&regs->micr) & MICR_BUSY) in ioc3_mdio_read()
270 return readl(&regs->midr_r) & MIDR_DATA_MASK; in ioc3_mdio_read()
276 struct ioc3_ethregs *regs = ip->regs; in ioc3_mdio_write()
278 while (readl(&regs->micr) & MICR_BUSY) in ioc3_mdio_write()
280 writel(data, &regs->midr_w); in ioc3_mdio_write()
281 writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr); in ioc3_mdio_write()
282 while (readl(&regs->micr) & MICR_BUSY) in ioc3_mdio_write()
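
Both MDIO accessors share one pattern: poll MICR until MICR_BUSY clears, program MICR (plus MIDR_W for a write), poll again, then read MIDR_R for a read. A runnable sketch of that control flow against a simulated register; micr_read()/micr_write(), the bit values, and the phy << 5 shift are stand-ins, not the driver's real accessors or MICR layout:

#include <stdio.h>

#define MICR_BUSY	0x80000000u	/* stand-in value for the busy bit */
#define MICR_READTRIG	0x00100000u	/* stand-in "start a read cycle" bit */

/* Simulated MII registers: BUSY clears after a few polls. */
static unsigned int micr;
static unsigned int midr_r = 0x2000;	/* pretend PHY register value */
static int busy_polls;

static unsigned int micr_read(void)
{
	if ((micr & MICR_BUSY) && ++busy_polls >= 3)
		micr &= ~MICR_BUSY;	/* "hardware" finishes the cycle */
	return micr;
}

static void micr_write(unsigned int val)
{
	micr = val | MICR_BUSY;		/* starting a cycle sets BUSY */
	busy_polls = 0;
}

/* Same shape as ioc3_mdio_read(): wait, trigger, wait, fetch the result. */
static unsigned int mdio_read(int phy, int reg)
{
	while (micr_read() & MICR_BUSY)
		;
	micr_write(MICR_READTRIG | (phy << 5) | reg);
	while (micr_read() & MICR_BUSY)
		;
	return midr_r & 0xffff;
}

int main(void)
{
	printf("PHY reg = 0x%04x\n", mdio_read(1, 2));
	return 0;
}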
291 struct ioc3_ethregs *regs = ip->regs; in ioc3_get_stats()
293 dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK; in ioc3_get_stats()
294 return &dev->stats; in ioc3_get_stats()
309 * - TCP and UDP checksums of IPv4 only. in ioc3_tcpudp_checksum()
310 * - IPv6 would be doable but we keep that for later ... in ioc3_tcpudp_checksum()
311 * - Only unfragmented packets. Did somebody already tell you in ioc3_tcpudp_checksum()
313 * - don't care about packet size. Worst case when processing a in ioc3_tcpudp_checksum()
319 if (eh->h_proto != htons(ETH_P_IP)) in ioc3_tcpudp_checksum()
326 proto = ih->protocol; in ioc3_tcpudp_checksum()
330 /* Same as tx - compute csum of pseudo header */ in ioc3_tcpudp_checksum()
332 (ih->tot_len - (ih->ihl << 2)) + in ioc3_tcpudp_checksum()
333 htons((u16)ih->protocol) + in ioc3_tcpudp_checksum()
334 (ih->saddr >> 16) + (ih->saddr & 0xffff) + in ioc3_tcpudp_checksum()
335 (ih->daddr >> 16) + (ih->daddr & 0xffff); in ioc3_tcpudp_checksum()
362 skb->ip_summed = CHECKSUM_UNNECESSARY; in ioc3_tcpudp_checksum()
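
The RX checksum fixup sums the IPv4 pseudo-header exactly as the comment describes: protocol + TCP/UDP length + the 16-bit halves of both addresses, then folds the carries back in. A self-contained version of that fold, on host-order values chosen purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* Fold carries and return the ones-complement result (Internet checksum style). */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Illustrative pseudo-header fields (network-order handling omitted). */
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */
	uint32_t daddr = 0xc0a80002;	/* 192.168.0.2 */
	uint16_t proto = 6;		/* TCP */
	uint16_t tcp_len = 40;		/* tot_len - (ihl << 2) */

	/* Same shape as the driver: proto + length + address halves. */
	uint32_t sum = proto + tcp_len +
		       (saddr >> 16) + (saddr & 0xffff) +
		       (daddr >> 16) + (daddr & 0xffff);

	printf("folded pseudo-header sum (complemented) = 0x%04x\n",
	       csum_fold(sum));
	return 0;
}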
375 rxr = ip->rxr; /* Ring base */ in ioc3_rx()
376 rx_entry = ip->rx_ci; /* RX consume index */ in ioc3_rx()
377 n_entry = ip->rx_pi; in ioc3_rx()
379 skb = ip->rx_skbs[rx_entry]; in ioc3_rx()
380 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); in ioc3_rx()
381 w0 = be32_to_cpu(rxb->w0); in ioc3_rx()
384 err = be32_to_cpu(rxb->err); /* It's valid ... */ in ioc3_rx()
386 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4; in ioc3_rx()
388 skb->protocol = eth_type_trans(skb, dev); in ioc3_rx()
394 dev->stats.rx_dropped++; in ioc3_rx()
400 if (likely(dev->features & NETIF_F_RXCSUM)) in ioc3_rx()
405 dma_unmap_single(ip->dma_dev, rxr[rx_entry], in ioc3_rx()
410 ip->rx_skbs[rx_entry] = NULL; /* Poison */ in ioc3_rx()
412 dev->stats.rx_packets++; /* Statistics */ in ioc3_rx()
413 dev->stats.rx_bytes += len; in ioc3_rx()
421 dev->stats.rx_errors++; in ioc3_rx()
424 dev->stats.rx_crc_errors++; in ioc3_rx()
426 dev->stats.rx_frame_errors++; in ioc3_rx()
429 ip->rx_skbs[n_entry] = new_skb; in ioc3_rx()
431 rxb->w0 = 0; /* Clear valid flag */ in ioc3_rx()
436 skb = ip->rx_skbs[rx_entry]; in ioc3_rx()
437 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET); in ioc3_rx()
438 w0 = be32_to_cpu(rxb->w0); in ioc3_rx()
440 writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir); in ioc3_rx()
441 ip->rx_pi = n_entry; in ioc3_rx()
442 ip->rx_ci = rx_entry; in ioc3_rx()
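
RX_RING_MASK and TX_RING_MASK (ring size minus one) only work because the ring sizes are powers of two, so producer/consumer indices wrap with a cheap AND rather than a modulo. A tiny demonstration with a stand-in ring size:

#include <stdio.h>

#define RING_ENTRIES	8	/* power of two, like RX/TX_RING_ENTRIES */
#define RING_MASK	(RING_ENTRIES - 1)

int main(void)
{
	unsigned int idx = 5;

	/* Advancing six slots wraps around the ring via the mask, no modulo. */
	for (int i = 0; i < 6; i++) {
		printf("slot %u\n", idx);
		idx = (idx + 1) & RING_MASK;
	}
	printf("index after wrap: %u\n", idx);	/* 3 */
	return 0;
}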
448 struct ioc3_ethregs *regs = ip->regs; in ioc3_tx()
454 spin_lock(&ip->ioc3_lock); in ioc3_tx()
455 etcir = readl(&regs->etcir); in ioc3_tx()
458 o_entry = ip->tx_ci; in ioc3_tx()
464 skb = ip->tx_skbs[o_entry]; in ioc3_tx()
465 bytes += skb->len; in ioc3_tx()
467 ip->tx_skbs[o_entry] = NULL; in ioc3_tx()
471 etcir = readl(&regs->etcir); /* More pkts sent? */ in ioc3_tx()
475 dev->stats.tx_packets += packets; in ioc3_tx()
476 dev->stats.tx_bytes += bytes; in ioc3_tx()
477 ip->txqlen -= packets; in ioc3_tx()
479 if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES) in ioc3_tx()
482 ip->tx_ci = o_entry; in ioc3_tx()
483 spin_unlock(&ip->ioc3_lock); in ioc3_tx()
496 spin_lock(&ip->ioc3_lock); in ioc3_error()
499 net_err_ratelimited("%s: RX overflow.\n", dev->name); in ioc3_error()
501 net_err_ratelimited("%s: RX buffer overflow.\n", dev->name); in ioc3_error()
503 net_err_ratelimited("%s: RX PCI error.\n", dev->name); in ioc3_error()
505 net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name); in ioc3_error()
507 net_err_ratelimited("%s: TX buffer underflow.\n", dev->name); in ioc3_error()
509 net_err_ratelimited("%s: TX PCI error.\n", dev->name); in ioc3_error()
518 spin_unlock(&ip->ioc3_lock); in ioc3_error()
526 spin_unlock(&ip->ioc3_lock); in ioc3_error()
535 struct ioc3_ethregs *regs = ip->regs; in ioc3_interrupt()
538 eisr = readl(&regs->eisr); in ioc3_interrupt()
539 writel(eisr, &regs->eisr); in ioc3_interrupt()
540 readl(&regs->eisr); /* Flush */ in ioc3_interrupt()
555 struct ioc3_ethregs *regs = ip->regs; in ioc3_setup_duplex()
557 spin_lock_irq(&ip->ioc3_lock); in ioc3_setup_duplex()
559 if (ip->mii.full_duplex) { in ioc3_setup_duplex()
560 writel(ETCSR_FD, &regs->etcsr); in ioc3_setup_duplex()
561 ip->emcr |= EMCR_DUPLEX; in ioc3_setup_duplex()
563 writel(ETCSR_HD, &regs->etcsr); in ioc3_setup_duplex()
564 ip->emcr &= ~EMCR_DUPLEX; in ioc3_setup_duplex()
566 writel(ip->emcr, &regs->emcr); in ioc3_setup_duplex()
568 spin_unlock_irq(&ip->ioc3_lock); in ioc3_setup_duplex()
576 mii_check_media(&ip->mii, 1, 0); in ioc3_timer()
579 ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */ in ioc3_timer()
580 add_timer(&ip->ioc3_timer); in ioc3_timer()
593 word = ioc3_mdio_read(ip->mii.dev, i, MII_PHYSID1); in ioc3_mii_init()
596 ip->mii.phy_id = i; in ioc3_mii_init()
600 ip->mii.phy_id = -1; in ioc3_mii_init()
601 return -ENODEV; in ioc3_mii_init()
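
ioc3_mii_init() probes the 32 possible PHY addresses by reading MII_PHYSID1 and records the first one that responds (phy_id stays -1 and the probe later fails with -ENODEV otherwise). A sketch of such a scan; fake_mdio_read() and the validity test are illustrative stand-ins rather than the driver's exact logic:

#include <stdio.h>

#define MII_PHYSID1	0x02	/* standard MII PHY ID register 1 */

/* Hypothetical bus read: only address 4 answers in this simulation. */
static unsigned int fake_mdio_read(int phy, int reg)
{
	(void)reg;
	return (phy == 4) ? 0x2000 : 0xffff;
}

/* Return the first responding PHY address, or -1 if none is found. */
static int mii_scan(void)
{
	for (int i = 0; i < 32; i++) {
		unsigned int word = fake_mdio_read(i, MII_PHYSID1);

		if (word != 0xffff && word != 0x0000)
			return i;
	}
	return -1;
}

int main(void)
{
	printf("found PHY at address %d\n", mii_scan());
	return 0;
}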
606 ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */ in ioc3_mii_start()
607 add_timer(&ip->ioc3_timer); in ioc3_mii_start()
615 desc = &ip->txr[entry]; in ioc3_tx_unmap()
616 cmd = be32_to_cpu(desc->cmd); in ioc3_tx_unmap()
617 bufcnt = be32_to_cpu(desc->bufcnt); in ioc3_tx_unmap()
620 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1), in ioc3_tx_unmap()
625 dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2), in ioc3_tx_unmap()
636 skb = ip->tx_skbs[i]; in ioc3_clean_tx_ring()
639 ip->tx_skbs[i] = NULL; in ioc3_clean_tx_ring()
642 ip->txr[i].cmd = 0; in ioc3_clean_tx_ring()
644 ip->tx_pi = 0; in ioc3_clean_tx_ring()
645 ip->tx_ci = 0; in ioc3_clean_tx_ring()
653 n_entry = ip->rx_ci; in ioc3_free_rx_bufs()
654 rx_entry = ip->rx_pi; in ioc3_free_rx_bufs()
657 skb = ip->rx_skbs[n_entry]; in ioc3_free_rx_bufs()
659 dma_unmap_single(ip->dma_dev, in ioc3_free_rx_bufs()
660 be64_to_cpu(ip->rxr[n_entry]), in ioc3_free_rx_bufs()
680 if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d)) in ioc3_alloc_rx_bufs()
681 return -ENOMEM; in ioc3_alloc_rx_bufs()
683 rxb->w0 = 0; /* Clear valid flag */ in ioc3_alloc_rx_bufs()
684 ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR)); in ioc3_alloc_rx_bufs()
686 ip->rx_ci = 0; in ioc3_alloc_rx_bufs()
687 ip->rx_pi = RX_BUFFS; in ioc3_alloc_rx_bufs()
694 struct ioc3_ethregs *regs = ip->regs; in ioc3_ssram_disc()
695 u32 *ssram0 = &ip->ssram[0x0000]; in ioc3_ssram_disc()
696 u32 *ssram1 = &ip->ssram[0x4000]; in ioc3_ssram_disc()
700 writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr); in ioc3_ssram_disc()
701 readl(&regs->emcr); /* Flush */ in ioc3_ssram_disc()
709 ip->emcr |= EMCR_RAMPAR; in ioc3_ssram_disc()
710 writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr); in ioc3_ssram_disc()
712 ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR; in ioc3_ssram_disc()
719 struct ioc3_ethregs *regs = ip->regs; in ioc3_init()
721 del_timer_sync(&ip->ioc3_timer); /* Kill if running */ in ioc3_init()
723 writel(EMCR_RST, &regs->emcr); /* Reset */ in ioc3_init()
724 readl(&regs->emcr); /* Flush WB */ in ioc3_init()
726 writel(0, &regs->emcr); in ioc3_init()
727 readl(&regs->emcr); in ioc3_init()
730 writel(ERBAR_VAL, &regs->erbar); in ioc3_init()
731 readl(&regs->etcdc); /* Clear on read */ in ioc3_init()
732 writel(15, &regs->ercsr); /* RX low watermark */ in ioc3_init()
733 writel(0, &regs->ertr); /* Interrupt immediately */ in ioc3_init()
735 writel(ip->ehar_h, &regs->ehar_h); in ioc3_init()
736 writel(ip->ehar_l, &regs->ehar_l); in ioc3_init()
737 writel(42, &regs->ersr); /* XXX should be random */ in ioc3_init()
742 struct ioc3_ethregs *regs = ip->regs; in ioc3_start()
746 ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC); in ioc3_start()
747 writel(ring >> 32, &regs->erbr_h); in ioc3_start()
748 writel(ring & 0xffffffff, &regs->erbr_l); in ioc3_start()
749 writel(ip->rx_ci << 3, &regs->ercir); in ioc3_start()
750 writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir); in ioc3_start()
752 ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC); in ioc3_start()
754 ip->txqlen = 0; /* nothing queued */ in ioc3_start()
757 writel(ring >> 32, &regs->etbr_h); in ioc3_start()
758 writel(ring & 0xffffffff, &regs->etbr_l); in ioc3_start()
759 writel(ip->tx_pi << 7, &regs->etpir); in ioc3_start()
760 writel(ip->tx_ci << 7, &regs->etcir); in ioc3_start()
761 readl(&regs->etcir); /* Flush */ in ioc3_start()
763 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | in ioc3_start()
765 writel(ip->emcr, &regs->emcr); in ioc3_start()
768 EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier); in ioc3_start()
769 readl(&regs->eier); in ioc3_start()
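
ioc3_start() hands each ring to the chip as a 64-bit address split across a high/low register pair, and the index registers take byte offsets, which is presumably why RX indices are shifted by 3 (8-byte ring entries) and TX indices by 7 (128-byte descriptors). An illustration of the split and the offset encoding, using a made-up DMA address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative ring DMA address; not taken from real hardware. */
	uint64_t ring = 0x0000004280012000ULL;

	/* Split across a high/low register pair, as in ioc3_start(). */
	unsigned int hi = (unsigned int)(ring >> 32);
	unsigned int lo = (unsigned int)(ring & 0xffffffff);

	/* Index registers take byte offsets: entry index << log2(entry size). */
	unsigned int rx_entry = 5, tx_entry = 5;

	printf("erbr_h=0x%08x erbr_l=0x%08x\n", hi, lo);
	printf("rx byte offset=%u tx byte offset=%u\n",
	       rx_entry << 3, tx_entry << 7);	/* 40 and 640 */
	return 0;
}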
774 struct ioc3_ethregs *regs = ip->regs; in ioc3_stop()
776 writel(0, &regs->emcr); /* Shutup */ in ioc3_stop()
777 writel(0, &regs->eier); /* Disable interrupts */ in ioc3_stop()
778 readl(&regs->eier); /* Flush */ in ioc3_stop()
785 ip->ehar_h = 0; in ioc3_open()
786 ip->ehar_l = 0; in ioc3_open()
791 return -ENOMEM; in ioc3_open()
804 del_timer_sync(&ip->ioc3_timer); in ioc3_close()
839 dev_err(&pdev->dev, "Invalid resource\n"); in ioc3eth_probe()
840 return -EINVAL; in ioc3eth_probe()
842 /* get mac addr from one wire prom */ in ioc3eth_probe()
844 return -EPROBE_DEFER; /* not available yet */ in ioc3eth_probe()
848 return -ENOMEM; in ioc3eth_probe()
850 SET_NETDEV_DEV(dev, &pdev->dev); in ioc3eth_probe()
853 ip->dma_dev = pdev->dev.parent; in ioc3eth_probe()
854 ip->regs = devm_platform_ioremap_resource(pdev, 0); in ioc3eth_probe()
855 if (IS_ERR(ip->regs)) { in ioc3eth_probe()
856 err = PTR_ERR(ip->regs); in ioc3eth_probe()
860 ip->ssram = devm_platform_ioremap_resource(pdev, 1); in ioc3eth_probe()
861 if (IS_ERR(ip->ssram)) { in ioc3eth_probe()
862 err = PTR_ERR(ip->ssram); in ioc3eth_probe()
866 dev->irq = platform_get_irq(pdev, 0); in ioc3eth_probe()
867 if (dev->irq < 0) { in ioc3eth_probe()
868 err = dev->irq; in ioc3eth_probe()
872 if (devm_request_irq(&pdev->dev, dev->irq, ioc3_interrupt, in ioc3eth_probe()
873 IRQF_SHARED, "ioc3-eth", dev)) { in ioc3eth_probe()
874 dev_err(&pdev->dev, "Can't get irq %d\n", dev->irq); in ioc3eth_probe()
875 err = -ENODEV; in ioc3eth_probe()
879 spin_lock_init(&ip->ioc3_lock); in ioc3eth_probe()
880 timer_setup(&ip->ioc3_timer, ioc3_timer, 0); in ioc3eth_probe()
885 ip->rxr = dma_alloc_coherent(ip->dma_dev, RX_RING_SIZE, &ip->rxr_dma, in ioc3eth_probe()
887 if (!ip->rxr) { in ioc3eth_probe()
888 pr_err("ioc3-eth: rx ring allocation failed\n"); in ioc3eth_probe()
889 err = -ENOMEM; in ioc3eth_probe()
894 ip->tx_ring = dma_alloc_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, in ioc3eth_probe()
895 &ip->txr_dma, GFP_KERNEL); in ioc3eth_probe()
896 if (!ip->tx_ring) { in ioc3eth_probe()
897 pr_err("ioc3-eth: tx ring allocation failed\n"); in ioc3eth_probe()
898 err = -ENOMEM; in ioc3eth_probe()
902 ip->txr = PTR_ALIGN(ip->tx_ring, SZ_16K); in ioc3eth_probe()
903 ip->txr_dma = ALIGN(ip->txr_dma, SZ_16K); in ioc3eth_probe()
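
Since the coherent allocation carries no 16 KiB alignment guarantee, the probe routine allocates TX_RING_SIZE + SZ_16K - 1 bytes and then rounds both the CPU pointer and the DMA address up to the next 16 KiB boundary (PTR_ALIGN/ALIGN). The same rounding spelled out in plain C, on illustrative addresses:

#include <stdio.h>
#include <stdint.h>

#define SZ_16K	0x4000UL

/* Round x up to the next multiple of the power-of-two a (what ALIGN() does). */
static uintptr_t align_up(uintptr_t x, uintptr_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* Illustrative unaligned allocation addresses. */
	uintptr_t cpu_addr = 0x10023a40;
	uintptr_t dma_addr = 0x08023a40;

	printf("txr     = 0x%lx\n", (unsigned long)align_up(cpu_addr, SZ_16K));
	printf("txr_dma = 0x%lx\n", (unsigned long)align_up(dma_addr, SZ_16K));
	/* Both land on a 0x4000 boundary inside the over-sized buffer. */
	return 0;
}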
907 ip->mii.phy_id_mask = 0x1f; in ioc3eth_probe()
908 ip->mii.reg_num_mask = 0x1f; in ioc3eth_probe()
909 ip->mii.dev = dev; in ioc3eth_probe()
910 ip->mii.mdio_read = ioc3_mdio_read; in ioc3eth_probe()
911 ip->mii.mdio_write = ioc3_mdio_write; in ioc3eth_probe()
915 if (ip->mii.phy_id == -1) { in ioc3eth_probe()
917 err = -ENODEV; in ioc3eth_probe()
925 /* The IOC3-specific entries in the device structure. */ in ioc3eth_probe()
926 dev->watchdog_timeo = 5 * HZ; in ioc3eth_probe()
927 dev->netdev_ops = &ioc3_netdev_ops; in ioc3eth_probe()
928 dev->ethtool_ops = &ioc3_ethtool_ops; in ioc3eth_probe()
929 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; in ioc3eth_probe()
930 dev->features = NETIF_F_IP_CSUM | NETIF_F_HIGHDMA; in ioc3eth_probe()
932 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); in ioc3eth_probe()
933 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); in ioc3eth_probe()
939 mii_check_media(&ip->mii, 1, 1); in ioc3eth_probe()
946 ip->mii.phy_id, vendor, model, rev); in ioc3eth_probe()
948 ip->emcr & EMCR_BUFSIZ ? 128 : 64); in ioc3eth_probe()
953 del_timer_sync(&ip->ioc3_timer); in ioc3eth_probe()
954 if (ip->rxr) in ioc3eth_probe()
955 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, in ioc3eth_probe()
956 ip->rxr_dma); in ioc3eth_probe()
957 if (ip->tx_ring) in ioc3eth_probe()
958 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, in ioc3eth_probe()
959 ip->txr_dma); in ioc3eth_probe()
970 dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); in ioc3eth_remove()
971 dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); in ioc3eth_remove()
974 del_timer_sync(&ip->ioc3_timer); in ioc3eth_remove()
995 if (skb->ip_summed == CHECKSUM_PARTIAL) { in ioc3_start_xmit()
997 const int proto = ntohs(ih->protocol); in ioc3_start_xmit()
1002 /* The MAC header. skb->mac seems the logical approach in ioc3_start_xmit()
1003 * to find the MAC header - except it's a NULL pointer ... in ioc3_start_xmit()
1005 eh = (u16 *)skb->data; in ioc3_start_xmit()
1013 csum = csum_tcpudp_nofold(ih->saddr, ih->daddr, in ioc3_start_xmit()
1014 ih->tot_len - (ih->ihl << 2), in ioc3_start_xmit()
1020 csoff = ETH_HLEN + (ih->ihl << 2); in ioc3_start_xmit()
1023 udp_hdr(skb)->check = csum; in ioc3_start_xmit()
1027 tcp_hdr(skb)->check = csum; in ioc3_start_xmit()
1033 spin_lock_irq(&ip->ioc3_lock); in ioc3_start_xmit()
1035 data = (unsigned long)skb->data; in ioc3_start_xmit()
1036 len = skb->len; in ioc3_start_xmit()
1038 produce = ip->tx_pi; in ioc3_start_xmit()
1039 desc = &ip->txr[produce]; in ioc3_start_xmit()
1043 skb_copy_from_linear_data(skb, desc->data, skb->len); in ioc3_start_xmit()
1046 memset(desc->data + len, 0, ETH_ZLEN - len); in ioc3_start_xmit()
1049 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V | w0); in ioc3_start_xmit()
1050 desc->bufcnt = cpu_to_be32(len); in ioc3_start_xmit()
1051 } else if ((data ^ (data + len - 1)) & 0x4000) { in ioc3_start_xmit()
1053 unsigned long s1 = b2 - data; in ioc3_start_xmit()
1054 unsigned long s2 = data + len - b2; in ioc3_start_xmit()
1057 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | in ioc3_start_xmit()
1059 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) | in ioc3_start_xmit()
1061 d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE); in ioc3_start_xmit()
1062 if (dma_mapping_error(ip->dma_dev, d1)) in ioc3_start_xmit()
1064 d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE); in ioc3_start_xmit()
1065 if (dma_mapping_error(ip->dma_dev, d2)) { in ioc3_start_xmit()
1066 dma_unmap_single(ip->dma_dev, d1, len, DMA_TO_DEVICE); in ioc3_start_xmit()
1069 desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF)); in ioc3_start_xmit()
1070 desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF)); in ioc3_start_xmit()
1074 /* Normal sized packet that doesn't cross a page boundary. */ in ioc3_start_xmit()
1075 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0); in ioc3_start_xmit()
1076 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT); in ioc3_start_xmit()
1077 d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE); in ioc3_start_xmit()
1078 if (dma_mapping_error(ip->dma_dev, d)) in ioc3_start_xmit()
1080 desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF)); in ioc3_start_xmit()
1085 ip->tx_skbs[produce] = skb; /* Remember skb */ in ioc3_start_xmit()
1087 ip->tx_pi = produce; in ioc3_start_xmit()
1088 writel(produce << 7, &ip->regs->etpir); /* Fire ... */ in ioc3_start_xmit()
1090 ip->txqlen++; in ioc3_start_xmit()
1092 if (ip->txqlen >= (TX_RING_ENTRIES - 1)) in ioc3_start_xmit()
1095 spin_unlock_irq(&ip->ioc3_lock); in ioc3_start_xmit()
1101 dev->stats.tx_dropped++; in ioc3_start_xmit()
1103 spin_unlock_irq(&ip->ioc3_lock); in ioc3_start_xmit()
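
The transmit path picks one of three descriptor layouts: short frames are copied straight into the descriptor, frames whose linear data crosses a 16 KiB boundary (the (data ^ (data + len - 1)) & 0x4000 test) get two buffer pointers split at that boundary, and everything else gets a single buffer. A small demonstration of the boundary test and the s1/s2 split:

#include <stdio.h>

int main(void)
{
	/* Illustrative buffer placement: 100 bytes starting 40 bytes before
	 * a 16 KiB (0x4000) boundary.
	 */
	unsigned long data = 0x20003fd8;
	unsigned long len = 100;

	if ((data ^ (data + len - 1)) & 0x4000) {
		/* Crosses the boundary: split into two pieces at b2. */
		unsigned long b2 = (data | 0x3fff) + 1;
		unsigned long s1 = b2 - data;		/* bytes before boundary */
		unsigned long s2 = data + len - b2;	/* bytes after boundary  */

		printf("split at 0x%lx: s1=%lu s2=%lu\n", b2, s1, s2);
	} else {
		printf("fits in one 16 KiB region\n");
	}
	return 0;	/* prints: split at 0x20004000: s1=40 s2=60 */
}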
1114 spin_lock_irq(&ip->ioc3_lock); in ioc3_timeout()
1123 spin_unlock_irq(&ip->ioc3_lock); in ioc3_timeout()
1130 spin_unlock_irq(&ip->ioc3_lock); in ioc3_timeout()
1147 for (bits = 6; --bits >= 0; ) { in ioc3_hash()
1159 strscpy(info->driver, IOC3_NAME, sizeof(info->driver)); in ioc3_get_drvinfo()
1160 strscpy(info->version, IOC3_VERSION, sizeof(info->version)); in ioc3_get_drvinfo()
1161 strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)), in ioc3_get_drvinfo()
1162 sizeof(info->bus_info)); in ioc3_get_drvinfo()
1170 spin_lock_irq(&ip->ioc3_lock); in ioc3_get_link_ksettings()
1171 mii_ethtool_get_link_ksettings(&ip->mii, cmd); in ioc3_get_link_ksettings()
1172 spin_unlock_irq(&ip->ioc3_lock); in ioc3_get_link_ksettings()
1183 spin_lock_irq(&ip->ioc3_lock); in ioc3_set_link_ksettings()
1184 rc = mii_ethtool_set_link_ksettings(&ip->mii, cmd); in ioc3_set_link_ksettings()
1185 spin_unlock_irq(&ip->ioc3_lock); in ioc3_set_link_ksettings()
1195 spin_lock_irq(&ip->ioc3_lock); in ioc3_nway_reset()
1196 rc = mii_nway_restart(&ip->mii); in ioc3_nway_reset()
1197 spin_unlock_irq(&ip->ioc3_lock); in ioc3_nway_reset()
1207 spin_lock_irq(&ip->ioc3_lock); in ioc3_get_link()
1208 rc = mii_link_ok(&ip->mii); in ioc3_get_link()
1209 spin_unlock_irq(&ip->ioc3_lock); in ioc3_get_link()
1227 spin_lock_irq(&ip->ioc3_lock); in ioc3_ioctl()
1228 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); in ioc3_ioctl()
1229 spin_unlock_irq(&ip->ioc3_lock); in ioc3_ioctl()
1237 struct ioc3_ethregs *regs = ip->regs; in ioc3_set_multicast_list()
1241 spin_lock_irq(&ip->ioc3_lock); in ioc3_set_multicast_list()
1243 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ in ioc3_set_multicast_list()
1244 ip->emcr |= EMCR_PROMISC; in ioc3_set_multicast_list()
1245 writel(ip->emcr, &regs->emcr); in ioc3_set_multicast_list()
1246 readl(&regs->emcr); in ioc3_set_multicast_list()
1248 ip->emcr &= ~EMCR_PROMISC; in ioc3_set_multicast_list()
1249 writel(ip->emcr, &regs->emcr); /* Clear promiscuous. */ in ioc3_set_multicast_list()
1250 readl(&regs->emcr); in ioc3_set_multicast_list()
1252 if ((dev->flags & IFF_ALLMULTI) || in ioc3_set_multicast_list()
1258 ip->ehar_h = 0xffffffff; in ioc3_set_multicast_list()
1259 ip->ehar_l = 0xffffffff; in ioc3_set_multicast_list()
1262 ehar |= (1UL << ioc3_hash(ha->addr)); in ioc3_set_multicast_list()
1264 ip->ehar_h = ehar >> 32; in ioc3_set_multicast_list()
1265 ip->ehar_l = ehar & 0xffffffff; in ioc3_set_multicast_list()
1267 writel(ip->ehar_h, &regs->ehar_h); in ioc3_set_multicast_list()
1268 writel(ip->ehar_l, &regs->ehar_l); in ioc3_set_multicast_list()
1271 spin_unlock_irq(&ip->ioc3_lock); in ioc3_set_multicast_list()
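
For multicast, each address is hashed to a bit number (ioc3_hash() bit-reverses the low six bits of the address CRC), that bit is set in a 64-bit filter word, and the word is split into EHAR_H/EHAR_L. A sketch of the filter build; the crcs[] values are stand-ins for the real per-address CRCs so the example stays self-contained:

#include <stdio.h>
#include <stdint.h>

/* Bit-reverse the low six bits, like the loop in ioc3_hash(); the value
 * passed in stands in for the Ethernet CRC of the address.
 */
static unsigned int reverse6(unsigned int crc)
{
	unsigned int temp = 0;

	crc &= 0x3f;
	for (int bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= crc & 0x1;
		crc >>= 1;
	}
	return temp;
}

int main(void)
{
	/* Illustrative per-address "CRC" values. */
	unsigned int crcs[] = { 0x01, 0x3f, 0x15 };
	uint64_t ehar = 0;

	for (int i = 0; i < 3; i++)
		ehar |= 1ULL << reverse6(crcs[i]);

	/* Split into the two filter registers, as in ioc3_set_multicast_list(). */
	printf("ehar_h=0x%08x ehar_l=0x%08x\n",
	       (unsigned int)(ehar >> 32), (unsigned int)(ehar & 0xffffffff));
	return 0;
}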
1278 .name = "ioc3-eth",
1284 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");