/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING	8
#define N_TX_RING	6
#define MAX_TX_ACTIVE	1
#define NCMDS_TX	1	/* dma commands per element in tx ring */
#define RX_BUFLEN	(ETH_FRAME_LEN + 8)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

struct mace_data {
	volatile struct mace __iomem *mace;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char maccc;
	unsigned char tx_fullup;
	unsigned char tx_active;
	unsigned char tx_bad_runt;
	struct timer_list tx_timeout;
	int timeout_active;
	int port_aaui;
	int chipid;
	struct macio_dev *mdev;
	spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct mace_data) \
	+ (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct device_node *mace = macio_get_of_node(mdev);
	struct net_device *dev;
	struct mace_data *mp;
	const unsigned char *addr;
	int j, rev, rc = -EBUSY;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
		       mace->full_name);
		return -ENODEV;
	}

	addr = of_get_property(mace, "mac-address", NULL);
	if (addr == NULL) {
		addr = of_get_property(mace, "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "Can't get mac-address for MACE %s\n",
			       mace->full_name);
			return -ENODEV;
		}
	}

	/*
	 * lazy allocate the driver-wide dummy buffer. (Note that we
	 * never have more than one MACE in the system anyway)
	 */
	if (dummy_buf == NULL) {
		dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
		if (dummy_buf == NULL)
			return -ENOMEM;
	}

	if (macio_request_resources(mdev, "mace")) {
		printk(KERN_ERR "MACE: can't request IO resources !\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		rc = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

	mp = netdev_priv(dev);
	mp->mdev = mdev;
	macio_set_drvdata(mdev, dev);

	dev->base_addr = macio_resource_start(mdev, 0);
	mp->mace = ioremap(dev->base_addr, 0x1000);
	if (mp->mace == NULL) {
		printk(KERN_ERR "MACE: can't map IO resources !\n");
		rc = -ENOMEM;
		goto err_free;
	}
	dev->irq = macio_irq(mdev, 0);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j) {
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
	}
	mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
			in_8(&mp->mace->chipid_lo);


	mp = netdev_priv(dev);
	mp->maccc = ENXMT | ENRCV;

	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
	if (mp->tx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_io;
	}
	mp->tx_dma_intr = macio_irq(mdev, 1);

	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
	if (mp->rx_dma == NULL) {
		printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
		rc = -ENOMEM;
		goto err_unmap_tx_dma;
	}
	mp->rx_dma_intr = macio_irq(mdev, 2);

	mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
	mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

	memset((char *) mp->tx_cmds, 0,
	       (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
	init_timer(&mp->tx_timeout);
	spin_lock_init(&mp->lock);
	mp->timeout_active = 0;

	if (port_aaui >= 0)
		mp->port_aaui = port_aaui;
	else {
		/* Apple Network Server uses the AAUI port */
		if (of_machine_is_compatible("AAPL,ShinerESB"))
			mp->port_aaui = 1;
		else {
#ifdef CONFIG_MACE_AAUI_PORT
			mp->port_aaui = 1;
#else
			mp->port_aaui = 0;
#endif
		}
	}

	dev->netdev_ops = &mace_netdev_ops;

	/*
	 * Most of what is below could be moved to mace_open()
	 */
	mace_reset(dev);

	rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
		goto err_unmap_rx_dma;
	}
	rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
		goto err_free_irq;
	}
	rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
	if (rc) {
		printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
		goto err_free_tx_irq;
	}

	rc = register_netdev(dev);
	if (rc) {
		printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
		goto err_free_rx_irq;
	}

	printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
	       dev->name, dev->dev_addr,
	       mp->chipid >> 8, mp->chipid & 0xff);

	return 0;

 err_free_rx_irq:
	free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
	free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
	free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
	iounmap(mp->rx_dma);
 err_unmap_tx_dma:
	iounmap(mp->tx_dma);
 err_unmap_io:
	iounmap(mp->mace);
 err_free:
	free_netdev(dev);
 err_release:
	macio_release_resources(mdev);

	return rc;
}

static int mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}

static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (ld_le32(&dma->control) & RUN)
			udelay(1);
}

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);
	out_8(&mb->maccc, 0);	/* turn off tx, rx */

	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, dev->dev_addr[i] = p[i]);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	__mace_set_address(dev, addr);

	/* note: setting ADDRCHG clears ENRCV */
	out_8(&mb->maccc, mp->maccc);

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
	int i;

	/* free some skb's */
	for (i = 0; i < N_RX_RING; ++i) {
		if (mp->rx_bufs[i] != NULL) {
			dev_kfree_skb(mp->rx_bufs[i]);
			mp->rx_bufs[i] = NULL;
		}
	}
	for (i = mp->tx_empty; i != mp->tx_fill; ) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
	}
}

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int i;
	struct sk_buff *skb;
	unsigned char *data;

	/* reset the chip */
	mace_reset(dev);

	/* initialize list of sk_buffs for receiving and set up recv dma */
	mace_clean_rings(mp);
	memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
	cp = mp->rx_cmds;
	for (i = 0; i < N_RX_RING - 1; ++i) {
		skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
		if (!skb) {
			data = dummy_buf;
		} else {
			skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
			data = skb->data;
		}
		mp->rx_bufs[i] = skb;
		st_le16(&cp->req_count, RX_BUFLEN);
		st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
		st_le32(&cp->phy_addr, virt_to_bus(data));
		cp->xfer_status = 0;
		++cp;
	}
	mp->rx_bufs[i] = NULL;
	st_le16(&cp->command, DBDMA_STOP);
	mp->rx_fill = i;
	mp->rx_empty = 0;

	/* Put a branch back to the beginning of the receive command list */
	++cp;
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

	/* start rx dma */
	out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* put a branch at the end of the tx command list */
	cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

	/* reset tx dma */
	out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
	out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
	mp->tx_fill = 0;
	mp->tx_empty = 0;
	mp->tx_fullup = 0;
	mp->tx_active = 0;
	mp->tx_bad_runt = 0;

	/* turn it on! */
	out_8(&mb->maccc, mp->maccc);
	/* enable all interrupts except receive interrupts */
	out_8(&mb->imr, RCVINT);

	return 0;
}

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

	/* disable rx and tx */
	out_8(&mb->maccc, 0);
	out_8(&mb->imr, 0xff);		/* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

	mace_clean_rings(mp);

	return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);

	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	mp->tx_timeout.function = mace_tx_timeout;
	mp->tx_timeout.data = (unsigned long) dev;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp, *np;
	unsigned long flags;
	int fill, next, len;

	/* see if there's a free slot in the tx ring */
	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;		/* can't take it at the moment */
	}
	spin_unlock_irqrestore(&mp->lock, flags);

	/* partially fill in the dma command block */
	len = skb->len;
	if (len > ETH_FRAME_LEN) {
		printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
		len = ETH_FRAME_LEN;
	}
	mp->tx_bufs[fill] = skb;
	cp = mp->tx_cmds + NCMDS_TX * fill;
	st_le16(&cp->req_count, len);
	st_le32(&cp->phy_addr, virt_to_bus(skb->data));

	np = mp->tx_cmds + NCMDS_TX * next;
	out_le16(&np->command, DBDMA_STOP);

	/* poke the tx dma channel */
	spin_lock_irqsave(&mp->lock, flags);
	mp->tx_fill = next;
	if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		++mp->tx_active;
		mace_set_timeout(dev);
	}
	if (++next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;
	u32 crc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	mp->maccc &= ~PROM;
	if (dev->flags & IFF_PROMISC) {
		mp->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0xff;
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				i = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}
#if 0
		printk("Multicast filter :");
		for (i = 0; i < 8; i++)
			printk("%02x ", multicast_filter[i]);
		printk("\n");
#endif

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, LOGADDR);
		else {
			out_8(&mb->iac, ADDRCHG | LOGADDR);
			while ((in_8(&mb->iac) & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			out_8(&mb->ladrf, multicast_filter[i]);
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			out_8(&mb->iac, 0);
	}
	/* reset maccc */
	out_8(&mb->maccc, mp->maccc);
	spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
	volatile struct mace __iomem *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "mace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int intr, fs, i, stat, x;
	int xcount, dstat;
	unsigned long flags;
	/* static int mace_last_fs, mace_last_xcount; */

	spin_lock_irqsave(&mp->lock, flags);
	intr = in_8(&mb->ir);		/* read interrupt register */
	in_8(&mb->xmtrc);		/* get retries */
	mace_handle_misc_intrs(mp, intr, dev);

	i = mp->tx_empty;
	while (in_8(&mb->pr) & XMTSV) {
		del_timer(&mp->tx_timeout);
		mp->timeout_active = 0;
		/*
		 * Clear any interrupt indication associated with this status
		 * word.  This appears to unlatch any error indication from
		 * the DMA controller.
		 */
		intr = in_8(&mb->ir);
		if (intr != 0)
			mace_handle_misc_intrs(mp, intr, dev);
		if (mp->tx_bad_runt) {
			fs = in_8(&mb->xmtfs);
			mp->tx_bad_runt = 0;
			out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			continue;
		}
		dstat = ld_le32(&td->status);
		/* stop DMA controller */
		out_le32(&td->control, RUN << 16);
		/*
		 * xcount is the number of complete frames which have been
		 * written to the fifo but for which status has not been read.
		 */
		xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
		if (xcount == 0 || (dstat & DEAD)) {
			/*
			 * If a packet was aborted before the DMA controller has
			 * finished transferring it, it seems that there are 2 bytes
			 * which are stuck in some buffer somewhere.  These will get
			 * transmitted as soon as we read the frame status (which
			 * reenables the transmit data transfer request).  Turning
			 * off the DMA controller and/or resetting the MACE doesn't
			 * help.  So we disable auto-padding and FCS transmission
			 * so the two bytes will only be a runt packet which should
			 * be ignored by other stations.
			 */
			out_8(&mb->xmtfc, DXMTFCS);
		}
		fs = in_8(&mb->xmtfs);
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			mace_reset(dev);
				/*
				 * XXX mace likes to hang the machine after a xmtfs error.
				 * This is hard to reproduce, reseting *may* help
				 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = ld_le16(&cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++dev->stats.tx_aborted_errors;
		} else {
			dev->stats.tx_bytes += mp->tx_bufs[i]->len;
			++dev->stats.tx_packets;
		}
		dev_kfree_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	i += mp->tx_active;
	if (i >= N_TX_RING)
		i -= N_TX_RING;
	if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
		do {
			/* set up the next one */
			cp = mp->tx_cmds + NCMDS_TX * i;
			out_le16(&cp->xfer_status, 0);
			out_le16(&cp->command, OUTPUT_LAST);
			++mp->tx_active;
			if (++i >= N_TX_RING)
				i = 0;
		} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		mace_set_timeout(dev);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}

static void mace_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mp->lock, flags);
	mp->timeout_active = 0;
	if (mp->tx_active == 0 && !mp->tx_bad_runt)
		goto out;

	/* update various counters */
	mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

	cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

	/* turn off both tx and rx and reset the chip */
	out_8(&mb->maccc, 0);
	printk(KERN_ERR "mace: transmit timeout - resetting\n");
	dbdma_reset(td);
	mace_reset(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	dbdma_reset(rd);
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* fix up the transmit side */
	i = mp->tx_empty;
	mp->tx_active = 0;
	++dev->stats.tx_errors;
	if (mp->tx_bad_runt) {
		mp->tx_bad_runt = 0;
	} else if (i != mp->tx_fill) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
		mp->tx_empty = i;
	}
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != mp->tx_fill) {
		cp = mp->tx_cmds + NCMDS_TX * i;
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, (RUN << 16) | RUN);
		++mp->tx_active;
		mace_set_timeout(dev);
	}

	/* turn it back on */
	out_8(&mb->imr, RCVINT);
	out_8(&mb->maccc, mp->maccc);

out:
	spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp, *np;
	int i, nb, stat, next;
	struct sk_buff *skb;
	unsigned frame_status;
	static int mace_lost_status;
	unsigned char *data;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	for (i = mp->rx_empty; i != mp->rx_fill; ) {
		cp = mp->rx_cmds + i;
		stat = ld_le16(&cp->xfer_status);
		if ((stat & ACTIVE) == 0) {
			next = i + 1;
			if (next >= N_RX_RING)
				next = 0;
			np = mp->rx_cmds + next;
			if (next != mp->rx_fill &&
			    (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
				printk(KERN_DEBUG "mace: lost a status word\n");
				++mace_lost_status;
			} else
				break;
		}
		nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
		out_le16(&cp->command, DBDMA_STOP);
		/* got a packet, have a look at it */
		skb = mp->rx_bufs[i];
		if (!skb) {
			++dev->stats.rx_dropped;
		} else if (nb > 8) {
			data = skb->data;
			frame_status = (data[nb-3] << 8) + data[nb-4];
			if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
				++dev->stats.rx_errors;
				if (frame_status & RS_OFLO)
					++dev->stats.rx_over_errors;
				if (frame_status & RS_FRAMERR)
					++dev->stats.rx_frame_errors;
				if (frame_status & RS_FCSERR)
					++dev->stats.rx_crc_errors;
			} else {
				/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
				 * FCS on frames with 802.3 headers. This means that Ethernet
				 * frames have 8 extra octets at the end, while 802.3 frames
				 * have only 4. We need to correctly account for this. */
				if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
					nb -= 4;
				else	/* Ethernet header; mace includes FCS */
					nb -= 8;
				skb_put(skb, nb);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += skb->len;
				netif_rx(skb);
				mp->rx_bufs[i] = NULL;
				++dev->stats.rx_packets;
			}
		} else {
			++dev->stats.rx_errors;
			++dev->stats.rx_length_errors;
		}

		/* advance to next */
		if (++i >= N_RX_RING)
			i = 0;
	}
	mp->rx_empty = i;

	i = mp->rx_fill;
	for (;;) {
		next = i + 1;
		if (next >= N_RX_RING)
			next = 0;
		if (next == mp->rx_empty)
			break;
		cp = mp->rx_cmds + i;
		skb = mp->rx_bufs[i];
		if (!skb) {
			skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
			if (skb) {
				skb_reserve(skb, 2);
				mp->rx_bufs[i] = skb;
			}
		}
		st_le16(&cp->req_count, RX_BUFLEN);
		data = skb? skb->data: dummy_buf;
		st_le32(&cp->phy_addr, virt_to_bus(data));
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
		if ((ld_le32(&rd->status) & ACTIVE) != 0) {
			out_le32(&rd->control, (PAUSE << 16) | PAUSE);
			while ((in_le32(&rd->status) & ACTIVE) != 0)
				;
		}
#endif
		i = next;
	}
	if (i != mp->rx_fill) {
		out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
		mp->rx_fill = i;
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}

static struct of_device_id mace_match[] =
{
	{
	.name 		= "mace",
	},
	{},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
	.driver = {
		.name 		= "mace",
		.owner		= THIS_MODULE,
		.of_match_table	= mace_match,
	},
	.probe		= mace_probe,
	.remove		= mace_remove,
};


static int __init mace_init(void)
{
	return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
	macio_unregister_driver(&mace_driver);

	kfree(dummy_buf);
	dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);