111597885SJeff Kirsher /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as 211597885SJeff Kirsher munged into HPPA boxen . 311597885SJeff Kirsher 411597885SJeff Kirsher This driver is based upon 82596.c, original credits are below... 511597885SJeff Kirsher but there were too many hoops which HP wants jumped through to 611597885SJeff Kirsher keep this code in there in a sane manner. 711597885SJeff Kirsher 811597885SJeff Kirsher 3 primary sources of the mess -- 911597885SJeff Kirsher 1) hppa needs *lots* of cacheline flushing to keep this kind of 1011597885SJeff Kirsher MMIO running. 1111597885SJeff Kirsher 1211597885SJeff Kirsher 2) The 82596 needs to see all of its pointers as their physical 1311597885SJeff Kirsher address. Thus virt_to_bus/bus_to_virt are *everywhere*. 1411597885SJeff Kirsher 1511597885SJeff Kirsher 3) The implementation HP is using seems to be significantly pickier 1611597885SJeff Kirsher about when and how the command and RX units are started. some 1711597885SJeff Kirsher command ordering was changed. 1811597885SJeff Kirsher 1911597885SJeff Kirsher Examination of the mach driver leads one to believe that there 2011597885SJeff Kirsher might be a saner way to pull this off... anyone who feels like a 2111597885SJeff Kirsher full rewrite can be my guest. 2211597885SJeff Kirsher 2311597885SJeff Kirsher Split 02/13/2000 Sam Creasey (sammy@oh.verio.com) 2411597885SJeff Kirsher 2511597885SJeff Kirsher 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de) 2611597885SJeff Kirsher 03/02/2000 changes for better/correct(?) cache-flushing (deller) 2711597885SJeff Kirsher */ 2811597885SJeff Kirsher 2911597885SJeff Kirsher /* 82596.c: A generic 82596 ethernet driver for linux. */ 3011597885SJeff Kirsher /* 3111597885SJeff Kirsher Based on Apricot.c 3211597885SJeff Kirsher Written 1994 by Mark Evans. 
3311597885SJeff Kirsher This driver is for the Apricot 82596 bus-master interface 3411597885SJeff Kirsher 3511597885SJeff Kirsher Modularised 12/94 Mark Evans 3611597885SJeff Kirsher 3711597885SJeff Kirsher 3811597885SJeff Kirsher Modified to support the 82596 ethernet chips on 680x0 VME boards. 3911597885SJeff Kirsher by Richard Hirst <richard@sleepie.demon.co.uk> 4011597885SJeff Kirsher Renamed to be 82596.c 4111597885SJeff Kirsher 4211597885SJeff Kirsher 980825: Changed to receive directly in to sk_buffs which are 4311597885SJeff Kirsher allocated at open() time. Eliminates copy on incoming frames 4411597885SJeff Kirsher (small ones are still copied). Shared data now held in a 4511597885SJeff Kirsher non-cached page, so we can run on 68060 in copyback mode. 4611597885SJeff Kirsher 4711597885SJeff Kirsher TBD: 4811597885SJeff Kirsher * look at deferring rx frames rather than discarding (as per tulip) 4911597885SJeff Kirsher * handle tx ring full as per tulip 5011597885SJeff Kirsher * performance test to tune rx_copybreak 5111597885SJeff Kirsher 5211597885SJeff Kirsher Most of my modifications relate to the braindead big-endian 5311597885SJeff Kirsher implementation by Intel. When the i596 is operating in 5411597885SJeff Kirsher 'big-endian' mode, it thinks a 32 bit value of 0x12345678 5511597885SJeff Kirsher should be stored as 0x56781234. This is a real pain, when 5611597885SJeff Kirsher you have linked lists which are shared by the 680x0 and the 5711597885SJeff Kirsher i596. 5811597885SJeff Kirsher 5911597885SJeff Kirsher Driver skeleton 6011597885SJeff Kirsher Written 1993 by Donald Becker. 6111597885SJeff Kirsher Copyright 1993 United States Government as represented by the Director, 6211597885SJeff Kirsher National Security Agency. This software may only be used and distributed 6311597885SJeff Kirsher according to the terms of the GNU General Public License as modified by SRC, 6411597885SJeff Kirsher incorporated herein by reference. 
6511597885SJeff Kirsher 6611597885SJeff Kirsher The author may be reached as becker@scyld.com, or C/O 6711597885SJeff Kirsher Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 6811597885SJeff Kirsher 6911597885SJeff Kirsher */ 7011597885SJeff Kirsher 7111597885SJeff Kirsher #include <linux/module.h> 7211597885SJeff Kirsher #include <linux/kernel.h> 7311597885SJeff Kirsher #include <linux/string.h> 7411597885SJeff Kirsher #include <linux/errno.h> 7511597885SJeff Kirsher #include <linux/ioport.h> 7611597885SJeff Kirsher #include <linux/interrupt.h> 7711597885SJeff Kirsher #include <linux/delay.h> 7811597885SJeff Kirsher #include <linux/netdevice.h> 7911597885SJeff Kirsher #include <linux/etherdevice.h> 8011597885SJeff Kirsher #include <linux/skbuff.h> 8111597885SJeff Kirsher #include <linux/types.h> 8211597885SJeff Kirsher #include <linux/bitops.h> 8311597885SJeff Kirsher #include <linux/dma-mapping.h> 8411597885SJeff Kirsher #include <linux/io.h> 8511597885SJeff Kirsher #include <linux/irq.h> 8611597885SJeff Kirsher #include <linux/gfp.h> 8711597885SJeff Kirsher 8811597885SJeff Kirsher /* DEBUG flags 8911597885SJeff Kirsher */ 9011597885SJeff Kirsher 9111597885SJeff Kirsher #define DEB_INIT 0x0001 9211597885SJeff Kirsher #define DEB_PROBE 0x0002 9311597885SJeff Kirsher #define DEB_SERIOUS 0x0004 9411597885SJeff Kirsher #define DEB_ERRORS 0x0008 9511597885SJeff Kirsher #define DEB_MULTI 0x0010 9611597885SJeff Kirsher #define DEB_TDR 0x0020 9711597885SJeff Kirsher #define DEB_OPEN 0x0040 9811597885SJeff Kirsher #define DEB_RESET 0x0080 9911597885SJeff Kirsher #define DEB_ADDCMD 0x0100 10011597885SJeff Kirsher #define DEB_STATUS 0x0200 10111597885SJeff Kirsher #define DEB_STARTTX 0x0400 10211597885SJeff Kirsher #define DEB_RXADDR 0x0800 10311597885SJeff Kirsher #define DEB_TXADDR 0x1000 10411597885SJeff Kirsher #define DEB_RXFRAME 0x2000 10511597885SJeff Kirsher #define DEB_INTS 0x4000 10611597885SJeff Kirsher #define DEB_STRUCT 0x8000 
#define DEB_ANY		0xffff


/* Run y only when debug category x is enabled in i596_debug. */
#define DEB(x, y)	if (i596_debug & (x)) { y; }


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define ISCP_BUSY	0x0001

/* Null link value as seen by the 82596 (all ones, not 0). */
#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

/* SCB command-unit and receive-unit control codes. */
#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


/* Layout of the memory-mapped PORT/CA registers (board specific mapping). */
struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32            ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

/* Transmit buffer descriptor, padded out to a 32-byte cache line. */
struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	u32            next;
	u32            data;
	u32 cache_pad[5];		/* Total 32 bytes... */
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	u32            b_next;	/* Address from i596 viewpoint */
};

/* Transmit command: hardware fields first, then driver bookkeeping,
 * padded so each entry fills one cache line.
 */
struct tx_cmd {
	struct i596_cmd cmd;
	u32            tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;		/* So we can free it after tx */
	dma_addr_t dma_addr;
#ifdef __LP64__
	u32 cache_pad[6];		/* Total 64 bytes... */
#else
	u32 cache_pad[1];		/* Total 32 bytes... */
#endif
};

/* Time-domain-reflectometer (cable test) command result. */
struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

/* Multicast-list setup command; mc_cnt is a byte count of mc_addrs. */
struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

/* Individual (station) address setup command. */
struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

/* Configure command carrying the 14-byte init_setup parameter block. */
struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

/* Receive frame descriptor: hardware fields, then CPU-side ring links. */
struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	u32            b_next;	/* Address from i596 viewpoint */
	u32            rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];		/* Total 32 bytes... */
#endif
};

struct i596_rbd {
	/* hardware data */
	unsigned short count;
	unsigned short zero1;
	u32            b_next;
	u32            b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	u32            b_addr;		/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
					/* Total 32 bytes... */
#ifdef __LP64__
    u32 cache_pad[4];
#endif
};

/* These values as chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

/* System control block: the shared mailbox between driver and 82596. */
struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32           cmd;
	u32           rfd;
	u32           crc_err;
	u32           align_err;
	u32           resource_err;
	u32           over_err;
	u32           rcvdt_err;
	u32           short_err;
	unsigned short t_on;
	unsigned short t_off;
};

/* Intermediate system configuration pointer (points at the SCB). */
struct i596_iscp {
	u32 stat;
	u32 scb;
};

/* System configuration pointer (points at the ISCP). */
struct i596_scp {
	u32 sysbus;
	u32 pad;
	u32 iscp;
};

/* All chip-visible state in one contiguous, cache-line aligned chunk.
 * Everything in here is read/written by the 82596 via DMA, so each member
 * is aligned to its own 32-byte cache line.
 */
struct i596_dma {
	struct i596_scp scp		        __attribute__((aligned(32)));
	volatile struct i596_iscp iscp		__attribute__((aligned(32)));
	volatile struct i596_scb scb		__attribute__((aligned(32)));
	struct sa_cmd sa_cmd		        __attribute__((aligned(32)));
	struct cf_cmd cf_cmd		        __attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd		        __attribute__((aligned(32)));
	struct mc_cmd mc_cmd		        __attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE]	__attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE]	__attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE]	__attribute__((aligned(32)));
};

/* Per-device driver state (CPU-side only; not seen by the chip). */
struct i596_private {
	struct i596_dma *dma;
	u32    stat;
	int last_restart;
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	u32    last_cmd;
	int next_tx_cmd;
	int options;
	spinlock_t lock;       /* serialize access to chip */
	dma_addr_t dma_addr;
	void __iomem *mpu_port;
	void __iomem *ca;
};

/* 14-byte CmdConfigure parameter block (see memcpy in init_i596_mem). */
static const char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
	0x80,		/* don't save bad frames */
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif


/* Poll (with cache invalidation before each read) until the chip clears
 * iscp.stat, or delcnt * 10us elapse.  Returns 0 on success, -1 on timeout.
 * NOTE(review): DMA_INV/SWAP16 come from the board wrapper that #includes
 * this core — confirm against lasi_82596.c / sni_82596.c.
 */
static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
{
	DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	while (--delcnt && dma->iscp.stat) {
		udelay(10);
		DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
		     dev->name, str, SWAP16(dma->iscp.stat));
		return -1;
	} else
		return 0;
}


/* Same polling pattern as wait_istat, but waits for the chip to consume
 * scb.command (i.e. the previous SCB command has been accepted).
 */
static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma,
			   int delcnt, char *str)
{
	DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	while (--delcnt && dma->scb.command) {
		udelay(10);
		DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
	}
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str,
		       SWAP16(dma->scb.status),
		       SWAP16(dma->scb.command));
		return -1;
	} else
		return 0;
}


/* Debug helper: dump SCP/ISCP/SCB, the pending command chain and both
 * receive rings to the kernel log.  Walks the circular rfd/rbd lists
 * exactly once (terminates when it gets back to the head).
 */
static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
	       &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
	printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
	       &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
	printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %08x, .rfd = %08x\n",
	       &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
		SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
	printk(KERN_DEBUG "   errors: crc %x, align %x, resource %x,"
	       " over %x, rcvdt %x, short %x\n",
	       SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
	       SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
	       SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
	cmd = lp->cmd_head;
	while (cmd != NULL) {
		printk(KERN_DEBUG
		       "cmd at %p, .status = %04x, .command = %04x,"
		       " .b_next = %08x\n",
		       cmd, SWAP16(cmd->status), SWAP16(cmd->command),
		       SWAP32(cmd->b_next));
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_DEBUG "rfd_head = %p\n", rfd);
	do {
		printk(KERN_DEBUG
		       "   %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
		       " count %04x\n",
		       rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
		       SWAP32(rfd->b_next), SWAP32(rfd->rbd),
		       SWAP16(rfd->count));
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_DEBUG "rbd_head = %p\n", rbd);
	do {
		printk(KERN_DEBUG
		       "   %p .count %04x, b_next %08x, b_data %08x,"
		       " size %04x\n",
		       rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
		       SWAP32(rbd->b_data), SWAP16(rbd->size));
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
	DMA_INV(dev, dma, sizeof(struct i596_dma));
}


/* Translate a CPU pointer into the i596_dma block into the bus address
 * the chip must use: base bus address + offset within the block.
 */
#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))

/* Allocate an skb for every receive buffer descriptor and link the rbd
 * and rfd arrays into two circular rings, the last rfd marked CMD_EOL.
 * Returns 0 on success, -1 if an skb allocation fails (caller must then
 * unwind with remove_rx_bufs).
 */
static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
		dma_addr_t dma_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
		if (skb == NULL)
			return -1;
		dma_addr = dma_map_single(dev->dev.parent, skb->data,
					  PKT_BUF_SZ, DMA_FROM_DEVICE);
		rbd->v_next = rbd+1;
		rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
		rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = SWAP32(dma_addr);
		rbd->size = SWAP16(PKT_BUF_SZ);
	}
	lp->rbd_head = dma->rbds;
	rbd = dma->rbds + rx_ring_size - 1;
	rbd->v_next = dma->rbds;
	rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
		rfd->cmd = SWAP16(CMD_FLEX);
	}
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd = dma->rfds;
	rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
	rfd->v_prev = dma->rfds + rx_ring_size - 1;
	rfd = dma->rfds + rx_ring_size - 1;
	rfd->v_next = dma->rfds;
	rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
	rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
	return 0;
}

/* Unmap and free every skb attached to the rbd ring.  Stops at the first
 * NULL skb so it is safe to call after a partially-failed init_rx_bufs.
 */
static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dma_unmap_single(dev->dev.parent,
				 (dma_addr_t)SWAP32(rbd->b_data),
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb(rbd->skb);
	}
}


/* Reset the rfd ring to its pristine state (all empty, last one EOL) and
 * re-attach the first rbd, keeping the existing skbs.  Used on restart.
 */
static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		dma->rfds[i].rbd = I596_NULL;
		dma->rfds[i].cmd = SWAP16(CMD_FLEX);
	}
	dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
	lp->rfd_head = dma->rfds;
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	lp->rbd_head = dma->rbds;
	dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
}


/* Full chip bring-up: reset via the PORT interface, hand the chip the SCP
 * address, grab the IRQ, queue the initial Configure/SASetup/TDR commands
 * and start the receive unit.  Returns 0 on success, -1 on failure (chip
 * is left reset and the IRQ released).
 *
 * The DMA_WBACK calls before each ca() are required: the chip reads these
 * structures by DMA, so they must be flushed from the CPU cache first.
 */
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
	DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore (&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}

/*
 * i596_rx - service the receive descriptor ring.
 *
 * Walks the RFD (receive frame descriptor) list from lp->rfd_head,
 * passing completed frames up the stack.  Large frames (> rx_copybreak)
 * are handed up in place and the ring buffer is replaced with a fresh
 * skb; small frames are copied into a right-sized skb so the ring
 * buffer can be reused.  Each descriptor touched is explicitly
 * invalidated/written back (DMA_INV / DMA_WBACK_INV) because the 82596
 * reads and writes these structures by bus-master DMA.
 *
 * Returns 0 (the return value is not used by callers visible here).
 */
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			DMA_INV(dev, rbd, sizeof(struct i596_rbd));
		} else {
			/* RFD points at a buffer that is not the expected
			 * ring head: treat the frame as unusable rather
			 * than chase a corrupt chain. */
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			/* low 14 bits of count = actual byte count */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
			} else {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			/* Bad frame: decode the RFD status bits into the
			 * standard netdev error counters.  Bit meanings
			 * follow the 82596 RFD status word — NOTE(review):
			 * exact bit semantics taken from these masks;
			 * confirm against the 82596 user's manual. */
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		DMA_INV(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


/*
 * i596_cleanup_cmd - drain the pending command queue after an abort/reset.
 *
 * Unlinks every queued command from lp->cmd_head; queued transmits are
 * unmapped and their skbs freed (accounted as aborted tx errors), then
 * the SCB command pointer is cleared so the chip sees an empty queue.
 * Caller is responsible for having stopped the command unit first.
 */
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
	}

	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}


/*
 * i596_reset - abort CU and RU, flush state, and reinitialise the chip.
 *
 * Issues CUC_ABORT|RX_ABORT under lp->lock, then (lock dropped) cleans
 * the command queue, drains any received frames, restarts the tx queue
 * and re-runs init_i596_mem() to bring the 82596 back up.
 */
static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}


/*
 * i596_add_cmd - append a command block to the chip's action queue.
 *
 * Links @cmd (marked CMD_EOL|CMD_INTR) onto the software queue under
 * lp->lock.  If the queue was empty the SCB is pointed at the command
 * and the command unit is started via channel attention; otherwise the
 * previous tail is chained to it.  If the backlog exceeds
 * max_cmd_backlog and the last command completion is older than
 * ticks_limit jiffies, the chip is assumed wedged and reset.
 */
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}

/*
 * i596_open - ndo_open: allocate rx buffers, initialise the chip and
 * start the transmit queue.  Returns 0 or -EAGAIN on failure.
 */
static int i596_open(struct net_device *dev)
{
	DEB(DEB_OPEN, printk(KERN_DEBUG
			     "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (init_rx_bufs(dev)) {
		printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
		return -EAGAIN;
	}
	if (init_i596_mem(dev)) {
		printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
		goto out_remove_rx_bufs;
	}
	netif_start_queue(dev);

	return 0;

out_remove_rx_bufs:
	remove_rx_bufs(dev);
	return -EAGAIN;
}

/*
 * i596_tx_timeout - ndo_tx_timeout watchdog handler.
 *
 * If nothing was transmitted since the last restart, do a full chip
 * reset; otherwise just kick the command/receive units with a channel
 * attention and remember the current tx count.
 */
static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS, printk(KERN_DEBUG
			       "%s: transmit timed out, status resetting.\n",
			       dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
		lp->dma->scb.command = SWAP16(CUC_START | RX_START);
		DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
		ca (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}


/*
 * i596_start_xmit - ndo_start_xmit: queue one skb for transmission.
 *
 * Pads short frames to ETH_ZLEN, claims the next tx_cmd/tbd slot from
 * the ring, maps the skb for DMA and hands the command to the chip via
 * i596_add_cmd().  If the slot is still owned by an in-flight command
 * the packet is dropped.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		/* Slot not yet reclaimed by the interrupt handler:
		 * ring is full, drop the packet. */
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);

		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
		DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

/*
 * print_eth - debug dump of an ethernet header: src, dst, ethertype
 * bytes (add[12]/add[13]) and a caller-supplied tag string.
 */
static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

/* net_device operations for the 82596 */
static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i596_poll_controller,
#endif
};

/*
 * i82596_probe - common probe: allocate the shared DMA area, wire up
 * netdev ops and register the device.
 *
 * Requires dev->base_addr and dev->irq to have been filled in by the
 * bus-specific front end.  Returns 0 or a negative errno.
 */
static int i82596_probe(struct net_device *dev)
{
	int i;
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma;

	/* This lot ensures things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	/* Non-consistent DMA memory: explicit DMA_WBACK/DMA_INV calls
	 * maintain coherency with the chip. */
	dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
			      &lp->dma_addr, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!dma) {
		printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
		return -ENOMEM;
	}

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	memset(dma, 0, sizeof(struct i596_dma));
	lp->dma = dma;

	dma->scb.command = 0;
	dma->scb.cmd = I596_NULL;
	dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));

	i = register_netdev(dev);
	if (i) {
		dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
			       dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
		return i;
	}

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, dma, (int)sizeof(struct i596_dma),
			     &dma->scb));

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the IRQ masked. */
static void i596_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	i596_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * i596_interrupt - IRQ handler.
 *
 * Reads the SCB status word (top four bits are the event flags to be
 * acknowledged).  Command-complete / CU-inactive events retire finished
 * commands from the queue (tx completions are unmapped and freed, TDR
 * results decoded, CmdConfigure slots zapped for set_multicast_list);
 * frame-received / RU-inactive events run i596_rx() and restart the
 * receive unit if it stalled.  Finally the events are acked and a
 * channel attention is issued.
 */
static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	struct i596_dma *dma;
	unsigned short status, ack_cmd = 0;

	lp = netdev_priv(dev);
	dma = lp->dma;

	spin_lock (&lp->lock);

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	status = SWAP16(dma->scb.status);

	DEB(DEB_INTS, printk(KERN_DEBUG
			     "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			     dev->name, dev->irq, status));

	/* Event bits to acknowledge back to the chip */
	ack_cmd = status & 0xf000;

	if (!ack_cmd) {
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: interrupt with no events\n",
				       dev->name));
		spin_unlock (&lp->lock);
		return IRQ_NONE;
	}

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		if ((status & 0x8000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt completed command.\n",
				   dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt command unit inactive %x.\n",
				   dev->name, status & 0x0700));

		/* Retire every command the chip has marked complete */
		while (lp->cmd_head != NULL) {
			DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
			if (!(lp->cmd_head->status & SWAP16(STAT_C)))
				break;

			ptr = lp->cmd_head;

			DEB(DEB_STATUS,
			    printk(KERN_DEBUG
				   "cmd_head->status = %04x, ->command = %04x\n",
				   SWAP16(lp->cmd_head->status),
				   SWAP16(lp->cmd_head->command)));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch (SWAP16(ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if (ptr->status & SWAP16(STAT_OK)) {
					DEB(DEB_TXADDR,
					    print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if (ptr->status & SWAP16(0x0020))
						dev->stats.collisions++;
					if (!(ptr->status & SWAP16(0x0040)))
						dev->stats.tx_heartbeat_errors++;
					if (ptr->status & SWAP16(0x0400))
						dev->stats.tx_carrier_errors++;
					if (ptr->status & SWAP16(0x0800))
						dev->stats.collisions++;
					if (ptr->status & SWAP16(0x1000))
						dev->stats.tx_aborted_errors++;
				}
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				/* Time-domain-reflectometry cable test result */
				unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);

				if (status & 0x8000) {
					DEB(DEB_ANY,
					    printk(KERN_DEBUG "%s: link ok.\n",
						   dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR
						       "%s: Transceiver problem.\n",
						       dev->name);
					if (status & 0x2000)
						printk(KERN_ERR
						       "%s: Termination problem.\n",
						       dev->name);
					if (status & 0x1000)
						printk(KERN_ERR
						       "%s: Short circuit.\n",
						       dev->name);

					DEB(DEB_TDR,
					    printk(KERN_DEBUG "%s: Time %d.\n",
						   dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
				/*
				 * Zap command so set_multicast_list() know
				 * it is free
				 */
				ptr->command = 0;
				break;
			}
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
			DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
			lp->last_cmd = jiffies;
		}

		/* This mess is arranging that only the last of any outstanding
		 * commands has the interrupt bit set.  Should probably really
		 * only add to the cmd queue when the CU is stopped.
		 */
		ptr = lp->cmd_head;
		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
			struct i596_cmd *prev = ptr;

			ptr->command &= SWAP16(0x1fff);
			ptr = ptr->v_next;
			DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
		}

		if (lp->cmd_head != NULL)
			ack_cmd |= CUC_START;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
		DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,
			    printk(KERN_DEBUG
				   "%s: i596 interrupt received a frame.\n",
				   dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,
				    printk(KERN_DEBUG
					   "%s: i596 interrupt receive unit inactive, status 0x%x\n",
					   dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);
	DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));

	/* DANGER: I suspect that some kind of interrupt
	 acknowledgement aside from acking the 82596 might be needed
	 here...  but it's running acceptably without */

	ca(dev);

	wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_HANDLED;
}

/*
 * i596_close - ndo_stop: abort both chip units, flush pending
 * commands, release the IRQ and the receive buffers.
 */
static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);

	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	int config = 0, cnt;

	DEB(DEB_MULTI,
	    printk(KERN_DEBUG
		   "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		   dev->name, netdev_mc_count(dev),
		   dev->flags & IFF_PROMISC ? "ON" : "OFF",
		   dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	/* Toggle the promiscuous (config byte 8 bit 0) and all-multicast
	 * (config byte 11 bit 5, active-low) bits only when they differ
	 * from the requested state, and requeue CmdConfigure if so. */
	if ((dev->flags & IFF_PROMISC) &&
	    !(dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) &&
	    (dma->cf_cmd.i596_config[8] & 0x01)) {
		dma->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) &&
	    (dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) &&
	    !(dma->cf_cmd.i596_config[11] & 0x20)) {
		dma->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		if (dma->cf_cmd.cmd.command)
			printk(KERN_INFO
			       "%s: config change request already queued\n",
			       dev->name);
		else {
			dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
			DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
			i596_add_cmd(dev, &dma->cf_cmd.cmd);
		}
	}

	/* Clamp the multicast list to what the hardware setup supports */
	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
		       dev->name, cnt);
	}
138711597885SJeff Kirsher if (!netdev_mc_empty(dev)) { 138811597885SJeff Kirsher struct netdev_hw_addr *ha; 138911597885SJeff Kirsher unsigned char *cp; 139011597885SJeff Kirsher struct mc_cmd *cmd; 139111597885SJeff Kirsher 139211597885SJeff Kirsher cmd = &dma->mc_cmd; 139311597885SJeff Kirsher cmd->cmd.command = SWAP16(CmdMulticastList); 139411597885SJeff Kirsher cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); 139511597885SJeff Kirsher cp = cmd->mc_addrs; 139611597885SJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 139711597885SJeff Kirsher if (!cnt--) 139811597885SJeff Kirsher break; 1399d458cdf7SJoe Perches memcpy(cp, ha->addr, ETH_ALEN); 140011597885SJeff Kirsher if (i596_debug > 1) 140111597885SJeff Kirsher DEB(DEB_MULTI, 140211597885SJeff Kirsher printk(KERN_DEBUG 140311597885SJeff Kirsher "%s: Adding address %pM\n", 140411597885SJeff Kirsher dev->name, cp)); 1405d458cdf7SJoe Perches cp += ETH_ALEN; 140611597885SJeff Kirsher } 140711597885SJeff Kirsher DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); 140811597885SJeff Kirsher i596_add_cmd(dev, &cmd->cmd); 140911597885SJeff Kirsher } 141011597885SJeff Kirsher } 1411