1*9ac40d08SBagas Sanjaya // SPDX-License-Identifier: GPL-1.0+
211597885SJeff Kirsher /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
311597885SJeff Kirsher munged into HPPA boxen .
411597885SJeff Kirsher
511597885SJeff Kirsher This driver is based upon 82596.c, original credits are below...
611597885SJeff Kirsher but there were too many hoops which HP wants jumped through to
711597885SJeff Kirsher keep this code in there in a sane manner.
811597885SJeff Kirsher
911597885SJeff Kirsher 3 primary sources of the mess --
1011597885SJeff Kirsher 1) hppa needs *lots* of cacheline flushing to keep this kind of
1111597885SJeff Kirsher MMIO running.
1211597885SJeff Kirsher
1311597885SJeff Kirsher 2) The 82596 needs to see all of its pointers as their physical
1411597885SJeff Kirsher address. Thus virt_to_bus/bus_to_virt are *everywhere*.
1511597885SJeff Kirsher
1611597885SJeff Kirsher 3) The implementation HP is using seems to be significantly pickier
1711597885SJeff Kirsher about when and how the command and RX units are started. some
1811597885SJeff Kirsher command ordering was changed.
1911597885SJeff Kirsher
2011597885SJeff Kirsher Examination of the mach driver leads one to believe that there
2111597885SJeff Kirsher might be a saner way to pull this off... anyone who feels like a
2211597885SJeff Kirsher full rewrite can be my guest.
2311597885SJeff Kirsher
2411597885SJeff Kirsher Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
2511597885SJeff Kirsher
2611597885SJeff Kirsher 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
2711597885SJeff Kirsher 03/02/2000 changes for better/correct(?) cache-flushing (deller)
2811597885SJeff Kirsher */
2911597885SJeff Kirsher
3011597885SJeff Kirsher /* 82596.c: A generic 82596 ethernet driver for linux. */
3111597885SJeff Kirsher /*
3211597885SJeff Kirsher Based on Apricot.c
3311597885SJeff Kirsher Written 1994 by Mark Evans.
3411597885SJeff Kirsher This driver is for the Apricot 82596 bus-master interface
3511597885SJeff Kirsher
3611597885SJeff Kirsher Modularised 12/94 Mark Evans
3711597885SJeff Kirsher
3811597885SJeff Kirsher
3911597885SJeff Kirsher Modified to support the 82596 ethernet chips on 680x0 VME boards.
4011597885SJeff Kirsher by Richard Hirst <richard@sleepie.demon.co.uk>
4111597885SJeff Kirsher Renamed to be 82596.c
4211597885SJeff Kirsher
4311597885SJeff Kirsher 980825: Changed to receive directly in to sk_buffs which are
4411597885SJeff Kirsher allocated at open() time. Eliminates copy on incoming frames
4511597885SJeff Kirsher (small ones are still copied). Shared data now held in a
4611597885SJeff Kirsher non-cached page, so we can run on 68060 in copyback mode.
4711597885SJeff Kirsher
4811597885SJeff Kirsher TBD:
4911597885SJeff Kirsher * look at deferring rx frames rather than discarding (as per tulip)
5011597885SJeff Kirsher * handle tx ring full as per tulip
5111597885SJeff Kirsher * performance test to tune rx_copybreak
5211597885SJeff Kirsher
5311597885SJeff Kirsher Most of my modifications relate to the braindead big-endian
5411597885SJeff Kirsher implementation by Intel. When the i596 is operating in
5511597885SJeff Kirsher 'big-endian' mode, it thinks a 32 bit value of 0x12345678
5611597885SJeff Kirsher should be stored as 0x56781234. This is a real pain, when
5711597885SJeff Kirsher you have linked lists which are shared by the 680x0 and the
5811597885SJeff Kirsher i596.
5911597885SJeff Kirsher
6011597885SJeff Kirsher Driver skeleton
6111597885SJeff Kirsher Written 1993 by Donald Becker.
6211597885SJeff Kirsher Copyright 1993 United States Government as represented by the Director,
63*9ac40d08SBagas Sanjaya National Security Agency.
6411597885SJeff Kirsher
6511597885SJeff Kirsher The author may be reached as becker@scyld.com, or C/O
6611597885SJeff Kirsher Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
6711597885SJeff Kirsher
6811597885SJeff Kirsher */
6911597885SJeff Kirsher
7011597885SJeff Kirsher #include <linux/module.h>
7111597885SJeff Kirsher #include <linux/kernel.h>
7211597885SJeff Kirsher #include <linux/string.h>
7311597885SJeff Kirsher #include <linux/errno.h>
7411597885SJeff Kirsher #include <linux/ioport.h>
7511597885SJeff Kirsher #include <linux/interrupt.h>
7611597885SJeff Kirsher #include <linux/delay.h>
7711597885SJeff Kirsher #include <linux/netdevice.h>
7811597885SJeff Kirsher #include <linux/etherdevice.h>
7911597885SJeff Kirsher #include <linux/skbuff.h>
8011597885SJeff Kirsher #include <linux/types.h>
8111597885SJeff Kirsher #include <linux/bitops.h>
8211597885SJeff Kirsher #include <linux/dma-mapping.h>
8311597885SJeff Kirsher #include <linux/io.h>
8411597885SJeff Kirsher #include <linux/irq.h>
8511597885SJeff Kirsher #include <linux/gfp.h>
8611597885SJeff Kirsher
/* DEBUG flags
 *
 * Each bit selects one class of diagnostic output; i596_debug below is
 * the runtime mask tested by the DEB() macro.
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


/* Execute statement y only when debug class x is enabled in i596_debug. */
#define DEB(x, y)	if (i596_debug & (x)) { y; }


/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

/* iscp.stat value written before startup; the chip clears it when ready. */
#define ISCP_BUSY	0x0001

/* End-of-list marker as seen by the 82596 (all-ones bus address). */
#define I596_NULL ((u32)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

/* Action-command opcodes placed in the low bits of i596_cmd.command. */
enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

/* SCB command-unit and receive-unit control codes. */
#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)
16311597885SJeff Kirsher
16411597885SJeff Kirsher
/* PORT/CA register pair as mapped on the bus; written by mpu_port()/ca(). */
struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	u32 ca;
};
17011597885SJeff Kirsher
#define EOF		0x8000
#define SIZE_MASK	0x3fff

/* Transmit buffer descriptor, shared with the 82596.
 * All u32 fields hold bus addresses as seen by the chip.
 */
struct i596_tbd {
	unsigned short size;	/* byte count (SIZE_MASK bits); EOF flag in top bit */
	unsigned short pad;
	u32 next;		/* bus address of the next tbd */
	u32 data;		/* bus address of the frame data */
	u32 cache_pad[5];	/* Total 32 bytes... */
};
18111597885SJeff Kirsher
/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;		/* STAT_* completion bits, chip-written */
	unsigned short command;		/* enum commands opcode plus CMD_* flags */
	u32 b_next;	/* Address from i596 viewpoint */
};
19811597885SJeff Kirsher
/* Transmit command: common header followed by chip-visible tbd pointer and
 * driver-private bookkeeping for freeing the skb after transmission.
 */
struct tx_cmd {
	struct i596_cmd cmd;
	u32 tbd;		/* bus address of the first i596_tbd */
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
	dma_addr_t dma_addr;	/* mapping of skb->data, for dma_unmap_single */
#ifdef __LP64__
	u32 cache_pad[6];	/* Total 64 bytes... */
#else
	u32 cache_pad[1];	/* Total 32 bytes... */
#endif
};
21211597885SJeff Kirsher
/* Time-domain-reflectometry (cable test) command. */
struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;	/* chip-written TDR result */
	unsigned short pad;
};

/* Multicast-list setup command; mc_cnt is a byte count, 6 bytes/address. */
struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

/* Station (MAC) address setup command. */
struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

/* Configure command carrying the init_setup parameter bytes. */
struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};
23411597885SJeff Kirsher
/* Receive frame descriptor; ring element shared with the chip.
 * b_next/rbd are bus addresses, v_next/v_prev are the CPU-side ring links.
 */
struct i596_rfd {
	unsigned short stat;	/* chip-written completion status */
	unsigned short cmd;	/* CMD_FLEX, plus CMD_EOL on the last element */
	u32 b_next;	/* Address from i596 viewpoint */
	u32 rbd;	/* bus address of first rbd, or I596_NULL */
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
#ifndef __LP64__
	u32 cache_pad[2];	/* Total 32 bytes... */
#endif
};
24811597885SJeff Kirsher
/* Receive buffer descriptor: the chip-visible layout comes first, followed
 * by driver-private fields the 82596 never looks at.
 */
struct i596_rbd {
	/* hardware data */
	unsigned short count;	/* chip-written byte count for this buffer */
	unsigned short zero1;
	u32 b_next;
	u32 b_data;	/* Address from i596 viewpoint */
	unsigned short size;	/* buffer capacity (PKT_BUF_SZ) */
	unsigned short zero2;
	/* driver data */
	struct sk_buff *skb;	/* skb whose data area backs b_data */
	struct i596_rbd *v_next;
	u32 b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;	/* Address from CPUs viewpoint */
	/* Total 32 bytes... */
#ifdef __LP64__
	u32 cache_pad[4];
#endif
};
26711597885SJeff Kirsher
/* These values as chosen so struct i596_dma fits in one page... */

#define TX_RING_SIZE 32
#define RX_RING_SIZE 16

/* System control block: command/status mailbox plus error counters,
 * all maintained by the chip (addresses are bus addresses).
 */
struct i596_scb {
	unsigned short status;
	unsigned short command;
	u32 cmd;	/* bus address of the command list head */
	u32 rfd;	/* bus address of the receive frame descriptor list */
	u32 crc_err;
	u32 align_err;
	u32 resource_err;
	u32 over_err;
	u32 rcvdt_err;
	u32 short_err;
	unsigned short t_on;
	unsigned short t_off;
};
28711597885SJeff Kirsher
/* Intermediate system configuration pointer: stat starts as ISCP_BUSY and
 * is cleared by the chip once it has read scb (a bus address).
 */
struct i596_iscp {
	u32 stat;
	u32 scb;
};

/* System configuration pointer, located via PORT_ALTSCP at startup. */
struct i596_scp {
	u32 sysbus;	/* SYSBUS operating-mode byte */
	u32 pad;
	u32 iscp;	/* bus address of the iscp */
};
29811597885SJeff Kirsher
/* All chip-shared memory in one structure, each member cacheline (32-byte)
 * aligned so dma_sync_dev()/dma_sync_cpu() can flush them independently.
 */
struct i596_dma {
	struct i596_scp scp __attribute__((aligned(32)));
	volatile struct i596_iscp iscp __attribute__((aligned(32)));
	volatile struct i596_scb scb __attribute__((aligned(32)));
	struct sa_cmd sa_cmd __attribute__((aligned(32)));
	struct cf_cmd cf_cmd __attribute__((aligned(32)));
	struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
	struct mc_cmd mc_cmd __attribute__((aligned(32)));
	struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
	struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
	struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
	struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
};
31211597885SJeff Kirsher
/* Per-device driver state (netdev_priv). dma/dma_addr are the CPU and bus
 * views of the shared i596_dma block; the v_*/b_* pairs elsewhere are
 * derived from this pair via virt_to_dma().
 */
struct i596_private {
	struct i596_dma *dma;		/* CPU address of the shared block */
	u32 stat;
	int last_restart;
	struct i596_rfd *rfd_head;	/* next rfd the chip will fill */
	struct i596_rbd *rbd_head;	/* next rbd the chip will fill */
	struct i596_cmd *cmd_tail;	/* pending command queue */
	struct i596_cmd *cmd_head;
	int cmd_backlog;		/* queued commands, capped by max_cmd_backlog */
	u32 last_cmd;			/* jiffies of last command issue */
	int next_tx_cmd;		/* next free slot in dma->tx_cmds */
	int options;
	spinlock_t lock;		/* serialize access to chip */
	dma_addr_t dma_addr;		/* bus address of *dma */
	void __iomem *mpu_port;		/* PORT register mapping */
	void __iomem *ca;		/* channel-attention register mapping */
};
33011597885SJeff Kirsher
/* Parameter bytes for the CmdConfigure command (copied into cf_cmd);
 * the promiscuous and multicast bytes are patched at runtime before use.
 */
static const char init_setup[] =
{
	0x8E,	/* length, prefetch on */
	0xC8,	/* fifo to 8, monitor off */
	0x80,	/* don't save bad frames */
	0x2E,	/* No source address insertion, 8 byte preamble */
	0x00,	/* priority and backoff defaults */
	0x60,	/* interframe spacing */
	0x00,	/* slot time LSB */
	0xf2,	/* slot time and retries */
	0x00,	/* promiscuous mode */
	0x00,	/* collision detect */
	0x40,	/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
34711597885SJeff Kirsher
/* Forward declarations for the net_device operations and helpers. */
static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);
static inline void ca(struct net_device *dev);
static void mpu_port(struct net_device *dev, int c, dma_addr_t x);

/* Runtime tunables. */
static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 100;			/* poll iterations in wait_istat/wait_cmd callers */
static int max_cmd_backlog = TX_RING_SIZE-1;

#ifdef CONFIG_NET_POLL_CONTROLLER
static void i596_poll_controller(struct net_device *dev);
#endif
36611597885SJeff Kirsher
virt_to_dma(struct i596_private * lp,volatile void * v)36700718b23SChristoph Hellwig static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
36800718b23SChristoph Hellwig {
36900718b23SChristoph Hellwig return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
37000718b23SChristoph Hellwig }
37100718b23SChristoph Hellwig
#ifdef NONCOHERENT_DMA
/* Flush a region of the shared block out to memory before the chip reads it. */
static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
		size_t len)
{
	dma_sync_single_for_device(ndev->dev.parent,
			virt_to_dma(netdev_priv(ndev), addr), len,
			DMA_BIDIRECTIONAL);
}

/* Invalidate a region of the shared block before the CPU reads chip-written data. */
static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
		size_t len)
{
	dma_sync_single_for_cpu(ndev->dev.parent,
			virt_to_dma(netdev_priv(ndev), addr), len,
			DMA_BIDIRECTIONAL);
}
#else
/* Coherent-DMA platforms: no explicit syncing needed, these compile away. */
static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
		size_t len)
{
}
static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
		size_t len)
{
}
#endif /* NONCOHERENT_DMA */
39811597885SJeff Kirsher
wait_istat(struct net_device * dev,struct i596_dma * dma,int delcnt,char * str)39911597885SJeff Kirsher static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
40011597885SJeff Kirsher {
40100718b23SChristoph Hellwig dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
40211597885SJeff Kirsher while (--delcnt && dma->iscp.stat) {
40311597885SJeff Kirsher udelay(10);
40400718b23SChristoph Hellwig dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
40511597885SJeff Kirsher }
40611597885SJeff Kirsher if (!delcnt) {
40711597885SJeff Kirsher printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
40811597885SJeff Kirsher dev->name, str, SWAP16(dma->iscp.stat));
40911597885SJeff Kirsher return -1;
41011597885SJeff Kirsher } else
41111597885SJeff Kirsher return 0;
41211597885SJeff Kirsher }
41311597885SJeff Kirsher
41411597885SJeff Kirsher
wait_cmd(struct net_device * dev,struct i596_dma * dma,int delcnt,char * str)41511597885SJeff Kirsher static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
41611597885SJeff Kirsher {
41700718b23SChristoph Hellwig dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
41811597885SJeff Kirsher while (--delcnt && dma->scb.command) {
41911597885SJeff Kirsher udelay(10);
42000718b23SChristoph Hellwig dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
42111597885SJeff Kirsher }
42211597885SJeff Kirsher if (!delcnt) {
42311597885SJeff Kirsher printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
42411597885SJeff Kirsher dev->name, str,
42511597885SJeff Kirsher SWAP16(dma->scb.status),
42611597885SJeff Kirsher SWAP16(dma->scb.command));
42711597885SJeff Kirsher return -1;
42811597885SJeff Kirsher } else
42911597885SJeff Kirsher return 0;
43011597885SJeff Kirsher }
43111597885SJeff Kirsher
43211597885SJeff Kirsher
i596_display_data(struct net_device * dev)43311597885SJeff Kirsher static void i596_display_data(struct net_device *dev)
43411597885SJeff Kirsher {
43511597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
43611597885SJeff Kirsher struct i596_dma *dma = lp->dma;
43711597885SJeff Kirsher struct i596_cmd *cmd;
43811597885SJeff Kirsher struct i596_rfd *rfd;
43911597885SJeff Kirsher struct i596_rbd *rbd;
44011597885SJeff Kirsher
44111597885SJeff Kirsher printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
44211597885SJeff Kirsher &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
44311597885SJeff Kirsher printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
44411597885SJeff Kirsher &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
44511597885SJeff Kirsher printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
44611597885SJeff Kirsher " .cmd = %08x, .rfd = %08x\n",
44711597885SJeff Kirsher &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
44811597885SJeff Kirsher SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
44911597885SJeff Kirsher printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
45011597885SJeff Kirsher " over %x, rcvdt %x, short %x\n",
45111597885SJeff Kirsher SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
45211597885SJeff Kirsher SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
45311597885SJeff Kirsher SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
45411597885SJeff Kirsher cmd = lp->cmd_head;
45511597885SJeff Kirsher while (cmd != NULL) {
45611597885SJeff Kirsher printk(KERN_DEBUG
45711597885SJeff Kirsher "cmd at %p, .status = %04x, .command = %04x,"
45811597885SJeff Kirsher " .b_next = %08x\n",
45911597885SJeff Kirsher cmd, SWAP16(cmd->status), SWAP16(cmd->command),
46011597885SJeff Kirsher SWAP32(cmd->b_next));
46111597885SJeff Kirsher cmd = cmd->v_next;
46211597885SJeff Kirsher }
46311597885SJeff Kirsher rfd = lp->rfd_head;
46411597885SJeff Kirsher printk(KERN_DEBUG "rfd_head = %p\n", rfd);
46511597885SJeff Kirsher do {
46611597885SJeff Kirsher printk(KERN_DEBUG
46711597885SJeff Kirsher " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
46811597885SJeff Kirsher " count %04x\n",
46911597885SJeff Kirsher rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
47011597885SJeff Kirsher SWAP32(rfd->b_next), SWAP32(rfd->rbd),
47111597885SJeff Kirsher SWAP16(rfd->count));
47211597885SJeff Kirsher rfd = rfd->v_next;
47311597885SJeff Kirsher } while (rfd != lp->rfd_head);
47411597885SJeff Kirsher rbd = lp->rbd_head;
47511597885SJeff Kirsher printk(KERN_DEBUG "rbd_head = %p\n", rbd);
47611597885SJeff Kirsher do {
47711597885SJeff Kirsher printk(KERN_DEBUG
47811597885SJeff Kirsher " %p .count %04x, b_next %08x, b_data %08x,"
47911597885SJeff Kirsher " size %04x\n",
48011597885SJeff Kirsher rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
48111597885SJeff Kirsher SWAP32(rbd->b_data), SWAP16(rbd->size));
48211597885SJeff Kirsher rbd = rbd->v_next;
48311597885SJeff Kirsher } while (rbd != lp->rbd_head);
48400718b23SChristoph Hellwig dma_sync_cpu(dev, dma, sizeof(struct i596_dma));
48511597885SJeff Kirsher }
48611597885SJeff Kirsher
init_rx_bufs(struct net_device * dev)48711597885SJeff Kirsher static inline int init_rx_bufs(struct net_device *dev)
48811597885SJeff Kirsher {
48911597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
49011597885SJeff Kirsher struct i596_dma *dma = lp->dma;
49111597885SJeff Kirsher int i;
49211597885SJeff Kirsher struct i596_rfd *rfd;
49311597885SJeff Kirsher struct i596_rbd *rbd;
49411597885SJeff Kirsher
49511597885SJeff Kirsher /* First build the Receive Buffer Descriptor List */
49611597885SJeff Kirsher
49711597885SJeff Kirsher for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
49811597885SJeff Kirsher dma_addr_t dma_addr;
49911597885SJeff Kirsher struct sk_buff *skb;
50011597885SJeff Kirsher
50111597885SJeff Kirsher skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
50211597885SJeff Kirsher if (skb == NULL)
50311597885SJeff Kirsher return -1;
50411597885SJeff Kirsher dma_addr = dma_map_single(dev->dev.parent, skb->data,
50511597885SJeff Kirsher PKT_BUF_SZ, DMA_FROM_DEVICE);
50611597885SJeff Kirsher rbd->v_next = rbd+1;
50711597885SJeff Kirsher rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
50811597885SJeff Kirsher rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
50911597885SJeff Kirsher rbd->skb = skb;
51011597885SJeff Kirsher rbd->v_data = skb->data;
51111597885SJeff Kirsher rbd->b_data = SWAP32(dma_addr);
51211597885SJeff Kirsher rbd->size = SWAP16(PKT_BUF_SZ);
51311597885SJeff Kirsher }
51411597885SJeff Kirsher lp->rbd_head = dma->rbds;
51511597885SJeff Kirsher rbd = dma->rbds + rx_ring_size - 1;
51611597885SJeff Kirsher rbd->v_next = dma->rbds;
51711597885SJeff Kirsher rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
51811597885SJeff Kirsher
51911597885SJeff Kirsher /* Now build the Receive Frame Descriptor List */
52011597885SJeff Kirsher
52111597885SJeff Kirsher for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
52211597885SJeff Kirsher rfd->rbd = I596_NULL;
52311597885SJeff Kirsher rfd->v_next = rfd+1;
52411597885SJeff Kirsher rfd->v_prev = rfd-1;
52511597885SJeff Kirsher rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
52611597885SJeff Kirsher rfd->cmd = SWAP16(CMD_FLEX);
52711597885SJeff Kirsher }
52811597885SJeff Kirsher lp->rfd_head = dma->rfds;
52911597885SJeff Kirsher dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
53011597885SJeff Kirsher rfd = dma->rfds;
53111597885SJeff Kirsher rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
53211597885SJeff Kirsher rfd->v_prev = dma->rfds + rx_ring_size - 1;
53311597885SJeff Kirsher rfd = dma->rfds + rx_ring_size - 1;
53411597885SJeff Kirsher rfd->v_next = dma->rfds;
53511597885SJeff Kirsher rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
53611597885SJeff Kirsher rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
53711597885SJeff Kirsher
53800718b23SChristoph Hellwig dma_sync_dev(dev, dma, sizeof(struct i596_dma));
53911597885SJeff Kirsher return 0;
54011597885SJeff Kirsher }
54111597885SJeff Kirsher
remove_rx_bufs(struct net_device * dev)54211597885SJeff Kirsher static inline void remove_rx_bufs(struct net_device *dev)
54311597885SJeff Kirsher {
54411597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
54511597885SJeff Kirsher struct i596_rbd *rbd;
54611597885SJeff Kirsher int i;
54711597885SJeff Kirsher
54811597885SJeff Kirsher for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
54911597885SJeff Kirsher if (rbd->skb == NULL)
55011597885SJeff Kirsher break;
55111597885SJeff Kirsher dma_unmap_single(dev->dev.parent,
55211597885SJeff Kirsher (dma_addr_t)SWAP32(rbd->b_data),
55311597885SJeff Kirsher PKT_BUF_SZ, DMA_FROM_DEVICE);
55411597885SJeff Kirsher dev_kfree_skb(rbd->skb);
55511597885SJeff Kirsher }
55611597885SJeff Kirsher }
55711597885SJeff Kirsher
55811597885SJeff Kirsher
rebuild_rx_bufs(struct net_device * dev)55911597885SJeff Kirsher static void rebuild_rx_bufs(struct net_device *dev)
56011597885SJeff Kirsher {
56111597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
56211597885SJeff Kirsher struct i596_dma *dma = lp->dma;
56311597885SJeff Kirsher int i;
56411597885SJeff Kirsher
56511597885SJeff Kirsher /* Ensure rx frame/buffer descriptors are tidy */
56611597885SJeff Kirsher
56711597885SJeff Kirsher for (i = 0; i < rx_ring_size; i++) {
56811597885SJeff Kirsher dma->rfds[i].rbd = I596_NULL;
56911597885SJeff Kirsher dma->rfds[i].cmd = SWAP16(CMD_FLEX);
57011597885SJeff Kirsher }
57111597885SJeff Kirsher dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
57211597885SJeff Kirsher lp->rfd_head = dma->rfds;
57311597885SJeff Kirsher dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
57411597885SJeff Kirsher lp->rbd_head = dma->rbds;
57511597885SJeff Kirsher dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
57611597885SJeff Kirsher
57700718b23SChristoph Hellwig dma_sync_dev(dev, dma, sizeof(struct i596_dma));
57811597885SJeff Kirsher }
57911597885SJeff Kirsher
58011597885SJeff Kirsher
/*
 * Reset the 82596 and bring it up from scratch: point it at the SCP/ISCP/SCB
 * chain, request the IRQ, rebuild the RX rings, queue the initial
 * Configure/SASetup/TDR commands and finally start the receive unit.
 * Returns 0 on success, -1 on any timeout/IRQ failure (chip is left reset).
 */
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	mpu_port(dev, PORT_RESET, 0);
	udelay(100);			/* Wait 100us - seems to help */

	/* change the scp address */

	lp->last_cmd = jiffies;

	/* Chain SCP -> ISCP -> SCB using bus addresses; the chip reads these
	 * structures by DMA, hence the SWAP/virt_to_dma conversions. */
	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat = SWAP32(ISCP_BUSY);	/* chip clears BUSY when init done */
	lp->cmd_backlog = 0;

	lp->cmd_head = NULL;
	dma->scb.cmd = I596_NULL;		/* no command chained yet */

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	/* Flush the freshly written descriptors to the device before the
	 * chip is told where to find them. */
	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	/* Hand the SCP bus address to the chip and issue channel attention. */
	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: i82596 initialization successful\n",
			     dev->name));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		goto failed;
	}

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);

	dma->scb.command = 0;
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	/* Queue the three setup commands; i596_add_cmd() syncs the SCB and
	 * kicks the command unit for the first one. */
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: queuing CmdConfigure\n", dev->name));
	memcpy(dma->cf_cmd.i596_config, init_setup, 14);
	dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
	dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
	i596_add_cmd(dev, &dma->cf_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
	dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
	i596_add_cmd(dev, &dma->sa_cmd.cmd);

	DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
	dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
	i596_add_cmd(dev, &dma->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	/* Wait for the SCB command field to drain before issuing RX_START. */
	if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed_free_irq;
	}
	DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	dma->scb.command = SWAP16(RX_START);
	dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));

	ca(dev);

	spin_unlock_irqrestore (&lp->lock, flags);
	if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
		goto failed_free_irq;
	DEB(DEB_INIT, printk(KERN_DEBUG
			     "%s: Receive unit started OK\n", dev->name));
	return 0;

failed_free_irq:
	free_irq(dev->irq, dev);
failed:
	printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
	mpu_port(dev, PORT_RESET, 0);
	return -1;
}
67311597885SJeff Kirsher
67411597885SJeff Kirsher
/*
 * Receive path: walk the receive frame descriptor (RFD) ring starting at
 * lp->rfd_head, passing each completed frame up the stack and recycling its
 * descriptors.  Big frames (> rx_copybreak) are handed up in place and the
 * ring buffer replaced with a fresh skb; small ones are copied into a new
 * skb so the DMA buffer can be reused immediately.  Always returns 0.
 * Called from the interrupt handler / reset path; no locking done here.
 */
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME, printk(KERN_DEBUG
				"i596_rx(), rfd_head %p, rbd_head %p\n",
				lp->rfd_head, lp->rbd_head));


	rfd = lp->rfd_head;		/* Ref next frame to check */

	/* Make the device's writes to the descriptor visible to the CPU. */
	dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	while (rfd->stat & SWAP16(STAT_C)) {	/* Loop while complete frames */
		/* Resolve the buffer descriptor attached to this frame, if any.
		 * In the normal (flexible mode) case it must be rbd_head. */
		if (rfd->rbd == I596_NULL)
			rbd = NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr) {
			rbd = lp->rbd_head;
			dma_sync_cpu(dev, rbd, sizeof(struct i596_rbd));
		} else {
			printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG
					"  rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
					rfd, rfd->rbd, rfd->stat));

		if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
			/* a good frame */
			int pkt_len = SWAP16(rbd->count) & 0x3fff;	/* low 14 bits = actual count */
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;
				dma_addr_t dma_addr;

				dma_unmap_single(dev->dev.parent,
						 (dma_addr_t)SWAP32(rbd->b_data),
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb_ip_align(dev,
								   PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}

				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				/* Re-arm the RBD with the replacement buffer. */
				rbd->skb = newskb;
				dma_addr = dma_map_single(dev->dev.parent,
							  newskb->data,
							  PKT_BUF_SZ,
							  DMA_FROM_DEVICE);
				rbd->v_data = newskb->data;
				rbd->b_data = SWAP32(dma_addr);
				dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
			} else {
				/* Small frame: copy into a right-sized skb below. */
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					/* Sync so the CPU sees the DMA'd data,
					 * copy, then give the buffer back. */
					dma_sync_single_for_cpu(dev->dev.parent,
								(dma_addr_t)SWAP32(rbd->b_data),
								PKT_BUF_SZ, DMA_FROM_DEVICE);
					skb_put_data(skb, rbd->v_data,
						     pkt_len);
					dma_sync_single_for_device(dev->dev.parent,
								   (dma_addr_t)SWAP32(rbd->b_data),
								   PKT_BUF_SZ, DMA_FROM_DEVICE);
				}
				skb->len = pkt_len;
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		} else {
			/* Frame errored out: decode the status bits into stats. */
			DEB(DEB_ERRORS, printk(KERN_DEBUG
					       "%s: Error, rfd.stat = 0x%04x\n",
					       dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if (rfd->stat & SWAP16(0x0100))
				dev->stats.collisions++;
			if (rfd->stat & SWAP16(0x8000))
				dev->stats.rx_length_errors++;
			if (rfd->stat & SWAP16(0x0001))
				dev->stats.rx_over_errors++;
			if (rfd->stat & SWAP16(0x0002))
				dev->stats.rx_fifo_errors++;
			if (rfd->stat & SWAP16(0x0004))
				dev->stats.rx_frame_errors++;
			if (rfd->stat & SWAP16(0x0008))
				dev->stats.rx_crc_errors++;
			if (rfd->stat & SWAP16(0x0010))
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
			dma_sync_dev(dev, rbd, sizeof(struct i596_rbd));
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
		rfd->count = 0;

		/* Update record of next frame descriptor to process */

		lp->dma->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = SWAP16(CMD_FLEX);
		dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
		rfd = lp->rfd_head;
		dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
	}

	DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}
82311597885SJeff Kirsher
82411597885SJeff Kirsher
/*
 * Abort and free every command still queued on lp->cmd_head.  Pending TX
 * commands have their skb unmapped and freed (counted as aborted errors);
 * all commands are unlinked and their descriptors pushed back to the
 * device.  Finally the SCB command pointer is cleared.  Used by the reset
 * and close paths; caller must ensure the command unit is quiescing.
 */
static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		/* Low 3 bits of the (byte-swapped) command word select the type. */
		switch (SWAP16(ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;
				dma_unmap_single(dev->dev.parent,
						 tx_cmd->dma_addr,
						 skb->len, DMA_TO_DEVICE);

				dev_kfree_skb(skb);

				/* The frame never made it out. */
				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = NULL;
				ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = NULL;
			ptr->b_next = I596_NULL;
		}
		/* Make the unlinked descriptor visible to the device. */
		dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
	}

	/* Wait for any in-flight SCB command, then detach the command chain. */
	wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
	lp->dma->scb.cmd = I596_NULL;
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
}
86411597885SJeff Kirsher
86511597885SJeff Kirsher
/*
 * Full soft reset of the chip: abort the command and receive units, flush
 * the pending command queue and any completed RX frames, then re-run the
 * whole memory/ring initialisation via init_i596_mem().  The TX queue is
 * stopped for the duration and restarted before re-init.
 */
static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
{
	unsigned long flags;

	DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	/* Let any command already in the SCB finish first. */
	wait_cmd(dev, lp->dma, 100, "i596_reset timed out");

	netif_stop_queue(dev);

	/* FIXME: this command might cause an lpmc */
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
	ca(dev);

	/* wait for shutdown */
	wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	/* Reap aborted commands and drain any frames already received. */
	i596_cleanup_cmd(dev, lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}
89311597885SJeff Kirsher
89411597885SJeff Kirsher
/*
 * Append a command to the chip's command chain.  The command is terminated
 * (CMD_EOL) and marked to interrupt on completion (CMD_INTR); if the chain
 * was empty the SCB is pointed at it and the command unit is kicked with
 * CUC_START.  If the backlog has grown past max_cmd_backlog and the last
 * command is older than ticks_limit, the chip is assumed wedged and reset.
 */
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = netdev_priv(dev);
	struct i596_dma *dma = lp->dma;
	unsigned long flags;

	DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
			       lp->cmd_head));

	/* Prepare the descriptor before it becomes reachable by the chip. */
	cmd->status = 0;
	cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
	cmd->v_next = NULL;
	cmd->b_next = I596_NULL;
	dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != NULL) {
		/* Link onto the tail; the running CU will pick it up. */
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {
		/* Empty chain: point the SCB at us and start the CU. */
		lp->cmd_head = cmd;
		wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	/* Watchdog: too many outstanding commands for too long -> reset. */
	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_ERR
		       "%s: command unit timed out, status resetting.\n",
		       dev->name);
#if 1
		i596_reset(dev, lp);
#endif
	}
}
94311597885SJeff Kirsher
i596_open(struct net_device * dev)94411597885SJeff Kirsher static int i596_open(struct net_device *dev)
94511597885SJeff Kirsher {
94611597885SJeff Kirsher DEB(DEB_OPEN, printk(KERN_DEBUG
94711597885SJeff Kirsher "%s: i596_open() irq %d.\n", dev->name, dev->irq));
94811597885SJeff Kirsher
94911597885SJeff Kirsher if (init_rx_bufs(dev)) {
95011597885SJeff Kirsher printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
95111597885SJeff Kirsher return -EAGAIN;
95211597885SJeff Kirsher }
95311597885SJeff Kirsher if (init_i596_mem(dev)) {
95411597885SJeff Kirsher printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
95511597885SJeff Kirsher goto out_remove_rx_bufs;
95611597885SJeff Kirsher }
95711597885SJeff Kirsher netif_start_queue(dev);
95811597885SJeff Kirsher
95911597885SJeff Kirsher return 0;
96011597885SJeff Kirsher
96111597885SJeff Kirsher out_remove_rx_bufs:
96211597885SJeff Kirsher remove_rx_bufs(dev);
96311597885SJeff Kirsher return -EAGAIN;
96411597885SJeff Kirsher }
96511597885SJeff Kirsher
i596_tx_timeout(struct net_device * dev,unsigned int txqueue)9660290bd29SMichael S. Tsirkin static void i596_tx_timeout (struct net_device *dev, unsigned int txqueue)
96711597885SJeff Kirsher {
96811597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
96911597885SJeff Kirsher
97011597885SJeff Kirsher /* Transmitter timeout, serious problems. */
97111597885SJeff Kirsher DEB(DEB_ERRORS, printk(KERN_DEBUG
97211597885SJeff Kirsher "%s: transmit timed out, status resetting.\n",
97311597885SJeff Kirsher dev->name));
97411597885SJeff Kirsher
97511597885SJeff Kirsher dev->stats.tx_errors++;
97611597885SJeff Kirsher
97711597885SJeff Kirsher /* Try to restart the adaptor */
97811597885SJeff Kirsher if (lp->last_restart == dev->stats.tx_packets) {
97911597885SJeff Kirsher DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
98011597885SJeff Kirsher /* Shutdown and restart */
98111597885SJeff Kirsher i596_reset (dev, lp);
98211597885SJeff Kirsher } else {
98311597885SJeff Kirsher /* Issue a channel attention signal */
98411597885SJeff Kirsher DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
98511597885SJeff Kirsher lp->dma->scb.command = SWAP16(CUC_START | RX_START);
98600718b23SChristoph Hellwig dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
98711597885SJeff Kirsher ca (dev);
98811597885SJeff Kirsher lp->last_restart = dev->stats.tx_packets;
98911597885SJeff Kirsher }
99011597885SJeff Kirsher
991860e9538SFlorian Westphal netif_trans_update(dev); /* prevent tx timeout */
99211597885SJeff Kirsher netif_wake_queue (dev);
99311597885SJeff Kirsher }
99411597885SJeff Kirsher
99511597885SJeff Kirsher
/*
 * ndo_start_xmit callback: fill the next tx_cmd/tbd descriptor pair, map
 * the skb for DMA and queue a CmdTx via i596_add_cmd().  Short frames are
 * padded to ETH_ZLEN first.  If the ring slot is still owned by the chip
 * (command word non-zero) the packet is dropped.  Always returns
 * NETDEV_TX_OK; the queue is stopped only for the duration of the call.
 */
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX, printk(KERN_DEBUG
				"%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (length < ETH_ZLEN) {
		/* Pad runt frames up to the Ethernet minimum. */
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	netif_stop_queue(dev);

	/* Pick the next TX command / buffer descriptor pair in the ring. */
	tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
	tbd = lp->dma->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		/* Slot not yet reclaimed by the interrupt handler: drop. */
		DEB(DEB_ERRORS, printk(KERN_DEBUG
				       "%s: xmit ring full, dropping packet.\n",
				       dev->name));
		dev->stats.tx_dropped++;

		dev_kfree_skb_any(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
		tx_cmd->skb = skb;	/* kept so the completion IRQ can free it */

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = SWAP16(EOF | length);	/* single buffer: EOF set */

		/* Map the payload for device reads and record the bus address. */
		tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
						  skb->len, DMA_TO_DEVICE);
		tbd->data = SWAP32(tx_cmd->dma_addr);

		DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
		/* Push both descriptors to the device before queueing CmdTx. */
		dma_sync_dev(dev, tx_cmd, sizeof(struct tx_cmd));
		dma_sync_dev(dev, tbd, sizeof(struct i596_tbd));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}
105611597885SJeff Kirsher
print_eth(unsigned char * add,char * str)105711597885SJeff Kirsher static void print_eth(unsigned char *add, char *str)
105811597885SJeff Kirsher {
105911597885SJeff Kirsher printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
106011597885SJeff Kirsher add, add + 6, add, add[12], add[13], str);
106111597885SJeff Kirsher }
106211597885SJeff Kirsher static const struct net_device_ops i596_netdev_ops = {
106311597885SJeff Kirsher .ndo_open = i596_open,
106411597885SJeff Kirsher .ndo_stop = i596_close,
106511597885SJeff Kirsher .ndo_start_xmit = i596_start_xmit,
1066afc4b13dSJiri Pirko .ndo_set_rx_mode = set_multicast_list,
106711597885SJeff Kirsher .ndo_tx_timeout = i596_tx_timeout,
106811597885SJeff Kirsher .ndo_validate_addr = eth_validate_addr,
106911597885SJeff Kirsher .ndo_set_mac_address = eth_mac_addr,
107011597885SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
107111597885SJeff Kirsher .ndo_poll_controller = i596_poll_controller,
107211597885SJeff Kirsher #endif
107311597885SJeff Kirsher };
107411597885SJeff Kirsher
/*
 * Common probe: verify (at compile time) that the DMA descriptor structures
 * have the cache-line-friendly sizes this driver depends on, wire up the
 * netdev ops, zero the shared DMA area and register the network device.
 * Expects the caller to have filled in dev->base_addr, dev->irq and
 * lp->dma.  Returns 0 or a negative errno from register_netdev().
 */
static int i82596_probe(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	int ret;

	/* This lot is ensure things have been cache line aligned. */
	BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
	BUILD_BUG_ON(sizeof(struct i596_rbd) &  31);
	BUILD_BUG_ON(sizeof(struct tx_cmd)   &  31);
	BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
	BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
#endif

	if (!dev->base_addr || !dev->irq)
		return -ENODEV;

	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/* Start with a clean shared area: no command or RFD chained. */
	memset(lp->dma, 0, sizeof(struct i596_dma));
	lp->dma->scb.command = 0;
	lp->dma->scb.cmd = I596_NULL;
	lp->dma->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));

	ret = register_netdev(dev);
	if (ret)
		return ret;

	DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
			      dev->name, dev->base_addr, dev->dev_addr,
			      dev->irq));
	DEB(DEB_INIT, printk(KERN_INFO
			     "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
			     dev->name, lp->dma, (int)sizeof(struct i596_dma),
			     &lp->dma->scb));

	return 0;
}
111711597885SJeff Kirsher
111811597885SJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
i596_poll_controller(struct net_device * dev)111911597885SJeff Kirsher static void i596_poll_controller(struct net_device *dev)
112011597885SJeff Kirsher {
112111597885SJeff Kirsher disable_irq(dev->irq);
112211597885SJeff Kirsher i596_interrupt(dev->irq, dev);
112311597885SJeff Kirsher enable_irq(dev->irq);
112411597885SJeff Kirsher }
112511597885SJeff Kirsher #endif
112611597885SJeff Kirsher
i596_interrupt(int irq,void * dev_id)112711597885SJeff Kirsher static irqreturn_t i596_interrupt(int irq, void *dev_id)
112811597885SJeff Kirsher {
112911597885SJeff Kirsher struct net_device *dev = dev_id;
113011597885SJeff Kirsher struct i596_private *lp;
113111597885SJeff Kirsher struct i596_dma *dma;
113211597885SJeff Kirsher unsigned short status, ack_cmd = 0;
113311597885SJeff Kirsher
113411597885SJeff Kirsher lp = netdev_priv(dev);
113511597885SJeff Kirsher dma = lp->dma;
113611597885SJeff Kirsher
113711597885SJeff Kirsher spin_lock (&lp->lock);
113811597885SJeff Kirsher
113911597885SJeff Kirsher wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
114011597885SJeff Kirsher status = SWAP16(dma->scb.status);
114111597885SJeff Kirsher
114211597885SJeff Kirsher DEB(DEB_INTS, printk(KERN_DEBUG
114311597885SJeff Kirsher "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
114411597885SJeff Kirsher dev->name, dev->irq, status));
114511597885SJeff Kirsher
114611597885SJeff Kirsher ack_cmd = status & 0xf000;
114711597885SJeff Kirsher
114811597885SJeff Kirsher if (!ack_cmd) {
114911597885SJeff Kirsher DEB(DEB_ERRORS, printk(KERN_DEBUG
115011597885SJeff Kirsher "%s: interrupt with no events\n",
115111597885SJeff Kirsher dev->name));
115211597885SJeff Kirsher spin_unlock (&lp->lock);
115311597885SJeff Kirsher return IRQ_NONE;
115411597885SJeff Kirsher }
115511597885SJeff Kirsher
115611597885SJeff Kirsher if ((status & 0x8000) || (status & 0x2000)) {
115711597885SJeff Kirsher struct i596_cmd *ptr;
115811597885SJeff Kirsher
115911597885SJeff Kirsher if ((status & 0x8000))
116011597885SJeff Kirsher DEB(DEB_INTS,
116111597885SJeff Kirsher printk(KERN_DEBUG
116211597885SJeff Kirsher "%s: i596 interrupt completed command.\n",
116311597885SJeff Kirsher dev->name));
116411597885SJeff Kirsher if ((status & 0x2000))
116511597885SJeff Kirsher DEB(DEB_INTS,
116611597885SJeff Kirsher printk(KERN_DEBUG
116711597885SJeff Kirsher "%s: i596 interrupt command unit inactive %x.\n",
116811597885SJeff Kirsher dev->name, status & 0x0700));
116911597885SJeff Kirsher
117011597885SJeff Kirsher while (lp->cmd_head != NULL) {
117100718b23SChristoph Hellwig dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
117211597885SJeff Kirsher if (!(lp->cmd_head->status & SWAP16(STAT_C)))
117311597885SJeff Kirsher break;
117411597885SJeff Kirsher
117511597885SJeff Kirsher ptr = lp->cmd_head;
117611597885SJeff Kirsher
117711597885SJeff Kirsher DEB(DEB_STATUS,
117811597885SJeff Kirsher printk(KERN_DEBUG
117911597885SJeff Kirsher "cmd_head->status = %04x, ->command = %04x\n",
118011597885SJeff Kirsher SWAP16(lp->cmd_head->status),
118111597885SJeff Kirsher SWAP16(lp->cmd_head->command)));
118211597885SJeff Kirsher lp->cmd_head = ptr->v_next;
118311597885SJeff Kirsher lp->cmd_backlog--;
118411597885SJeff Kirsher
118511597885SJeff Kirsher switch (SWAP16(ptr->command) & 0x7) {
118611597885SJeff Kirsher case CmdTx:
118711597885SJeff Kirsher {
118811597885SJeff Kirsher struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
118911597885SJeff Kirsher struct sk_buff *skb = tx_cmd->skb;
119011597885SJeff Kirsher
119111597885SJeff Kirsher if (ptr->status & SWAP16(STAT_OK)) {
119211597885SJeff Kirsher DEB(DEB_TXADDR,
119311597885SJeff Kirsher print_eth(skb->data, "tx-done"));
119411597885SJeff Kirsher } else {
119511597885SJeff Kirsher dev->stats.tx_errors++;
119611597885SJeff Kirsher if (ptr->status & SWAP16(0x0020))
119711597885SJeff Kirsher dev->stats.collisions++;
119811597885SJeff Kirsher if (!(ptr->status & SWAP16(0x0040)))
119911597885SJeff Kirsher dev->stats.tx_heartbeat_errors++;
120011597885SJeff Kirsher if (ptr->status & SWAP16(0x0400))
120111597885SJeff Kirsher dev->stats.tx_carrier_errors++;
120211597885SJeff Kirsher if (ptr->status & SWAP16(0x0800))
120311597885SJeff Kirsher dev->stats.collisions++;
120411597885SJeff Kirsher if (ptr->status & SWAP16(0x1000))
120511597885SJeff Kirsher dev->stats.tx_aborted_errors++;
120611597885SJeff Kirsher }
120711597885SJeff Kirsher dma_unmap_single(dev->dev.parent,
120811597885SJeff Kirsher tx_cmd->dma_addr,
120911597885SJeff Kirsher skb->len, DMA_TO_DEVICE);
1210baff7b09SYang Wei dev_consume_skb_irq(skb);
121111597885SJeff Kirsher
121211597885SJeff Kirsher tx_cmd->cmd.command = 0; /* Mark free */
121311597885SJeff Kirsher break;
121411597885SJeff Kirsher }
121511597885SJeff Kirsher case CmdTDR:
121611597885SJeff Kirsher {
121711597885SJeff Kirsher unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
121811597885SJeff Kirsher
121911597885SJeff Kirsher if (status & 0x8000) {
122011597885SJeff Kirsher DEB(DEB_ANY,
122111597885SJeff Kirsher printk(KERN_DEBUG "%s: link ok.\n",
122211597885SJeff Kirsher dev->name));
122311597885SJeff Kirsher } else {
122411597885SJeff Kirsher if (status & 0x4000)
122511597885SJeff Kirsher printk(KERN_ERR
122611597885SJeff Kirsher "%s: Transceiver problem.\n",
122711597885SJeff Kirsher dev->name);
122811597885SJeff Kirsher if (status & 0x2000)
122911597885SJeff Kirsher printk(KERN_ERR
123011597885SJeff Kirsher "%s: Termination problem.\n",
123111597885SJeff Kirsher dev->name);
123211597885SJeff Kirsher if (status & 0x1000)
123311597885SJeff Kirsher printk(KERN_ERR
123411597885SJeff Kirsher "%s: Short circuit.\n",
123511597885SJeff Kirsher dev->name);
123611597885SJeff Kirsher
123711597885SJeff Kirsher DEB(DEB_TDR,
123811597885SJeff Kirsher printk(KERN_DEBUG "%s: Time %d.\n",
123911597885SJeff Kirsher dev->name, status & 0x07ff));
124011597885SJeff Kirsher }
124111597885SJeff Kirsher break;
124211597885SJeff Kirsher }
124311597885SJeff Kirsher case CmdConfigure:
124411597885SJeff Kirsher /*
124511597885SJeff Kirsher * Zap command so set_multicast_list() know
124611597885SJeff Kirsher * it is free
124711597885SJeff Kirsher */
124811597885SJeff Kirsher ptr->command = 0;
124911597885SJeff Kirsher break;
125011597885SJeff Kirsher }
125111597885SJeff Kirsher ptr->v_next = NULL;
125211597885SJeff Kirsher ptr->b_next = I596_NULL;
125300718b23SChristoph Hellwig dma_sync_dev(dev, ptr, sizeof(struct i596_cmd));
125411597885SJeff Kirsher lp->last_cmd = jiffies;
125511597885SJeff Kirsher }
125611597885SJeff Kirsher
125711597885SJeff Kirsher /* This mess is arranging that only the last of any outstanding
125811597885SJeff Kirsher * commands has the interrupt bit set. Should probably really
125911597885SJeff Kirsher * only add to the cmd queue when the CU is stopped.
126011597885SJeff Kirsher */
126111597885SJeff Kirsher ptr = lp->cmd_head;
126211597885SJeff Kirsher while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
126311597885SJeff Kirsher struct i596_cmd *prev = ptr;
126411597885SJeff Kirsher
126511597885SJeff Kirsher ptr->command &= SWAP16(0x1fff);
126611597885SJeff Kirsher ptr = ptr->v_next;
126700718b23SChristoph Hellwig dma_sync_dev(dev, prev, sizeof(struct i596_cmd));
126811597885SJeff Kirsher }
126911597885SJeff Kirsher
127011597885SJeff Kirsher if (lp->cmd_head != NULL)
127111597885SJeff Kirsher ack_cmd |= CUC_START;
127211597885SJeff Kirsher dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
127300718b23SChristoph Hellwig dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
127411597885SJeff Kirsher }
127511597885SJeff Kirsher if ((status & 0x1000) || (status & 0x4000)) {
127611597885SJeff Kirsher if ((status & 0x4000))
127711597885SJeff Kirsher DEB(DEB_INTS,
127811597885SJeff Kirsher printk(KERN_DEBUG
127911597885SJeff Kirsher "%s: i596 interrupt received a frame.\n",
128011597885SJeff Kirsher dev->name));
128111597885SJeff Kirsher i596_rx(dev);
128211597885SJeff Kirsher /* Only RX_START if stopped - RGH 07-07-96 */
128311597885SJeff Kirsher if (status & 0x1000) {
128411597885SJeff Kirsher if (netif_running(dev)) {
128511597885SJeff Kirsher DEB(DEB_ERRORS,
128611597885SJeff Kirsher printk(KERN_DEBUG
128711597885SJeff Kirsher "%s: i596 interrupt receive unit inactive, status 0x%x\n",
128811597885SJeff Kirsher dev->name, status));
128911597885SJeff Kirsher ack_cmd |= RX_START;
129011597885SJeff Kirsher dev->stats.rx_errors++;
129111597885SJeff Kirsher dev->stats.rx_fifo_errors++;
129211597885SJeff Kirsher rebuild_rx_bufs(dev);
129311597885SJeff Kirsher }
129411597885SJeff Kirsher }
129511597885SJeff Kirsher }
129611597885SJeff Kirsher wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
129711597885SJeff Kirsher dma->scb.command = SWAP16(ack_cmd);
129800718b23SChristoph Hellwig dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
129911597885SJeff Kirsher
130011597885SJeff Kirsher /* DANGER: I suspect that some kind of interrupt
130111597885SJeff Kirsher acknowledgement aside from acking the 82596 might be needed
130211597885SJeff Kirsher here... but it's running acceptably without */
130311597885SJeff Kirsher
130411597885SJeff Kirsher ca(dev);
130511597885SJeff Kirsher
130611597885SJeff Kirsher wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
130711597885SJeff Kirsher DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
130811597885SJeff Kirsher
130911597885SJeff Kirsher spin_unlock (&lp->lock);
131011597885SJeff Kirsher return IRQ_HANDLED;
131111597885SJeff Kirsher }
131211597885SJeff Kirsher
/* i596_close -- ndo_stop handler: bring the interface down.
 *
 * Stops the transmit queue, tells the 82596 to abort both its command
 * unit (CU) and receive unit (RX), then tears down the command queue,
 * IRQ and receive buffers.  Always returns 0.
 *
 * NOTE(review): the wait_cmd / SCB-write / ca() ordering below matches
 * the chip's command-acceptance protocol and should not be reordered
 * (the file header warns this implementation is picky about CU/RX
 * command ordering).
 */
static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,
	    printk(KERN_DEBUG
		   "%s: Shutting down ethercard, status was %4.4x.\n",
		   dev->name, SWAP16(lp->dma->scb.status)));

	spin_lock_irqsave(&lp->lock, flags);

	/* Wait for any in-flight SCB command to be accepted before
	 * issuing the abort, then make the new command visible to the
	 * device before ringing channel attention. */
	wait_cmd(dev, lp->dma, 100, "close1 timed out");
	lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
	dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));

	ca(dev);	/* channel attention: chip re-reads the SCB */

	/* Wait for the abort to be acknowledged before unlocking. */
	wait_cmd(dev, lp->dma, 100, "close2 timed out");
	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT, i596_display_data(dev));
	i596_cleanup_cmd(dev, lp);

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}
134311597885SJeff Kirsher
134411597885SJeff Kirsher /*
134511597885SJeff Kirsher * Set or clear the multicast filter for this adaptor.
134611597885SJeff Kirsher */
134711597885SJeff Kirsher
set_multicast_list(struct net_device * dev)134811597885SJeff Kirsher static void set_multicast_list(struct net_device *dev)
134911597885SJeff Kirsher {
135011597885SJeff Kirsher struct i596_private *lp = netdev_priv(dev);
135111597885SJeff Kirsher struct i596_dma *dma = lp->dma;
135211597885SJeff Kirsher int config = 0, cnt;
135311597885SJeff Kirsher
135411597885SJeff Kirsher DEB(DEB_MULTI,
135511597885SJeff Kirsher printk(KERN_DEBUG
135611597885SJeff Kirsher "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
135711597885SJeff Kirsher dev->name, netdev_mc_count(dev),
135811597885SJeff Kirsher dev->flags & IFF_PROMISC ? "ON" : "OFF",
135911597885SJeff Kirsher dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
136011597885SJeff Kirsher
136111597885SJeff Kirsher if ((dev->flags & IFF_PROMISC) &&
136211597885SJeff Kirsher !(dma->cf_cmd.i596_config[8] & 0x01)) {
136311597885SJeff Kirsher dma->cf_cmd.i596_config[8] |= 0x01;
136411597885SJeff Kirsher config = 1;
136511597885SJeff Kirsher }
136611597885SJeff Kirsher if (!(dev->flags & IFF_PROMISC) &&
136711597885SJeff Kirsher (dma->cf_cmd.i596_config[8] & 0x01)) {
136811597885SJeff Kirsher dma->cf_cmd.i596_config[8] &= ~0x01;
136911597885SJeff Kirsher config = 1;
137011597885SJeff Kirsher }
137111597885SJeff Kirsher if ((dev->flags & IFF_ALLMULTI) &&
137211597885SJeff Kirsher (dma->cf_cmd.i596_config[11] & 0x20)) {
137311597885SJeff Kirsher dma->cf_cmd.i596_config[11] &= ~0x20;
137411597885SJeff Kirsher config = 1;
137511597885SJeff Kirsher }
137611597885SJeff Kirsher if (!(dev->flags & IFF_ALLMULTI) &&
137711597885SJeff Kirsher !(dma->cf_cmd.i596_config[11] & 0x20)) {
137811597885SJeff Kirsher dma->cf_cmd.i596_config[11] |= 0x20;
137911597885SJeff Kirsher config = 1;
138011597885SJeff Kirsher }
138111597885SJeff Kirsher if (config) {
138211597885SJeff Kirsher if (dma->cf_cmd.cmd.command)
138311597885SJeff Kirsher printk(KERN_INFO
138411597885SJeff Kirsher "%s: config change request already queued\n",
138511597885SJeff Kirsher dev->name);
138611597885SJeff Kirsher else {
138711597885SJeff Kirsher dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
138800718b23SChristoph Hellwig dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
138911597885SJeff Kirsher i596_add_cmd(dev, &dma->cf_cmd.cmd);
139011597885SJeff Kirsher }
139111597885SJeff Kirsher }
139211597885SJeff Kirsher
139311597885SJeff Kirsher cnt = netdev_mc_count(dev);
139411597885SJeff Kirsher if (cnt > MAX_MC_CNT) {
139511597885SJeff Kirsher cnt = MAX_MC_CNT;
139611597885SJeff Kirsher printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
139711597885SJeff Kirsher dev->name, cnt);
139811597885SJeff Kirsher }
139911597885SJeff Kirsher
140011597885SJeff Kirsher if (!netdev_mc_empty(dev)) {
140111597885SJeff Kirsher struct netdev_hw_addr *ha;
140211597885SJeff Kirsher unsigned char *cp;
140311597885SJeff Kirsher struct mc_cmd *cmd;
140411597885SJeff Kirsher
140511597885SJeff Kirsher cmd = &dma->mc_cmd;
140611597885SJeff Kirsher cmd->cmd.command = SWAP16(CmdMulticastList);
140711597885SJeff Kirsher cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
140811597885SJeff Kirsher cp = cmd->mc_addrs;
140911597885SJeff Kirsher netdev_for_each_mc_addr(ha, dev) {
141011597885SJeff Kirsher if (!cnt--)
141111597885SJeff Kirsher break;
1412d458cdf7SJoe Perches memcpy(cp, ha->addr, ETH_ALEN);
141311597885SJeff Kirsher if (i596_debug > 1)
141411597885SJeff Kirsher DEB(DEB_MULTI,
141511597885SJeff Kirsher printk(KERN_DEBUG
141611597885SJeff Kirsher "%s: Adding address %pM\n",
141711597885SJeff Kirsher dev->name, cp));
1418d458cdf7SJoe Perches cp += ETH_ALEN;
141911597885SJeff Kirsher }
142000718b23SChristoph Hellwig dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
142111597885SJeff Kirsher i596_add_cmd(dev, &cmd->cmd);
142211597885SJeff Kirsher }
142311597885SJeff Kirsher }
1424