xref: /linux/drivers/net/ethernet/amd/lance.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2 /*
3 	Written/copyright 1993-1998 by Donald Becker.
4 
5 	Copyright 1993 United States Government as represented by the
6 	Director, National Security Agency.
7 	This software may be used and distributed according to the terms
8 	of the GNU General Public License, incorporated herein by reference.
9 
10 	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12 
13 	The author may be reached as becker@scyld.com, or C/O
14 	Scyld Computing Corporation
15 	410 Severn Ave., Suite 210
16 	Annapolis MD 21403
17 
18 	Andrey V. Savochkin:
19 	- alignment problem with 1.3.* kernel and some minor changes.
20 	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21 	- added support for Linux/Alpha, but removed most of it, because
22         it worked only for the PCI chip.
23       - added hook for the 32bit lance driver
24       - added PCnetPCI II (79C970A) to chip table
25 	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26 	- hopefully fix above so Linux/Alpha can use ISA cards too.
27     8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28     v1.12 10/27/97 Module support -djb
29     v1.14  2/3/98 Module support modified, made PCI support optional -djb
30     v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31                   before unregister_netdev() which caused NULL pointer
32                   reference later in the chain (in rtnetlink_fill_ifinfo())
33                   -- Mika Kuoppala <miku@iki.fi>
34 
35     Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36     the 2.1 version of the old driver - Alan Cox
37 
38     Get rid of check_region, check kmalloc return in lance_probe1
39     Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
40 
41 	Reworked detection, added support for Racal InterLan EtherBlaster cards
42 	Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
43 */
44 
45 static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46 
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/string.h>
50 #include <linux/delay.h>
51 #include <linux/errno.h>
52 #include <linux/ioport.h>
53 #include <linux/slab.h>
54 #include <linux/interrupt.h>
55 #include <linux/pci.h>
56 #include <linux/init.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/skbuff.h>
60 #include <linux/mm.h>
61 #include <linux/bitops.h>
62 #include <net/Space.h>
63 
64 #include <asm/io.h>
65 #include <asm/dma.h>
66 
67 static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
68 static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
69 static int __init do_lance_probe(struct net_device *dev);
70 
71 
/* Signature bytes read from PROM offsets 14/15; do_lance_probe() matches
 * these to recognise supported board families with minimal I/O reads. */
static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	//"normal"
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	//NI6510EB
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	//Racal InterLan EtherBlaster
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
/* Derive the count from the table itself so a new entry can never be
 * silently missed by the probe loops. */
#define NUM_CARDS (sizeof(cards) / sizeof(cards[0]))
90 
91 #ifdef LANCE_DEBUG
92 static int lance_debug = LANCE_DEBUG;
93 #else
94 static int lance_debug = 1;
95 #endif
96 
97 /*
98 				Theory of Operation
99 
100 I. Board Compatibility
101 
102 This device driver is designed for the AMD 79C960, the "PCnet-ISA
103 single-chip ethernet controller for ISA".  This chip is used in a wide
104 variety of boards from vendors such as Allied Telesis, HP, Kingston,
105 and Boca.  This driver is also intended to work with older AMD 7990
106 designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
107 I use the name LANCE to refer to all of the AMD chips, even though it properly
108 refers only to the original 7990.
109 
110 II. Board-specific settings
111 
The driver is designed to work with the boards that use the faster
113 bus-master mode, rather than in shared memory mode.	 (Only older designs
114 have on-board buffer memory needed to support the slower shared memory mode.)
115 
116 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
117 channel.  This driver probes the likely base addresses:
118 {0x300, 0x320, 0x340, 0x360}.
119 After the board is found it generates a DMA-timeout interrupt and uses
120 autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
121 of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
122 probed for by enabling each free DMA channel in turn and checking if
123 initialization succeeds.
124 
125 The HP-J2405A board is an exception: with this board it is easy to read the
126 EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
127 _know_ the base address -- that field is for writing the EEPROM.)
128 
129 III. Driver operation
130 
131 IIIa. Ring buffers
132 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
133 the base and length of the data buffer, along with status bits.	 The length
134 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
135 the buffer length (rather than being directly the buffer length) for
implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads to
ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
138 needlessly uses extra space and reduces the chance that an upper layer will
139 be able to reorder queued Tx packets based on priority.	 Decreasing the number
140 of entries makes it more difficult to achieve back-to-back packet transmission
141 and increases the chance that Rx ring will overflow.  (Consider the worst case
142 of receiving back-to-back minimum-sized packets.)
143 
144 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
145 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
146 avoid the administrative overhead. For the Rx side this avoids dynamically
147 allocating full-sized buffers "just in case", at the expense of a
148 memory-to-memory data copy for each packet received.  For most systems this
149 is a good tradeoff: the Rx buffer will always be in low memory, the copy
150 is inexpensive, and it primes the cache for later packet processing.  For Tx
151 the buffers are only used when needed as low-memory bounce buffers.
152 
153 IIIB. 16M memory limitations.
154 For the ISA bus master mode all structures used directly by the LANCE,
155 the initialization block, Rx and Tx rings, and data buffers, must be
156 accessible from the ISA bus, i.e. in the lower 16M of real memory.
157 This is a problem for current Linux kernels on >16M machines. The network
158 devices are initialized after memory initialization, and the kernel doles out
159 memory from the top of memory downward.	 The current solution is to have a
160 special network initialization routine that's called before memory
161 initialization; this will eventually be generalized for all network devices.
162 As mentioned before, low-memory "bounce-buffers" are used when needed.
163 
164 IIIC. Synchronization
165 The driver runs as two independent, single-threaded flows of control.  One
166 is the send-packet routine, which enforces single-threaded use by the
167 dev->tbusy flag.  The other thread is the interrupt handler, which is single
168 threaded by the hardware and other software.
169 
170 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173 the 'lp->tx_full' flag.
174 
175 The interrupt handler has exclusive control over the Rx ring and records stats
176 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
177 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178 stats.)	 After reaping the stats, it marks the queue entry as empty by setting
179 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
180 tx_full and tbusy flags.
181 
182 */
183 
184 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
185    Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
186    That translates to 4 and 4 (16 == 2^^4).
187    This is a compile-time option for efficiency.
188    */
189 #ifndef LANCE_LOG_TX_BUFFERS
190 #define LANCE_LOG_TX_BUFFERS 4
191 #define LANCE_LOG_RX_BUFFERS 4
192 #endif
193 
194 #define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
195 #define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
196 #define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)
197 
198 #define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
199 #define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
200 #define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
201 
202 #define PKT_BUF_SZ		1544
203 
204 /* Offsets from base I/O address. */
205 #define LANCE_DATA 0x10
206 #define LANCE_ADDR 0x12
207 #define LANCE_RESET 0x14
208 #define LANCE_BUS_IF 0x16
209 #define LANCE_TOTAL_SIZE 0x18
210 
211 #define TX_TIMEOUT	(HZ/5)
212 
213 /* The LANCE Rx and Tx ring descriptors. */
/* LANCE Rx ring descriptor — layout is dictated by the chip (AMD 7990
 * family databook); do not reorder or resize fields. */
struct lance_rx_head {
	s32 base;		/* Buffer bus address (low 24 bits) plus status/OWN bits. */
	s16 buf_length;			/* This length is 2s complement (negative)! */
	s16 msg_length;			/* This length is "normal". */
};
219 
/* LANCE Tx ring descriptor — hardware-defined layout; do not change. */
struct lance_tx_head {
	s32 base;		/* Buffer bus address (low 24 bits) plus status/OWN bits. */
	s16 length;				/* Length is 2s complement (negative)! */
	s16 misc;		/* Error/status bits written back by the chip. */
};
225 
226 /* The LANCE initialization block, described in databook. */
/* The LANCE initialization block, described in databook.  The chip DMA-reads
 * this structure during initialization, so it must live in ISA-reachable
 * (low 16MB) memory — see the GFP_DMA allocation in lance_probe1(). */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6]; /* Physical ethernet address */
	u32 filter[2];			/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits. */
	u32  rx_ring;			/* Tx and Rx ring base pointers:
					   low 24 bits bus address, top 3 bits log2(ring size). */
	u32  tx_ring;
};
235 
/* Per-device driver state.  Allocated with GFP_DMA (not via alloc_etherdev)
 * so the rings and init block below are reachable by the ISA bus master. */
struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block	init_block;
	const char *name;		/* Chip name string from chip_table[]. */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];	/* NULL when no bounce needed (<16MB RAM). */
	int cur_rx, cur_tx;			/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;			/* ISA DMA channel; 4 means bus master, none used. */
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;	/* NOTE(review): appears to guard ring state shared
				   with the IRQ handler — confirm in xmit path. */
};
255 
256 #define LANCE_MUST_PAD          0x00000001
257 #define LANCE_ENABLE_AUTOSELECT 0x00000002
258 #define LANCE_MUST_REINIT_RING  0x00000004
259 #define LANCE_MUST_UNRESET      0x00000008
260 #define LANCE_HAS_MISSED_FRAME  0x00000010
261 
262 /* A mapping from the chip ID number to the part number and features.
263    These are from the datasheets -- in real life the '970 version
264    reportedly has the same ID as the '965. */
/* Table of known chip variants; entry order must match the index enum
 * below.  The flags are distinct single bits, so combining them with '+'
 * here is equivalent to '|'.  The last (id_number == 0) entry is the
 * catch-all sentinel for unrecognised newer chips. */
static struct lance_chip_type {
	int id_number;		/* Part ID from CSR88/89 (0 = old 7990 / sentinel). */
	const char *name;
	int flags;		/* LANCE_* feature/quirk bits above. */
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCInetPCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0, 	 "PCnet (unknown)",		/* Sentinel: terminates ID search loops. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};
293 
294 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
295 
296 
297 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
298    Assume yes until we know the memory size. */
299 static unsigned char lance_need_isa_bounce_buffers = 1;
300 
301 static int lance_open(struct net_device *dev);
302 static void lance_init_ring(struct net_device *dev, gfp_t mode);
303 static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
304 				    struct net_device *dev);
305 static int lance_rx(struct net_device *dev);
306 static irqreturn_t lance_interrupt(int irq, void *dev_id);
307 static int lance_close(struct net_device *dev);
308 static struct net_device_stats *lance_get_stats(struct net_device *dev);
309 static void set_multicast_list(struct net_device *dev);
310 static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);
311 
312 
313 
314 #ifdef MODULE
315 #define MAX_CARDS		8	/* Max number of interfaces (cards) per module */
316 
317 static struct net_device *dev_lance[MAX_CARDS];
318 static int io[MAX_CARDS];
319 static int dma[MAX_CARDS];
320 static int irq[MAX_CARDS];
321 
322 module_param_hw_array(io, int, ioport, NULL, 0);
323 module_param_hw_array(dma, int, dma, NULL, 0);
324 module_param_hw_array(irq, int, irq, NULL, 0);
325 module_param(lance_debug, int, 0);
326 MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
327 MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
328 MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
329 MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
330 
/* Module load entry point: probe one card per "io=" value supplied on the
 * module command line.  Blind autoprobing is refused (-EPERM) because ISA
 * probes at arbitrary ports can hang the machine.
 * Returns 0 if at least one card was found and registered, else -ENXIO. */
static int __init lance_init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		if (io[this_dev] == 0)  {
			if (this_dev != 0) /* only complain once */
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}
		dev = alloc_etherdev(0);
		if (!dev)
			break;
		/* Seed the device with user-supplied resources; the probe may
		   override IRQ/DMA for auto-configuring chips. */
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->dma = dma[this_dev];
		if (do_lance_probe(dev) == 0) {
			dev_lance[found++] = dev;
			continue;
		}
		free_netdev(dev);
		break;		/* First failed probe stops further scanning. */
	}
	if (found != 0)
		return 0;
	return -ENXIO;
}
360 module_init(lance_init_module);
361 
/* Release everything lance_probe1() acquired for one card: the ISA DMA
 * channel (4 means native bus master — none was allocated), the I/O
 * region, and the kmalloc'ed DMA buffers and private state. */
static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);	/* may be NULL; kfree(NULL) is a no-op */
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}
372 
lance_cleanup_module(void)373 static void __exit lance_cleanup_module(void)
374 {
375 	int this_dev;
376 
377 	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
378 		struct net_device *dev = dev_lance[this_dev];
379 		if (dev) {
380 			unregister_netdev(dev);
381 			cleanup_card(dev);
382 			free_netdev(dev);
383 		}
384 	}
385 }
386 module_exit(lance_cleanup_module);
387 #endif /* MODULE */
388 MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver");
389 MODULE_LICENSE("GPL");
390 
391 
392 /* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
393    board probes now that kmalloc() can allocate ISA DMA-able regions.
394    This also allows the LANCE driver to be used as a module.
395    */
/* Walk the list of likely I/O base addresses and try to detect a board at
 * each.  Detection reads signature byte 14 first (one inb) and only reads
 * byte 15 when the first byte matched some known card, keeping probe-time
 * I/O minimal.  On success the I/O region stays reserved (renamed to the
 * detected chip) and 0 is returned; otherwise -ENODEV. */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	/* Bounce buffers are only needed when RAM exists above the 16MB
	   ISA DMA limit. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {/*yes, the first byte matches*/
				/* Re-scan requiring both signature bytes. */
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					/* Keep the region reserved, labelled
					   with the chip actually found. */
					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
438 
439 #ifndef MODULE
lance_probe(int unit)440 struct net_device * __init lance_probe(int unit)
441 {
442 	struct net_device *dev = alloc_etherdev(0);
443 	int err;
444 
445 	if (!dev)
446 		return ERR_PTR(-ENODEV);
447 
448 	sprintf(dev->name, "eth%d", unit);
449 	netdev_boot_setup_check(dev);
450 
451 	err = do_lance_probe(dev);
452 	if (err)
453 		goto out;
454 	return dev;
455 out:
456 	free_netdev(dev);
457 	return ERR_PTR(err);
458 }
459 #endif
460 
/* Standard netdev callbacks; installed on the device in lance_probe1(). */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open 		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
471 
/* Probe and initialize the board at @ioaddr.  A non-zero @irq marks a PCI
 * card (native bus master, no ISA DMA channel); @options is currently
 * unused.  The register access order below is significant — do not
 * reorder.  On success the interface is registered and 0 is returned; on
 * failure all partially-acquired resources are released (the I/O region
 * itself is owned and released by the caller) and a negative errno is
 * returned. */
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* Mark spuriously-busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Flags for specific chips or boards. */
	unsigned char hpJ2405A = 0;	/* HP ISA adaptor */
	int hp_builtin = 0;		/* HP on-board ethernet. */
	static int did_version;		/* Already printed version info. */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;
	u8 addr[ETH_ALEN];

	/* First we look for special cases.
	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
	   There are two HP versions, check the BIOS for the configuration port.
	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
	   */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
	if (readw(bios + 0x12) == 0x5048)  {	/* 0x5048 == "HP" */
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
		/* We can have boards other than the built-in!  Verify this is on-board. */
		if ((inb(hp_port) & 0xc0) == 0x80 &&
		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* We also recognize the HP Vectra on-board here, but check below. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
		    inb(ioaddr+2) == 0x09);

	/* Reset the LANCE.	 */
	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */

	/* The Un-Reset needed is only needed for the real NE2100, and will
	   confuse the HP board. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
	/* After reset, CSR0 must read back exactly the STOP bit. */
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

	/* Get the version of the chip. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;	/* Old 7990: CSR88 not implemented. */
	} else {			/* Good, it's a newer chip. */
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk("  LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		/* Falls through to the id_number==0 sentinel if unknown. */
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	/* We can't allocate private data from alloc_etherdev() because it must
	   be in an ISA DMA-able region. */
	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* There is a 16 byte station address PROM at the base address.
	   The first six bytes are the station address. */
	for (i = 0; i < 6; i++)
		addr[i] = inb(ioaddr + i);
	eth_hw_addr_set(dev, addr);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;
	/* Make certain the data structures used by the LANCE are aligned and DMAble. */

	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	/* Ring base = 24-bit bus address; top 3 bits encode log2(ring size). */
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

	/* Point CSR1/CSR2 at the init block (low/high halves of the address). */
	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {					/* Set iff PCI card. */
		dev->dma = 4;			/* Native bus-master, no DMA channel needed. */
		dev->irq = irq;
	} else if (hp_builtin) {
		/* HP Vectra: IRQ/DMA encoded in the configuration port. */
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		/* HP J2405A: IRQ/DMA are EEPROM-set, read via the reset reg. */
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {		/* The plug-n-play version. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may be passed in PARAM1. */
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Read the DMA channel status register, so that we can avoid
		   stuck DMA channels in the DMA detection below. */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0)  {	/* 7990 boards need DMA detection first. */
		unsigned long irq_mask;

		/* To auto-IRQ we enable the initialization-done and DMA error
		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
		   boards will work. */
		irq_mask = probe_irq_on();

		/* Trigger an initialization just for the interrupt. */
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* Check for the initialization done bit, 0x0100, which means
		   that we don't need a DMA channel. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		/* A specific channel was configured — just claim it. */
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {			/* OK, we have to auto-DMA. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Don't enable a permanently busy DMA channel, or the machine
			   will hang. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
			if (request_dma(dma, chipname))
				continue;

			flags=claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				/* Init completed — this channel works. */
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags=claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {			/* Failure: bail. */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* We may auto-IRQ now that we have a DMA channel. */
		/* Trigger an initialization just for the interrupt. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk("  Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on auto-select of media (10baseT or BNC) so that the user
		   can watch the LEDs even if the board isn't opened. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Don't touch 10base2 power bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0  &&  did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);	/* kfree(NULL) is a no-op */
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}
752 
753 
/* ndo_open: claim the IRQ, set up the cascade-mode DMA channel, rebuild
 * the rings, and (re)initialize and start the chip.  The CSR programming
 * order below follows the databook and must not be rearranged.
 * Returns 0 on success, -EAGAIN if no IRQ is set or request_irq fails. */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
		           (u32) isa_virt_to_bus(lp->tx_ring),
		           (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done. */
	/* CSR1/CSR2: low/high halves of the init block bus address. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	/* CSR0: set INIT to start chip initialization from the init block. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	/* Busy-wait (bounded) for the init-done bit, 0x0100, in CSR0. */
	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	outw(0x0042, ioaddr+LANCE_DATA);	/* Enable interrupts and start. */

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;					/* Always succeed */
}
829 
830 /* The LANCE has been halted for one reason or another (busmaster memory
831    arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
832    etc.).  Modern LANCE variants always reload their ring-buffer
833    configuration when restarted, so we must reinitialize our ring
834    context before restarting.  As part of this reinitialization,
835    find all packets still on the Tx ring and pretend that they had been
836    sent (in effect, drop the packets on the floor) - the higher-level
837    protocols will time out and retransmit.  It'd be better to shuffle
838    these skbs to a temp list and then actually re-Tx them after
839    restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
840 */
841 
842 static void
lance_purge_ring(struct net_device * dev)843 lance_purge_ring(struct net_device *dev)
844 {
845 	struct lance_private *lp = dev->ml_priv;
846 	int i;
847 
848 	/* Free all the skbuffs in the Rx and Tx queues. */
849 	for (i = 0; i < RX_RING_SIZE; i++) {
850 		struct sk_buff *skb = lp->rx_skbuff[i];
851 		lp->rx_skbuff[i] = NULL;
852 		lp->rx_ring[i].base = 0;		/* Not owned by LANCE chip. */
853 		if (skb)
854 			dev_kfree_skb_any(skb);
855 	}
856 	for (i = 0; i < TX_RING_SIZE; i++) {
857 		if (lp->tx_skbuff[i]) {
858 			dev_kfree_skb_any(lp->tx_skbuff[i]);
859 			lp->tx_skbuff[i] = NULL;
860 		}
861 	}
862 }
863 
864 
865 /* Initialize the LANCE Rx and Tx rings. */
866 static void
lance_init_ring(struct net_device * dev,gfp_t gfp)867 lance_init_ring(struct net_device *dev, gfp_t gfp)
868 {
869 	struct lance_private *lp = dev->ml_priv;
870 	int i;
871 
872 	lp->cur_rx = lp->cur_tx = 0;
873 	lp->dirty_rx = lp->dirty_tx = 0;
874 
875 	for (i = 0; i < RX_RING_SIZE; i++) {
876 		struct sk_buff *skb;
877 		void *rx_buff;
878 
879 		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
880 		lp->rx_skbuff[i] = skb;
881 		if (skb)
882 			rx_buff = skb->data;
883 		else
884 			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
885 		if (!rx_buff)
886 			lp->rx_ring[i].base = 0;
887 		else
888 			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
889 		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
890 	}
891 	/* The Tx buffer address is filled in as needed, but we do need to clear
892 	   the upper ownership bit. */
893 	for (i = 0; i < TX_RING_SIZE; i++) {
894 		lp->tx_skbuff[i] = NULL;
895 		lp->tx_ring[i].base = 0;
896 	}
897 
898 	lp->init_block.mode = 0x0000;
899 	for (i = 0; i < 6; i++)
900 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
901 	lp->init_block.filter[0] = 0x00000000;
902 	lp->init_block.filter[1] = 0x00000000;
903 	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
904 	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
905 }
906 
907 static void
lance_restart(struct net_device * dev,unsigned int csr0_bits,int must_reinit)908 lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
909 {
910 	struct lance_private *lp = dev->ml_priv;
911 
912 	if (must_reinit ||
913 		(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
914 		lance_purge_ring(dev);
915 		lance_init_ring(dev, GFP_ATOMIC);
916 	}
917 	outw(0x0000,    dev->base_addr + LANCE_ADDR);
918 	outw(csr0_bits, dev->base_addr + LANCE_DATA);
919 }
920 
921 
/* netdev watchdog hook: a transmit failed to complete in time.
 * Logs CSR0, stops the chip, optionally dumps both rings, then
 * restarts with a forced ring re-initialization and wakes the queue.
 */
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Select CSR0 so the status read and STOP write below hit it. */
	outw (0, ioaddr + LANCE_ADDR);
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	/* 0x0004 halts the chip (same stop value used in lance_close()). */
	outw (0x0004, ioaddr + LANCE_DATA);
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		/* Lengths are stored two's-complement, hence the negation. */
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	/* Restart with CSR0 = 0x0043; must_reinit=1 forces a ring rebuild. */
	lance_restart (dev, 0x0043, 1);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}
954 
955 
/* Queue one packet for transmission.
 *
 * Fills the next Tx descriptor and hands it to the chip by writing the
 * base field — which carries the ownership bit — last.  Data that lies
 * above the 16MB ISA DMA limit is copied into a per-entry bounce
 * buffer.  Runs under lp->devlock with interrupts disabled; always
 * returns NETDEV_TX_OK (the queue is stopped here when the ring fills).
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);	/* select CSR0 */
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;	/* skb already freed by skb_padto() */
			lp->tx_ring[entry].length = -ETH_ZLEN;	/* two's-complement length */
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		/* Data was copied; the skb itself can be released now. */
		dev_consume_skb_irq(skb);
	} else {
		/* Chip DMAs straight out of the skb; freed in lance_interrupt(). */
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	/* Whole ring now in flight: stop accepting packets until Tx-done. */
	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
1024 
/* The LANCE interrupt handler.
 *
 * Services Rx and Tx-done events and error conditions reported in
 * CSR0, restarting the chip after fatal errors (Tx FIFO underflow,
 * bus-master arbitration failure).  The loop is bounded by boguscnt
 * to guard against a stuck interrupt line.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	/* Select CSR0; loop while any interrupt-source bit remains set. */
	outw(0x00, dev->base_addr + LANCE_ADDR);
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)			/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			/* Reap every descriptor the chip has handed back
			   (ownership bit clear => base is non-negative). */
			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;			/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was an major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_consume_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			/* Sanity check: dirty_tx should never lag cur_tx by a
			   full ring; resynchronize if it does. */
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
1148 
/* Drain completed Rx descriptors starting at cur_rx and pass the
 * frames up the stack.  A descriptor is ours once the chip has cleared
 * the ownership bit (bit 31), i.e. base >= 0.  Called from
 * lance_interrupt() with lp->devlock held.  Always returns 0.
 */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;	/* descriptor status byte */

		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			/* Clear the error bits but keep the buffer address. */
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3. */
			/* msg_length includes the 4-byte FCS; strip it here. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (!skb)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					/* Count free descriptors ahead of us; if almost
					   none remain, drop this frame to keep the ring
					   moving, otherwise leave it for a retry on the
					   next interrupt. */
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE -2)
					{
						dev->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;	/* give back to chip */
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* 16 byte align */
				skb_put(skb,pkt_len);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;	/* hand descriptor back to the chip */
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free.	 If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}
1229 
/* ifdown: stop the transmit queue, record the final missed-frame count
 * on chips that keep one, halt the chip, release the ISA DMA channel
 * and IRQ, and free the ring buffers.
 */
static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);	/* register 112: missed-frame count */
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);	/* select CSR0 for the reads/writes below */

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	outw(0x0004, ioaddr+LANCE_DATA);

	/* NOTE(review): dma == 4 appears to be this driver's sentinel for
	   "no ISA DMA channel claimed" — confirm against lance_open(). */
	if (dev->dma != 4)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}
1264 
lance_get_stats(struct net_device * dev)1265 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1266 {
1267 	struct lance_private *lp = dev->ml_priv;
1268 
1269 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1270 		short ioaddr = dev->base_addr;
1271 		short saved_addr;
1272 		unsigned long flags;
1273 
1274 		spin_lock_irqsave(&lp->devlock, flags);
1275 		saved_addr = inw(ioaddr+LANCE_ADDR);
1276 		outw(112, ioaddr+LANCE_ADDR);
1277 		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1278 		outw(saved_addr, ioaddr+LANCE_ADDR);
1279 		spin_unlock_irqrestore(&lp->devlock, flags);
1280 	}
1281 
1282 	return &dev->stats;
1283 }
1284 
/* Set or clear the multicast filter for this adaptor.
 *
 * The chip is stopped, the promiscuous bit and the logical-address
 * filter registers are programmed, and then lance_restart() resumes
 * normal operation.
 */

static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.	 */

	if (dev->flags&IFF_PROMISC) {
		outw(15, ioaddr+LANCE_ADDR);	/* register 15 holds the mode bits */
		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int i;
		int num_addrs=netdev_mc_count(dev);
		if(dev->flags&IFF_ALLMULTI)
			num_addrs=1;	/* treat all-multi as "accept all multicast" */
		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
		/* All-ones filter accepts every multicast frame, all-zeroes none. */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			/* Filter words live in registers 8..11. */
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
	}

	lance_restart(dev, 0x0142, 0); /*  Resume normal operation */

}
1317 
1318