xref: /linux/drivers/net/ethernet/3com/3c59x.c (revision b04df400c30235fa347313c9e2a0695549bd2c8e)
1 /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
2 /*
3 	Written 1996-1999 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms
6 	of the GNU General Public License, incorporated herein by reference.
7 
8 	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
9 	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
10 	and the EtherLink XL 3c900 and 3c905 cards.
11 
12 	Problem reports and questions should be directed to
13 	vortex@scyld.com
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 */
21 
22 /*
23  * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
24  * as well as other drivers
25  *
26  * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
27  * due to dead code elimination.  There will be some performance benefits from this due to
28  * elimination of all the tests and reduced cache footprint.
29  */
30 
31 
32 #define DRV_NAME	"3c59x"
33 
34 
35 
36 /* A few values that may be tweaked. */
37 /* Keep the ring sizes a power of two for efficiency. */
38 #define TX_RING_SIZE	16
39 #define RX_RING_SIZE	32
40 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
41 
42 /* "Knobs" that adjust features and parameters. */
43 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
44    Setting to > 1512 effectively disables this feature. */
45 #ifndef __arm__
46 static int rx_copybreak = 200;
47 #else
48 /* ARM systems perform better by disregarding the bus-master
49    transfer capability of these cards. -- rmk */
50 static int rx_copybreak = 1513;
51 #endif
52 /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
53 static const int mtu = 1500;
54 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
55 static int max_interrupt_work = 32;
56 /* Tx timeout interval (millisecs) */
57 static int watchdog = 5000;
58 
59 /* Allow aggregation of Tx interrupts.  Saves CPU load at the cost
60  * of possible Tx stalls if the system is blocking interrupts
61  * somewhere else.  Undefine this to disable.
62  */
63 #define tx_interrupt_mitigation 1
64 
65 /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
66 #define vortex_debug debug
67 #ifdef VORTEX_DEBUG
68 static int vortex_debug = VORTEX_DEBUG;
69 #else
70 static int vortex_debug = 1;
71 #endif
72 
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/string.h>
76 #include <linux/timer.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/ioport.h>
80 #include <linux/interrupt.h>
81 #include <linux/pci.h>
82 #include <linux/mii.h>
83 #include <linux/init.h>
84 #include <linux/netdevice.h>
85 #include <linux/etherdevice.h>
86 #include <linux/skbuff.h>
87 #include <linux/ethtool.h>
88 #include <linux/highmem.h>
89 #include <linux/eisa.h>
90 #include <linux/bitops.h>
91 #include <linux/jiffies.h>
92 #include <linux/gfp.h>
93 #include <asm/irq.h>			/* For nr_irqs only. */
94 #include <asm/io.h>
95 #include <linux/uaccess.h>
96 
97 /* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
98    This is only in the support-all-kernels source code. */
99 
100 #define RUN_AT(x) (jiffies + (x))
101 
102 #include <linux/delay.h>
103 
104 
105 static const char version[] =
106 	DRV_NAME ": Donald Becker and others.\n";
107 
108 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109 MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
110 MODULE_LICENSE("GPL");
111 
112 
/* Operational parameters that are usually not changed. */
114 
115 /* The Vortex size is twice that of the original EtherLinkIII series: the
116    runtime register window, window 1, is now always mapped in.
117    The Boomerang size is twice as large as the Vortex -- it has additional
118    bus master control registers. */
119 #define VORTEX_TOTAL_SIZE 0x20
120 #define BOOMERANG_TOTAL_SIZE 0x40
121 
/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with the original DP83840 on older 3c905 boards, so the extra
   code size of a per-interface flag is not worthwhile. */
125 static char mii_preamble_required;
126 
127 #define PFX DRV_NAME ": "
128 
129 
130 
131 /*
132 				Theory of Operation
133 
134 I. Board Compatibility
135 
136 This device driver is designed for the 3Com FastEtherLink and FastEtherLink
137 XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbs
138 versions of the FastEtherLink cards.  The supported product IDs are
139   3c590, 3c592, 3c595, 3c597, 3c900, 3c905
140 
141 The related ISA 3c515 is supported with a separate driver, 3c515.c, included
142 with the kernel source or available from
143     cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
144 
145 II. Board-specific settings
146 
147 PCI bus devices are configured by the system at boot time, so no jumpers
148 need to be set on the board.  The system BIOS should be set to assign the
149 PCI INTA signal to an otherwise unused system IRQ line.
150 
151 The EEPROM settings for media type and forced-full-duplex are observed.
152 The EEPROM media type should be left at the default "autoselect" unless using
153 10base2 or AUI connections which cannot be reliably detected.
154 
155 III. Driver operation
156 
157 The 3c59x series use an interface that's very similar to the previous 3c5x9
158 series.  The primary interface is two programmed-I/O FIFOs, with an
159 alternate single-contiguous-region bus-master transfer (see next).
160 
161 The 3c900 "Boomerang" series uses a full-bus-master interface with separate
162 lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
163 DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
164 programmed-I/O interface that has been removed in 'B' and subsequent board
165 revisions.
166 
167 One extension that is advertised in a very large font is that the adapters
168 are capable of being bus masters.  On the Vortex chip this capability was
169 only for a single contiguous region making it far less useful than the full
170 bus master capability.  There is a significant performance impact of taking
171 an extra interrupt or polling for the completion of each transfer, as well
172 as difficulty sharing the single transfer engine between the transmit and
173 receive threads.  Using DMA transfers is a win only with large blocks or
174 with the flawed versions of the Intel Orion motherboard PCI controller.
175 
176 The Boomerang chip's full-bus-master interface is useful, and has the
177 currently-unused advantages over other similar chips that queued transmit
178 packets may be reordered and receive buffer groups are associated with a
179 single frame.
180 
181 With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
182 Rather than a fixed intermediate receive buffer, this scheme allocates
183 full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
184 the copying breakpoint: it is chosen to trade-off the memory wasted by
185 passing the full-sized skbuff to the queue layer for all frames vs. the
186 copying cost of copying a frame to a correctly-sized skbuff.
187 
188 IIIC. Synchronization
189 The driver runs as two independent, single-threaded flows of control.  One
190 is the send-packet routine, which enforces single-threaded use by the
191 dev->tbusy flag.  The other thread is the interrupt handler, which is single
192 threaded by the hardware and other software.
193 
194 IV. Notes
195 
196 Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
197 3c590, 3c595, and 3c900 boards.
198 The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
199 the EISA version is called "Demon".  According to Terry these names come
200 from rides at the local amusement park.
201 
202 The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
203 This driver only supports ethernet packets because of the skbuff allocation
204 limit of 4K.
205 */
206 
207 /* This table drives the PCI probe routines.  It's mostly boilerplate in all
208    of the drivers, and will likely be provided by some future kernel.
209 */
210 enum pci_flags_bit {
211 	PCI_USES_MASTER=4,
212 };
213 
214 enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
215 	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
216 	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
217 	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
218 	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
219 	EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
220 
221 enum vortex_chips {
222 	CH_3C590 = 0,
223 	CH_3C592,
224 	CH_3C597,
225 	CH_3C595_1,
226 	CH_3C595_2,
227 
228 	CH_3C595_3,
229 	CH_3C900_1,
230 	CH_3C900_2,
231 	CH_3C900_3,
232 	CH_3C900_4,
233 
234 	CH_3C900_5,
235 	CH_3C900B_FL,
236 	CH_3C905_1,
237 	CH_3C905_2,
238 	CH_3C905B_TX,
239 	CH_3C905B_1,
240 
241 	CH_3C905B_2,
242 	CH_3C905B_FX,
243 	CH_3C905C,
244 	CH_3C9202,
245 	CH_3C980,
246 	CH_3C9805,
247 
248 	CH_3CSOHO100_TX,
249 	CH_3C555,
250 	CH_3C556,
251 	CH_3C556B,
252 	CH_3C575,
253 
254 	CH_3C575_1,
255 	CH_3CCFE575,
256 	CH_3CCFE575CT,
257 	CH_3CCFE656,
258 	CH_3CCFEM656,
259 
260 	CH_3CCFEM656_1,
261 	CH_3C450,
262 	CH_3C920,
263 	CH_3C982A,
264 	CH_3C982B,
265 
266 	CH_905BT4,
267 	CH_920B_EMB_WNM,
268 };
269 
270 
271 /* note: this array directly indexed by above enums, and MUST
272  * be kept in sync with both the enums above, and the PCI device
273  * table below
274  */
275 static struct vortex_chip_info {
276 	const char *name;
277 	int flags;
278 	int drv_flags;
279 	int io_size;
280 } vortex_info_tbl[] = {
281 	{"3c590 Vortex 10Mbps",
282 	 PCI_USES_MASTER, IS_VORTEX, 32, },
283 	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
284 	 PCI_USES_MASTER, IS_VORTEX, 32, },
285 	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
286 	 PCI_USES_MASTER, IS_VORTEX, 32, },
287 	{"3c595 Vortex 100baseTx",
288 	 PCI_USES_MASTER, IS_VORTEX, 32, },
289 	{"3c595 Vortex 100baseT4",
290 	 PCI_USES_MASTER, IS_VORTEX, 32, },
291 
292 	{"3c595 Vortex 100base-MII",
293 	 PCI_USES_MASTER, IS_VORTEX, 32, },
294 	{"3c900 Boomerang 10baseT",
295 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
296 	{"3c900 Boomerang 10Mbps Combo",
297 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
298 	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
299 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
300 	{"3c900 Cyclone 10Mbps Combo",
301 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
302 
303 	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
304 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
305 	{"3c900B-FL Cyclone 10base-FL",
306 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
307 	{"3c905 Boomerang 100baseTx",
308 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
309 	{"3c905 Boomerang 100baseT4",
310 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
311 	{"3C905B-TX Fast Etherlink XL PCI",
312 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
313 	{"3c905B Cyclone 100baseTx",
314 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
315 
316 	{"3c905B Cyclone 10/100/BNC",
317 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
318 	{"3c905B-FX Cyclone 100baseFx",
319 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
320 	{"3c905C Tornado",
321 	PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
322 	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
323 	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
324 	{"3c980 Cyclone",
325 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
326 
327 	{"3c980C Python-T",
328 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
329 	{"3cSOHO100-TX Hurricane",
330 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
331 	{"3c555 Laptop Hurricane",
332 	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
333 	{"3c556 Laptop Tornado",
334 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
335 									HAS_HWCKSM, 128, },
336 	{"3c556B Laptop Hurricane",
337 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
338 	                                WNO_XCVR_PWR|HAS_HWCKSM, 128, },
339 
340 	{"3c575 [Megahertz] 10/100 LAN 	CardBus",
341 	PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
342 	{"3c575 Boomerang CardBus",
343 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
344 	{"3CCFE575BT Cyclone CardBus",
345 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
346 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
347 	{"3CCFE575CT Tornado CardBus",
348 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
349 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
350 	{"3CCFE656 Cyclone CardBus",
351 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
352 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
353 
354 	{"3CCFEM656B Cyclone+Winmodem CardBus",
355 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
356 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
357 	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
358 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
359 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
360 	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
361 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
362 	{"3c920 Tornado",
363 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
364 	{"3c982 Hydra Dual Port A",
365 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
366 
367 	{"3c982 Hydra Dual Port B",
368 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
369 	{"3c905B-T4",
370 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
371 	{"3c920B-EMB-WNM Tornado",
372 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
373 
374 	{NULL,}, /* NULL terminated list. */
375 };
376 
377 
378 static const struct pci_device_id vortex_pci_tbl[] = {
379 	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380 	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381 	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
382 	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
383 	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
384 
385 	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
386 	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
387 	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
388 	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
389 	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
390 
391 	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
392 	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
393 	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
394 	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
395 	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
396 	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
397 
398 	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
399 	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
400 	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
401 	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
402 	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
403 	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
404 
405 	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
406 	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
407 	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
408 	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
409 	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
410 
411 	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
412 	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
413 	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
414 	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
415 	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
416 
417 	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
418 	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
419 	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
420 	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
421 	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
422 
423 	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
424 	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
425 
426 	{0,}						/* 0 terminated list. */
427 };
428 MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
429 
430 
431 /* Operational definitions.
432    These are not used by other compilation units and thus are not
433    exported in a ".h" file.
434 
435    First the windows.  There are eight register windows, with the command
436    and status registers available in each.
437    */
438 #define EL3_CMD 0x0e
439 #define EL3_STATUS 0x0e
440 
441 /* The top five bits written to EL3_CMD are a command, the lower
442    11 bits are the parameter, if applicable.
443    Note that 11 parameters bits was fine for ethernet, but the new chip
444    can handle FDDI length frames (~4500 octets) and now parameters count
445    32-bit 'Dwords' rather than octets. */
446 
enum vortex_cmd {
	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
	/* Command 6<<11 stalls/unstalls the bus-master DMA queues, with the
	 * queue and direction selected by the two low parameter bits.
	 * "Up" and "Down" presumably refer to the receive and transmit DMA
	 * queues (cf. the Up*/Down* registers in enum MasterCtrl). */
	UpStall = 6<<11, UpUnstall = (6<<11)+1,
	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
458 
459 /* The SetRxFilter command accepts the following classes: */
460 enum RxFilter {
461 	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
462 
463 /* Bits in the general status register. */
464 enum vortex_status {
465 	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
466 	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
467 	IntReq = 0x0040, StatsFull = 0x0080,
468 	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
469 	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
470 	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
471 };
472 
473 /* Register window 1 offsets, the window used in normal operation.
474    On the Vortex this window is always mapped at offsets 0x10-0x1f. */
475 enum Window1 {
476 	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
477 	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
478 	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
479 };
480 enum Window0 {
481 	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
482 	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
483 	IntrStatus=0x0E,		/* Valid in all windows. */
484 };
485 enum Win0_EEPROM_bits {
486 	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
487 	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
488 	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
489 };
490 /* EEPROM locations. */
491 enum eeprom_offset {
492 	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
493 	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
494 	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
495 	DriverTune=13, Checksum=15};
496 
497 enum Window2 {			/* Window 2. */
498 	Wn2_ResetOptions=12,
499 };
500 enum Window3 {			/* Window 3: MAC/config bits. */
501 	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
502 };
503 
/* Generic bit-field helpers: extract/insert 'bitcount' bits at 'offset'. */
#define BFEXT(value, offset, bitcount)  \
    ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

#define BFINS(lhs, rhs, offset, bitcount)					\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Fields decoded from the chip's internal configuration word
 * (presumably the Wn3_Config register -- confirm in the probe code). */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
#define RAM_SPEED(v)	BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)	BFEXT(v, 24, 1)
518 
519 enum Window4 {		/* Window 4: Xcvr/media bits. */
520 	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
521 };
522 enum Win4_Media_bits {
523 	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
524 	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
525 	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
526 	Media_LnkBeat = 0x0800,
527 };
528 enum Window7 {					/* Window 7: Bus Master control. */
529 	Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
530 	Wn7_MasterStatus = 12,
531 };
532 /* Boomerang bus master control registers. */
533 enum MasterCtrl {
534 	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
535 	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
536 };
537 
/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment constraint on tx_ring[] and rx_ring[]. */
541 #define LAST_FRAG 	0x80000000			/* Last Addr/Len pair in descriptor. */
542 #define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
/* Boomerang/Cyclone bus-master receive descriptor.  All fields are
 * little-endian 32-bit values shared with the NIC's DMA engine. */
struct boom_rx_desc {
	__le32 next;					/* Last entry points to 0.   */
	__le32 status;					/* See enum rx_desc_status below. */
	__le32 addr;					/* Up to 63 addr/len pairs possible. */
	__le32 length;					/* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete=0x00008000, RxDError=0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
	/* NOTE(review): 1<<31 overflows a signed int (undefined in ISO C);
	 * this relies on the implementation-defined gcc result. */
	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};
556 
/* Scatter/gather ("zerocopy") Tx descriptors are only built when the
 * kernel advertises paged-skb support via MAX_SKB_FRAGS. */
#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

/* Boomerang/Cyclone bus-master transmit descriptor (little-endian,
 * shared with the NIC's DMA engine). */
struct boom_tx_desc {
	__le32 next;					/* Last entry points to 0.   */
	__le32 status;					/* bits 0:12 length, others see below.  */
#if DO_ZEROCOPY
	struct {					/* One addr/len pair per fragment. */
		__le32 addr;
		__le32 length;				/* LAST_FRAG marks the final pair. */
	} frag[1+MAX_SKB_FRAGS];			/* +1 slot for the skb head. */
#else
		__le32 addr;
		__le32 length;
#endif
};
576 
577 /* Values for the Tx status entry. */
578 enum tx_desc_status {
579 	CRCDisable=0x2000, TxDComplete=0x8000,
580 	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
581 	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
582 };
583 
584 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
585 enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
586 
/* NIC-specific counters kept in addition to the generic netdev stats.
 * Exported through ethtool; the field order must stay in sync with
 * ethtool_stats_keys[] below. */
struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};
594 
/* Per-adapter private driver state. */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_tx;	/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */
	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */

	/* PCI configuration space information. */
	struct device *gendev;
	void __iomem *ioaddr;			/* IO address space */
	void __iomem *cb_fn_base;		/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;			/* Media selection timer. */
	int options;						/* User-settable misc. driver options. */
	unsigned int media_override:4, 		/* Passed-in media type. */
		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,					/* Vortex can only do a fragment bus-m. */
		/* NOTE(review): full_bus_master_rx is 2 bits wide, unlike its
		 * 1-bit siblings -- confirm whether that is intentional. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,			/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,					/* Wake-on-LAN is enabled */
		pm_state_valid:1,				/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,
		large_frames:1,			/* accept large frames */
		handling_irq:1;			/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;				/* From Wn3_Options. */
	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
	u16 advertising;					/* NWay media advertisement */
	unsigned char phys[2];				/* MII device addresses. */
	u16 deferred;						/* Resend these interrupts when we
										 * bale from the ISR */
	u16 io_size;						/* Size of PCI region (for release_region) */

	/* Serialises access to hardware other than MII and variables below.
	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
	spinlock_t lock;

	spinlock_t mii_lock;		/* Serialises access to MII */
	struct mii_if_info mii;		/* MII lib hooks/info */
	spinlock_t window_lock;		/* Serialises access to windowed regs */
	int window;			/* Register window */
};
659 
660 static void window_set(struct vortex_private *vp, int window)
661 {
662 	if (window != vp->window) {
663 		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
664 		vp->window = window;
665 	}
666 }
667 
/* Generate window_read{8,16,32}() and window_write{8,16,32}().  Each
 * accessor selects the requested register window under window_lock and
 * performs a single MMIO access of the given width. */
#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	unsigned long flags;						\
	u ## size ret;							\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	ret = ioread ## size(vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
	return ret;							\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	unsigned long flags;						\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
693 
/* Recover the bus-specific device from the generic struct device kept in
 * vp->gendev.  Each helper yields NULL when the device is not on that bus
 * or when support for the bus is compiled out. */
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp)							\
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp)							\
	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
711 
712 /* The action to take with a media selection timer tick.
713    Note that we deviate from the 3Com order by checking 10base2 before AUI.
714  */
715 enum xcvr_types {
716 	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
717 	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
718 };
719 
static const struct media_table {
	char *name;
	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
		next:8;						/* The media type to try next. */
	int wait;						/* Time before we check media status. */
} media_tbl[] = {
  /* Indexed by enum xcvr_types; 'next' chains the media probe order. */
  {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
  { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
  { "undefined", 0,			0x80, XCVR_10baseT, 10000},
  { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
  { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
  { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
  { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
  { "undefined", 0,			0x01, XCVR_10baseT, 10000},
  { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
  { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
  { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
};
739 
740 static struct {
741 	const char str[ETH_GSTRING_LEN];
742 } ethtool_stats_keys[] = {
743 	{ "tx_deferred" },
744 	{ "tx_max_collisions" },
745 	{ "tx_multiple_collisions" },
746 	{ "tx_single_collisions" },
747 	{ "rx_bad_ssd" },
748 };
749 
750 /* number of ETHTOOL_GSTATS u64's */
751 #define VORTEX_NUM_STATS    5
752 
753 static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
754 				   int chip_idx, int card_idx);
755 static int vortex_up(struct net_device *dev);
756 static void vortex_down(struct net_device *dev, int final);
757 static int vortex_open(struct net_device *dev);
758 static void mdio_sync(struct vortex_private *vp, int bits);
759 static int mdio_read(struct net_device *dev, int phy_id, int location);
760 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
761 static void vortex_timer(struct timer_list *t);
762 static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
763 				     struct net_device *dev);
764 static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
765 					struct net_device *dev);
766 static int vortex_rx(struct net_device *dev);
767 static int boomerang_rx(struct net_device *dev);
768 static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
769 static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
770 static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
771 static int vortex_close(struct net_device *dev);
772 static void dump_tx_ring(struct net_device *dev);
773 static void update_stats(void __iomem *ioaddr, struct net_device *dev);
774 static struct net_device_stats *vortex_get_stats(struct net_device *dev);
775 static void set_rx_mode(struct net_device *dev);
776 #ifdef CONFIG_PCI
777 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
778 #endif
779 static void vortex_tx_timeout(struct net_device *dev);
780 static void acpi_set_WOL(struct net_device *dev);
781 static const struct ethtool_ops vortex_ethtool_ops;
782 static void set_8021q_mode(struct net_device *dev, int enable);
783 
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-card tuning knobs; -1 means "not set, use the default/global value". */
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
/* Global fallbacks applied when the per-card entry above is unset (-1). */
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

/* Running count of probed cards; used to index the per-card arrays above. */
static int vortex_cards_found;
803 
/* Module parameter registration; descriptions follow below. */
module_param(debug, int, 0);
module_param(global_options, int, 0);
module_param_array(options, int, NULL, 0);
module_param(global_full_duplex, int, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param_array(hw_checksums, int, NULL, 0);
module_param_array(flow_ctrl, int, NULL, 0);
module_param(global_enable_wol, int, 0);
module_param_array(enable_wol, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
/* compaq_* take hardware resources, hence module_param_hw(). */
module_param_hw(compaq_ioaddr, int, ioport, 0);
module_param_hw(compaq_irq, int, irq, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
module_param(global_use_mmio, int, 0);
module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
838 
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * ndo_poll_controller hook: drive the shared interrupt handler directly
 * so the device can be serviced with interrupts disabled.
 */
static void poll_vortex(struct net_device *dev)
{
	vortex_boomerang_interrupt(dev->irq, dev);
}
#endif
845 
846 #ifdef CONFIG_PM
847 
/*
 * PM suspend hook: detach the interface from the stack and quiesce the
 * hardware.  A device that is absent or not running needs no work.
 */
static int vortex_suspend(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));

	if (ndev && netif_running(ndev)) {
		netif_device_detach(ndev);
		vortex_down(ndev, 1);
	}

	return 0;
}
861 
/*
 * PM resume hook: bring the hardware back up and re-attach the interface.
 * Returns the vortex_up() error code on failure, 0 otherwise.
 */
static int vortex_resume(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));
	int rc = 0;

	if (ndev && netif_running(ndev)) {
		rc = vortex_up(ndev);
		if (!rc)
			netif_device_attach(ndev);
	}

	return rc;
}
879 
/* All PM transitions map onto the same suspend/resume pair. */
static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};

#define VORTEX_PM_OPS (&vortex_pm_ops)
890 
891 #else /* !CONFIG_PM */
892 
893 #define VORTEX_PM_OPS NULL
894 
895 #endif /* !CONFIG_PM */
896 
#ifdef CONFIG_EISA
/* EISA IDs for the 3c592/3c597; driver_data selects the chip table entry. */
static const struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
904 
905 static int vortex_eisa_probe(struct device *device)
906 {
907 	void __iomem *ioaddr;
908 	struct eisa_device *edev;
909 
910 	edev = to_eisa_device(device);
911 
912 	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
913 		return -EBUSY;
914 
915 	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
916 
917 	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
918 					  edev->id.driver_data, vortex_cards_found)) {
919 		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
920 		return -ENODEV;
921 	}
922 
923 	vortex_cards_found++;
924 
925 	return 0;
926 }
927 
/*
 * EISA remove: unregister the netdev, reset the chip, release the I/O
 * region and free the device.  The Compaq workaround device is not an
 * EISA device, so reaching here without drvdata is a driver bug.
 */
static int vortex_eisa_remove(struct device *device)
{
	struct eisa_device *edev;
	struct net_device *dev;
	struct vortex_private *vp;
	void __iomem *ioaddr;

	edev = to_eisa_device(device);
	dev = eisa_get_drvdata(edev);

	if (!dev) {
		pr_err("vortex_eisa_remove called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);
	ioaddr = vp->ioaddr;

	/* Unregister first so no new I/O can start, then reset the chip. */
	unregister_netdev(dev);
	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
	release_region(edev->base_addr, VORTEX_TOTAL_SIZE);

	free_netdev(dev);
	return 0;
}
953 
/* EISA bus glue binding the id table to the probe/remove callbacks. */
static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = vortex_eisa_remove
	}
};
962 
963 #endif /* CONFIG_EISA */
964 
/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way EISA bus is probed, we cannot assume
		 * any device have been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happened...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	/* Report cards probed here plus the optimistic EISA assumption. */
	return vortex_cards_found - orig_cards_found + eisa_found;
}
995 
/* returns count (>= 0), or negative on error */
static int vortex_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	int rc, unit, pci_bar;
	struct vortex_chip_info *vci;
	void __iomem *ioaddr;

	/* wake up and enable device */
	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0)
		goto out_disable;

	unit = vortex_cards_found;

	/*
	 * BAR selection: per-card use_mmio[] overrides global_use_mmio,
	 * which overrides the chip-table default (MMIO BAR 1 for
	 * Cyclone/Tornado, I/O BAR 0 otherwise).
	 */
	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
		/* Determine the default if the user didn't override us */
		vci = &vortex_info_tbl[ent->driver_data];
		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
		pci_bar = use_mmio[unit] ? 1 : 0;
	else
		pci_bar = global_use_mmio ? 1 : 0;

	ioaddr = pci_iomap(pdev, pci_bar, 0);
	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
		ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto out_release;
	}

	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
			   ent->driver_data, unit);
	if (rc < 0)
		goto out_iounmap;

	vortex_cards_found++;
	goto out;

	/* Error unwinding in reverse order of acquisition. */
out_iounmap:
	pci_iounmap(pdev, ioaddr);
out_release:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
out:
	return rc;
}
1049 
/* netdev ops used when the chip supports bus-master Tx ("boomerang" path;
 * the "boomrang" spelling is historical and referenced elsewhere). */
static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl 		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
1066 
/* netdev ops for the PIO/DMA-less "vortex" Tx path; identical to the
 * boomerang set except for ndo_start_xmit. */
static const struct net_device_ops vortex_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= vortex_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl 		= vortex_ioctl,
#endif
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
1083 
1084 /*
1085  * Start up the PCI/EISA device which is described by *gendev.
1086  * Return 0 on success.
1087  *
1088  * NOTE: pdev can be NULL, for the case of a Compaq device
1089  */
1090 static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1091 			 int chip_idx, int card_idx)
1092 {
1093 	struct vortex_private *vp;
1094 	int option;
1095 	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
1096 	int i, step;
1097 	struct net_device *dev;
1098 	static int printed_version;
1099 	int retval, print_info;
1100 	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
1101 	const char *print_name = "3c59x";
1102 	struct pci_dev *pdev = NULL;
1103 	struct eisa_device *edev = NULL;
1104 
1105 	if (!printed_version) {
1106 		pr_info("%s", version);
1107 		printed_version = 1;
1108 	}
1109 
1110 	if (gendev) {
1111 		if ((pdev = DEVICE_PCI(gendev))) {
1112 			print_name = pci_name(pdev);
1113 		}
1114 
1115 		if ((edev = DEVICE_EISA(gendev))) {
1116 			print_name = dev_name(&edev->dev);
1117 		}
1118 	}
1119 
1120 	dev = alloc_etherdev(sizeof(*vp));
1121 	retval = -ENOMEM;
1122 	if (!dev)
1123 		goto out;
1124 
1125 	SET_NETDEV_DEV(dev, gendev);
1126 	vp = netdev_priv(dev);
1127 
1128 	option = global_options;
1129 
1130 	/* The lower four bits are the media type. */
1131 	if (dev->mem_start) {
1132 		/*
1133 		 * The 'options' param is passed in as the third arg to the
1134 		 * LILO 'ether=' argument for non-modular use
1135 		 */
1136 		option = dev->mem_start;
1137 	}
1138 	else if (card_idx < MAX_UNITS) {
1139 		if (options[card_idx] >= 0)
1140 			option = options[card_idx];
1141 	}
1142 
1143 	if (option > 0) {
1144 		if (option & 0x8000)
1145 			vortex_debug = 7;
1146 		if (option & 0x4000)
1147 			vortex_debug = 2;
1148 		if (option & 0x0400)
1149 			vp->enable_wol = 1;
1150 	}
1151 
1152 	print_info = (vortex_debug > 1);
1153 	if (print_info)
1154 		pr_info("See Documentation/networking/vortex.txt\n");
1155 
1156 	pr_info("%s: 3Com %s %s at %p.\n",
1157 	       print_name,
1158 	       pdev ? "PCI" : "EISA",
1159 	       vci->name,
1160 	       ioaddr);
1161 
1162 	dev->base_addr = (unsigned long)ioaddr;
1163 	dev->irq = irq;
1164 	dev->mtu = mtu;
1165 	vp->ioaddr = ioaddr;
1166 	vp->large_frames = mtu > 1500;
1167 	vp->drv_flags = vci->drv_flags;
1168 	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1169 	vp->io_size = vci->io_size;
1170 	vp->card_idx = card_idx;
1171 	vp->window = -1;
1172 
1173 	/* module list only for Compaq device */
1174 	if (gendev == NULL) {
1175 		compaq_net_device = dev;
1176 	}
1177 
1178 	/* PCI-only startup logic */
1179 	if (pdev) {
1180 		/* enable bus-mastering if necessary */
1181 		if (vci->flags & PCI_USES_MASTER)
1182 			pci_set_master(pdev);
1183 
1184 		if (vci->drv_flags & IS_VORTEX) {
1185 			u8 pci_latency;
1186 			u8 new_latency = 248;
1187 
1188 			/* Check the PCI latency value.  On the 3c590 series the latency timer
1189 			   must be set to the maximum value to avoid data corruption that occurs
1190 			   when the timer expires during a transfer.  This bug exists the Vortex
1191 			   chip only. */
1192 			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
1193 			if (pci_latency < new_latency) {
1194 				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
1195 					print_name, pci_latency, new_latency);
1196 				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
1197 			}
1198 		}
1199 	}
1200 
1201 	spin_lock_init(&vp->lock);
1202 	spin_lock_init(&vp->mii_lock);
1203 	spin_lock_init(&vp->window_lock);
1204 	vp->gendev = gendev;
1205 	vp->mii.dev = dev;
1206 	vp->mii.mdio_read = mdio_read;
1207 	vp->mii.mdio_write = mdio_write;
1208 	vp->mii.phy_id_mask = 0x1f;
1209 	vp->mii.reg_num_mask = 0x1f;
1210 
1211 	/* Makes sure rings are at least 16 byte aligned. */
1212 	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1213 					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1214 					   &vp->rx_ring_dma);
1215 	retval = -ENOMEM;
1216 	if (!vp->rx_ring)
1217 		goto free_device;
1218 
1219 	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1220 	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
1221 
1222 	/* if we are a PCI driver, we store info in pdev->driver_data
1223 	 * instead of a module list */
1224 	if (pdev)
1225 		pci_set_drvdata(pdev, dev);
1226 	if (edev)
1227 		eisa_set_drvdata(edev, dev);
1228 
1229 	vp->media_override = 7;
1230 	if (option >= 0) {
1231 		vp->media_override = ((option & 7) == 2)  ?  0  :  option & 15;
1232 		if (vp->media_override != 7)
1233 			vp->medialock = 1;
1234 		vp->full_duplex = (option & 0x200) ? 1 : 0;
1235 		vp->bus_master = (option & 16) ? 1 : 0;
1236 	}
1237 
1238 	if (global_full_duplex > 0)
1239 		vp->full_duplex = 1;
1240 	if (global_enable_wol > 0)
1241 		vp->enable_wol = 1;
1242 
1243 	if (card_idx < MAX_UNITS) {
1244 		if (full_duplex[card_idx] > 0)
1245 			vp->full_duplex = 1;
1246 		if (flow_ctrl[card_idx] > 0)
1247 			vp->flow_ctrl = 1;
1248 		if (enable_wol[card_idx] > 0)
1249 			vp->enable_wol = 1;
1250 	}
1251 
1252 	vp->mii.force_media = vp->full_duplex;
1253 	vp->options = option;
1254 	/* Read the station address from the EEPROM. */
1255 	{
1256 		int base;
1257 
1258 		if (vci->drv_flags & EEPROM_8BIT)
1259 			base = 0x230;
1260 		else if (vci->drv_flags & EEPROM_OFFSET)
1261 			base = EEPROM_Read + 0x30;
1262 		else
1263 			base = EEPROM_Read;
1264 
1265 		for (i = 0; i < 0x40; i++) {
1266 			int timer;
1267 			window_write16(vp, base + i, 0, Wn0EepromCmd);
1268 			/* Pause for at least 162 us. for the read to take place. */
1269 			for (timer = 10; timer >= 0; timer--) {
1270 				udelay(162);
1271 				if ((window_read16(vp, 0, Wn0EepromCmd) &
1272 				     0x8000) == 0)
1273 					break;
1274 			}
1275 			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
1276 		}
1277 	}
1278 	for (i = 0; i < 0x18; i++)
1279 		checksum ^= eeprom[i];
1280 	checksum = (checksum ^ (checksum >> 8)) & 0xff;
1281 	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
1282 		while (i < 0x21)
1283 			checksum ^= eeprom[i++];
1284 		checksum = (checksum ^ (checksum >> 8)) & 0xff;
1285 	}
1286 	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
1287 		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
1288 	for (i = 0; i < 3; i++)
1289 		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
1290 	if (print_info)
1291 		pr_cont(" %pM", dev->dev_addr);
1292 	/* Unfortunately an all zero eeprom passes the checksum and this
1293 	   gets found in the wild in failure cases. Crypto is hard 8) */
1294 	if (!is_valid_ether_addr(dev->dev_addr)) {
1295 		retval = -EINVAL;
1296 		pr_err("*** EEPROM MAC address is invalid.\n");
1297 		goto free_ring;	/* With every pack */
1298 	}
1299 	for (i = 0; i < 6; i++)
1300 		window_write8(vp, dev->dev_addr[i], 2, i);
1301 
1302 	if (print_info)
1303 		pr_cont(", IRQ %d\n", dev->irq);
1304 	/* Tell them about an invalid IRQ. */
1305 	if (dev->irq <= 0 || dev->irq >= nr_irqs)
1306 		pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
1307 			dev->irq);
1308 
1309 	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
1310 	if (print_info) {
1311 		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
1312 			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
1313 			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
1314 	}
1315 
1316 
1317 	if (pdev && vci->drv_flags & HAS_CB_FNS) {
1318 		unsigned short n;
1319 
1320 		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
1321 		if (!vp->cb_fn_base) {
1322 			retval = -ENOMEM;
1323 			goto free_ring;
1324 		}
1325 
1326 		if (print_info) {
1327 			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
1328 				print_name,
1329 				(unsigned long long)pci_resource_start(pdev, 2),
1330 				vp->cb_fn_base);
1331 		}
1332 
1333 		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
1334 		if (vp->drv_flags & INVERT_LED_PWR)
1335 			n |= 0x10;
1336 		if (vp->drv_flags & INVERT_MII_PWR)
1337 			n |= 0x4000;
1338 		window_write16(vp, n, 2, Wn2_ResetOptions);
1339 		if (vp->drv_flags & WNO_XCVR_PWR) {
1340 			window_write16(vp, 0x0800, 0, 0);
1341 		}
1342 	}
1343 
1344 	/* Extract our information from the EEPROM data. */
1345 	vp->info1 = eeprom[13];
1346 	vp->info2 = eeprom[15];
1347 	vp->capabilities = eeprom[16];
1348 
1349 	if (vp->info1 & 0x8000) {
1350 		vp->full_duplex = 1;
1351 		if (print_info)
1352 			pr_info("Full duplex capable\n");
1353 	}
1354 
1355 	{
1356 		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
1357 		unsigned int config;
1358 		vp->available_media = window_read16(vp, 3, Wn3_Options);
1359 		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
1360 			vp->available_media = 0x40;
1361 		config = window_read32(vp, 3, Wn3_Config);
1362 		if (print_info) {
1363 			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
1364 				config, window_read16(vp, 3, Wn3_Options));
1365 			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
1366 				   8 << RAM_SIZE(config),
1367 				   RAM_WIDTH(config) ? "word" : "byte",
1368 				   ram_split[RAM_SPLIT(config)],
1369 				   AUTOSELECT(config) ? "autoselect/" : "",
1370 				   XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
1371 				   media_tbl[XCVR(config)].name);
1372 		}
1373 		vp->default_media = XCVR(config);
1374 		if (vp->default_media == XCVR_NWAY)
1375 			vp->has_nway = 1;
1376 		vp->autoselect = AUTOSELECT(config);
1377 	}
1378 
1379 	if (vp->media_override != 7) {
1380 		pr_info("%s:  Media override to transceiver type %d (%s).\n",
1381 				print_name, vp->media_override,
1382 				media_tbl[vp->media_override].name);
1383 		dev->if_port = vp->media_override;
1384 	} else
1385 		dev->if_port = vp->default_media;
1386 
1387 	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
1388 		dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1389 		int phy, phy_idx = 0;
1390 		mii_preamble_required++;
1391 		if (vp->drv_flags & EXTRA_PREAMBLE)
1392 			mii_preamble_required++;
1393 		mdio_sync(vp, 32);
1394 		mdio_read(dev, 24, MII_BMSR);
1395 		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
1396 			int mii_status, phyx;
1397 
1398 			/*
1399 			 * For the 3c905CX we look at index 24 first, because it bogusly
1400 			 * reports an external PHY at all indices
1401 			 */
1402 			if (phy == 0)
1403 				phyx = 24;
1404 			else if (phy <= 24)
1405 				phyx = phy - 1;
1406 			else
1407 				phyx = phy;
1408 			mii_status = mdio_read(dev, phyx, MII_BMSR);
1409 			if (mii_status  &&  mii_status != 0xffff) {
1410 				vp->phys[phy_idx++] = phyx;
1411 				if (print_info) {
1412 					pr_info("  MII transceiver found at address %d, status %4x.\n",
1413 						phyx, mii_status);
1414 				}
1415 				if ((mii_status & 0x0040) == 0)
1416 					mii_preamble_required++;
1417 			}
1418 		}
1419 		mii_preamble_required--;
1420 		if (phy_idx == 0) {
1421 			pr_warn("  ***WARNING*** No MII transceivers found!\n");
1422 			vp->phys[0] = 24;
1423 		} else {
1424 			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
1425 			if (vp->full_duplex) {
1426 				/* Only advertise the FD media types. */
1427 				vp->advertising &= ~0x02A0;
1428 				mdio_write(dev, vp->phys[0], 4, vp->advertising);
1429 			}
1430 		}
1431 		vp->mii.phy_id = vp->phys[0];
1432 	}
1433 
1434 	if (vp->capabilities & CapBusMaster) {
1435 		vp->full_bus_master_tx = 1;
1436 		if (print_info) {
1437 			pr_info("  Enabling bus-master transmits and %s receives.\n",
1438 			(vp->info2 & 1) ? "early" : "whole-frame" );
1439 		}
1440 		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1441 		vp->bus_master = 0;		/* AKPM: vortex only */
1442 	}
1443 
1444 	/* The 3c59x-specific entries in the device structure. */
1445 	if (vp->full_bus_master_tx) {
1446 		dev->netdev_ops = &boomrang_netdev_ops;
1447 		/* Actually, it still should work with iommu. */
1448 		if (card_idx < MAX_UNITS &&
1449 		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
1450 				hw_checksums[card_idx] == 1)) {
1451 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1452 		}
1453 	} else
1454 		dev->netdev_ops =  &vortex_netdev_ops;
1455 
1456 	if (print_info) {
1457 		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
1458 				print_name,
1459 				(dev->features & NETIF_F_SG) ? "en":"dis",
1460 				(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
1461 	}
1462 
1463 	dev->ethtool_ops = &vortex_ethtool_ops;
1464 	dev->watchdog_timeo = (watchdog * HZ) / 1000;
1465 
1466 	if (pdev) {
1467 		vp->pm_state_valid = 1;
1468 		pci_save_state(pdev);
1469  		acpi_set_WOL(dev);
1470 	}
1471 	retval = register_netdev(dev);
1472 	if (retval == 0)
1473 		return 0;
1474 
1475 free_ring:
1476 	pci_free_consistent(pdev,
1477 						sizeof(struct boom_rx_desc) * RX_RING_SIZE
1478 							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1479 						vp->rx_ring,
1480 						vp->rx_ring_dma);
1481 free_device:
1482 	free_netdev(dev);
1483 	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
1484 out:
1485 	return retval;
1486 }
1487 
1488 static void
1489 issue_and_wait(struct net_device *dev, int cmd)
1490 {
1491 	struct vortex_private *vp = netdev_priv(dev);
1492 	void __iomem *ioaddr = vp->ioaddr;
1493 	int i;
1494 
1495 	iowrite16(cmd, ioaddr + EL3_CMD);
1496 	for (i = 0; i < 2000; i++) {
1497 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
1498 			return;
1499 	}
1500 
1501 	/* OK, that didn't work.  Do it the slow way.  One second */
1502 	for (i = 0; i < 100000; i++) {
1503 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
1504 			if (vortex_debug > 1)
1505 				pr_info("%s: command 0x%04x took %d usecs\n",
1506 					   dev->name, cmd, i * 10);
1507 			return;
1508 		}
1509 		udelay(10);
1510 	}
1511 	pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
1512 			   dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
1513 }
1514 
1515 static void
1516 vortex_set_duplex(struct net_device *dev)
1517 {
1518 	struct vortex_private *vp = netdev_priv(dev);
1519 
1520 	pr_info("%s:  setting %s-duplex.\n",
1521 		dev->name, (vp->full_duplex) ? "full" : "half");
1522 
1523 	/* Set the full-duplex bit. */
1524 	window_write16(vp,
1525 		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1526 		       (vp->large_frames ? 0x40 : 0) |
1527 		       ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1528 			0x100 : 0),
1529 		       3, Wn3_MAC_Ctrl);
1530 }
1531 
1532 static void vortex_check_media(struct net_device *dev, unsigned int init)
1533 {
1534 	struct vortex_private *vp = netdev_priv(dev);
1535 	unsigned int ok_to_print = 0;
1536 
1537 	if (vortex_debug > 3)
1538 		ok_to_print = 1;
1539 
1540 	if (mii_check_media(&vp->mii, ok_to_print, init)) {
1541 		vp->full_duplex = vp->mii.full_duplex;
1542 		vortex_set_duplex(dev);
1543 	} else if (init) {
1544 		vortex_set_duplex(dev);
1545 	}
1546 }
1547 
/*
 * Bring the hardware to a running state: power up (PCI), select the
 * media port, program duplex, reset Tx/Rx, set the station address,
 * clear statistics, initialise the DMA rings and unmask interrupts.
 * Called from vortex_open() and vortex_resume().
 */
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg1, mii_reg5, err = 0;

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warn("%s: Could not enable device\n", dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port. */
	config = window_read32(vp, 3, Wn3_Config);

	if (vp->media_override != 7) {
		pr_info("%s: Media override to transceiver %d (%s).\n",
			   dev->name, vp->media_override,
			   media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
								dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (! (vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	/* Media-watchdog timer; period depends on the selected media. */
	timer_setup(&vp->timer, vortex_timer, 0);
	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;
	/* Insert the selected port into bits 20..23 of InternalConfig. */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);


	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			   dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	if (vp->cb_fn_base) {
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
	netdev_reset_queue(dev);
err_out:
	return err;
}
1719 
/*
 * ndo_open: request the (shared) IRQ, populate the Rx descriptor ring
 * with mapped skbs for bus-master chips, then start the hardware via
 * vortex_up().  On any failure all allocated skbs and the IRQ are
 * released before returning the error.
 */
static int
vortex_open(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int i;
	int retval;
	dma_addr_t dma;

	/* Use the now-standard shared IRQ implementation. */
	if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
		pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
		goto err;
	}

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		if (vortex_debug > 2)
			pr_debug("%s:  Filling in the Rx ring.\n", dev->name);
		for (i = 0; i < RX_RING_SIZE; i++) {
			struct sk_buff *skb;
			/* Link each descriptor to the next; the ring is
			 * wrapped after the loop completes. */
			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);

			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
						 GFP_KERNEL);
			vp->rx_skbuff[i] = skb;
			if (skb == NULL)
				break;			/* Bad news!  */

			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
			dma = pci_map_single(VORTEX_PCI(vp), skb->data,
					     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
				break;
			vp->rx_ring[i].addr = cpu_to_le32(dma);
		}
		/* Partial fill means allocation/mapping failed: unwind. */
		if (i != RX_RING_SIZE) {
			pr_emerg("%s: no memory for rx ring\n", dev->name);
			retval = -ENOMEM;
			goto err_free_skb;
		}
		/* Wrap the ring. */
		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
	}

	retval = vortex_up(dev);
	if (!retval)
		goto out;

err_free_skb:
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (vp->rx_skbuff[i]) {
			dev_kfree_skb(vp->rx_skbuff[i]);
			vp->rx_skbuff[i] = NULL;
		}
	}
	free_irq(dev->irq, dev);
err:
	if (vortex_debug > 1)
		pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
out:
	return retval;
}
1783 
/*
 * Media-selection watchdog.  Checks whether the currently selected
 * transceiver has link; if not (and the user has not locked the media
 * choice via vp->medialock), steps dev->if_port through the available
 * media table and reprograms Wn4_Media/Wn3_Config for the new medium.
 * Re-arms itself: every 60s while carrier is up, every 5s otherwise.
 */
static void
vortex_timer(struct timer_list *t)
{
	struct vortex_private *vp = from_timer(vp, t, timer);
	struct net_device *dev = vp->mii.dev;
	void __iomem *ioaddr = vp->ioaddr;
	int next_tick = 60*HZ;
	int ok = 0;
	int media_status;

	if (vortex_debug > 2) {
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			   dev->name, media_tbl[dev->if_port].name);
		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
	}

	media_status = window_read16(vp, 4, Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		/* Link beat for these media is reported directly in Wn4_Media. */
		if (media_status & Media_LnkBeat) {
			netif_carrier_on(dev);
			ok = 1;
			if (vortex_debug > 1)
				pr_debug("%s: Media %s has link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
		} else {
			netif_carrier_off(dev);
			if (vortex_debug > 1) {
				pr_debug("%s: Media %s has no link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
			}
		}
		break;
	case XCVR_MII: case XCVR_NWAY:
		{
			/* MII/NWAY: let the PHY helper update carrier state. */
			ok = 1;
			vortex_check_media(dev, 0);
		}
		break;
	  default:					/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
		  pr_debug("%s: Media %s has no indication, %x.\n",
				 dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	/* Poll faster while carrier is down (or when enslaved to a bond). */
	if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
		next_tick = 5*HZ;

	if (vp->medialock)
		goto leave_media_alone;

	if (!ok) {
		unsigned int config;

		spin_lock_irq(&vp->lock);

		/* Advance to the next medium this board actually supports. */
		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
		if (dev->if_port == XCVR_Default) { /* Go back to default. */
		  dev->if_port = vp->default_media;
		  if (vortex_debug > 1)
			pr_debug("%s: Media selection failing, using default %s port.\n",
				   dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failed, now trying %s port.\n",
					   dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		/* Program the transceiver control bits for the new medium... */
		window_write16(vp,
			       (media_status & ~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);

		/* ...and select it in the 4-bit xcvr field of Wn3_Config. */
		config = window_read32(vp, 3, Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		window_write32(vp, config, 3, Wn3_Config);

		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */

		spin_unlock_irq(&vp->lock);
	}

leave_media_alone:
	if (vortex_debug > 2)
	  pr_debug("%s: Media selection timer finished, %s.\n",
			 dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	/* If the ISR masked interrupt sources ("too much work"), poke the
	 * chip with a fake interrupt so the deferred sources get serviced
	 * and re-enabled. */
	if (vp->deferred)
		iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
1882 
/*
 * Transmit watchdog: invoked by the networking core when the Tx queue
 * appears stuck.  Dumps diagnostics, resets the transmitter, and (for
 * full-bus-master Tx chips) restores the download list pointer so any
 * still-queued descriptors are picked up again.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, ioread8(ioaddr + TxStatus),
		   ioread16(ioaddr + EL3_STATUS));
	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
			window_read16(vp, 4, Wn4_NetDiag),
			window_read16(vp, 4, Wn4_Media),
			ioread32(ioaddr + PktStatus),
			window_read16(vp, 4, Wn4_FIFODiag));
	/* Slight code bloat to be user friendly. */
	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
		pr_err("%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
		pr_err("%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		vortex_boomerang_interrupt(dev->irq, dev);
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	issue_and_wait(dev, TxReset);

	dev->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
		/* If descriptors are pending but the download engine lost its
		 * list pointer, point it back at the first dirty entry. */
		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
				 ioaddr + DownListPtr);
		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
			netif_wake_queue (dev);
			netdev_reset_queue (dev);
		}
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		iowrite16(DownUnstall, ioaddr + EL3_CMD);
	} else {
		/* Non-bus-master Tx: the in-flight packet is simply lost. */
		dev->stats.tx_dropped++;
		netif_wake_queue(dev);
		netdev_reset_queue(dev);
	}
	/* Issue Tx Enable */
	iowrite16(TxEnable, ioaddr + EL3_CMD);
	netif_trans_update(dev); /* prevent tx timeout */
}
1934 
1935 /*
1936  * Handle uncommon interrupt sources.  This is a separate routine to minimize
1937  * the cache impact.
1938  */
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int do_tx_reset = 0, reset_mask = 0;
	unsigned char tx_status = 0;

	if (vortex_debug > 2) {
		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
	}

	if (status & TxComplete) {			/* Really "TxError" for us. */
		tx_status = ioread8(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2 ||
		    (tx_status != 0x88 && vortex_debug > 0)) {
			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
				   dev->name, tx_status);
			if (tx_status == 0x82) {
				pr_err("Probably a duplex mismatch.  See "
						"Documentation/networking/vortex.txt\n");
			}
			dump_tx_ring(dev);
		}
		/* Classify the error bits into the standard stats counters. */
		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
		/* Writing 0 pops the Tx status stack. */
		iowrite8(0, ioaddr + TxStatus);
		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
			do_tx_reset = 1;
		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
			do_tx_reset = 1;
			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
		} else {				/* Merely re-enable the transmitter. */
			iowrite16(TxEnable, ioaddr + EL3_CMD);
		}
	}

	if (status & RxEarly)				/* Rx early is unused. */
		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);

	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat;
		if (vortex_debug > 4)
			pr_debug("%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0  &&
			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
			pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n",
				dev->name);
			iowrite16(SetIntrEnb |
				  (window_read16(vp, 5, 10) & ~StatsFull),
				  ioaddr + EL3_CMD);
			vp->intr_enable &= ~StatsFull;
			DoneDidThat++;
		}
	}
	if (status & IntReq) {		/* Restore all interrupt sources.  */
		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
			   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			int bus_status = ioread32(ioaddr + PktStatus);
			/* 0x80000000 PCI master abort. */
			/* 0x40000000 PCI target abort. */
			if (vortex_debug)
				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);

			/* In this case, blow the card away */
			/* Must not enter D3 or we can't legally issue the reset! */
			vortex_down(dev, 0);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			/* Reset Rx fifo and upload logic */
			issue_and_wait(dev, RxReset|0x07);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			/* enable 802.1q VLAN tagged frames */
			set_8021q_mode(dev, 1);
			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}

	if (do_tx_reset) {
		/* reset_mask narrows the reset to interface logic only for
		 * the maxCollisions case above. */
		issue_and_wait(dev, TxReset|reset_mask);
		iowrite16(TxEnable, ioaddr + EL3_CMD);
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}
2042 
/*
 * Transmit path for vortex-generation chips (no full bus-master Tx).
 * Writes the doubleword length header into the Tx FIFO, then either
 * kicks the single-packet DMA engine (vp->bus_master) or pushes the
 * payload into the FIFO with 32-bit PIO writes.  Always consumes the
 * skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int skblen = skb->len;

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;	/* round up to a doubleword */
		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
						PCI_DMA_TODEVICE);
		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
			/* Can't DMA this packet; drop it rather than stall. */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Wn7 master registers are only valid with window 7 selected. */
		spin_lock_irq(&vp->window_lock);
		window_set(vp, 7);
		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
		spin_unlock_irq(&vp->window_lock);
		vp->tx_skb = skb;
		skb_tx_timestamp(skb);
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		skb_tx_timestamp(skb);
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_consume_skb_any (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}

	netdev_sent_queue(dev, skblen);

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
				  pr_debug("%s: Tx error, status %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
2110 
2111 static netdev_tx_t
2112 boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 {
2114 	struct vortex_private *vp = netdev_priv(dev);
2115 	void __iomem *ioaddr = vp->ioaddr;
2116 	/* Calculate the next Tx descriptor entry. */
2117 	int entry = vp->cur_tx % TX_RING_SIZE;
2118 	int skblen = skb->len;
2119 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2120 	unsigned long flags;
2121 	dma_addr_t dma_addr;
2122 
2123 	if (vortex_debug > 6) {
2124 		pr_debug("boomerang_start_xmit()\n");
2125 		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
2126 			   dev->name, vp->cur_tx);
2127 	}
2128 
2129 	/*
2130 	 * We can't allow a recursion from our interrupt handler back into the
2131 	 * tx routine, as they take the same spin lock, and that causes
2132 	 * deadlock.  Just return NETDEV_TX_BUSY and let the stack try again in
2133 	 * a bit
2134 	 */
2135 	if (vp->handling_irq)
2136 		return NETDEV_TX_BUSY;
2137 
2138 	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2139 		if (vortex_debug > 0)
2140 			pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n",
2141 				dev->name);
2142 		netif_stop_queue(dev);
2143 		return NETDEV_TX_BUSY;
2144 	}
2145 
2146 	vp->tx_skbuff[entry] = skb;
2147 
2148 	vp->tx_ring[entry].next = 0;
2149 #if DO_ZEROCOPY
2150 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2151 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2152 	else
2153 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2154 
2155 	if (!skb_shinfo(skb)->nr_frags) {
2156 		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
2157 					  PCI_DMA_TODEVICE);
2158 		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
2159 			goto out_dma_err;
2160 
2161 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2162 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2163 	} else {
2164 		int i;
2165 
2166 		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
2167 					  skb_headlen(skb), PCI_DMA_TODEVICE);
2168 		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
2169 			goto out_dma_err;
2170 
2171 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2172 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2173 
2174 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2175 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2176 
2177 			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
2178 						    0,
2179 						    frag->size,
2180 						    DMA_TO_DEVICE);
2181 			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
2182 				for(i = i-1; i >= 0; i--)
2183 					dma_unmap_page(&VORTEX_PCI(vp)->dev,
2184 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2185 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2186 						       DMA_TO_DEVICE);
2187 
2188 				pci_unmap_single(VORTEX_PCI(vp),
2189 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2190 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2191 						 PCI_DMA_TODEVICE);
2192 
2193 				goto out_dma_err;
2194 			}
2195 
2196 			vp->tx_ring[entry].frag[i+1].addr =
2197 						cpu_to_le32(dma_addr);
2198 
2199 			if (i == skb_shinfo(skb)->nr_frags-1)
2200 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
2201 			else
2202 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
2203 		}
2204 	}
2205 #else
2206 	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
2207 	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
2208 		goto out_dma_err;
2209 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2210 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2211 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2212 #endif
2213 
2214 	spin_lock_irqsave(&vp->lock, flags);
2215 	/* Wait for the stall to complete. */
2216 	issue_and_wait(dev, DownStall);
2217 	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2218 	if (ioread32(ioaddr + DownListPtr) == 0) {
2219 		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2220 		vp->queued_packet++;
2221 	}
2222 
2223 	vp->cur_tx++;
2224 	netdev_sent_queue(dev, skblen);
2225 
2226 	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2227 		netif_stop_queue (dev);
2228 	} else {					/* Clear previous interrupt enable. */
2229 #if defined(tx_interrupt_mitigation)
2230 		/* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
2231 		 * were selected, this would corrupt DN_COMPLETE. No?
2232 		 */
2233 		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
2234 #endif
2235 	}
2236 	skb_tx_timestamp(skb);
2237 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
2238 	spin_unlock_irqrestore(&vp->lock, flags);
2239 out:
2240 	return NETDEV_TX_OK;
2241 out_dma_err:
2242 	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
2243 	goto out;
2244 }
2245 
2246 /* The interrupt handler does all of the Rx thread work and cleans up
2247    after the Tx thread. */
2248 
2249 /*
2250  * This is the ISR for the vortex series chips.
2251  * full_bus_master_tx == 0 && full_bus_master_rx == 0
2252  */
2253 
2254 static irqreturn_t
2255 _vortex_interrupt(int irq, struct net_device *dev)
2256 {
2257 	struct vortex_private *vp = netdev_priv(dev);
2258 	void __iomem *ioaddr;
2259 	int status;
2260 	int work_done = max_interrupt_work;
2261 	int handled = 0;
2262 	unsigned int bytes_compl = 0, pkts_compl = 0;
2263 
2264 	ioaddr = vp->ioaddr;
2265 
2266 	status = ioread16(ioaddr + EL3_STATUS);
2267 
2268 	if (vortex_debug > 6)
2269 		pr_debug("vortex_interrupt(). status=0x%4x\n", status);
2270 
2271 	if ((status & IntLatch) == 0)
2272 		goto handler_exit;		/* No interrupt: shared IRQs cause this */
2273 	handled = 1;
2274 
2275 	if (status & IntReq) {
2276 		status |= vp->deferred;
2277 		vp->deferred = 0;
2278 	}
2279 
2280 	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
2281 		goto handler_exit;
2282 
2283 	if (vortex_debug > 4)
2284 		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
2285 			   dev->name, status, ioread8(ioaddr + Timer));
2286 
2287 	spin_lock(&vp->window_lock);
2288 	window_set(vp, 7);
2289 
2290 	do {
2291 		if (vortex_debug > 5)
2292 				pr_debug("%s: In interrupt loop, status %4.4x.\n",
2293 					   dev->name, status);
2294 		if (status & RxComplete)
2295 			vortex_rx(dev);
2296 
2297 		if (status & TxAvailable) {
2298 			if (vortex_debug > 5)
2299 				pr_debug("	TX room bit was handled.\n");
2300 			/* There's room in the FIFO for a full-sized packet. */
2301 			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
2302 			netif_wake_queue (dev);
2303 		}
2304 
2305 		if (status & DMADone) {
2306 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
2307 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
2308 				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2309 				pkts_compl++;
2310 				bytes_compl += vp->tx_skb->len;
2311 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2312 				if (ioread16(ioaddr + TxFree) > 1536) {
2313 					/*
2314 					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
2315 					 * insufficient FIFO room, the TxAvailable test will succeed and call
2316 					 * netif_wake_queue()
2317 					 */
2318 					netif_wake_queue(dev);
2319 				} else { /* Interrupt when FIFO has room for max-sized packet. */
2320 					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
2321 					netif_stop_queue(dev);
2322 				}
2323 			}
2324 		}
2325 		/* Check for all uncommon interrupts at once. */
2326 		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2327 			if (status == 0xffff)
2328 				break;
2329 			if (status & RxEarly)
2330 				vortex_rx(dev);
2331 			spin_unlock(&vp->window_lock);
2332 			vortex_error(dev, status);
2333 			spin_lock(&vp->window_lock);
2334 			window_set(vp, 7);
2335 		}
2336 
2337 		if (--work_done < 0) {
2338 			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
2339 				dev->name, status);
2340 			/* Disable all pending interrupts. */
2341 			do {
2342 				vp->deferred |= status;
2343 				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2344 					 ioaddr + EL3_CMD);
2345 				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2346 			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
2347 			/* The timer will reenable interrupts. */
2348 			mod_timer(&vp->timer, jiffies + 1*HZ);
2349 			break;
2350 		}
2351 		/* Acknowledge the IRQ. */
2352 		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2353 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
2354 
2355 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
2356 	spin_unlock(&vp->window_lock);
2357 
2358 	if (vortex_debug > 4)
2359 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
2360 			   dev->name, status);
2361 handler_exit:
2362 	return IRQ_RETVAL(handled);
2363 }
2364 
2365 /*
2366  * This is the ISR for the boomerang series chips.
2367  * full_bus_master_tx == 1 && full_bus_master_rx == 1
2368  */
2369 
static irqreturn_t
_boomerang_interrupt(int irq, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* budget before we defer */
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;	/* for BQL accounting */

	ioaddr = vp->ioaddr;

	/* Flag checked by boomerang_start_xmit() to avoid lock recursion. */
	vp->handling_irq = 1;

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
	handled = 1;

	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	/* Fold in any sources we masked earlier in the "too much work" path. */
	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			/* Reclaim completed Tx descriptors: unmap, free skbs,
			 * and advance dirty_tx up to the hardware's position. */
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				if (ioread32(ioaddr + DownListPtr) ==
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;			/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;			/* It still hasn't been processed. */
#endif

				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					/* Fragment 0 was pci_map_single'd; the
					 * rest were page-mapped.  Length field
					 * is masked to strip LAST_FRAG etc. */
					pci_unmap_single(VORTEX_PCI(vp),
							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
							le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
							PCI_DMA_TODEVICE);

					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
							pci_unmap_page(VORTEX_PCI(vp),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
											 PCI_DMA_TODEVICE);
#else
					pci_unmap_single(VORTEX_PCI(vp),
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
					pkts_compl++;
					bytes_compl += skb->len;
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			/* NOTE(review): status read via the EL3_CMD offset --
			 * command/status appear to share the register on these
			 * chips; confirm against the register enum. */
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	return IRQ_RETVAL(handled);
}
2502 
2503 static irqreturn_t
2504 vortex_boomerang_interrupt(int irq, void *dev_id)
2505 {
2506 	struct net_device *dev = dev_id;
2507 	struct vortex_private *vp = netdev_priv(dev);
2508 	unsigned long flags;
2509 	irqreturn_t ret;
2510 
2511 	spin_lock_irqsave(&vp->lock, flags);
2512 
2513 	if (vp->full_bus_master_rx)
2514 		ret = _boomerang_interrupt(dev->irq, dev);
2515 	else
2516 		ret = _vortex_interrupt(dev->irq, dev);
2517 
2518 	spin_unlock_irqrestore(&vp->lock, flags);
2519 
2520 	return ret;
2521 }
2522 
/*
 * Receive path for vortex-generation chips: pops packets out of the Rx
 * FIFO one at a time.  Uses the single-packet Rx DMA engine when the
 * chip is a bus master and the engine is idle, otherwise 32-bit PIO
 * reads.  Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* DMA the packet straight into the skb,
					 * then busy-wait for completion. */
					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
									   pkt_len, PCI_DMA_FROMDEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					/* PIO the payload out of the FIFO in
					 * doubleword units. */
					ioread32_rep(ioaddr + RX_FIFO,
					             skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		/* Error or allocation failure: discard the packet in the FIFO. */
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
2590 
/*
 * boomerang_rx() - bus-master (Boomerang/Cyclone/Tornado) receive path.
 *
 * Walks the Rx descriptor ring from vp->cur_rx, handing completed packets
 * up the stack.  Packets shorter than rx_copybreak are copied into a small
 * freshly-allocated skb so the full-size ring buffer can stay mapped and be
 * reused; larger packets are passed up zero-copy and replaced on the ring
 * by a new PKT_BUF_SZ buffer.  At most RX_RING_SIZE descriptors are
 * processed per call.  Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	int rx_work_limit = RX_RING_SIZE;	/* bound work done per invocation */

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			/* Error bits live in the high half of the status word. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb, *newskb;
			dma_addr_t newdma;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* Sync the ring buffer for CPU access, copy the
				 * frame out, then give it back to the device. */
				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				skb_put_data(skb, vp->rx_skbuff[entry]->data,
					     pkt_len);
				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_copy++;
			} else {
				/* Pre-allocate the replacement skb.  If it or its
				 * mapping fails then recycle the buffer thats already
				 * in place
				 */
				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
				if (!newskb) {
					dev->stats.rx_dropped++;
					goto clear_complete;
				}
				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
					dev->stats.rx_dropped++;
					consume_skb(newskb);
					goto clear_complete;
				}

				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = newskb;
				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
				skb_put(skb, pkt_len);
				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}

clear_complete:
		/* Hand the descriptor back to the NIC and restart the upload
		 * engine in case it stalled on a full ring. */
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	return 0;
}
2685 
/*
 * Quiesce the hardware for ifdown or suspend.  Stops the Tx queue and the
 * media timer, disables statistics, Rx, Tx and interrupts, takes a final
 * statistics snapshot, and clears the bus-master list pointers.  When
 * @final_down is set on a PCI device it also saves PCI config state and
 * arms Wake-on-LAN / D3 via acpi_set_WOL().
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	netdev_reset_queue(dev);
	netif_stop_queue(dev);

	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);

	/* Mask all interrupt sources. */
	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	/* Fold the final hardware counter values into dev->stats. */
	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);

	if (final_down && VORTEX_PCI(vp)) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}
2725 
/*
 * ndo_stop hook: bring the interface down, free the IRQ, and release all
 * DMA-mapped ring buffers.  Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	/* Skip the hardware shutdown if the device was already detached. */
	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	/* Nag if the chip could have checksummed for us but the feature was
	 * left disabled (and not explicitly forced off by module parameter). */
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warn("%s supports hardware checksums, and we're not using them!\n",
			dev->name);
	}
#endif

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				pci_unmap_single(	VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				/* Unmap the head fragment plus every page
				 * fragment of a scatter-gather transmit. */
				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
						pci_unmap_single(VORTEX_PCI(vp),
										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
										 PCI_DMA_TODEVICE);
#else
				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}
2787 
/*
 * Debug helper: dump the bus-master Tx ring state to the log.  Stalls the
 * download engine while reading the descriptors, then unstalls it only if
 * it was running when we entered (so a pre-existing stall is preserved).
 */
static void
dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
	struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;

		if (vp->full_bus_master_tx) {
			int i;
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */

			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
					vp->full_bus_master_tx,
					vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
					vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err("  Transmit list %8.8x vs. %p.\n",
				   ioread32(ioaddr + DownListPtr),
				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			/* Freeze the download engine so the ring is stable. */
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;

#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
#endif
				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
					   i, &vp->tx_ring[i], length,
					   le32_to_cpu(vp->tx_ring[i].status));
			}
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
		}
	}
}
2824 
/*
 * ndo_get_stats hook: fold the current hardware counters into dev->stats
 * (under vp->lock, so a StatsUpdate interrupt can't race us) and return
 * the accumulated statistics.  Skips the hardware if the device is gone.
 */
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;

	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
		spin_lock_irqsave (&vp->lock, flags);
		update_stats(ioaddr, dev);
		spin_unlock_irqrestore (&vp->lock, flags);
	}
	return &dev->stats;
}
2838 
/*  Update statistics.
	Unlike with the EL3 we need not worry about interrupts changing
	the window setting from underneath us, but we must still guard
	against a race condition with a StatsUpdate interrupt updating the
	table.  This is done by checking that the ASM (!) code generated uses
	atomic updates with '+='.

	Reads the clear-on-read counters in register windows 6 and 4 and
	accumulates them into dev->stats and vp->xstats.  Caller must hold
	vp->lock (see vortex_get_stats / vortex_get_ethtool_stats).
	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	dev->stats.tx_carrier_errors		+= window_read8(vp, 6, 0);
	dev->stats.tx_heartbeat_errors		+= window_read8(vp, 6, 1);
	dev->stats.tx_window_errors		+= window_read8(vp, 6, 4);
	dev->stats.rx_fifo_errors		+= window_read8(vp, 6, 5);
	dev->stats.tx_packets			+= window_read8(vp, 6, 6);
	/* Bits 5:4 of register 9 extend the Tx-packets counter. */
	dev->stats.tx_packets			+= (window_read8(vp, 6, 9) &
						    0x30) << 4;
	/* Rx packets	*/			window_read8(vp, 6, 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes 			+= window_read16(vp, 6, 10);
	dev->stats.tx_bytes 			+= window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions	+= window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions         += window_read8(vp, 6, 3);
	vp->xstats.tx_deferred			+= window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd			+= window_read8(vp, 4, 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Window 4 reg 13 holds the upper bits of the byte counters:
		 * low nibble extends rx_bytes, high nibble extends tx_bytes. */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}
2881 
/* ethtool nway_reset hook: restart MII autonegotiation. */
static int vortex_nway_reset(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	return mii_nway_restart(&vp->mii);
}
2888 
/* ethtool get_link_ksettings hook: report link settings via the MII layer. */
static int vortex_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&vp->mii, cmd);

	return 0;
}
2898 
/* ethtool set_link_ksettings hook: apply link settings via the MII layer. */
static int vortex_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);

	return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
}
2906 
/* ethtool get_msglevel hook: expose the driver-wide debug level. */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}
2911 
/* ethtool set_msglevel hook: set the driver-wide debug level (affects all
 * cards driven by this module, not just @dev). */
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}
2916 
2917 static int vortex_get_sset_count(struct net_device *dev, int sset)
2918 {
2919 	switch (sset) {
2920 	case ETH_SS_STATS:
2921 		return VORTEX_NUM_STATS;
2922 	default:
2923 		return -EOPNOTSUPP;
2924 	}
2925 }
2926 
2927 static void vortex_get_ethtool_stats(struct net_device *dev,
2928 	struct ethtool_stats *stats, u64 *data)
2929 {
2930 	struct vortex_private *vp = netdev_priv(dev);
2931 	void __iomem *ioaddr = vp->ioaddr;
2932 	unsigned long flags;
2933 
2934 	spin_lock_irqsave(&vp->lock, flags);
2935 	update_stats(ioaddr, dev);
2936 	spin_unlock_irqrestore(&vp->lock, flags);
2937 
2938 	data[0] = vp->xstats.tx_deferred;
2939 	data[1] = vp->xstats.tx_max_collisions;
2940 	data[2] = vp->xstats.tx_multiple_collisions;
2941 	data[3] = vp->xstats.tx_single_collisions;
2942 	data[4] = vp->xstats.rx_bad_ssd;
2943 }
2944 
2945 
2946 static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2947 {
2948 	switch (stringset) {
2949 	case ETH_SS_STATS:
2950 		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
2951 		break;
2952 	default:
2953 		WARN_ON(1);
2954 		break;
2955 	}
2956 }
2957 
2958 static void vortex_get_drvinfo(struct net_device *dev,
2959 					struct ethtool_drvinfo *info)
2960 {
2961 	struct vortex_private *vp = netdev_priv(dev);
2962 
2963 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2964 	if (VORTEX_PCI(vp)) {
2965 		strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
2966 			sizeof(info->bus_info));
2967 	} else {
2968 		if (VORTEX_EISA(vp))
2969 			strlcpy(info->bus_info, dev_name(vp->gendev),
2970 				sizeof(info->bus_info));
2971 		else
2972 			snprintf(info->bus_info, sizeof(info->bus_info),
2973 				"EISA 0x%lx %d", dev->base_addr, dev->irq);
2974 	}
2975 }
2976 
2977 static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2978 {
2979 	struct vortex_private *vp = netdev_priv(dev);
2980 
2981 	if (!VORTEX_PCI(vp))
2982 		return;
2983 
2984 	wol->supported = WAKE_MAGIC;
2985 
2986 	wol->wolopts = 0;
2987 	if (vp->enable_wol)
2988 		wol->wolopts |= WAKE_MAGIC;
2989 }
2990 
2991 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2992 {
2993 	struct vortex_private *vp = netdev_priv(dev);
2994 
2995 	if (!VORTEX_PCI(vp))
2996 		return -EOPNOTSUPP;
2997 
2998 	if (wol->wolopts & ~WAKE_MAGIC)
2999 		return -EINVAL;
3000 
3001 	if (wol->wolopts & WAKE_MAGIC)
3002 		vp->enable_wol = 1;
3003 	else
3004 		vp->enable_wol = 0;
3005 	acpi_set_WOL(dev);
3006 
3007 	return 0;
3008 }
3009 
/* ethtool operations table wired up at netdev registration time. */
static const struct ethtool_ops vortex_ethtool_ops = {
	.get_drvinfo		= vortex_get_drvinfo,
	.get_strings            = vortex_get_strings,
	.get_msglevel           = vortex_get_msglevel,
	.set_msglevel           = vortex_set_msglevel,
	.get_ethtool_stats      = vortex_get_ethtool_stats,
	.get_sset_count		= vortex_get_sset_count,
	.get_link               = ethtool_op_get_link,
	.nway_reset             = vortex_nway_reset,
	.get_wol                = vortex_get_wol,
	.set_wol                = vortex_set_wol,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings     = vortex_get_link_ksettings,
	.set_link_ksettings     = vortex_set_link_ksettings,
};
3025 
3026 #ifdef CONFIG_PCI
3027 /*
3028  *	Must power the device up to do MDIO operations
3029  */
3030 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3031 {
3032 	int err;
3033 	struct vortex_private *vp = netdev_priv(dev);
3034 	pci_power_t state = 0;
3035 
3036 	if(VORTEX_PCI(vp))
3037 		state = VORTEX_PCI(vp)->current_state;
3038 
3039 	/* The kernel core really should have pci_get_power_state() */
3040 
3041 	if(state != 0)
3042 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
3043 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
3044 	if(state != 0)
3045 		pci_set_power_state(VORTEX_PCI(vp), state);
3046 
3047 	return err;
3048 }
3049 #endif
3050 
3051 
3052 /* Pre-Cyclone chips have no documented multicast filter, so the only
3053    multicast setting is to receive all multicast frames.  At least
3054    the chip has a very clean way to set the mode, unlike many others. */
3055 static void set_rx_mode(struct net_device *dev)
3056 {
3057 	struct vortex_private *vp = netdev_priv(dev);
3058 	void __iomem *ioaddr = vp->ioaddr;
3059 	int new_mode;
3060 
3061 	if (dev->flags & IFF_PROMISC) {
3062 		if (vortex_debug > 3)
3063 			pr_notice("%s: Setting promiscuous mode.\n", dev->name);
3064 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
3065 	} else	if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
3066 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
3067 	} else
3068 		new_mode = SetRxFilter | RxStation | RxBroadcast;
3069 
3070 	iowrite16(new_mode, ioaddr + EL3_CMD);
3071 }
3072 
3073 #if IS_ENABLED(CONFIG_VLAN_8021Q)
3074 /* Setup the card so that it can receive frames with an 802.1q VLAN tag.
3075    Note that this must be done after each RxReset due to some backwards
3076    compatibility logic in the Cyclone and Tornado ASICs */
3077 
3078 /* The Ethernet Type used for 802.1q tagged frames */
3079 #define VLAN_ETHER_TYPE 0x8100
3080 
/*
 * Enable or disable reception of 802.1q-tagged frames.  Cyclone/Tornado
 * chips understand VLAN tags natively, so we just widen the max packet
 * size and program the VLAN EtherType; older chips only get a "large
 * frames" bit in the MAC control register.
 */
static void set_8021q_mode(struct net_device *dev, int enable)
{
	struct vortex_private *vp = netdev_priv(dev);
	int mac_ctrl;

	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
		/* cyclone and tornado chipsets can recognize 802.1q
		 * tagged frames and treat them correctly */

		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */
		if (enable)
			max_pkt_size += 4;	/* 802.1Q VLAN tag */

		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);

		/* set VlanEtherType to let the hardware checksumming
		   treat tagged frames correctly */
		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
	} else {
		/* on older cards we have to enable large frames */

		vp->large_frames = dev->mtu > 1500 || enable;

		/* Read-modify-write bit 6 (large frame enable) only. */
		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
		if (vp->large_frames)
			mac_ctrl |= 0x40;
		else
			mac_ctrl &= ~0x40;
		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
	}
}
3112 #else
3113 
/* VLAN support not compiled in: nothing to configure. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
}
3117 
3118 
3119 #endif
3120 
3121 /* MII transceiver control section.
3122    Read and write the MII registers using software-generated serial
3123    MDIO protocol.  See the MII specifications or DP83840A data sheet
3124    for details. */
3125 
3126 /* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
3127    met by back-to-back PCI I/O cycles, but we insert a delay to avoid
3128    "overclocking" issues. */
3129 static void mdio_delay(struct vortex_private *vp)
3130 {
3131 	window_read32(vp, 4, Wn4_PhysicalMgmt);
3132 }
3133 
3134 #define MDIO_SHIFT_CLK	0x01
3135 #define MDIO_DIR_WRITE	0x04
3136 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
3137 #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
3138 #define MDIO_DATA_READ	0x02
3139 #define MDIO_ENB_IN		0x00
3140 
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(struct vortex_private *vp, int bits)
{
	/* Establish sync by sending at least 32 logic ones. */
	while (-- bits >= 0) {
		/* Present a '1' with clock low, then raise the clock. */
		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
}
3154 
/*
 * Read a 16-bit MII register by bit-banging the MDIO lines through the
 * window-4 PhysicalMgmt register.  Serialized against mdio_write() by
 * vp->mii_lock.  Returns the register value, or 0xffff if the PHY did
 * not drive the turnaround bit low (no device responding).
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	struct vortex_private *vp = netdev_priv(dev);
	/* Frame: start(01) + read op(10) + 5-bit PHY + 5-bit register. */
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		retval = (retval << 1) |
			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			  MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* Bit 17 set means the PHY left the bus floating: report 0xffff. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
3192 
/*
 * Write @value to a 16-bit MII register by bit-banging the MDIO lines.
 * Serialized against mdio_read() by vp->mii_lock.
 */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct vortex_private *vp = netdev_priv(dev);
	/* Frame: start + write op + PHY addr + reg addr + turnaround + data. */
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);
}
3224 
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state.
 * If WOL is enabled the Rx filter is reprogrammed so the sleeping card
 * still sees the magic packet; if the PCI core cannot arm wake for D3hot
 * the enable_wol flag is cleared and the card is left powered. */
static void acpi_set_WOL(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	device_set_wakeup_enable(vp->gendev, vp->enable_wol);

	if (vp->enable_wol) {
		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
		window_write16(vp, 2, 7, 0x0c);
		/* The RxFilter must accept the WOL frames. */
		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
		iowrite16(RxEnable, ioaddr + EL3_CMD);

		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));

			vp->enable_wol = 0;
			return;
		}

		/* Don't touch the power state if already at D3 or deeper. */
		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
			return;

		/* Change the power state to D3; RxEnable doesn't take effect. */
		pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
	}
}
3255 
3256 
/*
 * PCI remove callback: unmap the CardBus function window, unregister the
 * netdev, restore/disable the PCI device, reset the chip, and release all
 * MMIO, DMA ring memory, and I/O regions.
 */
static void vortex_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct vortex_private *vp;

	if (!dev) {
		/* The Compaq probe path registers without drvdata; reaching
		 * here with it would be a driver bug. */
		pr_err("vortex_remove_one called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);

	if (vp->cb_fn_base)
		pci_iounmap(pdev, vp->cb_fn_base);

	unregister_netdev(dev);

	pci_set_power_state(pdev, PCI_D0);	/* Go active */
	if (vp->pm_state_valid)
		pci_restore_state(pdev);
	pci_disable_device(pdev);

	/* Should really use issue_and_wait() here */
	iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
	     vp->ioaddr + EL3_CMD);

	pci_iounmap(pdev, vp->ioaddr);

	/* Rx and Tx descriptor rings share one coherent allocation. */
	pci_free_consistent(pdev,
						sizeof(struct boom_rx_desc) * RX_RING_SIZE
							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
						vp->rx_ring,
						vp->rx_ring_dma);

	pci_release_regions(pdev);

	free_netdev(dev);
}
3295 
3296 
/* PCI driver glue registered from vortex_init(). */
static struct pci_driver vortex_driver = {
	.name		= "3c59x",
	.probe		= vortex_init_one,
	.remove		= vortex_remove_one,
	.id_table	= vortex_pci_tbl,
	.driver.pm	= VORTEX_PM_OPS,
};
3304 
3305 
/* Set at module init when the corresponding bus registration succeeded;
 * consulted at module exit to know what to tear down. */
static int vortex_have_pci;
static int vortex_have_eisa;
3308 
3309 
3310 static int __init vortex_init(void)
3311 {
3312 	int pci_rc, eisa_rc;
3313 
3314 	pci_rc = pci_register_driver(&vortex_driver);
3315 	eisa_rc = vortex_eisa_init();
3316 
3317 	if (pci_rc == 0)
3318 		vortex_have_pci = 1;
3319 	if (eisa_rc > 0)
3320 		vortex_have_eisa = 1;
3321 
3322 	return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
3323 }
3324 
3325 
/*
 * Tear down EISA devices and the directly-probed Compaq device: reset the
 * chip, release its I/O region, and free the netdev.
 */
static void __exit vortex_eisa_cleanup(void)
{
	void __iomem *ioaddr;

#ifdef CONFIG_EISA
	/* Take care of the EISA devices */
	eisa_driver_unregister(&vortex_eisa_driver);
#endif

	if (compaq_net_device) {
		/* Re-map the port window just long enough to reset the chip. */
		ioaddr = ioport_map(compaq_net_device->base_addr,
		                    VORTEX_TOTAL_SIZE);

		unregister_netdev(compaq_net_device);
		iowrite16(TotalReset, ioaddr + EL3_CMD);
		release_region(compaq_net_device->base_addr,
		               VORTEX_TOTAL_SIZE);

		free_netdev(compaq_net_device);
	}
}
3347 
3348 
/* Module exit point: unregister only the buses that attached at init. */
static void __exit vortex_cleanup(void)
{
	if (vortex_have_pci)
		pci_unregister_driver(&vortex_driver);
	if (vortex_have_eisa)
		vortex_eisa_cleanup();
}
3356 
3357 
3358 module_init(vortex_init);
3359 module_exit(vortex_cleanup);
3360