1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.0"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74    Making the Tx ring too large decreases the effectiveness of channel
75    bonding and packet priority.
76    There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE	16
78 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79 #define RX_RING_SIZE	64
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT	(2*HZ)
85 
86 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h>	/* Processor type for cache alignment. */
110 #include <asm/io.h>
111 #include <asm/irq.h>
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
114 
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 
119 /* This driver was written to use PCI memory space. Some early versions
120    of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
122 #define USE_MMIO
123 #else
124 #endif
125 
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
129 
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
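/*
 * For example (values are illustrative only), the parameters above can be
 * given at module load time:
 *
 *	modprobe via-rhine debug=0x00ff rx_copybreak=200 avoid_D3=1
 *
 * or, when the driver is built in, on the kernel command line as
 * via-rhine.avoid_D3=1 (see the work-around comment near the top of the file).
 */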
136 
137 #define MCAM_SIZE	32
138 #define VCAM_SIZE	32
139 
140 /*
141 		Theory of Operation
142 
143 I. Board Compatibility
144 
145 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
146 controller and its Rhine-II/III successors.
147 
148 II. Board-specific settings
149 
150 Boards with this chip are functional only in a bus-master PCI slot.
151 
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are
154 correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
157 
158 III. Driver operation
159 
160 IIIa. Ring buffers
161 
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
165 
166 IIIb/c. Transmit/Receive Structure
167 
168 This driver attempts to use a zero-copy receive and transmit scheme.
169 
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
172 
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
180 
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
189 
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
194 
195 IIId. Synchronization
196 
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
201 
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
206 
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
210 the Rx ring are available the transmit queue is woken up if it was stopped.
211 
212 IV. Notes
213 
214 IVb. References
215 
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221 
222 
223 IVc. Errata
224 
225 The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
230 
231 */
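/*
 * A minimal sketch of the copybreak decision described in IIIb/c above; the
 * real code lives in rhine_rx() below and the names here are illustrative
 * only:
 *
 *	if (pkt_len < rx_copybreak) {
 *		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *		if (skb)
 *			skb_copy_to_linear_data(skb, rx_buf, pkt_len);
 *		// the original full-sized Rx buffer stays in the ring
 *	} else {
 *		// hand the full-sized Rx skbuff to the stack, refill later
 *	}
 */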
232 
233 
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235    of the drivers, and will likely be provided by some future kernel.
236 Note the matching code -- the first table entry matches all 56** cards but
237 the second matches only the 1234 card.
238 */
239 
240 enum rhine_revs {
241 	VT86C100A	= 0x00,
242 	VTunknown0	= 0x20,
243 	VT6102		= 0x40,
244 	VT8231		= 0x50,	/* Integrated MAC */
245 	VT8233		= 0x60,	/* Integrated MAC */
246 	VT8235		= 0x74,	/* Integrated MAC */
247 	VT8237		= 0x78,	/* Integrated MAC */
248 	VTunknown1	= 0x7C,
249 	VT6105		= 0x80,
250 	VT6105_B0	= 0x83,
251 	VT6105L		= 0x8A,
252 	VT6107		= 0x8C,
253 	VTunknown2	= 0x8E,
254 	VT6105M		= 0x90,	/* Management adapter */
255 };
256 
257 enum rhine_quirks {
258 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
259 	rqForceReset	= 0x0002,
260 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
261 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
262 	rqRhineI	= 0x0100,	/* See comment below */
263 };
264 /*
265  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266  * MMIO as well as for the collision counter and the Tx FIFO underflow
267  * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
268  */
269 
270 /* Beware of PCI posted writes */
271 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
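/*
 * Reading a harmless register (StationAddr) forces posted PCI writes out of
 * the host bridge, because PCI reads cannot pass posted writes.
 */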
272 
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
275 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
276 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
277 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
278 	{ }	/* terminate list */
279 };
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281 
282 
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 	ChipCmd1=0x09, TQWake=0x0A,
287 	IntrStatus=0x0C, IntrEnable=0x0E,
288 	MulticastFilter0=0x10, MulticastFilter1=0x14,
289 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 	StickyHW=0x83, IntrStatus2=0x84,
295 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299 };
300 
301 /* Bits in ConfigD */
302 enum backoff_bits {
303 	BackOptional=0x01, BackModify=0x02,
304 	BackCaptureEffect=0x04, BackRandom=0x08
305 };
306 
307 /* Bits in the TxConfig (TCR) register */
308 enum tcr_bits {
309 	TCR_PQEN=0x01,
310 	TCR_LB0=0x02,		/* loopback[0] */
311 	TCR_LB1=0x04,		/* loopback[1] */
312 	TCR_OFSET=0x08,
313 	TCR_RTGOPT=0x10,
314 	TCR_RTFT0=0x20,
315 	TCR_RTFT1=0x40,
316 	TCR_RTSF=0x80,
317 };
318 
319 /* Bits in the CamCon (CAMC) register */
320 enum camcon_bits {
321 	CAMC_CAMEN=0x01,
322 	CAMC_VCAMSL=0x02,
323 	CAMC_CAMWR=0x04,
324 	CAMC_CAMRD=0x08,
325 };
326 
327 /* Bits in the PCIBusConfig1 (BCR1) register */
328 enum bcr1_bits {
329 	BCR1_POT0=0x01,
330 	BCR1_POT1=0x02,
331 	BCR1_POT2=0x04,
332 	BCR1_CTFT0=0x08,
333 	BCR1_CTFT1=0x10,
334 	BCR1_CTSF=0x20,
335 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
336 	BCR1_VIDFR=0x80,	/* for VT6105 */
337 	BCR1_MED0=0x40,		/* for VT6102 */
338 	BCR1_MED1=0x80,		/* for VT6102 */
339 };
340 
341 #ifdef USE_MMIO
342 /* Registers we check to verify that MMIO and PIO accesses match. */
343 static const int mmio_verify_registers[] = {
344 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345 	0
346 };
347 #endif
348 
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
351 	IntrRxDone	= 0x0001,
352 	IntrTxDone	= 0x0002,
353 	IntrRxErr	= 0x0004,
354 	IntrTxError	= 0x0008,
355 	IntrRxEmpty	= 0x0020,
356 	IntrPCIErr	= 0x0040,
357 	IntrStatsMax	= 0x0080,
358 	IntrRxEarly	= 0x0100,
359 	IntrTxUnderrun	= 0x0210,
360 	IntrRxOverflow	= 0x0400,
361 	IntrRxDropped	= 0x0800,
362 	IntrRxNoBuf	= 0x1000,
363 	IntrTxAborted	= 0x2000,
364 	IntrLinkChange	= 0x4000,
365 	IntrRxWakeUp	= 0x8000,
366 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
367 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
368 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
369 				  IntrTxUnderrun,
370 };
371 
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373 enum wol_bits {
374 	WOLucast	= 0x10,
375 	WOLmagic	= 0x20,
376 	WOLbmcast	= 0x30,
377 	WOLlnkon	= 0x40,
378 	WOLlnkoff	= 0x80,
379 };
380 
381 /* The Rx and Tx buffer descriptors. */
382 struct rx_desc {
383 	__le32 rx_status;
384 	__le32 desc_length; /* Chain flag, Buffer/frame length */
385 	__le32 addr;
386 	__le32 next_desc;
387 };
388 struct tx_desc {
389 	__le32 tx_status;
390 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
391 	__le32 addr;
392 	__le32 next_desc;
393 };
394 
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC		0x00e08000
397 
398 enum rx_status_bits {
399 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400 };
401 
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
404 	DescOwn=0x80000000
405 };
406 
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
409 	DescTag=0x00010000
410 };
411 
412 /* Bits in ChipCmd. */
413 enum chip_cmd_bits {
414 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418 };
419 
420 struct rhine_private {
421 	/* Bit mask for configured VLAN ids */
422 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
423 
424 	/* Descriptor rings */
425 	struct rx_desc *rx_ring;
426 	struct tx_desc *tx_ring;
427 	dma_addr_t rx_ring_dma;
428 	dma_addr_t tx_ring_dma;
429 
430 	/* The addresses of receive-in-place skbuffs. */
431 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
432 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
433 
434 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
435 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
436 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
437 
438 	/* Tx bounce buffers (Rhine-I only) */
439 	unsigned char *tx_buf[TX_RING_SIZE];
440 	unsigned char *tx_bufs;
441 	dma_addr_t tx_bufs_dma;
442 
443 	struct pci_dev *pdev;
444 	long pioaddr;
445 	struct net_device *dev;
446 	struct napi_struct napi;
447 	spinlock_t lock;
448 	struct mutex task_lock;
449 	bool task_enable;
450 	struct work_struct slow_event_task;
451 	struct work_struct reset_task;
452 
453 	u32 msg_enable;
454 
455 	/* Frequently used values: keep some adjacent for cache effect. */
456 	u32 quirks;
457 	struct rx_desc *rx_head_desc;
458 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
459 	unsigned int cur_tx, dirty_tx;
460 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
461 	u8 wolopts;
462 
463 	u8 tx_thresh, rx_thresh;
464 
465 	struct mii_if_info mii_if;
466 	void __iomem *base;
467 };
468 
469 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
470 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
471 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
472 
473 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
474 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
475 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
476 
477 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
478 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
480 
481 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
482 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
483 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
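/*
 * The *_REG_BITS_SET(x, m, p) helpers above do a read-modify-write: the bits
 * in mask m are cleared first, then the bits in x are ORed back in. For
 * example, rhine_kick_tx_threshold() below updates the Tx FIFO threshold via
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * i.e. clear TxConfig bit 0x80, then OR in the new threshold value.
 */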
484 
485 
486 static int  mdio_read(struct net_device *dev, int phy_id, int location);
487 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488 static int  rhine_open(struct net_device *dev);
489 static void rhine_reset_task(struct work_struct *work);
490 static void rhine_slow_event_task(struct work_struct *work);
491 static void rhine_tx_timeout(struct net_device *dev);
492 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
493 				  struct net_device *dev);
494 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
495 static void rhine_tx(struct net_device *dev);
496 static int rhine_rx(struct net_device *dev, int limit);
497 static void rhine_set_rx_mode(struct net_device *dev);
498 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
499 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500 static const struct ethtool_ops netdev_ethtool_ops;
501 static int  rhine_close(struct net_device *dev);
502 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
503 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
504 static void rhine_restart_tx(struct net_device *dev);
505 
506 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
507 {
508 	void __iomem *ioaddr = rp->base;
509 	int i;
510 
511 	for (i = 0; i < 1024; i++) {
512 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
513 
514 		if (low ^ has_mask_bits)
515 			break;
516 		udelay(10);
517 	}
518 	if (i > 64) {
519 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
520 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
521 	}
522 }
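/*
 * The XOR above lets one helper serve both polarities: with low == false the
 * loop exits as soon as the masked bits read non-zero, with low == true it
 * exits once they read zero. The debug message only fires when the wait took
 * more than 64 iterations (640 us).
 */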
523 
524 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
525 {
526 	rhine_wait_bit(rp, reg, mask, false);
527 }
528 
529 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
530 {
531 	rhine_wait_bit(rp, reg, mask, true);
532 }
533 
534 static u32 rhine_get_events(struct rhine_private *rp)
535 {
536 	void __iomem *ioaddr = rp->base;
537 	u32 intr_status;
538 
539 	intr_status = ioread16(ioaddr + IntrStatus);
540 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
541 	if (rp->quirks & rqStatusWBRace)
542 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
543 	return intr_status;
544 }
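/*
 * Shifting IntrStatus2 left by 16 folds its bits into the upper half of the
 * combined event word, e.g. bit 0x08 of IntrStatus2 shows up here as
 * IntrTxDescRace (0x080000).
 */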
545 
546 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
547 {
548 	void __iomem *ioaddr = rp->base;
549 
550 	if (rp->quirks & rqStatusWBRace)
551 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
552 	iowrite16(mask, ioaddr + IntrStatus);
553 	mmiowb();
554 }
555 
556 /*
557  * Get power related registers into sane state.
558  * Notify user about past WOL event.
559  */
560 static void rhine_power_init(struct net_device *dev)
561 {
562 	struct rhine_private *rp = netdev_priv(dev);
563 	void __iomem *ioaddr = rp->base;
564 	u16 wolstat;
565 
566 	if (rp->quirks & rqWOL) {
567 		/* Make sure chip is in power state D0 */
568 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
569 
570 		/* Disable "force PME-enable" */
571 		iowrite8(0x80, ioaddr + WOLcgClr);
572 
573 		/* Clear power-event config bits (WOL) */
574 		iowrite8(0xFF, ioaddr + WOLcrClr);
575 		/* More recent cards can manage two additional patterns */
576 		if (rp->quirks & rq6patterns)
577 			iowrite8(0x03, ioaddr + WOLcrClr1);
578 
579 		/* Save power-event status bits */
580 		wolstat = ioread8(ioaddr + PwrcsrSet);
581 		if (rp->quirks & rq6patterns)
582 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
583 
584 		/* Clear power-event status bits */
585 		iowrite8(0xFF, ioaddr + PwrcsrClr);
586 		if (rp->quirks & rq6patterns)
587 			iowrite8(0x03, ioaddr + PwrcsrClr1);
588 
589 		if (wolstat) {
590 			char *reason;
591 			switch (wolstat) {
592 			case WOLmagic:
593 				reason = "Magic packet";
594 				break;
595 			case WOLlnkon:
596 				reason = "Link went up";
597 				break;
598 			case WOLlnkoff:
599 				reason = "Link went down";
600 				break;
601 			case WOLucast:
602 				reason = "Unicast packet";
603 				break;
604 			case WOLbmcast:
605 				reason = "Multicast/broadcast packet";
606 				break;
607 			default:
608 				reason = "Unknown";
609 			}
610 			netdev_info(dev, "Woke system up. Reason: %s\n",
611 				    reason);
612 		}
613 	}
614 }
615 
616 static void rhine_chip_reset(struct net_device *dev)
617 {
618 	struct rhine_private *rp = netdev_priv(dev);
619 	void __iomem *ioaddr = rp->base;
620 	u8 cmd1;
621 
622 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
623 	IOSYNC;
624 
625 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
626 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
627 
628 		/* Force reset */
629 		if (rp->quirks & rqForceReset)
630 			iowrite8(0x40, ioaddr + MiscCmd);
631 
632 		/* Reset can take somewhat longer (rare) */
633 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
634 	}
635 
636 	cmd1 = ioread8(ioaddr + ChipCmd1);
637 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
638 		   "failed" : "succeeded");
639 }
640 
641 #ifdef USE_MMIO
642 static void enable_mmio(long pioaddr, u32 quirks)
643 {
644 	int n;
645 	if (quirks & rqRhineI) {
646 		/* More recent docs say that this bit is reserved ... */
647 		n = inb(pioaddr + ConfigA) | 0x20;
648 		outb(n, pioaddr + ConfigA);
649 	} else {
650 		n = inb(pioaddr + ConfigD) | 0x80;
651 		outb(n, pioaddr + ConfigD);
652 	}
653 }
654 #endif
655 
656 /*
657  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
658  * (plus 0x6C for Rhine-I/II)
659  */
660 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
661 {
662 	struct rhine_private *rp = netdev_priv(dev);
663 	void __iomem *ioaddr = rp->base;
664 	int i;
665 
666 	outb(0x20, pioaddr + MACRegEEcsr);
667 	for (i = 0; i < 1024; i++) {
668 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
669 			break;
670 	}
671 	if (i > 512)
672 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
673 
674 #ifdef USE_MMIO
675 	/*
676 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
677 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
678 	 * it is not known if that still works with the "win98-reboot" problem.
679 	 */
680 	enable_mmio(pioaddr, rp->quirks);
681 #endif
682 
683 	/* Turn off EEPROM-controlled wake-up (magic packet) */
684 	if (rp->quirks & rqWOL)
685 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
686 
687 }
688 
689 #ifdef CONFIG_NET_POLL_CONTROLLER
690 static void rhine_poll(struct net_device *dev)
691 {
692 	struct rhine_private *rp = netdev_priv(dev);
693 	const int irq = rp->pdev->irq;
694 
695 	disable_irq(irq);
696 	rhine_interrupt(irq, dev);
697 	enable_irq(irq);
698 }
699 #endif
700 
701 static void rhine_kick_tx_threshold(struct rhine_private *rp)
702 {
703 	if (rp->tx_thresh < 0xe0) {
704 		void __iomem *ioaddr = rp->base;
705 
706 		rp->tx_thresh += 0x20;
707 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
708 	}
709 }
710 
711 static void rhine_tx_err(struct rhine_private *rp, u32 status)
712 {
713 	struct net_device *dev = rp->dev;
714 
715 	if (status & IntrTxAborted) {
716 		netif_info(rp, tx_err, dev,
717 			   "Abort %08x, frame dropped\n", status);
718 	}
719 
720 	if (status & IntrTxUnderrun) {
721 		rhine_kick_tx_threshold(rp);
722 		netif_info(rp, tx_err, dev, "Transmitter underrun, "
723 			   "Tx threshold now %02x\n", rp->tx_thresh);
724 	}
725 
726 	if (status & IntrTxDescRace)
727 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
728 
729 	if ((status & IntrTxError) &&
730 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
731 		rhine_kick_tx_threshold(rp);
732 		netif_info(rp, tx_err, dev, "Unspecified error. "
733 			   "Tx threshold now %02x\n", rp->tx_thresh);
734 	}
735 
736 	rhine_restart_tx(dev);
737 }
738 
739 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
740 {
741 	void __iomem *ioaddr = rp->base;
742 	struct net_device_stats *stats = &rp->dev->stats;
743 
744 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
745 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
746 
747 	/*
748 	 * Clears the "tally counters" for CRC errors and missed frames(?).
749 	 * It has been reported that some chips need a write of 0 to clear
750 	 * these, for others the counters are set to 1 when written to and
751 	 * instead cleared when read. So we clear them both ways ...
752 	 */
753 	iowrite32(0, ioaddr + RxMissed);
754 	ioread16(ioaddr + RxCRCErrs);
755 	ioread16(ioaddr + RxMissed);
756 }
757 
758 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
759 				 IntrRxErr | \
760 				 IntrRxEmpty | \
761 				 IntrRxOverflow	| \
762 				 IntrRxDropped | \
763 				 IntrRxNoBuf | \
764 				 IntrRxWakeUp)
765 
766 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
767 				 IntrTxAborted | \
768 				 IntrTxUnderrun | \
769 				 IntrTxDescRace)
770 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
771 
772 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
773 				 RHINE_EVENT_NAPI_TX | \
774 				 IntrStatsMax)
775 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
776 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
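/*
 * Interrupt dispatch in short: rhine_interrupt() masks the chip and schedules
 * NAPI for any RHINE_EVENT bit; rhine_napipoll() handles the RHINE_EVENT_NAPI
 * bits itself and defers the RHINE_EVENT_SLOW bits (PCI error, link change)
 * to slow_event_task, leaving only the fast interrupts enabled until that
 * task has run.
 */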
777 
778 static int rhine_napipoll(struct napi_struct *napi, int budget)
779 {
780 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
781 	struct net_device *dev = rp->dev;
782 	void __iomem *ioaddr = rp->base;
783 	u16 enable_mask = RHINE_EVENT & 0xffff;
784 	int work_done = 0;
785 	u32 status;
786 
787 	status = rhine_get_events(rp);
788 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
789 
790 	if (status & RHINE_EVENT_NAPI_RX)
791 		work_done += rhine_rx(dev, budget);
792 
793 	if (status & RHINE_EVENT_NAPI_TX) {
794 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
795 			/* Avoid scavenging before Tx engine turned off */
796 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
797 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
798 				netif_warn(rp, tx_err, dev, "Tx still on\n");
799 		}
800 
801 		rhine_tx(dev);
802 
803 		if (status & RHINE_EVENT_NAPI_TX_ERR)
804 			rhine_tx_err(rp, status);
805 	}
806 
807 	if (status & IntrStatsMax) {
808 		spin_lock(&rp->lock);
809 		rhine_update_rx_crc_and_missed_errord(rp);
810 		spin_unlock(&rp->lock);
811 	}
812 
813 	if (status & RHINE_EVENT_SLOW) {
814 		enable_mask &= ~RHINE_EVENT_SLOW;
815 		schedule_work(&rp->slow_event_task);
816 	}
817 
818 	if (work_done < budget) {
819 		napi_complete(napi);
820 		iowrite16(enable_mask, ioaddr + IntrEnable);
821 		mmiowb();
822 	}
823 	return work_done;
824 }
825 
826 static void rhine_hw_init(struct net_device *dev, long pioaddr)
827 {
828 	struct rhine_private *rp = netdev_priv(dev);
829 
830 	/* Reset the chip to erase previous misconfiguration. */
831 	rhine_chip_reset(dev);
832 
833 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
834 	if (rp->quirks & rqRhineI)
835 		msleep(5);
836 
837 	/* Reload EEPROM controlled bytes cleared by soft reset */
838 	rhine_reload_eeprom(pioaddr, dev);
839 }
840 
841 static const struct net_device_ops rhine_netdev_ops = {
842 	.ndo_open		 = rhine_open,
843 	.ndo_stop		 = rhine_close,
844 	.ndo_start_xmit		 = rhine_start_tx,
845 	.ndo_get_stats		 = rhine_get_stats,
846 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
847 	.ndo_change_mtu		 = eth_change_mtu,
848 	.ndo_validate_addr	 = eth_validate_addr,
849 	.ndo_set_mac_address 	 = eth_mac_addr,
850 	.ndo_do_ioctl		 = netdev_ioctl,
851 	.ndo_tx_timeout 	 = rhine_tx_timeout,
852 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
853 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
854 #ifdef CONFIG_NET_POLL_CONTROLLER
855 	.ndo_poll_controller	 = rhine_poll,
856 #endif
857 };
858 
859 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
860 {
861 	struct net_device *dev;
862 	struct rhine_private *rp;
863 	int i, rc;
864 	u32 quirks;
865 	long pioaddr;
866 	long memaddr;
867 	void __iomem *ioaddr;
868 	int io_size, phy_id;
869 	const char *name;
870 #ifdef USE_MMIO
871 	int bar = 1;
872 #else
873 	int bar = 0;
874 #endif
875 
876 /* when built into the kernel, we only print version if device is found */
877 #ifndef MODULE
878 	pr_info_once("%s\n", version);
879 #endif
880 
881 	io_size = 256;
882 	phy_id = 0;
883 	quirks = 0;
884 	name = "Rhine";
885 	if (pdev->revision < VTunknown0) {
886 		quirks = rqRhineI;
887 		io_size = 128;
888 	}
889 	else if (pdev->revision >= VT6102) {
890 		quirks = rqWOL | rqForceReset;
891 		if (pdev->revision < VT6105) {
892 			name = "Rhine II";
893 			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
894 		}
895 		else {
896 			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
897 			if (pdev->revision >= VT6105_B0)
898 				quirks |= rq6patterns;
899 			if (pdev->revision < VT6105M)
900 				name = "Rhine III";
901 			else
902 				name = "Rhine III (Management Adapter)";
903 		}
904 	}
905 
906 	rc = pci_enable_device(pdev);
907 	if (rc)
908 		goto err_out;
909 
910 	/* this should always be supported */
911 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
912 	if (rc) {
913 		dev_err(&pdev->dev,
914 			"32-bit PCI DMA addresses not supported by the card!?\n");
915 		goto err_out;
916 	}
917 
918 	/* sanity check */
919 	if ((pci_resource_len(pdev, 0) < io_size) ||
920 	    (pci_resource_len(pdev, 1) < io_size)) {
921 		rc = -EIO;
922 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
923 		goto err_out;
924 	}
925 
926 	pioaddr = pci_resource_start(pdev, 0);
927 	memaddr = pci_resource_start(pdev, 1);
928 
929 	pci_set_master(pdev);
930 
931 	dev = alloc_etherdev(sizeof(struct rhine_private));
932 	if (!dev) {
933 		rc = -ENOMEM;
934 		goto err_out;
935 	}
936 	SET_NETDEV_DEV(dev, &pdev->dev);
937 
938 	rp = netdev_priv(dev);
939 	rp->dev = dev;
940 	rp->quirks = quirks;
941 	rp->pioaddr = pioaddr;
942 	rp->pdev = pdev;
943 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
944 
945 	rc = pci_request_regions(pdev, DRV_NAME);
946 	if (rc)
947 		goto err_out_free_netdev;
948 
949 	ioaddr = pci_iomap(pdev, bar, io_size);
950 	if (!ioaddr) {
951 		rc = -EIO;
952 		dev_err(&pdev->dev,
953 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
954 			pci_name(pdev), io_size, memaddr);
955 		goto err_out_free_res;
956 	}
957 
958 #ifdef USE_MMIO
959 	enable_mmio(pioaddr, quirks);
960 
961 	/* Check that selected MMIO registers match the PIO ones */
962 	i = 0;
963 	while (mmio_verify_registers[i]) {
964 		int reg = mmio_verify_registers[i++];
965 		unsigned char a = inb(pioaddr+reg);
966 		unsigned char b = readb(ioaddr+reg);
967 		if (a != b) {
968 			rc = -EIO;
969 			dev_err(&pdev->dev,
970 				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
971 				reg, a, b);
972 			goto err_out_unmap;
973 		}
974 	}
975 #endif /* USE_MMIO */
976 
977 	rp->base = ioaddr;
978 
979 	/* Get chip registers into a sane state */
980 	rhine_power_init(dev);
981 	rhine_hw_init(dev, pioaddr);
982 
983 	for (i = 0; i < 6; i++)
984 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
985 
986 	if (!is_valid_ether_addr(dev->dev_addr)) {
987 		/* Report it and use a random ethernet address instead */
988 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
989 		eth_hw_addr_random(dev);
990 		netdev_info(dev, "Using random MAC address: %pM\n",
991 			    dev->dev_addr);
992 	}
993 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
994 
995 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
996 	if (!phy_id)
997 		phy_id = ioread8(ioaddr + 0x6C);
998 
999 	spin_lock_init(&rp->lock);
1000 	mutex_init(&rp->task_lock);
1001 	INIT_WORK(&rp->reset_task, rhine_reset_task);
1002 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1003 
1004 	rp->mii_if.dev = dev;
1005 	rp->mii_if.mdio_read = mdio_read;
1006 	rp->mii_if.mdio_write = mdio_write;
1007 	rp->mii_if.phy_id_mask = 0x1f;
1008 	rp->mii_if.reg_num_mask = 0x1f;
1009 
1010 	/* The chip-specific entries in the device structure. */
1011 	dev->netdev_ops = &rhine_netdev_ops;
1012 	dev->ethtool_ops = &netdev_ethtool_ops;
1013 	dev->watchdog_timeo = TX_TIMEOUT;
1014 
1015 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1016 
1017 	if (rp->quirks & rqRhineI)
1018 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1019 
1020 	if (pdev->revision >= VT6105M)
1021 		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1022 		NETIF_F_HW_VLAN_FILTER;
1023 
1024 	/* dev->name not defined before register_netdev()! */
1025 	rc = register_netdev(dev);
1026 	if (rc)
1027 		goto err_out_unmap;
1028 
1029 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1030 		    name,
1031 #ifdef USE_MMIO
1032 		    memaddr,
1033 #else
1034 		    (long)ioaddr,
1035 #endif
1036 		    dev->dev_addr, pdev->irq);
1037 
1038 	pci_set_drvdata(pdev, dev);
1039 
1040 	{
1041 		u16 mii_cmd;
1042 		int mii_status = mdio_read(dev, phy_id, 1);
1043 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1044 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1045 		if (mii_status != 0xffff && mii_status != 0x0000) {
1046 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1047 			netdev_info(dev,
1048 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1049 				    phy_id,
1050 				    mii_status, rp->mii_if.advertising,
1051 				    mdio_read(dev, phy_id, 5));
1052 
1053 			/* set IFF_RUNNING */
1054 			if (mii_status & BMSR_LSTATUS)
1055 				netif_carrier_on(dev);
1056 			else
1057 				netif_carrier_off(dev);
1058 
1059 		}
1060 	}
1061 	rp->mii_if.phy_id = phy_id;
1062 	if (avoid_D3)
1063 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1064 
1065 	return 0;
1066 
1067 err_out_unmap:
1068 	pci_iounmap(pdev, ioaddr);
1069 err_out_free_res:
1070 	pci_release_regions(pdev);
1071 err_out_free_netdev:
1072 	free_netdev(dev);
1073 err_out:
1074 	return rc;
1075 }
1076 
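/*
 * alloc_ring() makes a single coherent DMA allocation holding the Rx ring
 * immediately followed by the Tx ring; rx_ring/tx_ring and their _dma
 * counterparts simply point into that one block. Rhine-I additionally gets a
 * block of PKT_BUF_SZ bounce buffers because it cannot transmit from
 * unaligned or fragmented skb data (see rhine_start_tx()).
 */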
1077 static int alloc_ring(struct net_device* dev)
1078 {
1079 	struct rhine_private *rp = netdev_priv(dev);
1080 	void *ring;
1081 	dma_addr_t ring_dma;
1082 
1083 	ring = pci_alloc_consistent(rp->pdev,
1084 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1085 				    TX_RING_SIZE * sizeof(struct tx_desc),
1086 				    &ring_dma);
1087 	if (!ring) {
1088 		netdev_err(dev, "Could not allocate DMA memory\n");
1089 		return -ENOMEM;
1090 	}
1091 	if (rp->quirks & rqRhineI) {
1092 		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1093 						   PKT_BUF_SZ * TX_RING_SIZE,
1094 						   &rp->tx_bufs_dma);
1095 		if (rp->tx_bufs == NULL) {
1096 			pci_free_consistent(rp->pdev,
1097 				    RX_RING_SIZE * sizeof(struct rx_desc) +
1098 				    TX_RING_SIZE * sizeof(struct tx_desc),
1099 				    ring, ring_dma);
1100 			return -ENOMEM;
1101 		}
1102 	}
1103 
1104 	rp->rx_ring = ring;
1105 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1106 	rp->rx_ring_dma = ring_dma;
1107 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1108 
1109 	return 0;
1110 }
1111 
1112 static void free_ring(struct net_device* dev)
1113 {
1114 	struct rhine_private *rp = netdev_priv(dev);
1115 
1116 	pci_free_consistent(rp->pdev,
1117 			    RX_RING_SIZE * sizeof(struct rx_desc) +
1118 			    TX_RING_SIZE * sizeof(struct tx_desc),
1119 			    rp->rx_ring, rp->rx_ring_dma);
1120 	rp->tx_ring = NULL;
1121 
1122 	if (rp->tx_bufs)
1123 		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1124 				    rp->tx_bufs, rp->tx_bufs_dma);
1125 
1126 	rp->tx_bufs = NULL;
1127 
1128 }
1129 
1130 static void alloc_rbufs(struct net_device *dev)
1131 {
1132 	struct rhine_private *rp = netdev_priv(dev);
1133 	dma_addr_t next;
1134 	int i;
1135 
1136 	rp->dirty_rx = rp->cur_rx = 0;
1137 
1138 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1139 	rp->rx_head_desc = &rp->rx_ring[0];
1140 	next = rp->rx_ring_dma;
1141 
1142 	/* Init the ring entries */
1143 	for (i = 0; i < RX_RING_SIZE; i++) {
1144 		rp->rx_ring[i].rx_status = 0;
1145 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1146 		next += sizeof(struct rx_desc);
1147 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1148 		rp->rx_skbuff[i] = NULL;
1149 	}
1150 	/* Mark the last entry as wrapping the ring. */
1151 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1152 
1153 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1154 	for (i = 0; i < RX_RING_SIZE; i++) {
1155 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1156 		rp->rx_skbuff[i] = skb;
1157 		if (skb == NULL)
1158 			break;
1159 
1160 		rp->rx_skbuff_dma[i] =
1161 			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1162 				       PCI_DMA_FROMDEVICE);
1163 
1164 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1165 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1166 	}
1167 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1168 }
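/*
 * Note on the final subtraction: if every buffer was allocated, i equals
 * RX_RING_SIZE and dirty_rx starts at 0. After a partial allocation failure
 * the unsigned wrap-around leaves cur_rx - dirty_rx equal to the number of
 * ring slots that still lack a buffer, the usual convention in this driver
 * family.
 */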
1169 
1170 static void free_rbufs(struct net_device* dev)
1171 {
1172 	struct rhine_private *rp = netdev_priv(dev);
1173 	int i;
1174 
1175 	/* Free all the skbuffs in the Rx queue. */
1176 	for (i = 0; i < RX_RING_SIZE; i++) {
1177 		rp->rx_ring[i].rx_status = 0;
1178 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1179 		if (rp->rx_skbuff[i]) {
1180 			pci_unmap_single(rp->pdev,
1181 					 rp->rx_skbuff_dma[i],
1182 					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1183 			dev_kfree_skb(rp->rx_skbuff[i]);
1184 		}
1185 		rp->rx_skbuff[i] = NULL;
1186 	}
1187 }
1188 
1189 static void alloc_tbufs(struct net_device* dev)
1190 {
1191 	struct rhine_private *rp = netdev_priv(dev);
1192 	dma_addr_t next;
1193 	int i;
1194 
1195 	rp->dirty_tx = rp->cur_tx = 0;
1196 	next = rp->tx_ring_dma;
1197 	for (i = 0; i < TX_RING_SIZE; i++) {
1198 		rp->tx_skbuff[i] = NULL;
1199 		rp->tx_ring[i].tx_status = 0;
1200 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1201 		next += sizeof(struct tx_desc);
1202 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1203 		if (rp->quirks & rqRhineI)
1204 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1205 	}
1206 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1207 
1208 }
1209 
1210 static void free_tbufs(struct net_device* dev)
1211 {
1212 	struct rhine_private *rp = netdev_priv(dev);
1213 	int i;
1214 
1215 	for (i = 0; i < TX_RING_SIZE; i++) {
1216 		rp->tx_ring[i].tx_status = 0;
1217 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1218 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1219 		if (rp->tx_skbuff[i]) {
1220 			if (rp->tx_skbuff_dma[i]) {
1221 				pci_unmap_single(rp->pdev,
1222 						 rp->tx_skbuff_dma[i],
1223 						 rp->tx_skbuff[i]->len,
1224 						 PCI_DMA_TODEVICE);
1225 			}
1226 			dev_kfree_skb(rp->tx_skbuff[i]);
1227 		}
1228 		rp->tx_skbuff[i] = NULL;
1229 		rp->tx_buf[i] = NULL;
1230 	}
1231 }
1232 
1233 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1234 {
1235 	struct rhine_private *rp = netdev_priv(dev);
1236 	void __iomem *ioaddr = rp->base;
1237 
1238 	mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1239 
1240 	if (rp->mii_if.full_duplex)
1241 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1242 		   ioaddr + ChipCmd1);
1243 	else
1244 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1245 		   ioaddr + ChipCmd1);
1246 
1247 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1248 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1249 }
1250 
1251 /* Called after status of force_media possibly changed */
1252 static void rhine_set_carrier(struct mii_if_info *mii)
1253 {
1254 	struct net_device *dev = mii->dev;
1255 	struct rhine_private *rp = netdev_priv(dev);
1256 
1257 	if (mii->force_media) {
1258 		/* autoneg is off: Link is always assumed to be up */
1259 		if (!netif_carrier_ok(dev))
1260 			netif_carrier_on(dev);
1261 	} else	/* Let the MII library update carrier status */
1262 		rhine_check_media(dev, 0);
1263 
1264 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1265 		   mii->force_media, netif_carrier_ok(dev));
1266 }
1267 
1268 /**
1269  * rhine_set_cam - set CAM multicast filters
1270  * @ioaddr: register block of this Rhine
1271  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1272  * @addr: multicast address (6 bytes)
1273  *
1274  * Load addresses into multicast filters.
1275  */
1276 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1277 {
1278 	int i;
1279 
1280 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1281 	wmb();
1282 
1283 	/* Paranoid -- idx out of range should never happen */
1284 	idx &= (MCAM_SIZE - 1);
1285 
1286 	iowrite8((u8) idx, ioaddr + CamAddr);
1287 
1288 	for (i = 0; i < 6; i++, addr++)
1289 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1290 	udelay(10);
1291 	wmb();
1292 
1293 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1294 	udelay(10);
1295 
1296 	iowrite8(0, ioaddr + CamCon);
1297 }
1298 
1299 /**
1300  * rhine_set_vlan_cam - set CAM VLAN filters
1301  * @ioaddr: register block of this Rhine
1302  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1303  * @addr: VLAN ID (2 bytes)
1304  *
1305  * Load addresses into VLAN filters.
1306  */
1307 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1308 {
1309 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1310 	wmb();
1311 
1312 	/* Paranoid -- idx out of range should never happen */
1313 	idx &= (VCAM_SIZE - 1);
1314 
1315 	iowrite8((u8) idx, ioaddr + CamAddr);
1316 
1317 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1318 	udelay(10);
1319 	wmb();
1320 
1321 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1322 	udelay(10);
1323 
1324 	iowrite8(0, ioaddr + CamCon);
1325 }
1326 
1327 /**
1328  * rhine_set_cam_mask - set multicast CAM mask
1329  * @ioaddr: register block of this Rhine
1330  * @mask: multicast CAM mask
1331  *
1332  * Mask sets multicast filters active/inactive.
1333  */
1334 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1335 {
1336 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1337 	wmb();
1338 
1339 	/* write mask */
1340 	iowrite32(mask, ioaddr + CamMask);
1341 
1342 	/* disable CAMEN */
1343 	iowrite8(0, ioaddr + CamCon);
1344 }
1345 
1346 /**
1347  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1348  * @ioaddr: register block of this Rhine
1349  * @mask: VLAN CAM mask
1350  *
1351  * Mask sets VLAN filters active/inactive.
1352  */
1353 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1354 {
1355 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1356 	wmb();
1357 
1358 	/* write mask */
1359 	iowrite32(mask, ioaddr + CamMask);
1360 
1361 	/* disable CAMEN */
1362 	iowrite8(0, ioaddr + CamCon);
1363 }
1364 
1365 /**
1366  * rhine_init_cam_filter - initialize CAM filters
1367  * @dev: network device
1368  *
1369  * Initialize (disable) hardware VLAN and multicast support on this
1370  * Rhine.
1371  */
1372 static void rhine_init_cam_filter(struct net_device *dev)
1373 {
1374 	struct rhine_private *rp = netdev_priv(dev);
1375 	void __iomem *ioaddr = rp->base;
1376 
1377 	/* Disable all CAMs */
1378 	rhine_set_vlan_cam_mask(ioaddr, 0);
1379 	rhine_set_cam_mask(ioaddr, 0);
1380 
1381 	/* disable hardware VLAN support */
1382 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1383 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1384 }
1385 
1386 /**
1387  * rhine_update_vcam - update VLAN CAM filters
1388  * @dev: network device
1389  *
1390  * Update VLAN CAM filters to match configuration change.
1391  */
1392 static void rhine_update_vcam(struct net_device *dev)
1393 {
1394 	struct rhine_private *rp = netdev_priv(dev);
1395 	void __iomem *ioaddr = rp->base;
1396 	u16 vid;
1397 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1398 	unsigned int i = 0;
1399 
1400 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1401 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1402 		vCAMmask |= 1 << i;
1403 		if (++i >= VCAM_SIZE)
1404 			break;
1405 	}
1406 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1407 }
1408 
1409 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1410 {
1411 	struct rhine_private *rp = netdev_priv(dev);
1412 
1413 	spin_lock_bh(&rp->lock);
1414 	set_bit(vid, rp->active_vlans);
1415 	rhine_update_vcam(dev);
1416 	spin_unlock_bh(&rp->lock);
1417 	return 0;
1418 }
1419 
1420 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1421 {
1422 	struct rhine_private *rp = netdev_priv(dev);
1423 
1424 	spin_lock_bh(&rp->lock);
1425 	clear_bit(vid, rp->active_vlans);
1426 	rhine_update_vcam(dev);
1427 	spin_unlock_bh(&rp->lock);
1428 	return 0;
1429 }
1430 
1431 static void init_registers(struct net_device *dev)
1432 {
1433 	struct rhine_private *rp = netdev_priv(dev);
1434 	void __iomem *ioaddr = rp->base;
1435 	int i;
1436 
1437 	for (i = 0; i < 6; i++)
1438 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1439 
1440 	/* Initialize other registers. */
1441 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1442 	/* Configure initial FIFO thresholds. */
1443 	iowrite8(0x20, ioaddr + TxConfig);
1444 	rp->tx_thresh = 0x20;
1445 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1446 
1447 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1448 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1449 
1450 	rhine_set_rx_mode(dev);
1451 
1452 	if (rp->pdev->revision >= VT6105M)
1453 		rhine_init_cam_filter(dev);
1454 
1455 	napi_enable(&rp->napi);
1456 
1457 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1458 
1459 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1460 	       ioaddr + ChipCmd);
1461 	rhine_check_media(dev, 1);
1462 }
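/*
 * Note the ordering in init_registers(): NAPI is enabled before IntrEnable is
 * written, so the first interrupt already finds napi_schedule() usable, and
 * the receiver/transmitter are started only after the ring pointers and Rx
 * mode have been programmed.
 */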
1463 
1464 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1465 static void rhine_enable_linkmon(struct rhine_private *rp)
1466 {
1467 	void __iomem *ioaddr = rp->base;
1468 
1469 	iowrite8(0, ioaddr + MIICmd);
1470 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1471 	iowrite8(0x80, ioaddr + MIICmd);
1472 
1473 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1474 
1475 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1476 }
1477 
1478 /* Disable MII link status auto-polling (required for MDIO access) */
1479 static void rhine_disable_linkmon(struct rhine_private *rp)
1480 {
1481 	void __iomem *ioaddr = rp->base;
1482 
1483 	iowrite8(0, ioaddr + MIICmd);
1484 
1485 	if (rp->quirks & rqRhineI) {
1486 		iowrite8(0x01, ioaddr + MIIRegAddr);	/* MII_BMSR */
1487 
1488 		/* Can be called from ISR. Evil. */
1489 		mdelay(1);
1490 
1491 		/* 0x80 must be set immediately before turning it off */
1492 		iowrite8(0x80, ioaddr + MIICmd);
1493 
1494 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1495 
1496 		/* Heh. Now clear 0x80 again. */
1497 		iowrite8(0, ioaddr + MIICmd);
1498 	}
1499 	else
1500 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1501 }
1502 
1503 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1504 
1505 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1506 {
1507 	struct rhine_private *rp = netdev_priv(dev);
1508 	void __iomem *ioaddr = rp->base;
1509 	int result;
1510 
1511 	rhine_disable_linkmon(rp);
1512 
1513 	/* rhine_disable_linkmon already cleared MIICmd */
1514 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1515 	iowrite8(regnum, ioaddr + MIIRegAddr);
1516 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1517 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1518 	result = ioread16(ioaddr + MIIData);
1519 
1520 	rhine_enable_linkmon(rp);
1521 	return result;
1522 }
1523 
1524 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1525 {
1526 	struct rhine_private *rp = netdev_priv(dev);
1527 	void __iomem *ioaddr = rp->base;
1528 
1529 	rhine_disable_linkmon(rp);
1530 
1531 	/* rhine_disable_linkmon already cleared MIICmd */
1532 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1533 	iowrite8(regnum, ioaddr + MIIRegAddr);
1534 	iowrite16(value, ioaddr + MIIData);
1535 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1536 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1537 
1538 	rhine_enable_linkmon(rp);
1539 }
1540 
1541 static void rhine_task_disable(struct rhine_private *rp)
1542 {
1543 	mutex_lock(&rp->task_lock);
1544 	rp->task_enable = false;
1545 	mutex_unlock(&rp->task_lock);
1546 
1547 	cancel_work_sync(&rp->slow_event_task);
1548 	cancel_work_sync(&rp->reset_task);
1549 }
1550 
1551 static void rhine_task_enable(struct rhine_private *rp)
1552 {
1553 	mutex_lock(&rp->task_lock);
1554 	rp->task_enable = true;
1555 	mutex_unlock(&rp->task_lock);
1556 }
1557 
1558 static int rhine_open(struct net_device *dev)
1559 {
1560 	struct rhine_private *rp = netdev_priv(dev);
1561 	void __iomem *ioaddr = rp->base;
1562 	int rc;
1563 
1564 	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1565 			dev);
1566 	if (rc)
1567 		return rc;
1568 
1569 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1570 
1571 	rc = alloc_ring(dev);
1572 	if (rc) {
1573 		free_irq(rp->pdev->irq, dev);
1574 		return rc;
1575 	}
1576 	alloc_rbufs(dev);
1577 	alloc_tbufs(dev);
1578 	rhine_chip_reset(dev);
1579 	rhine_task_enable(rp);
1580 	init_registers(dev);
1581 
1582 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1583 		  __func__, ioread16(ioaddr + ChipCmd),
1584 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1585 
1586 	netif_start_queue(dev);
1587 
1588 	return 0;
1589 }
1590 
1591 static void rhine_reset_task(struct work_struct *work)
1592 {
1593 	struct rhine_private *rp = container_of(work, struct rhine_private,
1594 						reset_task);
1595 	struct net_device *dev = rp->dev;
1596 
1597 	mutex_lock(&rp->task_lock);
1598 
1599 	if (!rp->task_enable)
1600 		goto out_unlock;
1601 
1602 	napi_disable(&rp->napi);
1603 	spin_lock_bh(&rp->lock);
1604 
1605 	/* clear all descriptors */
1606 	free_tbufs(dev);
1607 	free_rbufs(dev);
1608 	alloc_tbufs(dev);
1609 	alloc_rbufs(dev);
1610 
1611 	/* Reinitialize the hardware. */
1612 	rhine_chip_reset(dev);
1613 	init_registers(dev);
1614 
1615 	spin_unlock_bh(&rp->lock);
1616 
1617 	dev->trans_start = jiffies; /* prevent tx timeout */
1618 	dev->stats.tx_errors++;
1619 	netif_wake_queue(dev);
1620 
1621 out_unlock:
1622 	mutex_unlock(&rp->task_lock);
1623 }
1624 
1625 static void rhine_tx_timeout(struct net_device *dev)
1626 {
1627 	struct rhine_private *rp = netdev_priv(dev);
1628 	void __iomem *ioaddr = rp->base;
1629 
1630 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1631 		    ioread16(ioaddr + IntrStatus),
1632 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1633 
1634 	schedule_work(&rp->reset_task);
1635 }
1636 
1637 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1638 				  struct net_device *dev)
1639 {
1640 	struct rhine_private *rp = netdev_priv(dev);
1641 	void __iomem *ioaddr = rp->base;
1642 	unsigned entry;
1643 
1644 	/* Caution: the write order is important here, set the field
1645 	   with the "ownership" bits last. */
1646 
1647 	/* Calculate the next Tx descriptor entry. */
1648 	entry = rp->cur_tx % TX_RING_SIZE;
1649 
1650 	if (skb_padto(skb, ETH_ZLEN))
1651 		return NETDEV_TX_OK;
1652 
1653 	rp->tx_skbuff[entry] = skb;
1654 
1655 	if ((rp->quirks & rqRhineI) &&
1656 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1657 		/* Must use alignment buffer. */
1658 		if (skb->len > PKT_BUF_SZ) {
1659 			/* packet too long, drop it */
1660 			dev_kfree_skb(skb);
1661 			rp->tx_skbuff[entry] = NULL;
1662 			dev->stats.tx_dropped++;
1663 			return NETDEV_TX_OK;
1664 		}
1665 
1666 		/* Padding is not copied and so must be redone. */
1667 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1668 		if (skb->len < ETH_ZLEN)
1669 			memset(rp->tx_buf[entry] + skb->len, 0,
1670 			       ETH_ZLEN - skb->len);
1671 		rp->tx_skbuff_dma[entry] = 0;
1672 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1673 						      (rp->tx_buf[entry] -
1674 						       rp->tx_bufs));
1675 	} else {
1676 		rp->tx_skbuff_dma[entry] =
1677 			pci_map_single(rp->pdev, skb->data, skb->len,
1678 				       PCI_DMA_TODEVICE);
1679 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1680 	}
1681 
1682 	rp->tx_ring[entry].desc_length =
1683 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1684 
1685 	if (unlikely(vlan_tx_tag_present(skb))) {
1686 		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1687 		/* request tagging */
1688 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1689 	}
1690 	else
1691 		rp->tx_ring[entry].tx_status = 0;
1692 
1693 	/* Barrier: descriptor fields must be visible before DescOwn is set. */
1694 	wmb();
1695 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1696 	wmb();
1697 
1698 	rp->cur_tx++;
1699 
1700 	/* Non-x86 Todo: explicitly flush cache lines here. */
1701 
1702 	if (vlan_tx_tag_present(skb))
1703 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1704 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1705 
1706 	/* Wake the potentially-idle transmit channel */
1707 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1708 	       ioaddr + ChipCmd1);
1709 	IOSYNC;
1710 
1711 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1712 		netif_stop_queue(dev);
1713 
1714 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1715 		  rp->cur_tx - 1, entry);
1716 
1717 	return NETDEV_TX_OK;
1718 }
1719 
1720 static void rhine_irq_disable(struct rhine_private *rp)
1721 {
1722 	iowrite16(0x0000, rp->base + IntrEnable);
1723 	mmiowb();
1724 }
1725 
1726 /* The interrupt handler does all of the Rx thread work and cleans up
1727    after the Tx thread. */
1728 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1729 {
1730 	struct net_device *dev = dev_instance;
1731 	struct rhine_private *rp = netdev_priv(dev);
1732 	u32 status;
1733 	int handled = 0;
1734 
1735 	status = rhine_get_events(rp);
1736 
1737 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1738 
1739 	if (status & RHINE_EVENT) {
1740 		handled = 1;
1741 
1742 		rhine_irq_disable(rp);
1743 		napi_schedule(&rp->napi);
1744 	}
1745 
1746 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1747 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1748 			  status);
1749 	}
1750 
1751 	return IRQ_RETVAL(handled);
1752 }
1753 
1754 /* This routine is logically part of the interrupt handler, but isolated
1755    for clarity. */
1756 static void rhine_tx(struct net_device *dev)
1757 {
1758 	struct rhine_private *rp = netdev_priv(dev);
1759 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1760 
1761 	/* find and cleanup dirty tx descriptors */
1762 	while (rp->dirty_tx != rp->cur_tx) {
1763 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1764 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1765 			  entry, txstatus);
1766 		if (txstatus & DescOwn)
1767 			break;
1768 		if (txstatus & 0x8000) {
1769 			netif_dbg(rp, tx_done, dev,
1770 				  "Transmit error, Tx status %08x\n", txstatus);
1771 			dev->stats.tx_errors++;
1772 			if (txstatus & 0x0400)
1773 				dev->stats.tx_carrier_errors++;
1774 			if (txstatus & 0x0200)
1775 				dev->stats.tx_window_errors++;
1776 			if (txstatus & 0x0100)
1777 				dev->stats.tx_aborted_errors++;
1778 			if (txstatus & 0x0080)
1779 				dev->stats.tx_heartbeat_errors++;
1780 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1781 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1782 				dev->stats.tx_fifo_errors++;
1783 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1784 				break; /* Keep the skb - we try again */
1785 			}
1786 			/* Transmitter restarted in 'abnormal' handler. */
1787 		} else {
1788 			if (rp->quirks & rqRhineI)
1789 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1790 			else
1791 				dev->stats.collisions += txstatus & 0x0F;
1792 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1793 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1794 			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1795 			dev->stats.tx_packets++;
1796 		}
1797 		/* Free the original skb. */
1798 		if (rp->tx_skbuff_dma[entry]) {
1799 			pci_unmap_single(rp->pdev,
1800 					 rp->tx_skbuff_dma[entry],
1801 					 rp->tx_skbuff[entry]->len,
1802 					 PCI_DMA_TODEVICE);
1803 		}
1804 		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1805 		rp->tx_skbuff[entry] = NULL;
1806 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1807 	}
1808 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1809 		netif_wake_queue(dev);
1810 }
1811 
1812 /**
1813  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1814  * @skb: pointer to sk_buff
1815  * @data_size: used data area of the buffer including CRC
1816  *
1817  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1818  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1819  * aligned following the CRC.
1820  */
1821 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1822 {
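	/* Round the used data area up to a 4-byte boundary and skip the
	 * 2-byte TPID to reach the TCI.
	 */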
1823 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1824 	return be16_to_cpup((__be16 *)trailer);
1825 }
1826 
1827 /* Process up to limit frames from receive ring */
1828 static int rhine_rx(struct net_device *dev, int limit)
1829 {
1830 	struct rhine_private *rp = netdev_priv(dev);
1831 	int count;
1832 	int entry = rp->cur_rx % RX_RING_SIZE;
1833 
1834 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1835 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1836 
1837 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1838 	for (count = 0; count < limit; ++count) {
1839 		struct rx_desc *desc = rp->rx_head_desc;
1840 		u32 desc_status = le32_to_cpu(desc->rx_status);
1841 		u32 desc_length = le32_to_cpu(desc->desc_length);
1842 		int data_size = desc_status >> 16;
1843 
1844 		if (desc_status & DescOwn)
1845 			break;
1846 
1847 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1848 			  desc_status);
1849 
1850 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1851 			if ((desc_status & RxWholePkt) != RxWholePkt) {
1852 				netdev_warn(dev,
1853 					    "Oversized Ethernet frame spanned multiple buffers, "
1854 					    "entry %#x length %d status %08x!\n",
1855 					    entry, data_size,
1856 					    desc_status);
1857 				netdev_warn(dev,
1858 					    "Oversized Ethernet frame %p vs %p\n",
1859 					    rp->rx_head_desc,
1860 					    &rp->rx_ring[entry]);
1861 				dev->stats.rx_length_errors++;
1862 			} else if (desc_status & RxErr) {
1863 				/* There was an error. */
1864 				netif_dbg(rp, rx_err, dev,
1865 					  "%s() Rx error %08x\n", __func__,
1866 					  desc_status);
1867 				dev->stats.rx_errors++;
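				/* Each mask below presumably maps one of the chip's Rx
				 * error flags onto the matching counter.
				 */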
1868 				if (desc_status & 0x0030)
1869 					dev->stats.rx_length_errors++;
1870 				if (desc_status & 0x0048)
1871 					dev->stats.rx_fifo_errors++;
1872 				if (desc_status & 0x0004)
1873 					dev->stats.rx_frame_errors++;
1874 				if (desc_status & 0x0002) {
1875 					/* This counter is also updated outside the interrupt handler, hence the lock. */
1876 					spin_lock(&rp->lock);
1877 					dev->stats.rx_crc_errors++;
1878 					spin_unlock(&rp->lock);
1879 				}
1880 			}
1881 		} else {
1882 			struct sk_buff *skb = NULL;
1883 			/* Length should omit the CRC */
1884 			int pkt_len = data_size - 4;
1885 			u16 vlan_tci = 0;
1886 
1887 			/* Check if the packet is long enough to accept without
1888 			   copying to a minimally-sized skbuff. */
1889 			if (pkt_len < rx_copybreak)
1890 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1891 			if (skb) {
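				/* Small frame: copy it into a fresh skb and leave the
				 * ring buffer mapped for reuse by the chip.
				 */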
1892 				pci_dma_sync_single_for_cpu(rp->pdev,
1893 							    rp->rx_skbuff_dma[entry],
1894 							    rp->rx_buf_sz,
1895 							    PCI_DMA_FROMDEVICE);
1896 
1897 				skb_copy_to_linear_data(skb,
1898 						 rp->rx_skbuff[entry]->data,
1899 						 pkt_len);
1900 				skb_put(skb, pkt_len);
1901 				pci_dma_sync_single_for_device(rp->pdev,
1902 							       rp->rx_skbuff_dma[entry],
1903 							       rp->rx_buf_sz,
1904 							       PCI_DMA_FROMDEVICE);
1905 			} else {
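				/* Large frame: hand the ring buffer's skb up the stack;
				 * the refill loop below maps a replacement.
				 */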
1906 				skb = rp->rx_skbuff[entry];
1907 				if (skb == NULL) {
1908 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1909 					break;
1910 				}
1911 				rp->rx_skbuff[entry] = NULL;
1912 				skb_put(skb, pkt_len);
1913 				pci_unmap_single(rp->pdev,
1914 						 rp->rx_skbuff_dma[entry],
1915 						 rp->rx_buf_sz,
1916 						 PCI_DMA_FROMDEVICE);
1917 			}
1918 
1919 			if (unlikely(desc_length & DescTag))
1920 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
1921 
1922 			skb->protocol = eth_type_trans(skb, dev);
1923 
1924 			if (unlikely(desc_length & DescTag))
1925 				__vlan_hwaccel_put_tag(skb, vlan_tci);
1926 			netif_receive_skb(skb);
1927 			dev->stats.rx_bytes += pkt_len;
1928 			dev->stats.rx_packets++;
1929 		}
1930 		entry = (++rp->cur_rx) % RX_RING_SIZE;
1931 		rp->rx_head_desc = &rp->rx_ring[entry];
1932 	}
1933 
1934 	/* Refill the Rx ring buffers. */
1935 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1936 		struct sk_buff *skb;
1937 		entry = rp->dirty_rx % RX_RING_SIZE;
1938 		if (rp->rx_skbuff[entry] == NULL) {
1939 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1940 			rp->rx_skbuff[entry] = skb;
1941 			if (skb == NULL)
1942 				break;	/* Better luck next round. */
1943 			rp->rx_skbuff_dma[entry] =
1944 				pci_map_single(rp->pdev, skb->data,
1945 					       rp->rx_buf_sz,
1946 					       PCI_DMA_FROMDEVICE);
1947 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1948 		}
1949 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1950 	}
1951 
1952 	return count;
1953 }
1954 
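/*
 * Point the chip at the first unreclaimed Tx descriptor and kick the
 * transmitter again, unless further error events are pending.
 */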
1955 static void rhine_restart_tx(struct net_device *dev) {
1956 	struct rhine_private *rp = netdev_priv(dev);
1957 	void __iomem *ioaddr = rp->base;
1958 	int entry = rp->dirty_tx % TX_RING_SIZE;
1959 	u32 intr_status;
1960 
1961 	/*
1962 	 * If new errors occurred, we need to sort them out before doing Tx.
1963 	 * In that case the ISR will get us back here soon anyway.
1964 	 */
1965 	intr_status = rhine_get_events(rp);
1966 
1967 	if ((intr_status & IntrTxErrSummary) == 0) {
1968 
1969 		/* We know better than the chip where it should continue. */
1970 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1971 		       ioaddr + TxRingPtr);
1972 
1973 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1974 		       ioaddr + ChipCmd);
1975 
1976 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1977 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1978 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1979 
1980 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1981 		       ioaddr + ChipCmd1);
1982 		IOSYNC;
1983 	}
1984 	else {
1985 		/* This should never happen */
1986 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
1987 			   intr_status);
1988 	}
1989 
1990 }
1991 
1992 static void rhine_slow_event_task(struct work_struct *work)
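/*
 * Deferred handling of slow events (link change, PCI error) outside the
 * interrupt path; NAPI polling is re-armed afterwards.
 */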
1993 {
1994 	struct rhine_private *rp =
1995 		container_of(work, struct rhine_private, slow_event_task);
1996 	struct net_device *dev = rp->dev;
1997 	u32 intr_status;
1998 
1999 	mutex_lock(&rp->task_lock);
2000 
2001 	if (!rp->task_enable)
2002 		goto out_unlock;
2003 
2004 	intr_status = rhine_get_events(rp);
2005 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2006 
2007 	if (intr_status & IntrLinkChange)
2008 		rhine_check_media(dev, 0);
2009 
2010 	if (intr_status & IntrPCIErr)
2011 		netif_warn(rp, hw, dev, "PCI error\n");
2012 
2013 	napi_disable(&rp->napi);
2014 	rhine_irq_disable(rp);
2015 	/* Slow and safe. Consider __napi_schedule() as a replacement? */
2016 	napi_enable(&rp->napi);
2017 	napi_schedule(&rp->napi);
2018 
2019 out_unlock:
2020 	mutex_unlock(&rp->task_lock);
2021 }
2022 
2023 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
2024 {
2025 	struct rhine_private *rp = netdev_priv(dev);
2026 
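	/* Fold the chip's CRC-error and missed-frame counters into dev->stats
	 * before handing the structure back.
	 */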
2027 	spin_lock_bh(&rp->lock);
2028 	rhine_update_rx_crc_and_missed_errord(rp);
2029 	spin_unlock_bh(&rp->lock);
2030 
2031 	return &dev->stats;
2032 }
2033 
2034 static void rhine_set_rx_mode(struct net_device *dev)
2035 {
2036 	struct rhine_private *rp = netdev_priv(dev);
2037 	void __iomem *ioaddr = rp->base;
2038 	u32 mc_filter[2];	/* Multicast hash filter */
2039 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2040 	struct netdev_hw_addr *ha;
2041 
2042 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2043 		rx_mode = 0x1C;
2044 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2045 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2046 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2047 		   (dev->flags & IFF_ALLMULTI)) {
2048 		/* Too many to match, or accept all multicasts. */
2049 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2050 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2051 	} else if (rp->pdev->revision >= VT6105M) {
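		/* Rhine-III (6105M and newer): load the multicast addresses into
		 * CAM entries instead of using the hash filter.
		 */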
2052 		int i = 0;
2053 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2054 		netdev_for_each_mc_addr(ha, dev) {
2055 			if (i == MCAM_SIZE)
2056 				break;
2057 			rhine_set_cam(ioaddr, i, ha->addr);
2058 			mCAMmask |= 1 << i;
2059 			i++;
2060 		}
2061 		rhine_set_cam_mask(ioaddr, mCAMmask);
2062 	} else {
2063 		memset(mc_filter, 0, sizeof(mc_filter));
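		/* Hash filter: the top six bits of the address CRC select one of
		 * the 64 filter bits, split across two 32-bit registers.
		 */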
2064 		netdev_for_each_mc_addr(ha, dev) {
2065 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2066 
2067 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2068 		}
2069 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2070 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2071 	}
2072 	/* enable/disable VLAN receive filtering */
2073 	if (rp->pdev->revision >= VT6105M) {
2074 		if (dev->flags & IFF_PROMISC)
2075 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2076 		else
2077 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2078 	}
2079 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2080 }
2081 
2082 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2083 {
2084 	struct rhine_private *rp = netdev_priv(dev);
2085 
2086 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2087 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2088 	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2089 }
2090 
2091 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2092 {
2093 	struct rhine_private *rp = netdev_priv(dev);
2094 	int rc;
2095 
2096 	mutex_lock(&rp->task_lock);
2097 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
2098 	mutex_unlock(&rp->task_lock);
2099 
2100 	return rc;
2101 }
2102 
2103 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2104 {
2105 	struct rhine_private *rp = netdev_priv(dev);
2106 	int rc;
2107 
2108 	mutex_lock(&rp->task_lock);
2109 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2110 	rhine_set_carrier(&rp->mii_if);
2111 	mutex_unlock(&rp->task_lock);
2112 
2113 	return rc;
2114 }
2115 
2116 static int netdev_nway_reset(struct net_device *dev)
2117 {
2118 	struct rhine_private *rp = netdev_priv(dev);
2119 
2120 	return mii_nway_restart(&rp->mii_if);
2121 }
2122 
2123 static u32 netdev_get_link(struct net_device *dev)
2124 {
2125 	struct rhine_private *rp = netdev_priv(dev);
2126 
2127 	return mii_link_ok(&rp->mii_if);
2128 }
2129 
2130 static u32 netdev_get_msglevel(struct net_device *dev)
2131 {
2132 	struct rhine_private *rp = netdev_priv(dev);
2133 
2134 	return rp->msg_enable;
2135 }
2136 
2137 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2138 {
2139 	struct rhine_private *rp = netdev_priv(dev);
2140 
2141 	rp->msg_enable = value;
2142 }
2143 
2144 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2145 {
2146 	struct rhine_private *rp = netdev_priv(dev);
2147 
2148 	if (!(rp->quirks & rqWOL))
2149 		return;
2150 
2151 	spin_lock_irq(&rp->lock);
2152 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2153 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2154 	wol->wolopts = rp->wolopts;
2155 	spin_unlock_irq(&rp->lock);
2156 }
2157 
2158 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2159 {
2160 	struct rhine_private *rp = netdev_priv(dev);
2161 	u32 support = WAKE_PHY | WAKE_MAGIC |
2162 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2163 
2164 	if (!(rp->quirks & rqWOL))
2165 		return -EINVAL;
2166 
2167 	if (wol->wolopts & ~support)
2168 		return -EINVAL;
2169 
2170 	spin_lock_irq(&rp->lock);
2171 	rp->wolopts = wol->wolopts;
2172 	spin_unlock_irq(&rp->lock);
2173 
2174 	return 0;
2175 }
2176 
2177 static const struct ethtool_ops netdev_ethtool_ops = {
2178 	.get_drvinfo		= netdev_get_drvinfo,
2179 	.get_settings		= netdev_get_settings,
2180 	.set_settings		= netdev_set_settings,
2181 	.nway_reset		= netdev_nway_reset,
2182 	.get_link		= netdev_get_link,
2183 	.get_msglevel		= netdev_get_msglevel,
2184 	.set_msglevel		= netdev_set_msglevel,
2185 	.get_wol		= rhine_get_wol,
2186 	.set_wol		= rhine_set_wol,
2187 };
2188 
2189 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2190 {
2191 	struct rhine_private *rp = netdev_priv(dev);
2192 	int rc;
2193 
2194 	if (!netif_running(dev))
2195 		return -EINVAL;
2196 
2197 	mutex_lock(&rp->task_lock);
2198 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2199 	rhine_set_carrier(&rp->mii_if);
2200 	mutex_unlock(&rp->task_lock);
2201 
2202 	return rc;
2203 }
2204 
2205 static int rhine_close(struct net_device *dev)
2206 {
2207 	struct rhine_private *rp = netdev_priv(dev);
2208 	void __iomem *ioaddr = rp->base;
2209 
2210 	rhine_task_disable(rp);
2211 	napi_disable(&rp->napi);
2212 	netif_stop_queue(dev);
2213 
2214 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2215 		  ioread16(ioaddr + ChipCmd));
2216 
2217 	/* Switch to loopback mode to avoid hardware races. */
2218 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2219 
2220 	rhine_irq_disable(rp);
2221 
2222 	/* Stop the chip's Tx and Rx processes. */
2223 	iowrite16(CmdStop, ioaddr + ChipCmd);
2224 
2225 	free_irq(rp->pdev->irq, dev);
2226 	free_rbufs(dev);
2227 	free_tbufs(dev);
2228 	free_ring(dev);
2229 
2230 	return 0;
2231 }
2232 
2233 
2234 static void rhine_remove_one(struct pci_dev *pdev)
2235 {
2236 	struct net_device *dev = pci_get_drvdata(pdev);
2237 	struct rhine_private *rp = netdev_priv(dev);
2238 
2239 	unregister_netdev(dev);
2240 
2241 	pci_iounmap(pdev, rp->base);
2242 	pci_release_regions(pdev);
2243 
2244 	free_netdev(dev);
2245 	pci_disable_device(pdev);
2246 	pci_set_drvdata(pdev, NULL);
2247 }
2248 
2249 static void rhine_shutdown (struct pci_dev *pdev)
2250 {
2251 	struct net_device *dev = pci_get_drvdata(pdev);
2252 	struct rhine_private *rp = netdev_priv(dev);
2253 	void __iomem *ioaddr = rp->base;
2254 
2255 	if (!(rp->quirks & rqWOL))
2256 		return; /* Nothing to do for non-WOL adapters */
2257 
2258 	rhine_power_init(dev);
2259 
2260 	/* Make sure we use patterns 0 and 1, not 4 and 5 */
2261 	if (rp->quirks & rq6patterns)
2262 		iowrite8(0x04, ioaddr + WOLcgClr);
2263 
2264 	spin_lock(&rp->lock);
2265 
2266 	if (rp->wolopts & WAKE_MAGIC) {
2267 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2268 		/*
2269 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2270 		 * not cooperate otherwise.
2271 		 */
2272 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2273 	}
2274 
2275 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2276 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2277 
2278 	if (rp->wolopts & WAKE_PHY)
2279 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2280 
2281 	if (rp->wolopts & WAKE_UCAST)
2282 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2283 
2284 	if (rp->wolopts) {
2285 		/* Enable legacy WOL (for old motherboards) */
2286 		iowrite8(0x01, ioaddr + PwcfgSet);
2287 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2288 	}
2289 
2290 	spin_unlock(&rp->lock);
2291 
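	/* On power-off, arm wake-up and drop the chip into D3hot, unless
	 * avoid_D3 is set.
	 */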
2292 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2293 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2294 
2295 		pci_wake_from_d3(pdev, true);
2296 		pci_set_power_state(pdev, PCI_D3hot);
2297 	}
2298 }
2299 
2300 #ifdef CONFIG_PM_SLEEP
2301 static int rhine_suspend(struct device *device)
2302 {
2303 	struct pci_dev *pdev = to_pci_dev(device);
2304 	struct net_device *dev = pci_get_drvdata(pdev);
2305 	struct rhine_private *rp = netdev_priv(dev);
2306 
2307 	if (!netif_running(dev))
2308 		return 0;
2309 
2310 	rhine_task_disable(rp);
2311 	rhine_irq_disable(rp);
2312 	napi_disable(&rp->napi);
2313 
2314 	netif_device_detach(dev);
2315 
2316 	rhine_shutdown(pdev);
2317 
2318 	return 0;
2319 }
2320 
2321 static int rhine_resume(struct device *device)
2322 {
2323 	struct pci_dev *pdev = to_pci_dev(device);
2324 	struct net_device *dev = pci_get_drvdata(pdev);
2325 	struct rhine_private *rp = netdev_priv(dev);
2326 
2327 	if (!netif_running(dev))
2328 		return 0;
2329 
2330 #ifdef USE_MMIO
2331 	enable_mmio(rp->pioaddr, rp->quirks);
2332 #endif
2333 	rhine_power_init(dev);
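	/* Rebuild the Rx/Tx rings rather than trusting their pre-suspend
	 * state.
	 */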
2334 	free_tbufs(dev);
2335 	free_rbufs(dev);
2336 	alloc_tbufs(dev);
2337 	alloc_rbufs(dev);
2338 	rhine_task_enable(rp);
2339 	spin_lock_bh(&rp->lock);
2340 	init_registers(dev);
2341 	spin_unlock_bh(&rp->lock);
2342 
2343 	netif_device_attach(dev);
2344 
2345 	return 0;
2346 }
2347 
2348 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2349 #define RHINE_PM_OPS	(&rhine_pm_ops)
2350 
2351 #else
2352 
2353 #define RHINE_PM_OPS	NULL
2354 
2355 #endif /* !CONFIG_PM_SLEEP */
2356 
2357 static struct pci_driver rhine_driver = {
2358 	.name		= DRV_NAME,
2359 	.id_table	= rhine_pci_tbl,
2360 	.probe		= rhine_init_one,
2361 	.remove		= rhine_remove_one,
2362 	.shutdown	= rhine_shutdown,
2363 	.driver.pm	= RHINE_PM_OPS,
2364 };
2365 
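/* Systems whose BIOS needs the avoid_D3 workaround (see rhine_init). */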
2366 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2367 	{
2368 		.ident = "EPIA-M",
2369 		.matches = {
2370 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2371 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2372 		},
2373 	},
2374 	{
2375 		.ident = "KV7",
2376 		.matches = {
2377 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2378 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2379 		},
2380 	},
2381 	{ NULL }
2382 };
2383 
2384 static int __init rhine_init(void)
2385 {
2386 /* When built as a module, this is printed whether or not devices are found in probe. */
2387 #ifdef MODULE
2388 	pr_info("%s\n", version);
2389 #endif
2390 	if (dmi_check_system(rhine_dmi_table)) {
2391 		/* these BIOSes fail at PXE boot if chip is in D3 */
2392 		avoid_D3 = true;
2393 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2394 	}
2395 	else if (avoid_D3)
2396 		pr_info("avoid_D3 set\n");
2397 
2398 	return pci_register_driver(&rhine_driver);
2399 }
2400 
2401 
2402 static void __exit rhine_cleanup(void)
2403 {
2404 	pci_unregister_driver(&rhine_driver);
2405 }
2406 
2407 
2408 module_init(rhine_init);
2409 module_exit(rhine_cleanup);
2410