1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.1"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74    Making the Tx ring too large decreases the effectiveness of channel
75    bonding and packet priority.
76    There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE	16
78 #define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
79 #define RX_RING_SIZE	64
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT	(2*HZ)
85 
86 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/of_address.h>
98 #include <linux/of_device.h>
99 #include <linux/of_irq.h>
100 #include <linux/platform_device.h>
101 #include <linux/dma-mapping.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/skbuff.h>
105 #include <linux/init.h>
106 #include <linux/delay.h>
107 #include <linux/mii.h>
108 #include <linux/ethtool.h>
109 #include <linux/crc32.h>
110 #include <linux/if_vlan.h>
111 #include <linux/bitops.h>
112 #include <linux/workqueue.h>
113 #include <asm/processor.h>	/* Processor type for cache alignment. */
114 #include <asm/io.h>
115 #include <asm/irq.h>
116 #include <asm/uaccess.h>
117 #include <linux/dmi.h>
118 
119 /* These identify the driver base version and may not be removed. */
120 static const char version[] =
121 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
122 
123 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
124 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
125 MODULE_LICENSE("GPL");
126 
127 module_param(debug, int, 0);
128 module_param(rx_copybreak, int, 0);
129 module_param(avoid_D3, bool, 0);
130 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
131 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
132 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
133 
134 #define MCAM_SIZE	32
135 #define VCAM_SIZE	32
136 
137 /*
138 		Theory of Operation
139 
140 I. Board Compatibility
141 
142 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
143 controller. Later Rhine-II and Rhine-III chips are supported as well.
144 
145 II. Board-specific settings
146 
147 Boards with this chip are functional only in a bus-master PCI slot.
148 
149 Many operational settings are loaded from the EEPROM to the Config word at
150 offset 0x78. For most of these settings, this driver assumes that they are
151 correct.
152 If this driver is compiled to use PCI memory space operations the EEPROM
153 must be configured to enable memory ops.
154 
155 III. Driver operation
156 
157 IIIa. Ring buffers
158 
159 This driver uses two statically allocated fixed-size descriptor lists
160 formed into rings by a branch from the final descriptor to the beginning of
161 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
162 
163 IIIb/c. Transmit/Receive Structure
164 
165 This driver attempts to use a zero-copy receive and transmit scheme.
166 
167 Alas, all data buffers are required to start on a 32 bit boundary, so
168 the driver must often copy transmit packets into bounce buffers.
169 
170 The driver allocates full frame size skbuffs for the Rx ring buffers at
171 open() time and passes the skb->data field to the chip as receive data
172 buffers. When an incoming frame is less than rx_copybreak bytes long,
173 a fresh skbuff is allocated and the frame is copied to the new skbuff.
174 When the incoming frame is larger, the skbuff is passed directly up the
175 protocol stack. Buffers consumed this way are replaced by newly allocated
176 skbuffs in the last phase of rhine_rx().
177 
178 The rx_copybreak value is chosen to trade off the memory wasted by
179 using a full-sized skbuff for small frames vs. the copying costs of larger
180 frames. New boards are typically used in generously configured machines
181 and the underfilled buffers have negligible impact compared to the benefit of
182 a single allocation size, so the default value of zero results in never
183 copying packets. When copying is done, the cost is usually mitigated by using
184 a combined copy/checksum routine. Copying also preloads the cache, which is
185 most useful with small frames.
186 
187 Since the VIA chips are only able to transfer data to buffers on 32 bit
188 boundaries, the IP header at offset 14 in an ethernet frame isn't
189 longword aligned for further processing. Copying these unaligned buffers
190 has the beneficial effect of 16-byte aligning the IP header.
191 
192 IIId. Synchronization
193 
194 The driver runs as two independent, single-threaded flows of control. One
195 is the send-packet routine, which enforces single-threaded use by the
196 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
197 which is single threaded by the hardware and interrupt handling software.
198 
199 The send packet thread has partial control over the Tx ring. It locks the
200 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
201 the ring is not available it stops the transmit queue by
202 calling netif_stop_queue.
203 
204 The interrupt handler has exclusive control over the Rx ring and records stats
205 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
206 empty by incrementing the dirty_tx mark. If at least half of the entries in
207 the Tx ring are available, the transmit queue is woken up if it was stopped.
208 
209 IV. Notes
210 
211 IVb. References
212 
213 Preliminary VT86C100A manual from http://www.via.com.tw/
214 http://www.scyld.com/expert/100mbps.html
215 http://www.scyld.com/expert/NWay.html
216 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
217 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
218 
219 
220 IVc. Errata
221 
222 The VT86C100A manual is not a reliable source of information.
223 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
224 in significant performance degradation for bounce buffer copies on transmit
225 and unaligned IP headers on receive.
226 The chip does not pad to minimum transmit length.
227 
228 */
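/*
 * Illustrative sketch only (not part of the original driver): the
 * copy-only-tiny-frames heuristic described in IIIb/c above reduces to a
 * single comparison against the rx_copybreak module parameter.  Frames
 * shorter than rx_copybreak are copied into a freshly allocated skbuff so
 * the full-sized Rx buffer can be reused in place; larger frames are handed
 * to the stack directly and their buffer is replaced afterwards.  The helper
 * name below is hypothetical and exists purely to document the heuristic.
 */
static inline bool rhine_frame_should_be_copied(unsigned int pkt_len)
{
	return pkt_len < rx_copybreak;
}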
229 
230 
231 /* This table drives the PCI probe routines. It's mostly boilerplate in all
232    of the drivers, and will likely be provided by some future kernel.
233    Note the matching code -- each entry matches a specific VIA device ID;
234    the subsystem vendor and device IDs are wildcards (PCI_ANY_ID).
235 */
236 
237 enum rhine_revs {
238 	VT86C100A	= 0x00,
239 	VTunknown0	= 0x20,
240 	VT6102		= 0x40,
241 	VT8231		= 0x50,	/* Integrated MAC */
242 	VT8233		= 0x60,	/* Integrated MAC */
243 	VT8235		= 0x74,	/* Integrated MAC */
244 	VT8237		= 0x78,	/* Integrated MAC */
245 	VTunknown1	= 0x7C,
246 	VT6105		= 0x80,
247 	VT6105_B0	= 0x83,
248 	VT6105L		= 0x8A,
249 	VT6107		= 0x8C,
250 	VTunknown2	= 0x8E,
251 	VT6105M		= 0x90,	/* Management adapter */
252 };
253 
254 enum rhine_quirks {
255 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
256 	rqForceReset	= 0x0002,
257 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
258 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
259 	rqRhineI	= 0x0100,	/* See comment below */
260 	rqIntPHY	= 0x0200,	/* Integrated PHY */
261 	rqMgmt		= 0x0400,	/* Management adapter */
262 	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
263 					 * switched from PIO mode to MMIO
264 					 * (only applies to PCI)
265 					 */
266 };
267 /*
268  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
269  * MMIO as well as for the collision counter and the Tx FIFO underflow
270  * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
271  */
272 
273 /* Beware of PCI posted writes */
274 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
275 
276 static const struct pci_device_id rhine_pci_tbl[] = {
277 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
278 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
279 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
280 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
281 	{ }	/* terminate list */
282 };
283 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
284 
285 /* OpenFirmware identifiers for platform-bus devices
286  * The .data field is currently only used to store quirks
287  */
288 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
289 static struct of_device_id rhine_of_tbl[] = {
290 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
291 	{ }	/* terminate list */
292 };
293 MODULE_DEVICE_TABLE(of, rhine_of_tbl);
294 
295 /* Offsets to the device registers. */
296 enum register_offsets {
297 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
298 	ChipCmd1=0x09, TQWake=0x0A,
299 	IntrStatus=0x0C, IntrEnable=0x0E,
300 	MulticastFilter0=0x10, MulticastFilter1=0x14,
301 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
302 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
303 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
304 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
305 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
306 	StickyHW=0x83, IntrStatus2=0x84,
307 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
308 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
309 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
310 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
311 };
312 
313 /* Bits in ConfigD */
314 enum backoff_bits {
315 	BackOptional=0x01, BackModify=0x02,
316 	BackCaptureEffect=0x04, BackRandom=0x08
317 };
318 
319 /* Bits in the TxConfig (TCR) register */
320 enum tcr_bits {
321 	TCR_PQEN=0x01,
322 	TCR_LB0=0x02,		/* loopback[0] */
323 	TCR_LB1=0x04,		/* loopback[1] */
324 	TCR_OFSET=0x08,
325 	TCR_RTGOPT=0x10,
326 	TCR_RTFT0=0x20,
327 	TCR_RTFT1=0x40,
328 	TCR_RTSF=0x80,
329 };
330 
331 /* Bits in the CamCon (CAMC) register */
332 enum camcon_bits {
333 	CAMC_CAMEN=0x01,
334 	CAMC_VCAMSL=0x02,
335 	CAMC_CAMWR=0x04,
336 	CAMC_CAMRD=0x08,
337 };
338 
339 /* Bits in the PCIBusConfig1 (BCR1) register */
340 enum bcr1_bits {
341 	BCR1_POT0=0x01,
342 	BCR1_POT1=0x02,
343 	BCR1_POT2=0x04,
344 	BCR1_CTFT0=0x08,
345 	BCR1_CTFT1=0x10,
346 	BCR1_CTSF=0x20,
347 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
348 	BCR1_VIDFR=0x80,	/* for VT6105 */
349 	BCR1_MED0=0x40,		/* for VT6102 */
350 	BCR1_MED1=0x80,		/* for VT6102 */
351 };
352 
353 /* Registers we verify read back the same value via MMIO and PIO. */
354 static const int mmio_verify_registers[] = {
355 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
356 	0
357 };
358 
359 /* Bits in the interrupt status/mask registers. */
360 enum intr_status_bits {
361 	IntrRxDone	= 0x0001,
362 	IntrTxDone	= 0x0002,
363 	IntrRxErr	= 0x0004,
364 	IntrTxError	= 0x0008,
365 	IntrRxEmpty	= 0x0020,
366 	IntrPCIErr	= 0x0040,
367 	IntrStatsMax	= 0x0080,
368 	IntrRxEarly	= 0x0100,
369 	IntrTxUnderrun	= 0x0210,
370 	IntrRxOverflow	= 0x0400,
371 	IntrRxDropped	= 0x0800,
372 	IntrRxNoBuf	= 0x1000,
373 	IntrTxAborted	= 0x2000,
374 	IntrLinkChange	= 0x4000,
375 	IntrRxWakeUp	= 0x8000,
376 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
377 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
378 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
379 				  IntrTxUnderrun,
380 };
381 
382 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
383 enum wol_bits {
384 	WOLucast	= 0x10,
385 	WOLmagic	= 0x20,
386 	WOLbmcast	= 0x30,
387 	WOLlnkon	= 0x40,
388 	WOLlnkoff	= 0x80,
389 };
390 
391 /* The Rx and Tx buffer descriptors. */
392 struct rx_desc {
393 	__le32 rx_status;
394 	__le32 desc_length; /* Chain flag, Buffer/frame length */
395 	__le32 addr;
396 	__le32 next_desc;
397 };
398 struct tx_desc {
399 	__le32 tx_status;
400 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
401 	__le32 addr;
402 	__le32 next_desc;
403 };
404 
405 /* Initial value for tx_desc.desc_length; buffer size goes in bits 0-10 */
406 #define TXDESC		0x00e08000
407 
408 enum rx_status_bits {
409 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
410 };
411 
412 /* Bits in *_desc.*_status */
413 enum desc_status_bits {
414 	DescOwn=0x80000000
415 };
416 
417 /* Bits in *_desc.*_length */
418 enum desc_length_bits {
419 	DescTag=0x00010000
420 };
421 
422 /* Bits in ChipCmd. */
423 enum chip_cmd_bits {
424 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
425 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
426 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
427 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
428 };
429 
430 struct rhine_stats {
431 	u64		packets;
432 	u64		bytes;
433 	struct u64_stats_sync syncp;
434 };
435 
436 struct rhine_private {
437 	/* Bit mask for configured VLAN ids */
438 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
439 
440 	/* Descriptor rings */
441 	struct rx_desc *rx_ring;
442 	struct tx_desc *tx_ring;
443 	dma_addr_t rx_ring_dma;
444 	dma_addr_t tx_ring_dma;
445 
446 	/* The addresses of receive-in-place skbuffs. */
447 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
448 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
449 
450 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
451 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
452 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
453 
454 	/* Tx bounce buffers (Rhine-I only) */
455 	unsigned char *tx_buf[TX_RING_SIZE];
456 	unsigned char *tx_bufs;
457 	dma_addr_t tx_bufs_dma;
458 
459 	int irq;
460 	long pioaddr;
461 	struct net_device *dev;
462 	struct napi_struct napi;
463 	spinlock_t lock;
464 	struct mutex task_lock;
465 	bool task_enable;
466 	struct work_struct slow_event_task;
467 	struct work_struct reset_task;
468 
469 	u32 msg_enable;
470 
471 	/* Frequently used values: keep some adjacent for cache effect. */
472 	u32 quirks;
473 	struct rx_desc *rx_head_desc;
474 	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
475 	unsigned int cur_tx, dirty_tx;
476 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
477 	struct rhine_stats rx_stats;
478 	struct rhine_stats tx_stats;
479 	u8 wolopts;
480 
481 	u8 tx_thresh, rx_thresh;
482 
483 	struct mii_if_info mii_if;
484 	void __iomem *base;
485 };
486 
487 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490 
491 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
492 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
493 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
494 
495 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498 
499 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
502 
503 
504 static int  mdio_read(struct net_device *dev, int phy_id, int location);
505 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506 static int  rhine_open(struct net_device *dev);
507 static void rhine_reset_task(struct work_struct *work);
508 static void rhine_slow_event_task(struct work_struct *work);
509 static void rhine_tx_timeout(struct net_device *dev);
510 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511 				  struct net_device *dev);
512 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513 static void rhine_tx(struct net_device *dev);
514 static int rhine_rx(struct net_device *dev, int limit);
515 static void rhine_set_rx_mode(struct net_device *dev);
516 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
517 	       struct rtnl_link_stats64 *stats);
518 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519 static const struct ethtool_ops netdev_ethtool_ops;
520 static int  rhine_close(struct net_device *dev);
521 static int rhine_vlan_rx_add_vid(struct net_device *dev,
522 				 __be16 proto, u16 vid);
523 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
524 				  __be16 proto, u16 vid);
525 static void rhine_restart_tx(struct net_device *dev);
526 
527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528 {
529 	void __iomem *ioaddr = rp->base;
530 	int i;
531 
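	/*
	 * Poll until the masked bits read back clear (low == true) or set
	 * (low == false), giving up after 1024 * 10us, i.e. roughly 10 ms.
	 */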
532 	for (i = 0; i < 1024; i++) {
533 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
534 
535 		if (low ^ has_mask_bits)
536 			break;
537 		udelay(10);
538 	}
539 	if (i > 64) {
540 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
541 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
542 	}
543 }
544 
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546 {
547 	rhine_wait_bit(rp, reg, mask, false);
548 }
549 
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551 {
552 	rhine_wait_bit(rp, reg, mask, true);
553 }
554 
555 static u32 rhine_get_events(struct rhine_private *rp)
556 {
557 	void __iomem *ioaddr = rp->base;
558 	u32 intr_status;
559 
560 	intr_status = ioread16(ioaddr + IntrStatus);
561 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
562 	if (rp->quirks & rqStatusWBRace)
563 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564 	return intr_status;
565 }
566 
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568 {
569 	void __iomem *ioaddr = rp->base;
570 
571 	if (rp->quirks & rqStatusWBRace)
572 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
573 	iowrite16(mask, ioaddr + IntrStatus);
574 	mmiowb();
575 }
576 
577 /*
578  * Get power related registers into sane state.
579  * Notify user about past WOL event.
580  */
581 static void rhine_power_init(struct net_device *dev)
582 {
583 	struct rhine_private *rp = netdev_priv(dev);
584 	void __iomem *ioaddr = rp->base;
585 	u16 wolstat;
586 
587 	if (rp->quirks & rqWOL) {
588 		/* Make sure chip is in power state D0 */
589 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
590 
591 		/* Disable "force PME-enable" */
592 		iowrite8(0x80, ioaddr + WOLcgClr);
593 
594 		/* Clear power-event config bits (WOL) */
595 		iowrite8(0xFF, ioaddr + WOLcrClr);
596 		/* More recent cards can manage two additional patterns */
597 		if (rp->quirks & rq6patterns)
598 			iowrite8(0x03, ioaddr + WOLcrClr1);
599 
600 		/* Save power-event status bits */
601 		wolstat = ioread8(ioaddr + PwrcsrSet);
602 		if (rp->quirks & rq6patterns)
603 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
604 
605 		/* Clear power-event status bits */
606 		iowrite8(0xFF, ioaddr + PwrcsrClr);
607 		if (rp->quirks & rq6patterns)
608 			iowrite8(0x03, ioaddr + PwrcsrClr1);
609 
610 		if (wolstat) {
611 			char *reason;
612 			switch (wolstat) {
613 			case WOLmagic:
614 				reason = "Magic packet";
615 				break;
616 			case WOLlnkon:
617 				reason = "Link went up";
618 				break;
619 			case WOLlnkoff:
620 				reason = "Link went down";
621 				break;
622 			case WOLucast:
623 				reason = "Unicast packet";
624 				break;
625 			case WOLbmcast:
626 				reason = "Multicast/broadcast packet";
627 				break;
628 			default:
629 				reason = "Unknown";
630 			}
631 			netdev_info(dev, "Woke system up. Reason: %s\n",
632 				    reason);
633 		}
634 	}
635 }
636 
637 static void rhine_chip_reset(struct net_device *dev)
638 {
639 	struct rhine_private *rp = netdev_priv(dev);
640 	void __iomem *ioaddr = rp->base;
641 	u8 cmd1;
642 
643 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
644 	IOSYNC;
645 
646 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
647 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
648 
649 		/* Force reset */
650 		if (rp->quirks & rqForceReset)
651 			iowrite8(0x40, ioaddr + MiscCmd);
652 
653 		/* Reset can take somewhat longer (rare) */
654 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
655 	}
656 
657 	cmd1 = ioread8(ioaddr + ChipCmd1);
658 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
659 		   "failed" : "succeeded");
660 }
661 
662 static void enable_mmio(long pioaddr, u32 quirks)
663 {
664 	int n;
665 
666 	if (quirks & rqNeedEnMMIO) {
667 		if (quirks & rqRhineI) {
668 			/* More recent docs say that this bit is reserved */
669 			n = inb(pioaddr + ConfigA) | 0x20;
670 			outb(n, pioaddr + ConfigA);
671 		} else {
672 			n = inb(pioaddr + ConfigD) | 0x80;
673 			outb(n, pioaddr + ConfigD);
674 		}
675 	}
676 }
677 
678 static inline int verify_mmio(struct device *hwdev,
679 			      long pioaddr,
680 			      void __iomem *ioaddr,
681 			      u32 quirks)
682 {
683 	if (quirks & rqNeedEnMMIO) {
684 		int i = 0;
685 
686 		/* Check that selected MMIO registers match the PIO ones */
687 		while (mmio_verify_registers[i]) {
688 			int reg = mmio_verify_registers[i++];
689 			unsigned char a = inb(pioaddr+reg);
690 			unsigned char b = readb(ioaddr+reg);
691 
692 			if (a != b) {
693 				dev_err(hwdev,
694 					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
695 					reg, a, b);
696 				return -EIO;
697 			}
698 		}
699 	}
700 	return 0;
701 }
702 
703 /*
704  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
705  * (plus 0x6C for Rhine-I/II)
706  */
707 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
708 {
709 	struct rhine_private *rp = netdev_priv(dev);
710 	void __iomem *ioaddr = rp->base;
711 	int i;
712 
713 	outb(0x20, pioaddr + MACRegEEcsr);
714 	for (i = 0; i < 1024; i++) {
715 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
716 			break;
717 	}
718 	if (i > 512)
719 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
720 
721 	/*
722 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
723 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
724 	 * it is not known if that still works with the "win98-reboot" problem.
725 	 */
726 	enable_mmio(pioaddr, rp->quirks);
727 
728 	/* Turn off EEPROM-controlled wake-up (magic packet) */
729 	if (rp->quirks & rqWOL)
730 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
731 
732 }
733 
734 #ifdef CONFIG_NET_POLL_CONTROLLER
735 static void rhine_poll(struct net_device *dev)
736 {
737 	struct rhine_private *rp = netdev_priv(dev);
738 	const int irq = rp->irq;
739 
740 	disable_irq(irq);
741 	rhine_interrupt(irq, dev);
742 	enable_irq(irq);
743 }
744 #endif
745 
746 static void rhine_kick_tx_threshold(struct rhine_private *rp)
747 {
748 	if (rp->tx_thresh < 0xe0) {
749 		void __iomem *ioaddr = rp->base;
750 
751 		rp->tx_thresh += 0x20;
752 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
753 	}
754 }
755 
756 static void rhine_tx_err(struct rhine_private *rp, u32 status)
757 {
758 	struct net_device *dev = rp->dev;
759 
760 	if (status & IntrTxAborted) {
761 		netif_info(rp, tx_err, dev,
762 			   "Abort %08x, frame dropped\n", status);
763 	}
764 
765 	if (status & IntrTxUnderrun) {
766 		rhine_kick_tx_threshold(rp);
767 		netif_info(rp, tx_err, dev, "Transmitter underrun, "
768 			   "Tx threshold now %02x\n", rp->tx_thresh);
769 	}
770 
771 	if (status & IntrTxDescRace)
772 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
773 
774 	if ((status & IntrTxError) &&
775 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
776 		rhine_kick_tx_threshold(rp);
777 		netif_info(rp, tx_err, dev, "Unspecified error. "
778 			   "Tx threshold now %02x\n", rp->tx_thresh);
779 	}
780 
781 	rhine_restart_tx(dev);
782 }
783 
784 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
785 {
786 	void __iomem *ioaddr = rp->base;
787 	struct net_device_stats *stats = &rp->dev->stats;
788 
789 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
790 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
791 
792 	/*
793 	 * Clears the "tally counters" for CRC errors and missed frames(?).
794 	 * It has been reported that some chips need a write of 0 to clear
795 	 * these, for others the counters are set to 1 when written to and
796 	 * instead cleared when read. So we clear them both ways ...
797 	 */
798 	iowrite32(0, ioaddr + RxMissed);
799 	ioread16(ioaddr + RxCRCErrs);
800 	ioread16(ioaddr + RxMissed);
801 }
802 
803 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
804 				 IntrRxErr | \
805 				 IntrRxEmpty | \
806 				 IntrRxOverflow	| \
807 				 IntrRxDropped | \
808 				 IntrRxNoBuf | \
809 				 IntrRxWakeUp)
810 
811 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
812 				 IntrTxAborted | \
813 				 IntrTxUnderrun | \
814 				 IntrTxDescRace)
815 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
816 
817 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
818 				 RHINE_EVENT_NAPI_TX | \
819 				 IntrStatsMax)
820 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
821 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
822 
823 static int rhine_napipoll(struct napi_struct *napi, int budget)
824 {
825 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
826 	struct net_device *dev = rp->dev;
827 	void __iomem *ioaddr = rp->base;
828 	u16 enable_mask = RHINE_EVENT & 0xffff;
829 	int work_done = 0;
830 	u32 status;
831 
832 	status = rhine_get_events(rp);
833 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
834 
835 	if (status & RHINE_EVENT_NAPI_RX)
836 		work_done += rhine_rx(dev, budget);
837 
838 	if (status & RHINE_EVENT_NAPI_TX) {
839 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
840 			/* Avoid scavenging before Tx engine turned off */
841 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
842 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
843 				netif_warn(rp, tx_err, dev, "Tx still on\n");
844 		}
845 
846 		rhine_tx(dev);
847 
848 		if (status & RHINE_EVENT_NAPI_TX_ERR)
849 			rhine_tx_err(rp, status);
850 	}
851 
852 	if (status & IntrStatsMax) {
853 		spin_lock(&rp->lock);
854 		rhine_update_rx_crc_and_missed_errord(rp);
855 		spin_unlock(&rp->lock);
856 	}
857 
858 	if (status & RHINE_EVENT_SLOW) {
859 		enable_mask &= ~RHINE_EVENT_SLOW;
860 		schedule_work(&rp->slow_event_task);
861 	}
862 
863 	if (work_done < budget) {
864 		napi_complete(napi);
865 		iowrite16(enable_mask, ioaddr + IntrEnable);
866 		mmiowb();
867 	}
868 	return work_done;
869 }
870 
871 static void rhine_hw_init(struct net_device *dev, long pioaddr)
872 {
873 	struct rhine_private *rp = netdev_priv(dev);
874 
875 	/* Reset the chip to erase previous misconfiguration. */
876 	rhine_chip_reset(dev);
877 
878 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
879 	if (rp->quirks & rqRhineI)
880 		msleep(5);
881 
882 	/* Reload EEPROM controlled bytes cleared by soft reset */
883 	if (dev_is_pci(dev->dev.parent))
884 		rhine_reload_eeprom(pioaddr, dev);
885 }
886 
887 static const struct net_device_ops rhine_netdev_ops = {
888 	.ndo_open		 = rhine_open,
889 	.ndo_stop		 = rhine_close,
890 	.ndo_start_xmit		 = rhine_start_tx,
891 	.ndo_get_stats64	 = rhine_get_stats64,
892 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
893 	.ndo_change_mtu		 = eth_change_mtu,
894 	.ndo_validate_addr	 = eth_validate_addr,
895 	.ndo_set_mac_address 	 = eth_mac_addr,
896 	.ndo_do_ioctl		 = netdev_ioctl,
897 	.ndo_tx_timeout 	 = rhine_tx_timeout,
898 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
899 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
900 #ifdef CONFIG_NET_POLL_CONTROLLER
901 	.ndo_poll_controller	 = rhine_poll,
902 #endif
903 };
904 
905 static int rhine_init_one_common(struct device *hwdev, u32 quirks,
906 				 long pioaddr, void __iomem *ioaddr, int irq)
907 {
908 	struct net_device *dev;
909 	struct rhine_private *rp;
910 	int i, rc, phy_id;
911 	const char *name;
912 
913 	/* this should always be supported */
914 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
915 	if (rc) {
916 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
917 		goto err_out;
918 	}
919 
920 	dev = alloc_etherdev(sizeof(struct rhine_private));
921 	if (!dev) {
922 		rc = -ENOMEM;
923 		goto err_out;
924 	}
925 	SET_NETDEV_DEV(dev, hwdev);
926 
927 	rp = netdev_priv(dev);
928 	rp->dev = dev;
929 	rp->quirks = quirks;
930 	rp->pioaddr = pioaddr;
931 	rp->base = ioaddr;
932 	rp->irq = irq;
933 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
934 
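	/*
	 * Chips with an integrated PHY are assumed to answer at MII address 1;
	 * for the others the PHY address is read back further below from the
	 * EEPROM-loaded MIIPhyAddr register.
	 */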
935 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
936 
937 	u64_stats_init(&rp->tx_stats.syncp);
938 	u64_stats_init(&rp->rx_stats.syncp);
939 
940 	/* Get chip registers into a sane state */
941 	rhine_power_init(dev);
942 	rhine_hw_init(dev, pioaddr);
943 
944 	for (i = 0; i < 6; i++)
945 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
946 
947 	if (!is_valid_ether_addr(dev->dev_addr)) {
948 		/* Report it and use a random ethernet address instead */
949 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
950 		eth_hw_addr_random(dev);
951 		netdev_info(dev, "Using random MAC address: %pM\n",
952 			    dev->dev_addr);
953 	}
954 
955 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
956 	if (!phy_id)
957 		phy_id = ioread8(ioaddr + 0x6C);
958 
959 	spin_lock_init(&rp->lock);
960 	mutex_init(&rp->task_lock);
961 	INIT_WORK(&rp->reset_task, rhine_reset_task);
962 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
963 
964 	rp->mii_if.dev = dev;
965 	rp->mii_if.mdio_read = mdio_read;
966 	rp->mii_if.mdio_write = mdio_write;
967 	rp->mii_if.phy_id_mask = 0x1f;
968 	rp->mii_if.reg_num_mask = 0x1f;
969 
970 	/* The chip-specific entries in the device structure. */
971 	dev->netdev_ops = &rhine_netdev_ops;
972 	dev->ethtool_ops = &netdev_ethtool_ops;
973 	dev->watchdog_timeo = TX_TIMEOUT;
974 
975 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
976 
977 	if (rp->quirks & rqRhineI)
978 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
979 
980 	if (rp->quirks & rqMgmt)
981 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
982 				 NETIF_F_HW_VLAN_CTAG_RX |
983 				 NETIF_F_HW_VLAN_CTAG_FILTER;
984 
985 	/* dev->name not defined before register_netdev()! */
986 	rc = register_netdev(dev);
987 	if (rc)
988 		goto err_out_free_netdev;
989 
990 	if (rp->quirks & rqRhineI)
991 		name = "Rhine";
992 	else if (rp->quirks & rqStatusWBRace)
993 		name = "Rhine II";
994 	else if (rp->quirks & rqMgmt)
995 		name = "Rhine III (Management Adapter)";
996 	else
997 		name = "Rhine III";
998 
999 	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1000 		    name, (long)ioaddr, dev->dev_addr, rp->irq);
1001 
1002 	dev_set_drvdata(hwdev, dev);
1003 
1004 	{
1005 		u16 mii_cmd;
1006 		int mii_status = mdio_read(dev, phy_id, 1);
1007 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1008 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1009 		if (mii_status != 0xffff && mii_status != 0x0000) {
1010 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1011 			netdev_info(dev,
1012 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1013 				    phy_id,
1014 				    mii_status, rp->mii_if.advertising,
1015 				    mdio_read(dev, phy_id, 5));
1016 
1017 			/* set IFF_RUNNING */
1018 			if (mii_status & BMSR_LSTATUS)
1019 				netif_carrier_on(dev);
1020 			else
1021 				netif_carrier_off(dev);
1022 
1023 		}
1024 	}
1025 	rp->mii_if.phy_id = phy_id;
1026 	if (avoid_D3)
1027 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1028 
1029 	return 0;
1030 
1031 err_out_free_netdev:
1032 	free_netdev(dev);
1033 err_out:
1034 	return rc;
1035 }
1036 
1037 static int rhine_init_one_pci(struct pci_dev *pdev,
1038 			      const struct pci_device_id *ent)
1039 {
1040 	struct device *hwdev = &pdev->dev;
1041 	int rc;
1042 	long pioaddr, memaddr;
1043 	void __iomem *ioaddr;
1044 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1045 
1046 /* This driver was written to use PCI memory space. Some early versions
1047  * of the Rhine may only work correctly with I/O space accesses.
1048  * TODO: determine for which revisions this is true and assign the flag
1049  *	 in code as opposed to this Kconfig option (???)
1050  */
1051 #ifdef CONFIG_VIA_RHINE_MMIO
1052 	u32 quirks = rqNeedEnMMIO;
1053 #else
1054 	u32 quirks = 0;
1055 #endif
1056 
1057 /* when built into the kernel, we only print version if device is found */
1058 #ifndef MODULE
1059 	pr_info_once("%s\n", version);
1060 #endif
1061 
1062 	rc = pci_enable_device(pdev);
1063 	if (rc)
1064 		goto err_out;
1065 
1066 	if (pdev->revision < VTunknown0) {
1067 		quirks |= rqRhineI;
1068 	} else if (pdev->revision >= VT6102) {
1069 		quirks |= rqWOL | rqForceReset;
1070 		if (pdev->revision < VT6105) {
1071 			quirks |= rqStatusWBRace;
1072 		} else {
1073 			quirks |= rqIntPHY;
1074 			if (pdev->revision >= VT6105_B0)
1075 				quirks |= rq6patterns;
1076 			if (pdev->revision >= VT6105M)
1077 				quirks |= rqMgmt;
1078 		}
1079 	}
1080 
1081 	/* sanity check */
1082 	if ((pci_resource_len(pdev, 0) < io_size) ||
1083 	    (pci_resource_len(pdev, 1) < io_size)) {
1084 		rc = -EIO;
1085 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1086 		goto err_out_pci_disable;
1087 	}
1088 
1089 	pioaddr = pci_resource_start(pdev, 0);
1090 	memaddr = pci_resource_start(pdev, 1);
1091 
1092 	pci_set_master(pdev);
1093 
1094 	rc = pci_request_regions(pdev, DRV_NAME);
1095 	if (rc)
1096 		goto err_out_pci_disable;
1097 
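	/*
	 * BAR 0 is the PIO window and BAR 1 the MMIO window (see pioaddr and
	 * memaddr above); map whichever one matches the selected access mode.
	 */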
1098 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1099 	if (!ioaddr) {
1100 		rc = -EIO;
1101 		dev_err(hwdev,
1102 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1103 			dev_name(hwdev), io_size, memaddr);
1104 		goto err_out_free_res;
1105 	}
1106 
1107 	enable_mmio(pioaddr, quirks);
1108 
1109 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1110 	if (rc)
1111 		goto err_out_unmap;
1112 
1113 	rc = rhine_init_one_common(&pdev->dev, quirks,
1114 				   pioaddr, ioaddr, pdev->irq);
1115 	if (!rc)
1116 		return 0;
1117 
1118 err_out_unmap:
1119 	pci_iounmap(pdev, ioaddr);
1120 err_out_free_res:
1121 	pci_release_regions(pdev);
1122 err_out_pci_disable:
1123 	pci_disable_device(pdev);
1124 err_out:
1125 	return rc;
1126 }
1127 
1128 static int rhine_init_one_platform(struct platform_device *pdev)
1129 {
1130 	const struct of_device_id *match;
1131 	const u32 *quirks;
1132 	int irq;
1133 	struct resource *res;
1134 	void __iomem *ioaddr;
1135 
1136 	match = of_match_device(rhine_of_tbl, &pdev->dev);
1137 	if (!match)
1138 		return -EINVAL;
1139 
1140 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
1142 	if (IS_ERR(ioaddr))
1143 		return PTR_ERR(ioaddr);
1144 
1145 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1146 	if (!irq)
1147 		return -EINVAL;
1148 
1149 	quirks = match->data;
1150 	if (!quirks)
1151 		return -EINVAL;
1152 
1153 	return rhine_init_one_common(&pdev->dev, *quirks,
1154 				     (long)ioaddr, ioaddr, irq);
1155 }
1156 
1157 static int alloc_ring(struct net_device* dev)
1158 {
1159 	struct rhine_private *rp = netdev_priv(dev);
1160 	struct device *hwdev = dev->dev.parent;
1161 	void *ring;
1162 	dma_addr_t ring_dma;
1163 
1164 	ring = dma_alloc_coherent(hwdev,
1165 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1166 				  TX_RING_SIZE * sizeof(struct tx_desc),
1167 				  &ring_dma,
1168 				  GFP_ATOMIC);
1169 	if (!ring) {
1170 		netdev_err(dev, "Could not allocate DMA memory\n");
1171 		return -ENOMEM;
1172 	}
1173 	if (rp->quirks & rqRhineI) {
1174 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1175 						 PKT_BUF_SZ * TX_RING_SIZE,
1176 						 &rp->tx_bufs_dma,
1177 						 GFP_ATOMIC);
1178 		if (rp->tx_bufs == NULL) {
1179 			dma_free_coherent(hwdev,
1180 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1181 					  TX_RING_SIZE * sizeof(struct tx_desc),
1182 					  ring, ring_dma);
1183 			return -ENOMEM;
1184 		}
1185 	}
1186 
1187 	rp->rx_ring = ring;
1188 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1189 	rp->rx_ring_dma = ring_dma;
1190 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1191 
1192 	return 0;
1193 }
1194 
1195 static void free_ring(struct net_device* dev)
1196 {
1197 	struct rhine_private *rp = netdev_priv(dev);
1198 	struct device *hwdev = dev->dev.parent;
1199 
1200 	dma_free_coherent(hwdev,
1201 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1202 			  TX_RING_SIZE * sizeof(struct tx_desc),
1203 			  rp->rx_ring, rp->rx_ring_dma);
1204 	rp->tx_ring = NULL;
1205 
1206 	if (rp->tx_bufs)
1207 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1208 				  rp->tx_bufs, rp->tx_bufs_dma);
1209 
1210 	rp->tx_bufs = NULL;
1211 
1212 }
1213 
1214 static void alloc_rbufs(struct net_device *dev)
1215 {
1216 	struct rhine_private *rp = netdev_priv(dev);
1217 	struct device *hwdev = dev->dev.parent;
1218 	dma_addr_t next;
1219 	int i;
1220 
1221 	rp->dirty_rx = rp->cur_rx = 0;
1222 
1223 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1224 	rp->rx_head_desc = &rp->rx_ring[0];
1225 	next = rp->rx_ring_dma;
1226 
1227 	/* Init the ring entries */
1228 	for (i = 0; i < RX_RING_SIZE; i++) {
1229 		rp->rx_ring[i].rx_status = 0;
1230 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1231 		next += sizeof(struct rx_desc);
1232 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1233 		rp->rx_skbuff[i] = NULL;
1234 	}
1235 	/* Mark the last entry as wrapping the ring. */
1236 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1237 
1238 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1239 	for (i = 0; i < RX_RING_SIZE; i++) {
1240 		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1241 		rp->rx_skbuff[i] = skb;
1242 		if (skb == NULL)
1243 			break;
1244 
1245 		rp->rx_skbuff_dma[i] =
1246 			dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
1247 				       DMA_FROM_DEVICE);
1248 		if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
1249 			rp->rx_skbuff_dma[i] = 0;
1250 			dev_kfree_skb(skb);
1251 			break;
1252 		}
1253 		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1254 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1255 	}
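	/*
	 * Note: dirty_rx is set so that cur_rx - dirty_rx equals the number of
	 * ring entries that still lack an skbuff; with a fully populated ring
	 * (i == RX_RING_SIZE) the two indices start out equal.
	 */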
1256 	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1257 }
1258 
1259 static void free_rbufs(struct net_device* dev)
1260 {
1261 	struct rhine_private *rp = netdev_priv(dev);
1262 	struct device *hwdev = dev->dev.parent;
1263 	int i;
1264 
1265 	/* Free all the skbuffs in the Rx queue. */
1266 	for (i = 0; i < RX_RING_SIZE; i++) {
1267 		rp->rx_ring[i].rx_status = 0;
1268 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1269 		if (rp->rx_skbuff[i]) {
1270 			dma_unmap_single(hwdev,
1271 					 rp->rx_skbuff_dma[i],
1272 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1273 			dev_kfree_skb(rp->rx_skbuff[i]);
1274 		}
1275 		rp->rx_skbuff[i] = NULL;
1276 	}
1277 }
1278 
1279 static void alloc_tbufs(struct net_device* dev)
1280 {
1281 	struct rhine_private *rp = netdev_priv(dev);
1282 	dma_addr_t next;
1283 	int i;
1284 
1285 	rp->dirty_tx = rp->cur_tx = 0;
1286 	next = rp->tx_ring_dma;
1287 	for (i = 0; i < TX_RING_SIZE; i++) {
1288 		rp->tx_skbuff[i] = NULL;
1289 		rp->tx_ring[i].tx_status = 0;
1290 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1291 		next += sizeof(struct tx_desc);
1292 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1293 		if (rp->quirks & rqRhineI)
1294 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1295 	}
1296 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1297 
1298 }
1299 
1300 static void free_tbufs(struct net_device* dev)
1301 {
1302 	struct rhine_private *rp = netdev_priv(dev);
1303 	struct device *hwdev = dev->dev.parent;
1304 	int i;
1305 
1306 	for (i = 0; i < TX_RING_SIZE; i++) {
1307 		rp->tx_ring[i].tx_status = 0;
1308 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1309 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1310 		if (rp->tx_skbuff[i]) {
1311 			if (rp->tx_skbuff_dma[i]) {
1312 				dma_unmap_single(hwdev,
1313 						 rp->tx_skbuff_dma[i],
1314 						 rp->tx_skbuff[i]->len,
1315 						 DMA_TO_DEVICE);
1316 			}
1317 			dev_kfree_skb(rp->tx_skbuff[i]);
1318 		}
1319 		rp->tx_skbuff[i] = NULL;
1320 		rp->tx_buf[i] = NULL;
1321 	}
1322 }
1323 
1324 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1325 {
1326 	struct rhine_private *rp = netdev_priv(dev);
1327 	void __iomem *ioaddr = rp->base;
1328 
1329 	if (!rp->mii_if.force_media)
1330 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1331 
1332 	if (rp->mii_if.full_duplex)
1333 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1334 		   ioaddr + ChipCmd1);
1335 	else
1336 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1337 		   ioaddr + ChipCmd1);
1338 
1339 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1340 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1341 }
1342 
1343 /* Called after status of force_media possibly changed */
1344 static void rhine_set_carrier(struct mii_if_info *mii)
1345 {
1346 	struct net_device *dev = mii->dev;
1347 	struct rhine_private *rp = netdev_priv(dev);
1348 
1349 	if (mii->force_media) {
1350 		/* autoneg is off: Link is always assumed to be up */
1351 		if (!netif_carrier_ok(dev))
1352 			netif_carrier_on(dev);
1353 	}
1354 
1355 	rhine_check_media(dev, 0);
1356 
1357 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1358 		   mii->force_media, netif_carrier_ok(dev));
1359 }
1360 
1361 /**
1362  * rhine_set_cam - set CAM multicast filters
1363  * @ioaddr: register block of this Rhine
1364  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1365  * @addr: multicast address (6 bytes)
1366  *
1367  * Load addresses into multicast filters.
1368  */
1369 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1370 {
1371 	int i;
1372 
1373 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1374 	wmb();
1375 
1376 	/* Paranoid -- idx out of range should never happen */
1377 	idx &= (MCAM_SIZE - 1);
1378 
1379 	iowrite8((u8) idx, ioaddr + CamAddr);
1380 
1381 	for (i = 0; i < 6; i++, addr++)
1382 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1383 	udelay(10);
1384 	wmb();
1385 
1386 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1387 	udelay(10);
1388 
1389 	iowrite8(0, ioaddr + CamCon);
1390 }
1391 
1392 /**
1393  * rhine_set_vlan_cam - set CAM VLAN filters
1394  * @ioaddr: register block of this Rhine
1395  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1396  * @addr: VLAN ID (2 bytes)
1397  *
1398  * Load addresses into VLAN filters.
1399  */
1400 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1401 {
1402 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1403 	wmb();
1404 
1405 	/* Paranoid -- idx out of range should never happen */
1406 	idx &= (VCAM_SIZE - 1);
1407 
1408 	iowrite8((u8) idx, ioaddr + CamAddr);
1409 
1410 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1411 	udelay(10);
1412 	wmb();
1413 
1414 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1415 	udelay(10);
1416 
1417 	iowrite8(0, ioaddr + CamCon);
1418 }
1419 
1420 /**
1421  * rhine_set_cam_mask - set multicast CAM mask
1422  * @ioaddr: register block of this Rhine
1423  * @mask: multicast CAM mask
1424  *
1425  * Mask sets multicast filters active/inactive.
1426  */
1427 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1428 {
1429 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1430 	wmb();
1431 
1432 	/* write mask */
1433 	iowrite32(mask, ioaddr + CamMask);
1434 
1435 	/* disable CAMEN */
1436 	iowrite8(0, ioaddr + CamCon);
1437 }
1438 
1439 /**
1440  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1441  * @ioaddr: register block of this Rhine
1442  * @mask: VLAN CAM mask
1443  *
1444  * Mask sets VLAN filters active/inactive.
1445  */
1446 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1447 {
1448 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1449 	wmb();
1450 
1451 	/* write mask */
1452 	iowrite32(mask, ioaddr + CamMask);
1453 
1454 	/* disable CAMEN */
1455 	iowrite8(0, ioaddr + CamCon);
1456 }
1457 
1458 /**
1459  * rhine_init_cam_filter - initialize CAM filters
1460  * @dev: network device
1461  *
1462  * Initialize (disable) hardware VLAN and multicast support on this
1463  * Rhine.
1464  */
1465 static void rhine_init_cam_filter(struct net_device *dev)
1466 {
1467 	struct rhine_private *rp = netdev_priv(dev);
1468 	void __iomem *ioaddr = rp->base;
1469 
1470 	/* Disable all CAMs */
1471 	rhine_set_vlan_cam_mask(ioaddr, 0);
1472 	rhine_set_cam_mask(ioaddr, 0);
1473 
1474 	/* disable hardware VLAN support */
1475 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1476 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1477 }
1478 
1479 /**
1480  * rhine_update_vcam - update VLAN CAM filters
1481  * @dev: network device of this Rhine
1482  *
1483  * Update VLAN CAM filters to match configuration change.
1484  */
1485 static void rhine_update_vcam(struct net_device *dev)
1486 {
1487 	struct rhine_private *rp = netdev_priv(dev);
1488 	void __iomem *ioaddr = rp->base;
1489 	u16 vid;
1490 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1491 	unsigned int i = 0;
1492 
1493 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1494 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1495 		vCAMmask |= 1 << i;
1496 		if (++i >= VCAM_SIZE)
1497 			break;
1498 	}
1499 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1500 }
1501 
1502 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1503 {
1504 	struct rhine_private *rp = netdev_priv(dev);
1505 
1506 	spin_lock_bh(&rp->lock);
1507 	set_bit(vid, rp->active_vlans);
1508 	rhine_update_vcam(dev);
1509 	spin_unlock_bh(&rp->lock);
1510 	return 0;
1511 }
1512 
1513 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1514 {
1515 	struct rhine_private *rp = netdev_priv(dev);
1516 
1517 	spin_lock_bh(&rp->lock);
1518 	clear_bit(vid, rp->active_vlans);
1519 	rhine_update_vcam(dev);
1520 	spin_unlock_bh(&rp->lock);
1521 	return 0;
1522 }
1523 
1524 static void init_registers(struct net_device *dev)
1525 {
1526 	struct rhine_private *rp = netdev_priv(dev);
1527 	void __iomem *ioaddr = rp->base;
1528 	int i;
1529 
1530 	for (i = 0; i < 6; i++)
1531 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1532 
1533 	/* Initialize other registers. */
1534 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1535 	/* Configure initial FIFO thresholds. */
1536 	iowrite8(0x20, ioaddr + TxConfig);
1537 	rp->tx_thresh = 0x20;
1538 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1539 
1540 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1541 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1542 
1543 	rhine_set_rx_mode(dev);
1544 
1545 	if (rp->quirks & rqMgmt)
1546 		rhine_init_cam_filter(dev);
1547 
1548 	napi_enable(&rp->napi);
1549 
1550 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1551 
1552 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1553 	       ioaddr + ChipCmd);
1554 	rhine_check_media(dev, 1);
1555 }
1556 
1557 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1558 static void rhine_enable_linkmon(struct rhine_private *rp)
1559 {
1560 	void __iomem *ioaddr = rp->base;
1561 
1562 	iowrite8(0, ioaddr + MIICmd);
1563 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1564 	iowrite8(0x80, ioaddr + MIICmd);
1565 
1566 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1567 
1568 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1569 }
1570 
1571 /* Disable MII link status auto-polling (required for MDIO access) */
1572 static void rhine_disable_linkmon(struct rhine_private *rp)
1573 {
1574 	void __iomem *ioaddr = rp->base;
1575 
1576 	iowrite8(0, ioaddr + MIICmd);
1577 
1578 	if (rp->quirks & rqRhineI) {
1579 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1580 
1581 		/* Can be called from ISR. Evil. */
1582 		mdelay(1);
1583 
1584 		/* 0x80 must be set immediately before turning it off */
1585 		iowrite8(0x80, ioaddr + MIICmd);
1586 
1587 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1588 
1589 		/* Heh. Now clear 0x80 again. */
1590 		iowrite8(0, ioaddr + MIICmd);
1591 	}
1592 	else
1593 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1594 }
1595 
1596 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1597 
1598 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1599 {
1600 	struct rhine_private *rp = netdev_priv(dev);
1601 	void __iomem *ioaddr = rp->base;
1602 	int result;
1603 
1604 	rhine_disable_linkmon(rp);
1605 
1606 	/* rhine_disable_linkmon already cleared MIICmd */
1607 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1608 	iowrite8(regnum, ioaddr + MIIRegAddr);
1609 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1610 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1611 	result = ioread16(ioaddr + MIIData);
1612 
1613 	rhine_enable_linkmon(rp);
1614 	return result;
1615 }
1616 
1617 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1618 {
1619 	struct rhine_private *rp = netdev_priv(dev);
1620 	void __iomem *ioaddr = rp->base;
1621 
1622 	rhine_disable_linkmon(rp);
1623 
1624 	/* rhine_disable_linkmon already cleared MIICmd */
1625 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1626 	iowrite8(regnum, ioaddr + MIIRegAddr);
1627 	iowrite16(value, ioaddr + MIIData);
1628 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1629 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1630 
1631 	rhine_enable_linkmon(rp);
1632 }
1633 
1634 static void rhine_task_disable(struct rhine_private *rp)
1635 {
1636 	mutex_lock(&rp->task_lock);
1637 	rp->task_enable = false;
1638 	mutex_unlock(&rp->task_lock);
1639 
1640 	cancel_work_sync(&rp->slow_event_task);
1641 	cancel_work_sync(&rp->reset_task);
1642 }
1643 
1644 static void rhine_task_enable(struct rhine_private *rp)
1645 {
1646 	mutex_lock(&rp->task_lock);
1647 	rp->task_enable = true;
1648 	mutex_unlock(&rp->task_lock);
1649 }
1650 
1651 static int rhine_open(struct net_device *dev)
1652 {
1653 	struct rhine_private *rp = netdev_priv(dev);
1654 	void __iomem *ioaddr = rp->base;
1655 	int rc;
1656 
1657 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1658 	if (rc)
1659 		return rc;
1660 
1661 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1662 
1663 	rc = alloc_ring(dev);
1664 	if (rc) {
1665 		free_irq(rp->irq, dev);
1666 		return rc;
1667 	}
1668 	alloc_rbufs(dev);
1669 	alloc_tbufs(dev);
1670 	rhine_chip_reset(dev);
1671 	rhine_task_enable(rp);
1672 	init_registers(dev);
1673 
1674 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1675 		  __func__, ioread16(ioaddr + ChipCmd),
1676 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1677 
1678 	netif_start_queue(dev);
1679 
1680 	return 0;
1681 }
1682 
1683 static void rhine_reset_task(struct work_struct *work)
1684 {
1685 	struct rhine_private *rp = container_of(work, struct rhine_private,
1686 						reset_task);
1687 	struct net_device *dev = rp->dev;
1688 
1689 	mutex_lock(&rp->task_lock);
1690 
1691 	if (!rp->task_enable)
1692 		goto out_unlock;
1693 
1694 	napi_disable(&rp->napi);
1695 	netif_tx_disable(dev);
1696 	spin_lock_bh(&rp->lock);
1697 
1698 	/* clear all descriptors */
1699 	free_tbufs(dev);
1700 	free_rbufs(dev);
1701 	alloc_tbufs(dev);
1702 	alloc_rbufs(dev);
1703 
1704 	/* Reinitialize the hardware. */
1705 	rhine_chip_reset(dev);
1706 	init_registers(dev);
1707 
1708 	spin_unlock_bh(&rp->lock);
1709 
1710 	dev->trans_start = jiffies; /* prevent tx timeout */
1711 	dev->stats.tx_errors++;
1712 	netif_wake_queue(dev);
1713 
1714 out_unlock:
1715 	mutex_unlock(&rp->task_lock);
1716 }
1717 
1718 static void rhine_tx_timeout(struct net_device *dev)
1719 {
1720 	struct rhine_private *rp = netdev_priv(dev);
1721 	void __iomem *ioaddr = rp->base;
1722 
1723 	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1724 		    ioread16(ioaddr + IntrStatus),
1725 		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1726 
1727 	schedule_work(&rp->reset_task);
1728 }
1729 
1730 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1731 				  struct net_device *dev)
1732 {
1733 	struct rhine_private *rp = netdev_priv(dev);
1734 	struct device *hwdev = dev->dev.parent;
1735 	void __iomem *ioaddr = rp->base;
1736 	unsigned entry;
1737 
1738 	/* Caution: the write order is important here, set the field
1739 	   with the "ownership" bits last. */
1740 
1741 	/* Calculate the next Tx descriptor entry. */
1742 	entry = rp->cur_tx % TX_RING_SIZE;
1743 
1744 	if (skb_padto(skb, ETH_ZLEN))
1745 		return NETDEV_TX_OK;
1746 
1747 	rp->tx_skbuff[entry] = skb;
1748 
1749 	if ((rp->quirks & rqRhineI) &&
1750 	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1751 		/* Must use alignment buffer. */
1752 		if (skb->len > PKT_BUF_SZ) {
1753 			/* packet too long, drop it */
1754 			dev_kfree_skb_any(skb);
1755 			rp->tx_skbuff[entry] = NULL;
1756 			dev->stats.tx_dropped++;
1757 			return NETDEV_TX_OK;
1758 		}
1759 
1760 		/* Padding is not copied and so must be redone. */
1761 		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1762 		if (skb->len < ETH_ZLEN)
1763 			memset(rp->tx_buf[entry] + skb->len, 0,
1764 			       ETH_ZLEN - skb->len);
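		/*
		 * No streaming DMA mapping was made: the data lives in the
		 * coherent bounce buffer, so keep the per-slot handle at zero
		 * to tell the cleanup paths there is nothing to unmap.
		 */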
1765 		rp->tx_skbuff_dma[entry] = 0;
1766 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1767 						      (rp->tx_buf[entry] -
1768 						       rp->tx_bufs));
1769 	} else {
1770 		rp->tx_skbuff_dma[entry] =
1771 			dma_map_single(hwdev, skb->data, skb->len,
1772 				       DMA_TO_DEVICE);
1773 		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1774 			dev_kfree_skb_any(skb);
1775 			rp->tx_skbuff_dma[entry] = 0;
1776 			dev->stats.tx_dropped++;
1777 			return NETDEV_TX_OK;
1778 		}
1779 		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1780 	}
1781 
1782 	rp->tx_ring[entry].desc_length =
1783 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1784 
1785 	if (unlikely(skb_vlan_tag_present(skb))) {
1786 		u16 vid_pcp = skb_vlan_tag_get(skb);
1787 
1788 		/* drop CFI/DEI bit, register needs VID and PCP */
1789 		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1790 			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1791 		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1792 		/* request tagging */
1793 		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else
1796 		rp->tx_ring[entry].tx_status = 0;
1797 
	/*
	 * Ensure all other descriptor fields are visible before DescOwn is
	 * set, and that DescOwn is written before the Tx demand below.
	 */
1799 	wmb();
1800 	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1801 	wmb();
1802 
1803 	rp->cur_tx++;
1804 
1805 	/* Non-x86 Todo: explicitly flush cache lines here. */
1806 
1807 	if (skb_vlan_tag_present(skb))
1808 		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1809 		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1810 
1811 	/* Wake the potentially-idle transmit channel */
1812 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1813 	       ioaddr + ChipCmd1);
1814 	IOSYNC;
1815 
1816 	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1817 		netif_stop_queue(dev);
1818 
1819 	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1820 		  rp->cur_tx - 1, entry);
1821 
1822 	return NETDEV_TX_OK;
1823 }
1824 
1825 static void rhine_irq_disable(struct rhine_private *rp)
1826 {
1827 	iowrite16(0x0000, rp->base + IntrEnable);
1828 	mmiowb();
1829 }
1830 
/* The interrupt handler only disables further interrupts and schedules NAPI;
   the actual Rx and Tx work is done in the NAPI poll routine. */
1833 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1834 {
1835 	struct net_device *dev = dev_instance;
1836 	struct rhine_private *rp = netdev_priv(dev);
1837 	u32 status;
1838 	int handled = 0;
1839 
1840 	status = rhine_get_events(rp);
1841 
1842 	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1843 
1844 	if (status & RHINE_EVENT) {
1845 		handled = 1;
1846 
1847 		rhine_irq_disable(rp);
1848 		napi_schedule(&rp->napi);
1849 	}
1850 
1851 	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1852 		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1853 			  status);
1854 	}
1855 
1856 	return IRQ_RETVAL(handled);
1857 }
1858 
1859 /* This routine is logically part of the interrupt handler, but isolated
1860    for clarity. */
1861 static void rhine_tx(struct net_device *dev)
1862 {
1863 	struct rhine_private *rp = netdev_priv(dev);
1864 	struct device *hwdev = dev->dev.parent;
1865 	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1866 
1867 	/* find and cleanup dirty tx descriptors */
1868 	while (rp->dirty_tx != rp->cur_tx) {
1869 		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1870 		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1871 			  entry, txstatus);
1872 		if (txstatus & DescOwn)
1873 			break;
1874 		if (txstatus & 0x8000) {
1875 			netif_dbg(rp, tx_done, dev,
1876 				  "Transmit error, Tx status %08x\n", txstatus);
1877 			dev->stats.tx_errors++;
1878 			if (txstatus & 0x0400)
1879 				dev->stats.tx_carrier_errors++;
1880 			if (txstatus & 0x0200)
1881 				dev->stats.tx_window_errors++;
1882 			if (txstatus & 0x0100)
1883 				dev->stats.tx_aborted_errors++;
1884 			if (txstatus & 0x0080)
1885 				dev->stats.tx_heartbeat_errors++;
1886 			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1887 			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1888 				dev->stats.tx_fifo_errors++;
1889 				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1890 				break; /* Keep the skb - we try again */
1891 			}
1892 			/* Transmitter restarted in 'abnormal' handler. */
1893 		} else {
1894 			if (rp->quirks & rqRhineI)
1895 				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1896 			else
1897 				dev->stats.collisions += txstatus & 0x0F;
1898 			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1899 				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1900 
1901 			u64_stats_update_begin(&rp->tx_stats.syncp);
1902 			rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1903 			rp->tx_stats.packets++;
1904 			u64_stats_update_end(&rp->tx_stats.syncp);
1905 		}
1906 		/* Free the original skb. */
1907 		if (rp->tx_skbuff_dma[entry]) {
1908 			dma_unmap_single(hwdev,
1909 					 rp->tx_skbuff_dma[entry],
1910 					 rp->tx_skbuff[entry]->len,
1911 					 DMA_TO_DEVICE);
1912 		}
1913 		dev_consume_skb_any(rp->tx_skbuff[entry]);
1914 		rp->tx_skbuff[entry] = NULL;
1915 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
1916 	}
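	/* Re-enable the Tx queue only once several entries are free again. */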
1917 	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1918 		netif_wake_queue(dev);
1919 }
1920 
1921 /**
1922  * rhine_get_vlan_tci - extract TCI from Rx data buffer
1923  * @skb: pointer to sk_buff
1924  * @data_size: used data area of the buffer including CRC
1925  *
1926  * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1927  * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1928  * aligned following the CRC.
1929  */
1930 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1931 {
1932 	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1933 	return be16_to_cpup((__be16 *)trailer);
1934 }
1935 
1936 /* Process up to limit frames from receive ring */
1937 static int rhine_rx(struct net_device *dev, int limit)
1938 {
1939 	struct rhine_private *rp = netdev_priv(dev);
1940 	struct device *hwdev = dev->dev.parent;
1941 	int count;
1942 	int entry = rp->cur_rx % RX_RING_SIZE;
1943 
1944 	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1945 		  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1946 
1947 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1948 	for (count = 0; count < limit; ++count) {
1949 		struct rx_desc *desc = rp->rx_head_desc;
1950 		u32 desc_status = le32_to_cpu(desc->rx_status);
1951 		u32 desc_length = le32_to_cpu(desc->desc_length);
1952 		int data_size = desc_status >> 16;
1953 
1954 		if (desc_status & DescOwn)
1955 			break;
1956 
1957 		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1958 			  desc_status);
1959 
1960 		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1961 			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
					    entry, data_size, desc_status);
1967 				netdev_warn(dev,
1968 					    "Oversized Ethernet frame %p vs %p\n",
1969 					    rp->rx_head_desc,
1970 					    &rp->rx_ring[entry]);
1971 				dev->stats.rx_length_errors++;
1972 			} else if (desc_status & RxErr) {
				/* There was an error. */
1974 				netif_dbg(rp, rx_err, dev,
1975 					  "%s() Rx error %08x\n", __func__,
1976 					  desc_status);
1977 				dev->stats.rx_errors++;
1978 				if (desc_status & 0x0030)
1979 					dev->stats.rx_length_errors++;
1980 				if (desc_status & 0x0048)
1981 					dev->stats.rx_fifo_errors++;
1982 				if (desc_status & 0x0004)
1983 					dev->stats.rx_frame_errors++;
1984 				if (desc_status & 0x0002) {
1985 					/* this can also be updated outside the interrupt handler */
1986 					spin_lock(&rp->lock);
1987 					dev->stats.rx_crc_errors++;
1988 					spin_unlock(&rp->lock);
1989 				}
1990 			}
1991 		} else {
1992 			struct sk_buff *skb = NULL;
1993 			/* Length should omit the CRC */
1994 			int pkt_len = data_size - 4;
1995 			u16 vlan_tci = 0;
1996 
1997 			/* Check if the packet is long enough to accept without
1998 			   copying to a minimally-sized skbuff. */
1999 			if (pkt_len < rx_copybreak)
2000 				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2001 			if (skb) {
2002 				dma_sync_single_for_cpu(hwdev,
2003 							rp->rx_skbuff_dma[entry],
2004 							rp->rx_buf_sz,
2005 							DMA_FROM_DEVICE);
2006 
2007 				skb_copy_to_linear_data(skb,
2008 						 rp->rx_skbuff[entry]->data,
2009 						 pkt_len);
2010 				skb_put(skb, pkt_len);
2011 				dma_sync_single_for_device(hwdev,
2012 							   rp->rx_skbuff_dma[entry],
2013 							   rp->rx_buf_sz,
2014 							   DMA_FROM_DEVICE);
2015 			} else {
2016 				skb = rp->rx_skbuff[entry];
2017 				if (skb == NULL) {
2018 					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
2019 					break;
2020 				}
2021 				rp->rx_skbuff[entry] = NULL;
2022 				skb_put(skb, pkt_len);
2023 				dma_unmap_single(hwdev,
2024 						 rp->rx_skbuff_dma[entry],
2025 						 rp->rx_buf_sz,
2026 						 DMA_FROM_DEVICE);
2027 			}
2028 
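			/*
			 * If the chip flagged a tagged frame, the stripped
			 * 802.1Q tag follows the CRC in the Rx buffer (see
			 * rhine_get_vlan_tci()).
			 */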
2029 			if (unlikely(desc_length & DescTag))
2030 				vlan_tci = rhine_get_vlan_tci(skb, data_size);
2031 
2032 			skb->protocol = eth_type_trans(skb, dev);
2033 
2034 			if (unlikely(desc_length & DescTag))
2035 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2036 			netif_receive_skb(skb);
2037 
2038 			u64_stats_update_begin(&rp->rx_stats.syncp);
2039 			rp->rx_stats.bytes += pkt_len;
2040 			rp->rx_stats.packets++;
2041 			u64_stats_update_end(&rp->rx_stats.syncp);
2042 		}
2043 		entry = (++rp->cur_rx) % RX_RING_SIZE;
2044 		rp->rx_head_desc = &rp->rx_ring[entry];
2045 	}
2046 
2047 	/* Refill the Rx ring buffers. */
2048 	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
2049 		struct sk_buff *skb;
2050 		entry = rp->dirty_rx % RX_RING_SIZE;
2051 		if (rp->rx_skbuff[entry] == NULL) {
2052 			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
2053 			rp->rx_skbuff[entry] = skb;
2054 			if (skb == NULL)
2055 				break;	/* Better luck next round. */
2056 			rp->rx_skbuff_dma[entry] =
2057 				dma_map_single(hwdev, skb->data,
2058 					       rp->rx_buf_sz,
2059 					       DMA_FROM_DEVICE);
2060 			if (dma_mapping_error(hwdev,
2061 					      rp->rx_skbuff_dma[entry])) {
2062 				dev_kfree_skb(skb);
2063 				rp->rx_skbuff_dma[entry] = 0;
2064 				break;
2065 			}
2066 			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
2067 		}
2068 		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
2069 	}
2070 
2071 	return count;
2072 }
2073 
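/*
 * Restart the transmitter after a Tx error: point the chip at the first
 * descriptor that has not completed yet and kick it, unless further errors
 * are already pending.
 */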
static void rhine_restart_tx(struct net_device *dev)
{
2075 	struct rhine_private *rp = netdev_priv(dev);
2076 	void __iomem *ioaddr = rp->base;
2077 	int entry = rp->dirty_tx % TX_RING_SIZE;
2078 	u32 intr_status;
2079 
2080 	/*
2081 	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here again soon anyway.
2083 	 */
2084 	intr_status = rhine_get_events(rp);
2085 
2086 	if ((intr_status & IntrTxErrSummary) == 0) {
2087 
2088 		/* We know better than the chip where it should continue. */
2089 		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2090 		       ioaddr + TxRingPtr);
2091 
2092 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2093 		       ioaddr + ChipCmd);
2094 
2095 		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2096 			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2097 			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2098 
2099 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2100 		       ioaddr + ChipCmd1);
2101 		IOSYNC;
	} else {
2104 		/* This should never happen */
2105 		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2106 			   intr_status);
2107 	}
}
2110 
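/*
 * Work item for infrequent ("slow") events such as link changes and PCI
 * errors; runs outside the NAPI hot path and re-enables interrupts when done.
 */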
2111 static void rhine_slow_event_task(struct work_struct *work)
2112 {
2113 	struct rhine_private *rp =
2114 		container_of(work, struct rhine_private, slow_event_task);
2115 	struct net_device *dev = rp->dev;
2116 	u32 intr_status;
2117 
2118 	mutex_lock(&rp->task_lock);
2119 
2120 	if (!rp->task_enable)
2121 		goto out_unlock;
2122 
2123 	intr_status = rhine_get_events(rp);
2124 	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2125 
2126 	if (intr_status & IntrLinkChange)
2127 		rhine_check_media(dev, 0);
2128 
2129 	if (intr_status & IntrPCIErr)
2130 		netif_warn(rp, hw, dev, "PCI error\n");
2131 
2132 	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2133 
2134 out_unlock:
2135 	mutex_unlock(&rp->task_lock);
2136 }
2137 
2138 static struct rtnl_link_stats64 *
2139 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2140 {
2141 	struct rhine_private *rp = netdev_priv(dev);
2142 	unsigned int start;
2143 
2144 	spin_lock_bh(&rp->lock);
2145 	rhine_update_rx_crc_and_missed_errord(rp);
2146 	spin_unlock_bh(&rp->lock);
2147 
2148 	netdev_stats_to_stats64(stats, &dev->stats);
2149 
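	/*
	 * Snapshot the 64-bit Rx/Tx counters, retrying if an update races
	 * with the read.
	 */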
2150 	do {
2151 		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2152 		stats->rx_packets = rp->rx_stats.packets;
2153 		stats->rx_bytes = rp->rx_stats.bytes;
2154 	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2155 
2156 	do {
2157 		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2158 		stats->tx_packets = rp->tx_stats.packets;
2159 		stats->tx_bytes = rp->tx_stats.bytes;
2160 	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2161 
2162 	return stats;
2163 }
2164 
2165 static void rhine_set_rx_mode(struct net_device *dev)
2166 {
2167 	struct rhine_private *rp = netdev_priv(dev);
2168 	void __iomem *ioaddr = rp->base;
2169 	u32 mc_filter[2];	/* Multicast hash filter */
2170 	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2171 	struct netdev_hw_addr *ha;
2172 
2173 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2174 		rx_mode = 0x1C;
2175 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2176 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2177 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2178 		   (dev->flags & IFF_ALLMULTI)) {
2179 		/* Too many to match, or accept all multicasts. */
2180 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2181 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2182 	} else if (rp->quirks & rqMgmt) {
2183 		int i = 0;
2184 		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2185 		netdev_for_each_mc_addr(ha, dev) {
2186 			if (i == MCAM_SIZE)
2187 				break;
2188 			rhine_set_cam(ioaddr, i, ha->addr);
2189 			mCAMmask |= 1 << i;
2190 			i++;
2191 		}
2192 		rhine_set_cam_mask(ioaddr, mCAMmask);
2193 	} else {
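		/*
		 * No multicast CAM on this chip: hash each address into the
		 * 64-bit multicast filter using the top six bits of its CRC.
		 */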
2194 		memset(mc_filter, 0, sizeof(mc_filter));
2195 		netdev_for_each_mc_addr(ha, dev) {
2196 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2197 
2198 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2199 		}
2200 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2201 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2202 	}
2203 	/* enable/disable VLAN receive filtering */
2204 	if (rp->quirks & rqMgmt) {
2205 		if (dev->flags & IFF_PROMISC)
2206 			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2207 		else
2208 			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2209 	}
2210 	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2211 }
2212 
2213 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2214 {
2215 	struct device *hwdev = dev->dev.parent;
2216 
2217 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2218 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2219 	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2220 }
2221 
2222 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2223 {
2224 	struct rhine_private *rp = netdev_priv(dev);
2225 	int rc;
2226 
2227 	mutex_lock(&rp->task_lock);
2228 	rc = mii_ethtool_gset(&rp->mii_if, cmd);
2229 	mutex_unlock(&rp->task_lock);
2230 
2231 	return rc;
2232 }
2233 
2234 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2235 {
2236 	struct rhine_private *rp = netdev_priv(dev);
2237 	int rc;
2238 
2239 	mutex_lock(&rp->task_lock);
2240 	rc = mii_ethtool_sset(&rp->mii_if, cmd);
2241 	rhine_set_carrier(&rp->mii_if);
2242 	mutex_unlock(&rp->task_lock);
2243 
2244 	return rc;
2245 }
2246 
2247 static int netdev_nway_reset(struct net_device *dev)
2248 {
2249 	struct rhine_private *rp = netdev_priv(dev);
2250 
2251 	return mii_nway_restart(&rp->mii_if);
2252 }
2253 
2254 static u32 netdev_get_link(struct net_device *dev)
2255 {
2256 	struct rhine_private *rp = netdev_priv(dev);
2257 
2258 	return mii_link_ok(&rp->mii_if);
2259 }
2260 
2261 static u32 netdev_get_msglevel(struct net_device *dev)
2262 {
2263 	struct rhine_private *rp = netdev_priv(dev);
2264 
2265 	return rp->msg_enable;
2266 }
2267 
2268 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2269 {
2270 	struct rhine_private *rp = netdev_priv(dev);
2271 
2272 	rp->msg_enable = value;
2273 }
2274 
2275 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2276 {
2277 	struct rhine_private *rp = netdev_priv(dev);
2278 
2279 	if (!(rp->quirks & rqWOL))
2280 		return;
2281 
2282 	spin_lock_irq(&rp->lock);
2283 	wol->supported = WAKE_PHY | WAKE_MAGIC |
2284 			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2285 	wol->wolopts = rp->wolopts;
2286 	spin_unlock_irq(&rp->lock);
2287 }
2288 
2289 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2290 {
2291 	struct rhine_private *rp = netdev_priv(dev);
2292 	u32 support = WAKE_PHY | WAKE_MAGIC |
2293 		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2294 
2295 	if (!(rp->quirks & rqWOL))
2296 		return -EINVAL;
2297 
2298 	if (wol->wolopts & ~support)
2299 		return -EINVAL;
2300 
2301 	spin_lock_irq(&rp->lock);
2302 	rp->wolopts = wol->wolopts;
2303 	spin_unlock_irq(&rp->lock);
2304 
2305 	return 0;
2306 }
2307 
2308 static const struct ethtool_ops netdev_ethtool_ops = {
2309 	.get_drvinfo		= netdev_get_drvinfo,
2310 	.get_settings		= netdev_get_settings,
2311 	.set_settings		= netdev_set_settings,
2312 	.nway_reset		= netdev_nway_reset,
2313 	.get_link		= netdev_get_link,
2314 	.get_msglevel		= netdev_get_msglevel,
2315 	.set_msglevel		= netdev_set_msglevel,
2316 	.get_wol		= rhine_get_wol,
2317 	.set_wol		= rhine_set_wol,
2318 };
2319 
2320 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2321 {
2322 	struct rhine_private *rp = netdev_priv(dev);
2323 	int rc;
2324 
2325 	if (!netif_running(dev))
2326 		return -EINVAL;
2327 
2328 	mutex_lock(&rp->task_lock);
2329 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2330 	rhine_set_carrier(&rp->mii_if);
2331 	mutex_unlock(&rp->task_lock);
2332 
2333 	return rc;
2334 }
2335 
2336 static int rhine_close(struct net_device *dev)
2337 {
2338 	struct rhine_private *rp = netdev_priv(dev);
2339 	void __iomem *ioaddr = rp->base;
2340 
2341 	rhine_task_disable(rp);
2342 	napi_disable(&rp->napi);
2343 	netif_stop_queue(dev);
2344 
2345 	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2346 		  ioread16(ioaddr + ChipCmd));
2347 
2348 	/* Switch to loopback mode to avoid hardware races. */
2349 	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2350 
2351 	rhine_irq_disable(rp);
2352 
2353 	/* Stop the chip's Tx and Rx processes. */
2354 	iowrite16(CmdStop, ioaddr + ChipCmd);
2355 
2356 	free_irq(rp->irq, dev);
2357 	free_rbufs(dev);
2358 	free_tbufs(dev);
2359 	free_ring(dev);
2360 
2361 	return 0;
2362 }
2363 
2364 
2365 static void rhine_remove_one_pci(struct pci_dev *pdev)
2366 {
2367 	struct net_device *dev = pci_get_drvdata(pdev);
2368 	struct rhine_private *rp = netdev_priv(dev);
2369 
2370 	unregister_netdev(dev);
2371 
2372 	pci_iounmap(pdev, rp->base);
2373 	pci_release_regions(pdev);
2374 
2375 	free_netdev(dev);
2376 	pci_disable_device(pdev);
2377 }
2378 
2379 static int rhine_remove_one_platform(struct platform_device *pdev)
2380 {
2381 	struct net_device *dev = platform_get_drvdata(pdev);
2382 	struct rhine_private *rp = netdev_priv(dev);
2383 
2384 	unregister_netdev(dev);
2385 
2386 	iounmap(rp->base);
2387 
2388 	free_netdev(dev);
2389 
2390 	return 0;
2391 }
2392 
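/*
 * Arm the configured Wake-on-LAN sources at shutdown and, when powering off
 * (and not avoiding D3), put the chip into D3hot with wake-up enabled.
 */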
2393 static void rhine_shutdown_pci(struct pci_dev *pdev)
2394 {
2395 	struct net_device *dev = pci_get_drvdata(pdev);
2396 	struct rhine_private *rp = netdev_priv(dev);
2397 	void __iomem *ioaddr = rp->base;
2398 
2399 	if (!(rp->quirks & rqWOL))
2400 		return; /* Nothing to do for non-WOL adapters */
2401 
2402 	rhine_power_init(dev);
2403 
2404 	/* Make sure we use pattern 0, 1 and not 4, 5 */
2405 	if (rp->quirks & rq6patterns)
2406 		iowrite8(0x04, ioaddr + WOLcgClr);
2407 
2408 	spin_lock(&rp->lock);
2409 
2410 	if (rp->wolopts & WAKE_MAGIC) {
2411 		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2412 		/*
2413 		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2414 		 * not cooperate otherwise.
2415 		 */
2416 		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2417 	}
2418 
2419 	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2420 		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2421 
2422 	if (rp->wolopts & WAKE_PHY)
2423 		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2424 
2425 	if (rp->wolopts & WAKE_UCAST)
2426 		iowrite8(WOLucast, ioaddr + WOLcrSet);
2427 
2428 	if (rp->wolopts) {
2429 		/* Enable legacy WOL (for old motherboards) */
2430 		iowrite8(0x01, ioaddr + PwcfgSet);
2431 		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2432 	}
2433 
2434 	spin_unlock(&rp->lock);
2435 
2436 	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2437 		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2438 
2439 		pci_wake_from_d3(pdev, true);
2440 		pci_set_power_state(pdev, PCI_D3hot);
2441 	}
2442 }
2443 
2444 #ifdef CONFIG_PM_SLEEP
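/*
 * System sleep: quiesce the NIC on suspend; rebuild the rings and reprogram
 * the chip on resume.
 */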
2445 static int rhine_suspend(struct device *device)
2446 {
2447 	struct net_device *dev = dev_get_drvdata(device);
2448 	struct rhine_private *rp = netdev_priv(dev);
2449 
2450 	if (!netif_running(dev))
2451 		return 0;
2452 
2453 	rhine_task_disable(rp);
2454 	rhine_irq_disable(rp);
2455 	napi_disable(&rp->napi);
2456 
2457 	netif_device_detach(dev);
2458 
2459 	if (dev_is_pci(device))
2460 		rhine_shutdown_pci(to_pci_dev(device));
2461 
2462 	return 0;
2463 }
2464 
2465 static int rhine_resume(struct device *device)
2466 {
2467 	struct net_device *dev = dev_get_drvdata(device);
2468 	struct rhine_private *rp = netdev_priv(dev);
2469 
2470 	if (!netif_running(dev))
2471 		return 0;
2472 
2473 	enable_mmio(rp->pioaddr, rp->quirks);
2474 	rhine_power_init(dev);
2475 	free_tbufs(dev);
2476 	free_rbufs(dev);
2477 	alloc_tbufs(dev);
2478 	alloc_rbufs(dev);
2479 	rhine_task_enable(rp);
2480 	spin_lock_bh(&rp->lock);
2481 	init_registers(dev);
2482 	spin_unlock_bh(&rp->lock);
2483 
2484 	netif_device_attach(dev);
2485 
2486 	return 0;
2487 }
2488 
2489 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2490 #define RHINE_PM_OPS	(&rhine_pm_ops)
2491 
2492 #else
2493 
2494 #define RHINE_PM_OPS	NULL
2495 
2496 #endif /* !CONFIG_PM_SLEEP */
2497 
2498 static struct pci_driver rhine_driver_pci = {
2499 	.name		= DRV_NAME,
2500 	.id_table	= rhine_pci_tbl,
2501 	.probe		= rhine_init_one_pci,
2502 	.remove		= rhine_remove_one_pci,
2503 	.shutdown	= rhine_shutdown_pci,
2504 	.driver.pm	= RHINE_PM_OPS,
2505 };
2506 
2507 static struct platform_driver rhine_driver_platform = {
2508 	.probe		= rhine_init_one_platform,
2509 	.remove		= rhine_remove_one_platform,
2510 	.driver = {
2511 		.name	= DRV_NAME,
2512 		.of_match_table	= rhine_of_tbl,
2513 		.pm		= RHINE_PM_OPS,
2514 	}
2515 };
2516 
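/*
 * Systems with BIOSes known to fail PXE boot when the chip is left in D3
 * (see the avoid_D3 handling in rhine_init()).
 */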
2517 static struct dmi_system_id rhine_dmi_table[] __initdata = {
2518 	{
2519 		.ident = "EPIA-M",
2520 		.matches = {
2521 			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2522 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2523 		},
2524 	},
2525 	{
2526 		.ident = "KV7",
2527 		.matches = {
2528 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2529 			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2530 		},
2531 	},
2532 	{ NULL }
2533 };
2534 
2535 static int __init rhine_init(void)
2536 {
2537 	int ret_pci, ret_platform;
2538 
/* When built as a module, this is printed whether or not devices are found in probe. */
2540 #ifdef MODULE
2541 	pr_info("%s\n", version);
2542 #endif
2543 	if (dmi_check_system(rhine_dmi_table)) {
2544 		/* these BIOSes fail at PXE boot if chip is in D3 */
2545 		avoid_D3 = true;
2546 		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2547 	}
	} else if (avoid_D3)
2550 
2551 	ret_pci = pci_register_driver(&rhine_driver_pci);
2552 	ret_platform = platform_driver_register(&rhine_driver_platform);
2553 	if ((ret_pci < 0) && (ret_platform < 0))
2554 		return ret_pci;
2555 
2556 	return 0;
2557 }
2558 
2559 
2560 static void __exit rhine_cleanup(void)
2561 {
2562 	platform_driver_unregister(&rhine_driver_platform);
2563 	pci_unregister_driver(&rhine_driver_pci);
2564 }
2565 
2566 
2567 module_init(rhine_init);
2568 module_exit(rhine_cleanup);
2569