/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug = 1;			/* 1 normal messages, 0 quiet .. 5 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used. */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer. */

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3: 0==32, 1==64, 2==96, 3==128 bytes. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
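
/*
 * Typical module usage (the values below are illustrative only; each
 * array takes one comma-separated entry per card, in probe order, and
 * unset entries keep the -1 "autodetect" default):
 *
 *	modprobe epic100 debug=2 options=0x10,0x10 full_duplex=1,1
 */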

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

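The EPIC uses a simple chained-descriptor scheme (see epic_init_ring()
below): rx_ring and tx_ring are DMA-coherent arrays of four-word
descriptors (status, buffer address, buffer length, next), where each
descriptor's 'next' field holds the bus address of its successor and
the final entry points back to the base, so the chip sees a circular
list.  Ownership is tracked with the DescOwn status bit: the driver
sets it when a descriptor and its skbuff are handed to the chip, and
the chip clears it on completion.  The driver walks the rings with
free-running cur_rx/cur_tx and dirty_rx/dirty_tx indices taken modulo
the ring size.
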
IIIb. References

http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IIIc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);

#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
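
/*
 * media2miictl[] maps the low four bits of dev->if_port to an MII BMCR
 * value that epic_open() writes to force a media selection.  The
 * non-zero entries are standard BMCR bits: 0x2000 selects 100 Mb/s,
 * 0x0100 full duplex, 0x2100 both; 0x0C00 (power-down | isolate)
 * apparently parks the MII PHY for the non-MII media choices.
 */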

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};
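
/* In both rxstatus and txstatus the low 16 bits carry status/control
   flags and the high 16 bits a frame length: epic_start_xmit() stores
   the (padded) transmit length there, and epic_rx() reads back the
   received length, which includes the 4-byte CRC. */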

#define PRIV_ALIGN	15	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;				/* NWay media advertisement */
	int mii_phy_cnt;
	u32 ethtool_ops_nesting;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;			/* Last dev->if_port value. */
};

static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout		= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	__le16 addr[ETH_ALEN / 2];
	void *ring_space;
	dma_addr_t ring_dma;

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		addr[i] = cpu_to_le16(er16(LAN0 + i*4));
	eth_hw_addr_set(dev, (u8 *)addr);

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	er32(EECTL)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
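
/*
 * read_eeprom() below bit-bangs a Microwire-style serial protocol
 * through EECTL: assert chip select, shift the read command out
 * MSB-first (13 bits, data sampled on the rising edge of
 * EE_SHIFT_CLK), then clock 16 result bits back in via EE_DATA_READ.
 * Each eeprom_delay() read of EECTL doubles as a flush of the posted
 * write, which appears to be the only delay the part needs.
 */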

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, 0x00000000);
}

static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}

static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}

#define MII_READOP		1
#define MII_WRITEOP		2
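
/* The MIICtrl command word layout, as used by mdio_read()/mdio_write()
   below: PHY address in bits 9-13, register number in bits 4-8, and
   the MII_READOP/MII_WRITEOP command bits doubling as busy flags that
   the chip clears when the cycle completes. */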
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&ep->timer, epic_timer, 0);
	ep->timer.expires = jiffies + 3*HZ;
	add_timer(&ep->timer);

	return rc;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
			    ep->mii.full_duplex ? "full" : "half",
			    ep->phys[0], mii_lpa);
		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
	}
}

static void epic_timer(struct timer_list *t)
{
	struct epic_private *ep = timer_container_of(ep, t, timer);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
			   er32(TxSTAT));
		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
							skb->data,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}

static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
						    skb->data, skb->len,
						    DMA_TO_DEVICE);
	if (free_count < TX_QUEUE_LEN/2) { /* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was a major error, log it. */
	if (debug > 1)
		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
			   status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}

static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		dma_unmap_single(&ep->pci_dev->dev,
				 ep->tx_ring[entry].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&ep->pci_dev->dev,
							ep->rx_ring[entry].bufaddr,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&ep->pci_dev->dev,
							   ep->rx_ring[entry].bufaddr,
							   ep->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&ep->pci_dev->dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb = ep->rx_skbuff[entry];
				skb_put(skb, pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
								    skb->data,
								    ep->rx_buf_sz,
								    DMA_FROM_DEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;
	int status;

	status = er32(INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow)	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		ew16(COMMAND, RxQueued);
}

static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int work_done;

	epic_tx(dev, ep);

	work_done = epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ep->napi_lock, flags);

		ew32(INTSTAT, EpicNapiEvent);
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
	}

	return work_done;
}

static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	timer_delete_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (netif_running(dev)) {
		struct net_device_stats *stats = &dev->stats;

		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new filter, not around filling mc_filter[].  This is non-deterministic
   when re-entered but still correct. */
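
/* For reference, the (currently unused) hashing path below indexes a
   64-bit filter with the low six bits of the little-endian CRC-32 of
   each multicast address: bit_nr >> 3 picks the byte in mc_filter[]
   and bit_nr & 7 the bit within it. */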

static void set_rx_mode(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned char mc_filter[8];		 /* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		ew32(RxCtrl, 0x002c);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		ew32(RxCtrl, 0x000c);
	} else if (netdev_mc_empty(dev)) {
		ew32(RxCtrl, 0x0004);
		return;
	} else {					/* Never executed, for now. */
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 3] |= 1 << (bit_nr & 7);
		}
	}
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (ep->ethtool_ops_nesting == U32_MAX)
		return -EBUSY;
	/* power-up, if interface is down */
	if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-down, if interface is down */
	if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
	return rc;
}


static void epic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
	pci_iounmap(pdev, ep->ioaddr);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* pci_power_off(pdev, -1); */
}

static int __maybe_unused epic_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int __maybe_unused epic_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);

static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
	.driver.pm	= &epic_pm_ops,
};

module_pci_driver(epic_driver);