xref: /linux/drivers/net/ethernet/sun/cassini.c (revision 800c5eb7b5eba6cb2a32738d763fd59f0fbcdde4)
1 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
2  *
3  * Copyright (C) 2004 Sun Microsystems Inc.
4  * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation; either version 2 of the
9  * License, or (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19  * 02111-1307, USA.
20  *
21  * This driver uses the sungem driver (c) David Miller
22  * (davem@redhat.com) as its basis.
23  *
24  * The cassini chip has a number of features that distinguish it from
25  * the gem chip:
26  *  4 transmit descriptor rings that are used for either QoS (VLAN) or
27  *      load balancing (non-VLAN mode)
28  *  batching of multiple packets
29  *  multiple CPU dispatching
30  *  page-based RX descriptor engine with separate completion rings
31  *  Gigabit support (GMII and PCS interface)
32  *  MIF link up/down detection works
33  *
34  * RX is handled by page sized buffers that are attached as fragments to
35  * the skb. here's what's done:
36  *  -- driver allocates pages at a time and keeps reference counts
37  *     on them.
38  *  -- the upper protocol layers assume that the header is in the skb
39  *     itself. as a result, cassini will copy a small amount (64 bytes)
40  *     to make them happy.
41  *  -- driver appends the rest of the data pages as frags to skbuffs
42  *     and increments the reference count
43  *  -- on page reclamation, the driver swaps the page with a spare page.
44  *     if that page is still in use, it frees its reference to that page,
45  *     and allocates a new page for use. otherwise, it just recycles
46  *     the page.
47  *
48  * NOTE: cassini can parse the header. however, it's not worth it
49  *       as long as the network stack requires a header copy.
50  *
51  * TX has 4 queues. currently these queues are used in a round-robin
52  * fashion for load balancing. They can also be used for QoS. for that
53  * to work, however, QoS information needs to be exposed down to the driver
54  * level so that subqueues get targeted to particular transmit rings.
55  * alternatively, the queues can be configured via use of the all-purpose
56  * ioctl.
57  *
58  * RX DATA: the rx completion ring has all the info, but the rx desc
59  * ring has all of the data. RX can conceivably come in under multiple
60  * interrupts, but the INT# assignment needs to be set up properly by
61  * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
62  * that. also, the two descriptor rings are designed to distinguish between
63  * encrypted and non-encrypted packets, but we use them for buffering
64  * instead.
65  *
66  * by default, the selective clear mask is set up to process rx packets.
67  */
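
/* a minimal, non-compiled sketch of the scheme described above (the
 * driver's real receive path is cas_rx_process_pkt() further down,
 * which also handles flows, split pages, and checksums). names and
 * sizes here are illustrative only:
 */
#if 0
static struct sk_buff *cas_rx_sketch(struct cas *cp, cas_page_t *page,
				     int off, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(cp->dev, RX_COPY_MIN + 2);
	void *addr;

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* align the IP header */

	/* copy a little so the stack finds the header in the skb itself */
	addr = cas_page_map(page->buffer);
	memcpy(skb_put(skb, RX_COPY_MIN), addr + off, RX_COPY_MIN);
	cas_page_unmap(addr);

	/* the frag holds its own reference on the page */
	get_page(page->buffer);
	skb_add_rx_frag(skb, 0, page->buffer, off + RX_COPY_MIN,
			len - RX_COPY_MIN);
	return skb;
}
#endif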
68 
69 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70 
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/types.h>
74 #include <linux/compiler.h>
75 #include <linux/slab.h>
76 #include <linux/delay.h>
77 #include <linux/init.h>
78 #include <linux/interrupt.h>
79 #include <linux/vmalloc.h>
80 #include <linux/ioport.h>
81 #include <linux/pci.h>
82 #include <linux/mm.h>
83 #include <linux/highmem.h>
84 #include <linux/list.h>
85 #include <linux/dma-mapping.h>
86 
87 #include <linux/netdevice.h>
88 #include <linux/etherdevice.h>
89 #include <linux/skbuff.h>
90 #include <linux/ethtool.h>
91 #include <linux/crc32.h>
92 #include <linux/random.h>
93 #include <linux/mii.h>
94 #include <linux/ip.h>
95 #include <linux/tcp.h>
96 #include <linux/mutex.h>
97 #include <linux/firmware.h>
98 
99 #include <net/checksum.h>
100 
101 #include <linux/atomic.h>
102 #include <asm/system.h>
103 #include <asm/io.h>
104 #include <asm/byteorder.h>
105 #include <asm/uaccess.h>
106 
107 #define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
108 #define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
109 #define CAS_NCPUS            num_online_cpus()
110 
111 #define cas_skb_release(x)  netif_rx(x)
112 
113 /* select which firmware to use */
114 #define USE_HP_WORKAROUND
115 #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
116 #define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
117 
118 #include "cassini.h"
119 
120 #define USE_TX_COMPWB      /* use completion writeback registers */
121 #define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
122 #define USE_RX_BLANK       /* hw interrupt mitigation */
123 #undef USE_ENTROPY_DEV     /* don't test for entropy device */
124 
125 /* NOTE: these aren't usable unless PCI interrupts can be assigned.
126  * also, we need to make cp->lock finer-grained.
127  */
128 #undef  USE_PCI_INTB
129 #undef  USE_PCI_INTC
130 #undef  USE_PCI_INTD
131 #undef  USE_QOS
132 
133 #undef  USE_VPD_DEBUG       /* debug vpd information if defined */
134 
135 /* rx processing options */
136 #define USE_PAGE_ORDER      /* specify to allocate large rx pages */
137 #define RX_DONT_BATCH  0    /* if 1, don't batch flows */
138 #define RX_COPY_ALWAYS 0    /* if 0, use frags */
139 #define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
140 #undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
141 
142 #define DRV_MODULE_NAME		"cassini"
143 #define DRV_MODULE_VERSION	"1.6"
144 #define DRV_MODULE_RELDATE	"21 May 2008"
145 
146 #define CAS_DEF_MSG_ENABLE	  \
147 	(NETIF_MSG_DRV		| \
148 	 NETIF_MSG_PROBE	| \
149 	 NETIF_MSG_LINK		| \
150 	 NETIF_MSG_TIMER	| \
151 	 NETIF_MSG_IFDOWN	| \
152 	 NETIF_MSG_IFUP		| \
153 	 NETIF_MSG_RX_ERR	| \
154 	 NETIF_MSG_TX_ERR)
155 
156 /* length of time before we decide the hardware is borked,
157  * and dev->tx_timeout() should be called to fix the problem
158  */
159 #define CAS_TX_TIMEOUT			(HZ)
160 #define CAS_LINK_TIMEOUT                (22*HZ/10)
161 #define CAS_LINK_FAST_TIMEOUT           (1)
162 
163 /* timeout values for state changing. these specify the number
164  * of 10us delays to be used before giving up.
165  */
166 #define STOP_TRIES_PHY 1000
167 #define STOP_TRIES     5000
168 
169 /* specify a minimum frame size to deal with some fifo issues
170  * max mtu == 2 * page size - ethernet header - 64 - swivel =
171  *            2 * page_size - 0x50
172  */
173 #define CAS_MIN_FRAME			97
174 #define CAS_1000MB_MIN_FRAME            255
175 #define CAS_MIN_MTU                     60
176 #define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
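
/* worked example of the bound above: 0x50 == 80 == 14 (ethernet header)
 * + 64 (header copy) + 2 (swivel). with 2K rx pages that gives
 * min(2*2048 - 0x50, 9000) = 4016; with 8K pages the 9000-byte cap
 * applies.
 */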
177 
178 #if 1
179 /*
180  * Eliminate these and use separate atomic counters for each, to
181  * avoid a race condition.
182  */
183 #else
184 #define CAS_RESET_MTU                   1
185 #define CAS_RESET_ALL                   2
186 #define CAS_RESET_SPARE                 3
187 #endif
188 
189 static char version[] __devinitdata =
190 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
191 
192 static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
193 static int link_mode;
194 
195 MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
196 MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
197 MODULE_LICENSE("GPL");
198 MODULE_FIRMWARE("sun/cassini.bin");
199 module_param(cassini_debug, int, 0);
200 MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
201 module_param(link_mode, int, 0);
202 MODULE_PARM_DESC(link_mode, "default link mode");
203 
204 /*
205  * Workaround for a PCS bug in which the link goes down due to the chip
206  * being confused and never showing a link status of "up."
207  */
208 #define DEFAULT_LINKDOWN_TIMEOUT 5
209 /*
210  * Value in seconds, for user input.
211  */
212 static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
213 module_param(linkdown_timeout, int, 0);
214 MODULE_PARM_DESC(linkdown_timeout,
215 "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
216 
217 /*
218  * value in 'ticks' (units used by jiffies). Set when we init the
219  * module because 'HZ' is actually a function call on some flavors of
220  * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
221  */
222 static int link_transition_timeout;
223 
224 
225 
226 static u16 link_modes[] __devinitdata = {
227 	BMCR_ANENABLE,			 /* 0 : autoneg */
228 	0,				 /* 1 : 10bt half duplex */
229 	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
230 	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
231 	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
232 	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
233 };
234 
235 static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
236 	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
237 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238 	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
239 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240 	{ 0, }
241 };
242 
243 MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
244 
245 static void cas_set_link_modes(struct cas *cp);
246 
247 static inline void cas_lock_tx(struct cas *cp)
248 {
249 	int i;
250 
251 	for (i = 0; i < N_TX_RINGS; i++)
252 		spin_lock(&cp->tx_lock[i]);
253 }
254 
255 static inline void cas_lock_all(struct cas *cp)
256 {
257 	spin_lock_irq(&cp->lock);
258 	cas_lock_tx(cp);
259 }
260 
261 /* WTZ: QA was finding deadlock problems with the previous
262  * versions after long test runs with multiple cards per machine.
263  * See if replacing cas_lock_all with safer versions helps. The
264  * symptoms QA is reporting match those we'd expect if interrupts
265  * aren't being properly restored, and we fixed a previous deadlock
266  * with similar symptoms by using save/restore versions in other
267  * places.
268  */
269 #define cas_lock_all_save(cp, flags) \
270 do { \
271 	struct cas *xxxcp = (cp); \
272 	spin_lock_irqsave(&xxxcp->lock, flags); \
273 	cas_lock_tx(xxxcp); \
274 } while (0)
275 
276 static inline void cas_unlock_tx(struct cas *cp)
277 {
278 	int i;
279 
280 	for (i = N_TX_RINGS; i > 0; i--)
281 		spin_unlock(&cp->tx_lock[i - 1]);
282 }
283 
284 static inline void cas_unlock_all(struct cas *cp)
285 {
286 	cas_unlock_tx(cp);
287 	spin_unlock_irq(&cp->lock);
288 }
289 
290 #define cas_unlock_all_restore(cp, flags) \
291 do { \
292 	struct cas *xxxcp = (cp); \
293 	cas_unlock_tx(xxxcp); \
294 	spin_unlock_irqrestore(&xxxcp->lock, flags); \
295 } while (0)
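
/* a minimal usage sketch of the save/restore pair; callers elsewhere
 * in this driver (e.g. the reset and multicast paths) follow this
 * shape:
 */
#if 0
static void cas_example_locked_op(struct cas *cp)
{
	unsigned long flags;

	cas_lock_all_save(cp, flags);
	/* ... touch state guarded by cp->lock and all tx ring locks ... */
	cas_unlock_all_restore(cp, flags);
}
#endif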
296 
297 static void cas_disable_irq(struct cas *cp, const int ring)
298 {
299 	/* Make sure we won't get any more interrupts */
300 	if (ring == 0) {
301 		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
302 		return;
303 	}
304 
305 	/* disable completion interrupts and selectively mask */
306 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
307 		switch (ring) {
308 #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
309 #ifdef USE_PCI_INTB
310 		case 1:
311 #endif
312 #ifdef USE_PCI_INTC
313 		case 2:
314 #endif
315 #ifdef USE_PCI_INTD
316 		case 3:
317 #endif
318 			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
319 			       cp->regs + REG_PLUS_INTRN_MASK(ring));
320 			break;
321 #endif
322 		default:
323 			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
324 			       REG_PLUS_INTRN_MASK(ring));
325 			break;
326 		}
327 	}
328 }
329 
330 static inline void cas_mask_intr(struct cas *cp)
331 {
332 	int i;
333 
334 	for (i = 0; i < N_RX_COMP_RINGS; i++)
335 		cas_disable_irq(cp, i);
336 }
337 
338 static void cas_enable_irq(struct cas *cp, const int ring)
339 {
340 	if (ring == 0) { /* all but TX_DONE */
341 		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
342 		return;
343 	}
344 
345 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
346 		switch (ring) {
347 #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
348 #ifdef USE_PCI_INTB
349 		case 1:
350 #endif
351 #ifdef USE_PCI_INTC
352 		case 2:
353 #endif
354 #ifdef USE_PCI_INTD
355 		case 3:
356 #endif
357 			writel(INTRN_MASK_RX_EN, cp->regs +
358 			       REG_PLUS_INTRN_MASK(ring));
359 			break;
360 #endif
361 		default:
362 			break;
363 		}
364 	}
365 }
366 
367 static inline void cas_unmask_intr(struct cas *cp)
368 {
369 	int i;
370 
371 	for (i = 0; i < N_RX_COMP_RINGS; i++)
372 		cas_enable_irq(cp, i);
373 }
374 
375 static inline void cas_entropy_gather(struct cas *cp)
376 {
377 #ifdef USE_ENTROPY_DEV
378 	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
379 		return;
380 
381 	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
382 			    readl(cp->regs + REG_ENTROPY_IV),
383 			    sizeof(uint64_t)*8);
384 #endif
385 }
386 
387 static inline void cas_entropy_reset(struct cas *cp)
388 {
389 #ifdef USE_ENTROPY_DEV
390 	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
391 		return;
392 
393 	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
394 	       cp->regs + REG_BIM_LOCAL_DEV_EN);
395 	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
396 	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
397 
398 	/* if we read back 0x0, we don't have an entropy device */
399 	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
400 		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
401 #endif
402 }
403 
404 /* access to the phy. the following assumes that we've initialized the MIF to
405  * be in frame rather than bit-bang mode
406  */
407 static u16 cas_phy_read(struct cas *cp, int reg)
408 {
409 	u32 cmd;
410 	int limit = STOP_TRIES_PHY;
411 
412 	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
413 	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
414 	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
415 	cmd |= MIF_FRAME_TURN_AROUND_MSB;
416 	writel(cmd, cp->regs + REG_MIF_FRAME);
417 
418 	/* poll for completion */
419 	while (limit-- > 0) {
420 		udelay(10);
421 		cmd = readl(cp->regs + REG_MIF_FRAME);
422 		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
423 			return cmd & MIF_FRAME_DATA_MASK;
424 	}
425 	return 0xFFFF; /* -1 */
426 }
427 
428 static int cas_phy_write(struct cas *cp, int reg, u16 val)
429 {
430 	int limit = STOP_TRIES_PHY;
431 	u32 cmd;
432 
433 	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
434 	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
435 	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
436 	cmd |= MIF_FRAME_TURN_AROUND_MSB;
437 	cmd |= val & MIF_FRAME_DATA_MASK;
438 	writel(cmd, cp->regs + REG_MIF_FRAME);
439 
440 	/* poll for completion */
441 	while (limit-- > 0) {
442 		udelay(10);
443 		cmd = readl(cp->regs + REG_MIF_FRAME);
444 		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
445 			return 0;
446 	}
447 	return -1;
448 }
449 
450 static void cas_phy_powerup(struct cas *cp)
451 {
452 	u16 ctl = cas_phy_read(cp, MII_BMCR);
453 
454 	if ((ctl & BMCR_PDOWN) == 0)
455 		return;
456 	ctl &= ~BMCR_PDOWN;
457 	cas_phy_write(cp, MII_BMCR, ctl);
458 }
459 
460 static void cas_phy_powerdown(struct cas *cp)
461 {
462 	u16 ctl = cas_phy_read(cp, MII_BMCR);
463 
464 	if (ctl & BMCR_PDOWN)
465 		return;
466 	ctl |= BMCR_PDOWN;
467 	cas_phy_write(cp, MII_BMCR, ctl);
468 }
469 
470 /* cp->lock held. note: the last put_page will free the buffer */
471 static int cas_page_free(struct cas *cp, cas_page_t *page)
472 {
473 	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
474 		       PCI_DMA_FROMDEVICE);
475 	__free_pages(page->buffer, cp->page_order);
476 	kfree(page);
477 	return 0;
478 }
479 
480 #ifdef RX_COUNT_BUFFERS
481 #define RX_USED_ADD(x, y)       ((x)->used += (y))
482 #define RX_USED_SET(x, y)       ((x)->used  = (y))
483 #else
484 #define RX_USED_ADD(x, y)
485 #define RX_USED_SET(x, y)
486 #endif
487 
488 /* local page allocation routines for the receive buffers. jumbo pages
489  * require at least 8K contiguous and 8K aligned buffers.
490  */
491 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
492 {
493 	cas_page_t *page;
494 
495 	page = kmalloc(sizeof(cas_page_t), flags);
496 	if (!page)
497 		return NULL;
498 
499 	INIT_LIST_HEAD(&page->list);
500 	RX_USED_SET(page, 0);
501 	page->buffer = alloc_pages(flags, cp->page_order);
502 	if (!page->buffer)
503 		goto page_err;
504 	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
505 				      cp->page_size, PCI_DMA_FROMDEVICE);
506 	return page;
507 
508 page_err:
509 	kfree(page);
510 	return NULL;
511 }
512 
513 /* initialize spare pool of rx buffers, but allocate during the open */
514 static void cas_spare_init(struct cas *cp)
515 {
516 	spin_lock(&cp->rx_inuse_lock);
517 	INIT_LIST_HEAD(&cp->rx_inuse_list);
518 	spin_unlock(&cp->rx_inuse_lock);
519 
520 	spin_lock(&cp->rx_spare_lock);
521 	INIT_LIST_HEAD(&cp->rx_spare_list);
522 	cp->rx_spares_needed = RX_SPARE_COUNT;
523 	spin_unlock(&cp->rx_spare_lock);
524 }
525 
526 /* used on close. free all the spare buffers. */
527 static void cas_spare_free(struct cas *cp)
528 {
529 	struct list_head list, *elem, *tmp;
530 
531 	/* free spare buffers */
532 	INIT_LIST_HEAD(&list);
533 	spin_lock(&cp->rx_spare_lock);
534 	list_splice_init(&cp->rx_spare_list, &list);
535 	spin_unlock(&cp->rx_spare_lock);
536 	list_for_each_safe(elem, tmp, &list) {
537 		cas_page_free(cp, list_entry(elem, cas_page_t, list));
538 	}
539 
540 	INIT_LIST_HEAD(&list);
541 #if 1
542 	/*
543 	 * Looks like Adrian had protected this with a different
544 	 * lock than used everywhere else to manipulate this list.
545 	 */
546 	spin_lock(&cp->rx_inuse_lock);
547 	list_splice_init(&cp->rx_inuse_list, &list);
548 	spin_unlock(&cp->rx_inuse_lock);
549 #else
550 	spin_lock(&cp->rx_spare_lock);
551 	list_splice_init(&cp->rx_inuse_list, &list);
552 	spin_unlock(&cp->rx_spare_lock);
553 #endif
554 	list_for_each_safe(elem, tmp, &list) {
555 		cas_page_free(cp, list_entry(elem, cas_page_t, list));
556 	}
557 }
558 
559 /* replenish spares if needed */
560 static void cas_spare_recover(struct cas *cp, const gfp_t flags)
561 {
562 	struct list_head list, *elem, *tmp;
563 	int needed, i;
564 
565 	/* check inuse list. if we don't need any more free buffers,
566 	 * just free them
567 	 */
568 
569 	/* make a local copy of the list */
570 	INIT_LIST_HEAD(&list);
571 	spin_lock(&cp->rx_inuse_lock);
572 	list_splice_init(&cp->rx_inuse_list, &list);
573 	spin_unlock(&cp->rx_inuse_lock);
574 
575 	list_for_each_safe(elem, tmp, &list) {
576 		cas_page_t *page = list_entry(elem, cas_page_t, list);
577 
578 		/*
579 		 * With the lockless pagecache, cassini buffering scheme gets
580 		 * slightly less accurate: we might find that a page has an
581 		 * elevated reference count here, due to a speculative ref,
582 		 * and skip it as in-use. Ideally we would be able to reclaim
583 		 * it. However this would be such a rare case, it doesn't
584 		 * it. However, this would be such a rare case that it doesn't
585 		 * matter too much, as we should pick it up the next time round.
586 		 * Importantly, if we find that the page has a refcount of 1
587 		 * here (our refcount), then we know it is definitely not inuse
588 		 * so we can reuse it.
589 		 */
590 		if (page_count(page->buffer) > 1)
591 			continue;
592 
593 		list_del(elem);
594 		spin_lock(&cp->rx_spare_lock);
595 		if (cp->rx_spares_needed > 0) {
596 			list_add(elem, &cp->rx_spare_list);
597 			cp->rx_spares_needed--;
598 			spin_unlock(&cp->rx_spare_lock);
599 		} else {
600 			spin_unlock(&cp->rx_spare_lock);
601 			cas_page_free(cp, page);
602 		}
603 	}
604 
605 	/* put any inuse buffers back on the list */
606 	if (!list_empty(&list)) {
607 		spin_lock(&cp->rx_inuse_lock);
608 		list_splice(&list, &cp->rx_inuse_list);
609 		spin_unlock(&cp->rx_inuse_lock);
610 	}
611 
612 	spin_lock(&cp->rx_spare_lock);
613 	needed = cp->rx_spares_needed;
614 	spin_unlock(&cp->rx_spare_lock);
615 	if (!needed)
616 		return;
617 
618 	/* we still need spares, so try to allocate some */
619 	INIT_LIST_HEAD(&list);
620 	i = 0;
621 	while (i < needed) {
622 		cas_page_t *spare = cas_page_alloc(cp, flags);
623 		if (!spare)
624 			break;
625 		list_add(&spare->list, &list);
626 		i++;
627 	}
628 
629 	spin_lock(&cp->rx_spare_lock);
630 	list_splice(&list, &cp->rx_spare_list);
631 	cp->rx_spares_needed -= i;
632 	spin_unlock(&cp->rx_spare_lock);
633 }
634 
635 /* pull a page from the list. */
636 static cas_page_t *cas_page_dequeue(struct cas *cp)
637 {
638 	struct list_head *entry;
639 	int recover;
640 
641 	spin_lock(&cp->rx_spare_lock);
642 	if (list_empty(&cp->rx_spare_list)) {
643 		/* try to do a quick recovery */
644 		spin_unlock(&cp->rx_spare_lock);
645 		cas_spare_recover(cp, GFP_ATOMIC);
646 		spin_lock(&cp->rx_spare_lock);
647 		if (list_empty(&cp->rx_spare_list)) {
648 			netif_err(cp, rx_err, cp->dev,
649 				  "no spare buffers available\n");
650 			spin_unlock(&cp->rx_spare_lock);
651 			return NULL;
652 		}
653 	}
654 
655 	entry = cp->rx_spare_list.next;
656 	list_del(entry);
657 	recover = ++cp->rx_spares_needed;
658 	spin_unlock(&cp->rx_spare_lock);
659 
660 	/* trigger the timer to do the recovery */
661 	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
662 #if 1
663 		atomic_inc(&cp->reset_task_pending);
664 		atomic_inc(&cp->reset_task_pending_spare);
665 		schedule_work(&cp->reset_task);
666 #else
667 		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
668 		schedule_work(&cp->reset_task);
669 #endif
670 	}
671 	return list_entry(entry, cas_page_t, list);
672 }
673 
674 
675 static void cas_mif_poll(struct cas *cp, const int enable)
676 {
677 	u32 cfg;
678 
679 	cfg  = readl(cp->regs + REG_MIF_CFG);
680 	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
681 
682 	if (cp->phy_type & CAS_PHY_MII_MDIO1)
683 		cfg |= MIF_CFG_PHY_SELECT;
684 
685 	/* poll and interrupt on link status change. */
686 	if (enable) {
687 		cfg |= MIF_CFG_POLL_EN;
688 		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
689 		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
690 	}
691 	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
692 	       cp->regs + REG_MIF_MASK);
693 	writel(cfg, cp->regs + REG_MIF_CFG);
694 }
695 
696 /* Must be invoked under cp->lock */
697 static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
698 {
699 	u16 ctl;
700 #if 1
701 	int lcntl;
702 	int changed = 0;
703 	int oldstate = cp->lstate;
704 	int link_was_not_down = !(oldstate == link_down);
705 #endif
706 	/* Setup link parameters */
707 	if (!ep)
708 		goto start_aneg;
709 	lcntl = cp->link_cntl;
710 	if (ep->autoneg == AUTONEG_ENABLE)
711 		cp->link_cntl = BMCR_ANENABLE;
712 	else {
713 		u32 speed = ethtool_cmd_speed(ep);
714 		cp->link_cntl = 0;
715 		if (speed == SPEED_100)
716 			cp->link_cntl |= BMCR_SPEED100;
717 		else if (speed == SPEED_1000)
718 			cp->link_cntl |= CAS_BMCR_SPEED1000;
719 		if (ep->duplex == DUPLEX_FULL)
720 			cp->link_cntl |= BMCR_FULLDPLX;
721 	}
722 #if 1
723 	changed = (lcntl != cp->link_cntl);
724 #endif
725 start_aneg:
726 	if (cp->lstate == link_up) {
727 		netdev_info(cp->dev, "PCS link down\n");
728 	} else {
729 		if (changed) {
730 			netdev_info(cp->dev, "link configuration changed\n");
731 		}
732 	}
733 	cp->lstate = link_down;
734 	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
735 	if (!cp->hw_running)
736 		return;
737 #if 1
738 	/*
739 	 * WTZ: If the old state was link_up, we turn off the carrier
740 	 * to replicate everything we do elsewhere on a link-down
741 	 * event when we were already in a link-up state..
742 	 */
743 	if (oldstate == link_up)
744 		netif_carrier_off(cp->dev);
745 	if (changed && link_was_not_down) {
746 		/*
747 		 * WTZ: This branch will simply schedule a full reset after
748 		 * we explicitly changed link modes in an ioctl. See if this
749 		 * fixes the link-problems we were having for forced mode.
750 		 */
751 		atomic_inc(&cp->reset_task_pending);
752 		atomic_inc(&cp->reset_task_pending_all);
753 		schedule_work(&cp->reset_task);
754 		cp->timer_ticks = 0;
755 		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
756 		return;
757 	}
758 #endif
759 	if (cp->phy_type & CAS_PHY_SERDES) {
760 		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
761 
762 		if (cp->link_cntl & BMCR_ANENABLE) {
763 			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
764 			cp->lstate = link_aneg;
765 		} else {
766 			if (cp->link_cntl & BMCR_FULLDPLX)
767 				val |= PCS_MII_CTRL_DUPLEX;
768 			val &= ~PCS_MII_AUTONEG_EN;
769 			cp->lstate = link_force_ok;
770 		}
771 		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
772 		writel(val, cp->regs + REG_PCS_MII_CTRL);
773 
774 	} else {
775 		cas_mif_poll(cp, 0);
776 		ctl = cas_phy_read(cp, MII_BMCR);
777 		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
778 			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
779 		ctl |= cp->link_cntl;
780 		if (ctl & BMCR_ANENABLE) {
781 			ctl |= BMCR_ANRESTART;
782 			cp->lstate = link_aneg;
783 		} else {
784 			cp->lstate = link_force_ok;
785 		}
786 		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
787 		cas_phy_write(cp, MII_BMCR, ctl);
788 		cas_mif_poll(cp, 1);
789 	}
790 
791 	cp->timer_ticks = 0;
792 	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
793 }
794 
795 /* Must be invoked under cp->lock. */
796 static int cas_reset_mii_phy(struct cas *cp)
797 {
798 	int limit = STOP_TRIES_PHY;
799 	u16 val;
800 
801 	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
802 	udelay(100);
803 	while (--limit) {
804 		val = cas_phy_read(cp, MII_BMCR);
805 		if ((val & BMCR_RESET) == 0)
806 			break;
807 		udelay(10);
808 	}
809 	return limit <= 0;
810 }
811 
812 static int cas_saturn_firmware_init(struct cas *cp)
813 {
814 	const struct firmware *fw;
815 	const char fw_name[] = "sun/cassini.bin";
816 	int err;
817 
818 	if (PHY_NS_DP83065 != cp->phy_id)
819 		return 0;
820 
821 	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
822 	if (err) {
823 		pr_err("Failed to load firmware \"%s\"\n",
824 		       fw_name);
825 		return err;
826 	}
827 	if (fw->size < 2) {
828 		pr_err("bogus length %zu in \"%s\"\n",
829 		       fw->size, fw_name);
830 		err = -EINVAL;
831 		goto out;
832 	}
833 	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
834 	cp->fw_size = fw->size - 2;
835 	cp->fw_data = vmalloc(cp->fw_size);
836 	if (!cp->fw_data) {
837 		err = -ENOMEM;
838 		goto out;
839 	}
840 	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
841 out:
842 	release_firmware(fw);
843 	return err;
844 }
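
/* blob layout assumed above: bytes 0-1 are the little-endian load
 * address (e.g. 0x00 0x80 -> load at 0x8000) and the rest is the
 * image that cas_saturn_firmware_load() below writes out byte by byte.
 */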
845 
846 static void cas_saturn_firmware_load(struct cas *cp)
847 {
848 	int i;
849 
850 	cas_phy_powerdown(cp);
851 
852 	/* expanded memory access mode */
853 	cas_phy_write(cp, DP83065_MII_MEM, 0x0);
854 
855 	/* pointer configuration for new firmware */
856 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
857 	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
858 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
859 	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
860 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
861 	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
862 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
863 	cas_phy_write(cp, DP83065_MII_REGD, 0x39);
864 
865 	/* download new firmware */
866 	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
867 	cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
868 	for (i = 0; i < cp->fw_size; i++)
869 		cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
870 
871 	/* enable firmware */
872 	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
873 	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
874 }
875 
876 
877 /* phy initialization */
878 static void cas_phy_init(struct cas *cp)
879 {
880 	u16 val;
881 
882 	/* if we're in MII/GMII mode, set up phy */
883 	if (CAS_PHY_MII(cp->phy_type)) {
884 		writel(PCS_DATAPATH_MODE_MII,
885 		       cp->regs + REG_PCS_DATAPATH_MODE);
886 
887 		cas_mif_poll(cp, 0);
888 		cas_reset_mii_phy(cp); /* take out of isolate mode */
889 
890 		if (PHY_LUCENT_B0 == cp->phy_id) {
891 			/* workaround link up/down issue with lucent */
892 			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
893 			cas_phy_write(cp, MII_BMCR, 0x00f1);
894 			cas_phy_write(cp, LUCENT_MII_REG, 0x0);
895 
896 		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
897 			/* workarounds for broadcom phy */
898 			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
899 			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
900 			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
901 			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
902 			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
903 			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
904 			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
905 			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
906 			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
907 			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
908 			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
909 
910 		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
911 			val = cas_phy_read(cp, BROADCOM_MII_REG4);
912 			val = cas_phy_read(cp, BROADCOM_MII_REG4);
913 			if (val & 0x0080) {
914 				/* link workaround */
915 				cas_phy_write(cp, BROADCOM_MII_REG4,
916 					      val & ~0x0080);
917 			}
918 
919 		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
920 			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
921 			       SATURN_PCFG_FSI : 0x0,
922 			       cp->regs + REG_SATURN_PCFG);
923 
924 			/* load firmware to address 10Mbps auto-negotiation
925 			 * issue. NOTE: this will need to be changed if the
926 			 * default firmware gets fixed.
927 			 */
928 			if (PHY_NS_DP83065 == cp->phy_id) {
929 				cas_saturn_firmware_load(cp);
930 			}
931 			cas_phy_powerup(cp);
932 		}
933 
934 		/* advertise capabilities */
935 		val = cas_phy_read(cp, MII_BMCR);
936 		val &= ~BMCR_ANENABLE;
937 		cas_phy_write(cp, MII_BMCR, val);
938 		udelay(10);
939 
940 		cas_phy_write(cp, MII_ADVERTISE,
941 			      cas_phy_read(cp, MII_ADVERTISE) |
942 			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
943 			       ADVERTISE_100HALF | ADVERTISE_100FULL |
944 			       CAS_ADVERTISE_PAUSE |
945 			       CAS_ADVERTISE_ASYM_PAUSE));
946 
947 		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
948 			/* make sure that we don't advertise half
949 			 * duplex to avoid a chip issue
950 			 */
951 			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
952 			val &= ~CAS_ADVERTISE_1000HALF;
953 			val |= CAS_ADVERTISE_1000FULL;
954 			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
955 		}
956 
957 	} else {
958 		/* reset pcs for serdes */
959 		u32 val;
960 		int limit;
961 
962 		writel(PCS_DATAPATH_MODE_SERDES,
963 		       cp->regs + REG_PCS_DATAPATH_MODE);
964 
965 		/* enable serdes pins on saturn */
966 		if (cp->cas_flags & CAS_FLAG_SATURN)
967 			writel(0, cp->regs + REG_SATURN_PCFG);
968 
969 		/* Reset PCS unit. */
970 		val = readl(cp->regs + REG_PCS_MII_CTRL);
971 		val |= PCS_MII_RESET;
972 		writel(val, cp->regs + REG_PCS_MII_CTRL);
973 
974 		limit = STOP_TRIES;
975 		while (--limit > 0) {
976 			udelay(10);
977 			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
978 			     PCS_MII_RESET) == 0)
979 				break;
980 		}
981 		if (limit <= 0)
982 			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
983 				    readl(cp->regs + REG_PCS_STATE_MACHINE));
984 
985 		/* Make sure PCS is disabled while changing advertisement
986 		 * configuration.
987 		 */
988 		writel(0x0, cp->regs + REG_PCS_CFG);
989 
990 		/* Advertise all capabilities except half-duplex. */
991 		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
992 		val &= ~PCS_MII_ADVERT_HD;
993 		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
994 			PCS_MII_ADVERT_ASYM_PAUSE);
995 		writel(val, cp->regs + REG_PCS_MII_ADVERT);
996 
997 		/* enable PCS */
998 		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
999 
1000 		/* pcs workaround: enable sync detect */
1001 		writel(PCS_SERDES_CTRL_SYNCD_EN,
1002 		       cp->regs + REG_PCS_SERDES_CTRL);
1003 	}
1004 }
1005 
1006 
1007 static int cas_pcs_link_check(struct cas *cp)
1008 {
1009 	u32 stat, state_machine;
1010 	int retval = 0;
1011 
1012 	/* The link status bit latches on zero, so you must
1013 	 * read it twice in such a case to see a transition
1014 	 * to the link being up.
1015 	 */
1016 	stat = readl(cp->regs + REG_PCS_MII_STATUS);
1017 	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
1018 		stat = readl(cp->regs + REG_PCS_MII_STATUS);
1019 
1020 	/* The remote-fault indication is only valid
1021 	 * when autoneg has completed.
1022 	 */
1023 	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
1024 		     PCS_MII_STATUS_REMOTE_FAULT)) ==
1025 	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1026 		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1027 
1028 	/* work around link detection issue by querying the PCS state
1029 	 * machine directly.
1030 	 */
1031 	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1032 	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1033 		stat &= ~PCS_MII_STATUS_LINK_STATUS;
1034 	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1035 		stat |= PCS_MII_STATUS_LINK_STATUS;
1036 	}
1037 
1038 	if (stat & PCS_MII_STATUS_LINK_STATUS) {
1039 		if (cp->lstate != link_up) {
1040 			if (cp->opened) {
1041 				cp->lstate = link_up;
1042 				cp->link_transition = LINK_TRANSITION_LINK_UP;
1043 
1044 				cas_set_link_modes(cp);
1045 				netif_carrier_on(cp->dev);
1046 			}
1047 		}
1048 	} else if (cp->lstate == link_up) {
1049 		cp->lstate = link_down;
1050 		if (link_transition_timeout != 0 &&
1051 		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1052 		    !cp->link_transition_jiffies_valid) {
1053 			/*
1054 			 * force a reset, as a workaround for the
1055 			 * link-failure problem. May want to move this to a
1056 			 * point a bit earlier in the sequence. If we had
1057 			 * generated a reset a short time ago, we'll wait for
1058 			 * the link timer to check the status until a
1059 			 * timer expires (link_transition_jiffies_valid is
1060 			 * true when the timer is running.)  Instead of using
1061 			 * a system timer, we just do a check whenever the
1062 			 * link timer is running - this clears the flag after
1063 			 * a suitable delay.
1064 			 */
1065 			retval = 1;
1066 			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1067 			cp->link_transition_jiffies = jiffies;
1068 			cp->link_transition_jiffies_valid = 1;
1069 		} else {
1070 			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1071 		}
1072 		netif_carrier_off(cp->dev);
1073 		if (cp->opened)
1074 			netif_info(cp, link, cp->dev, "PCS link down\n");
1075 
1076 		/* Cassini only: if you force a mode, there can be
1077 		 * sync problems on link down. to fix that, the following
1078 		 * things need to be checked:
1079 		 * 1) read serialink state register
1080 		 * 2) read pcs status register to verify link down.
1081 		 * 3) if link down and serial link == 0x03, then you need
1082 		 *    to global reset the chip.
1083 		 */
1084 		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1085 			/* should check to see if we're in a forced mode */
1086 			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1087 			if (stat == 0x03)
1088 				return 1;
1089 		}
1090 	} else if (cp->lstate == link_down) {
1091 		if (link_transition_timeout != 0 &&
1092 		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1093 		    !cp->link_transition_jiffies_valid) {
1094 			/* force a reset, as a workaround for the
1095 			 * link-failure problem.  May want to move
1096 			 * this to a point a bit earlier in the
1097 			 * sequence.
1098 			 */
1099 			retval = 1;
1100 			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1101 			cp->link_transition_jiffies = jiffies;
1102 			cp->link_transition_jiffies_valid = 1;
1103 		} else {
1104 			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1105 		}
1106 	}
1107 
1108 	return retval;
1109 }
1110 
1111 static int cas_pcs_interrupt(struct net_device *dev,
1112 			     struct cas *cp, u32 status)
1113 {
1114 	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1115 
1116 	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1117 		return 0;
1118 	return cas_pcs_link_check(cp);
1119 }
1120 
1121 static int cas_txmac_interrupt(struct net_device *dev,
1122 			       struct cas *cp, u32 status)
1123 {
1124 	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1125 
1126 	if (!txmac_stat)
1127 		return 0;
1128 
1129 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1130 		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1131 
1132 	/* Defer-timer expiration is quite normal;
1133 	 * don't even log the event.
1134 	 */
1135 	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1136 	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1137 		return 0;
1138 
1139 	spin_lock(&cp->stat_lock[0]);
1140 	if (txmac_stat & MAC_TX_UNDERRUN) {
1141 		netdev_err(dev, "TX MAC xmit underrun\n");
1142 		cp->net_stats[0].tx_fifo_errors++;
1143 	}
1144 
1145 	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1146 		netdev_err(dev, "TX MAC max packet size error\n");
1147 		cp->net_stats[0].tx_errors++;
1148 	}
1149 
1150 	/* The rest are all cases of one of the 16-bit TX
1151 	 * counters expiring.
1152 	 */
1153 	if (txmac_stat & MAC_TX_COLL_NORMAL)
1154 		cp->net_stats[0].collisions += 0x10000;
1155 
1156 	if (txmac_stat & MAC_TX_COLL_EXCESS) {
1157 		cp->net_stats[0].tx_aborted_errors += 0x10000;
1158 		cp->net_stats[0].collisions += 0x10000;
1159 	}
1160 
1161 	if (txmac_stat & MAC_TX_COLL_LATE) {
1162 		cp->net_stats[0].tx_aborted_errors += 0x10000;
1163 		cp->net_stats[0].collisions += 0x10000;
1164 	}
1165 	spin_unlock(&cp->stat_lock[0]);
1166 
1167 	/* We do not keep track of MAC_TX_COLL_FIRST and
1168 	 * MAC_TX_PEAK_ATTEMPTS events.
1169 	 */
1170 	return 0;
1171 }
1172 
1173 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1174 {
1175 	cas_hp_inst_t *inst;
1176 	u32 val;
1177 	int i;
1178 
1179 	i = 0;
1180 	while ((inst = firmware) && inst->note) {
1181 		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1182 
1183 		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1184 		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1185 		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1186 
1187 		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1188 		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1189 		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1190 		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1191 		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1192 		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1193 		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1194 		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1195 
1196 		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1197 		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1198 		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1199 		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1200 		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1201 		++firmware;
1202 		++i;
1203 	}
1204 }
1205 
1206 static void cas_init_rx_dma(struct cas *cp)
1207 {
1208 	u64 desc_dma = cp->block_dvma;
1209 	u32 val;
1210 	int i, size;
1211 
1212 	/* rx free descriptors */
1213 	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1214 	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1215 	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1216 	if ((N_RX_DESC_RINGS > 1) &&
1217 	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
1218 		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1219 	writel(val, cp->regs + REG_RX_CFG);
1220 
1221 	val = (unsigned long) cp->init_rxds[0] -
1222 		(unsigned long) cp->init_block;
1223 	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1224 	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1225 	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1226 
1227 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1228 		/* rx desc 2 is for IPSEC packets. however,
1229 		 * we don't use it for that purpose.
1230 		 */
1231 		val = (unsigned long) cp->init_rxds[1] -
1232 			(unsigned long) cp->init_block;
1233 		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1234 		writel((desc_dma + val) & 0xffffffff, cp->regs +
1235 		       REG_PLUS_RX_DB1_LOW);
1236 		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1237 		       REG_PLUS_RX_KICK1);
1238 	}
1239 
1240 	/* rx completion registers */
1241 	val = (unsigned long) cp->init_rxcs[0] -
1242 		(unsigned long) cp->init_block;
1243 	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1244 	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1245 
1246 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1247 		/* rx comp 2-4 */
1248 		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1249 			val = (unsigned long) cp->init_rxcs[i] -
1250 				(unsigned long) cp->init_block;
1251 			writel((desc_dma + val) >> 32, cp->regs +
1252 			       REG_PLUS_RX_CBN_HI(i));
1253 			writel((desc_dma + val) & 0xffffffff, cp->regs +
1254 			       REG_PLUS_RX_CBN_LOW(i));
1255 		}
1256 	}
1257 
1258 	/* read selective clear regs to prevent spurious interrupts
1259 	 * on reset because complete == kick.
1260 	 * selective clear set up to prevent interrupts on resets
1261 	 */
1262 	readl(cp->regs + REG_INTR_STATUS_ALIAS);
1263 	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1264 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1265 		for (i = 1; i < N_RX_COMP_RINGS; i++)
1266 			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1267 
1268 		/* 2 is different from 3 and 4 */
1269 		if (N_RX_COMP_RINGS > 1)
1270 			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1271 			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1272 
1273 		for (i = 2; i < N_RX_COMP_RINGS; i++)
1274 			writel(INTR_RX_DONE_ALT,
1275 			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1276 	}
1277 
1278 	/* set up pause thresholds */
1279 	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
1280 			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1281 	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1282 			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1283 	writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1284 
1285 	/* zero out dma reassembly buffers */
1286 	for (i = 0; i < 64; i++) {
1287 		writel(i, cp->regs + REG_RX_TABLE_ADDR);
1288 		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1289 		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1290 		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1291 	}
1292 
1293 	/* make sure address register is 0 for normal operation */
1294 	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1295 	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1296 
1297 	/* interrupt mitigation */
1298 #ifdef USE_RX_BLANK
1299 	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1300 	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1301 	writel(val, cp->regs + REG_RX_BLANK);
1302 #else
1303 	writel(0x0, cp->regs + REG_RX_BLANK);
1304 #endif
1305 
1306 	/* interrupt generation as a function of low water marks for
1307 	 * free desc and completion entries. these are used to trigger
1308 	 * housekeeping for rx descs. we don't use the free interrupt
1309 	 * as it's not very useful
1310 	 */
1311 	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1312 	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1313 	writel(val, cp->regs + REG_RX_AE_THRESH);
1314 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1315 		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1316 		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1317 	}
1318 
1319 	/* Random early detect registers. useful for congestion avoidance.
1320 	 * this should be tunable.
1321 	 */
1322 	writel(0x0, cp->regs + REG_RX_RED);
1323 
1324 	/* receive page sizes. default == 2K (0x800) */
1325 	val = 0;
1326 	if (cp->page_size == 0x1000)
1327 		val = 0x1;
1328 	else if (cp->page_size == 0x2000)
1329 		val = 0x2;
1330 	else if (cp->page_size == 0x4000)
1331 		val = 0x3;
1332 
1333 	/* round mtu + offset. constrain to page size. */
1334 	size = cp->dev->mtu + 64;
1335 	if (size > cp->page_size)
1336 		size = cp->page_size;
1337 
1338 	if (size <= 0x400)
1339 		i = 0x0;
1340 	else if (size <= 0x800)
1341 		i = 0x1;
1342 	else if (size <= 0x1000)
1343 		i = 0x2;
1344 	else
1345 		i = 0x3;
1346 
1347 	cp->mtu_stride = 1 << (i + 10);
1348 	val  = CAS_BASE(RX_PAGE_SIZE, val);
1349 	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1350 	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1351 	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1352 	writel(val, cp->regs + REG_RX_PAGE_SIZE);
1353 
1354 	/* enable the header parser if desired */
1355 	if (CAS_HP_FIRMWARE == cas_prog_null)
1356 		return;
1357 
1358 	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1359 	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1360 	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1361 	writel(val, cp->regs + REG_HP_CFG);
1362 }
1363 
1364 static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1365 {
1366 	memset(rxc, 0, sizeof(*rxc));
1367 	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1368 }
1369 
1370 /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1371  * flipping is protected by the fact that the chip will not
1372  * hand back the same page index while it's being processed.
1373  */
1374 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1375 {
1376 	cas_page_t *page = cp->rx_pages[1][index];
1377 	cas_page_t *new;
1378 
1379 	if (page_count(page->buffer) == 1)
1380 		return page;
1381 
1382 	new = cas_page_dequeue(cp);
1383 	if (new) {
1384 		spin_lock(&cp->rx_inuse_lock);
1385 		list_add(&page->list, &cp->rx_inuse_list);
1386 		spin_unlock(&cp->rx_inuse_lock);
1387 	}
1388 	return new;
1389 }
1390 
1391 /* this needs to be changed if we actually use the ENC RX DESC ring */
1392 static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1393 				 const int index)
1394 {
1395 	cas_page_t **page0 = cp->rx_pages[0];
1396 	cas_page_t **page1 = cp->rx_pages[1];
1397 
1398 	/* swap if buffer is in use */
1399 	if (page_count(page0[index]->buffer) > 1) {
1400 		cas_page_t *new = cas_page_spare(cp, index);
1401 		if (new) {
1402 			page1[index] = page0[index];
1403 			page0[index] = new;
1404 		}
1405 	}
1406 	RX_USED_SET(page0[index], 0);
1407 	return page0[index];
1408 }
1409 
1410 static void cas_clean_rxds(struct cas *cp)
1411 {
1412 	/* only clean ring 0 as ring 1 is used for spare buffers */
1413 	struct cas_rx_desc *rxd = cp->init_rxds[0];
1414 	int i, size;
1415 
1416 	/* release all rx flows */
1417 	for (i = 0; i < N_RX_FLOWS; i++) {
1418 		struct sk_buff *skb;
1419 		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1420 			cas_skb_release(skb);
1421 		}
1422 	}
1423 
1424 	/* initialize descriptors */
1425 	size = RX_DESC_RINGN_SIZE(0);
1426 	for (i = 0; i < size; i++) {
1427 		cas_page_t *page = cas_page_swap(cp, 0, i);
1428 		rxd[i].buffer = cpu_to_le64(page->dma_addr);
1429 		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1430 					    CAS_BASE(RX_INDEX_RING, 0));
1431 	}
1432 
1433 	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
1434 	cp->rx_last[0] = 0;
1435 	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1436 }
1437 
1438 static void cas_clean_rxcs(struct cas *cp)
1439 {
1440 	int i, j;
1441 
1442 	/* take ownership of rx comp descriptors */
1443 	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1444 	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1445 	for (i = 0; i < N_RX_COMP_RINGS; i++) {
1446 		struct cas_rx_comp *rxc = cp->init_rxcs[i];
1447 		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1448 			cas_rxc_init(rxc + j);
1449 		}
1450 	}
1451 }
1452 
1453 #if 0
1454 /* When we get a RX fifo overflow, the RX unit is probably hung
1455  * so we do the following.
1456  *
1457  * If any part of the reset goes wrong, we return 1 and that causes the
1458  * whole chip to be reset.
1459  */
1460 static int cas_rxmac_reset(struct cas *cp)
1461 {
1462 	struct net_device *dev = cp->dev;
1463 	int limit;
1464 	u32 val;
1465 
1466 	/* First, reset MAC RX. */
1467 	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1468 	for (limit = 0; limit < STOP_TRIES; limit++) {
1469 		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1470 			break;
1471 		udelay(10);
1472 	}
1473 	if (limit == STOP_TRIES) {
1474 		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1475 		return 1;
1476 	}
1477 
1478 	/* Second, disable RX DMA. */
1479 	writel(0, cp->regs + REG_RX_CFG);
1480 	for (limit = 0; limit < STOP_TRIES; limit++) {
1481 		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1482 			break;
1483 		udelay(10);
1484 	}
1485 	if (limit == STOP_TRIES) {
1486 		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1487 		return 1;
1488 	}
1489 
1490 	mdelay(5);
1491 
1492 	/* Execute RX reset command. */
1493 	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1494 	for (limit = 0; limit < STOP_TRIES; limit++) {
1495 		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1496 			break;
1497 		udelay(10);
1498 	}
1499 	if (limit == STOP_TRIES) {
1500 		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1501 		return 1;
1502 	}
1503 
1504 	/* reset driver rx state */
1505 	cas_clean_rxds(cp);
1506 	cas_clean_rxcs(cp);
1507 
1508 	/* Now, reprogram the rest of RX unit. */
1509 	cas_init_rx_dma(cp);
1510 
1511 	/* re-enable */
1512 	val = readl(cp->regs + REG_RX_CFG);
1513 	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1514 	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1515 	val = readl(cp->regs + REG_MAC_RX_CFG);
1516 	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1517 	return 0;
1518 }
1519 #endif
1520 
1521 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1522 			       u32 status)
1523 {
1524 	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1525 
1526 	if (!stat)
1527 		return 0;
1528 
1529 	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1530 
1531 	/* these are all rollovers */
1532 	spin_lock(&cp->stat_lock[0]);
1533 	if (stat & MAC_RX_ALIGN_ERR)
1534 		cp->net_stats[0].rx_frame_errors += 0x10000;
1535 
1536 	if (stat & MAC_RX_CRC_ERR)
1537 		cp->net_stats[0].rx_crc_errors += 0x10000;
1538 
1539 	if (stat & MAC_RX_LEN_ERR)
1540 		cp->net_stats[0].rx_length_errors += 0x10000;
1541 
1542 	if (stat & MAC_RX_OVERFLOW) {
1543 		cp->net_stats[0].rx_over_errors++;
1544 		cp->net_stats[0].rx_fifo_errors++;
1545 	}
1546 
1547 	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1548 	 * events.
1549 	 */
1550 	spin_unlock(&cp->stat_lock[0]);
1551 	return 0;
1552 }
1553 
1554 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1555 			     u32 status)
1556 {
1557 	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1558 
1559 	if (!stat)
1560 		return 0;
1561 
1562 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1563 		     "mac interrupt, stat: 0x%x\n", stat);
1564 
1565 	/* This interrupt is just for pause frame and pause
1566 	 * tracking.  It is useful for diagnostics and debug
1567 	 * but probably by default we will mask these events.
1568 	 */
1569 	if (stat & MAC_CTRL_PAUSE_STATE)
1570 		cp->pause_entered++;
1571 
1572 	if (stat & MAC_CTRL_PAUSE_RECEIVED)
1573 		cp->pause_last_time_recvd = (stat >> 16);
1574 
1575 	return 0;
1576 }
1577 
1578 
1579 /* Must be invoked under cp->lock. */
1580 static inline int cas_mdio_link_not_up(struct cas *cp)
1581 {
1582 	u16 val;
1583 
1584 	switch (cp->lstate) {
1585 	case link_force_ret:
1586 		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1587 		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1588 		cp->timer_ticks = 5;
1589 		cp->lstate = link_force_ok;
1590 		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1591 		break;
1592 
1593 	case link_aneg:
1594 		val = cas_phy_read(cp, MII_BMCR);
1595 
1596 		/* Try forced modes. we try things in the following order:
1597 		 * 1000 full -> 100 full/half -> 10 half
1598 		 */
1599 		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1600 		val |= BMCR_FULLDPLX;
1601 		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1602 			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1603 		cas_phy_write(cp, MII_BMCR, val);
1604 		cp->timer_ticks = 5;
1605 		cp->lstate = link_force_try;
1606 		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1607 		break;
1608 
1609 	case link_force_try:
1610 		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1611 		val = cas_phy_read(cp, MII_BMCR);
1612 		cp->timer_ticks = 5;
1613 		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1614 			val &= ~CAS_BMCR_SPEED1000;
1615 			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1616 			cas_phy_write(cp, MII_BMCR, val);
1617 			break;
1618 		}
1619 
1620 		if (val & BMCR_SPEED100) {
1621 			if (val & BMCR_FULLDPLX) /* fd failed */
1622 				val &= ~BMCR_FULLDPLX;
1623 			else { /* 100Mbps failed */
1624 				val &= ~BMCR_SPEED100;
1625 			}
1626 			cas_phy_write(cp, MII_BMCR, val);
1627 			break;
1628 		}
1629 	default:
1630 		break;
1631 	}
1632 	return 0;
1633 }
1634 
1635 
1636 /* must be invoked with cp->lock held */
1637 static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1638 {
1639 	int restart;
1640 
1641 	if (bmsr & BMSR_LSTATUS) {
1642 		/* Ok, here we got a link. If we had it due to a forced
1643 		 * fallback, and we were configured for autoneg, we
1644 		 * retry a short autoneg pass. If you know your hub is
1645 		 * broken, use ethtool ;)
1646 		 */
1647 		if ((cp->lstate == link_force_try) &&
1648 		    (cp->link_cntl & BMCR_ANENABLE)) {
1649 			cp->lstate = link_force_ret;
1650 			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1651 			cas_mif_poll(cp, 0);
1652 			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1653 			cp->timer_ticks = 5;
1654 			if (cp->opened)
1655 				netif_info(cp, link, cp->dev,
1656 					   "Got link after fallback, retrying autoneg once...\n");
1657 			cas_phy_write(cp, MII_BMCR,
1658 				      cp->link_fcntl | BMCR_ANENABLE |
1659 				      BMCR_ANRESTART);
1660 			cas_mif_poll(cp, 1);
1661 
1662 		} else if (cp->lstate != link_up) {
1663 			cp->lstate = link_up;
1664 			cp->link_transition = LINK_TRANSITION_LINK_UP;
1665 
1666 			if (cp->opened) {
1667 				cas_set_link_modes(cp);
1668 				netif_carrier_on(cp->dev);
1669 			}
1670 		}
1671 		return 0;
1672 	}
1673 
1674 	/* link not up. if the link was previously up, we restart the
1675 	 * whole process
1676 	 */
1677 	restart = 0;
1678 	if (cp->lstate == link_up) {
1679 		cp->lstate = link_down;
1680 		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1681 
1682 		netif_carrier_off(cp->dev);
1683 		if (cp->opened)
1684 			netif_info(cp, link, cp->dev, "Link down\n");
1685 		restart = 1;
1686 
1687 	} else if (++cp->timer_ticks > 10)
1688 		cas_mdio_link_not_up(cp);
1689 
1690 	return restart;
1691 }
1692 
1693 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1694 			     u32 status)
1695 {
1696 	u32 stat = readl(cp->regs + REG_MIF_STATUS);
1697 	u16 bmsr;
1698 
1699 	/* check for a link change */
1700 	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1701 		return 0;
1702 
1703 	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1704 	return cas_mii_link_check(cp, bmsr);
1705 }
1706 
1707 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1708 			     u32 status)
1709 {
1710 	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1711 
1712 	if (!stat)
1713 		return 0;
1714 
1715 	netdev_err(dev, "PCI error [%04x:%04x]",
1716 		   stat, readl(cp->regs + REG_BIM_DIAG));
1717 
1718 	/* cassini+ has this reserved */
1719 	if ((stat & PCI_ERR_BADACK) &&
1720 	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1721 		pr_cont(" <No ACK64# during ABS64 cycle>");
1722 
1723 	if (stat & PCI_ERR_DTRTO)
1724 		pr_cont(" <Delayed transaction timeout>");
1725 	if (stat & PCI_ERR_OTHER)
1726 		pr_cont(" <other>");
1727 	if (stat & PCI_ERR_BIM_DMA_WRITE)
1728 		pr_cont(" <BIM DMA 0 write req>");
1729 	if (stat & PCI_ERR_BIM_DMA_READ)
1730 		pr_cont(" <BIM DMA 0 read req>");
1731 	pr_cont("\n");
1732 
1733 	if (stat & PCI_ERR_OTHER) {
1734 		u16 cfg;
1735 
1736 		/* Interrogate PCI config space for the
1737 		 * true cause.
1738 		 */
1739 		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1740 		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
1741 		if (cfg & PCI_STATUS_PARITY)
1742 			netdev_err(dev, "PCI parity error detected\n");
1743 		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1744 			netdev_err(dev, "PCI target abort\n");
1745 		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1746 			netdev_err(dev, "PCI master acks target abort\n");
1747 		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1748 			netdev_err(dev, "PCI master abort\n");
1749 		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1750 			netdev_err(dev, "PCI system error SERR#\n");
1751 		if (cfg & PCI_STATUS_DETECTED_PARITY)
1752 			netdev_err(dev, "PCI parity error\n");
1753 
1754 		/* Write the error bits back to clear them. */
1755 		cfg &= (PCI_STATUS_PARITY |
1756 			PCI_STATUS_SIG_TARGET_ABORT |
1757 			PCI_STATUS_REC_TARGET_ABORT |
1758 			PCI_STATUS_REC_MASTER_ABORT |
1759 			PCI_STATUS_SIG_SYSTEM_ERROR |
1760 			PCI_STATUS_DETECTED_PARITY);
1761 		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1762 	}
1763 
1764 	/* For all PCI errors, we should reset the chip. */
1765 	return 1;
1766 }
1767 
1768 /* All non-normal interrupt conditions get serviced here.
1769  * Returns non-zero if we should just exit the interrupt
1770  * handler right now (i.e. if we reset the card, which invalidates
1771  * all of the other original irq status bits).
1772  */
1773 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1774 			    u32 status)
1775 {
1776 	if (status & INTR_RX_TAG_ERROR) {
1777 		/* corrupt RX tag framing */
1778 		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1779 			     "corrupt rx tag framing\n");
1780 		spin_lock(&cp->stat_lock[0]);
1781 		cp->net_stats[0].rx_errors++;
1782 		spin_unlock(&cp->stat_lock[0]);
1783 		goto do_reset;
1784 	}
1785 
1786 	if (status & INTR_RX_LEN_MISMATCH) {
1787 		/* length mismatch. */
1788 		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1789 			     "length mismatch for rx frame\n");
1790 		spin_lock(&cp->stat_lock[0]);
1791 		cp->net_stats[0].rx_errors++;
1792 		spin_unlock(&cp->stat_lock[0]);
1793 		goto do_reset;
1794 	}
1795 
1796 	if (status & INTR_PCS_STATUS) {
1797 		if (cas_pcs_interrupt(dev, cp, status))
1798 			goto do_reset;
1799 	}
1800 
1801 	if (status & INTR_TX_MAC_STATUS) {
1802 		if (cas_txmac_interrupt(dev, cp, status))
1803 			goto do_reset;
1804 	}
1805 
1806 	if (status & INTR_RX_MAC_STATUS) {
1807 		if (cas_rxmac_interrupt(dev, cp, status))
1808 			goto do_reset;
1809 	}
1810 
1811 	if (status & INTR_MAC_CTRL_STATUS) {
1812 		if (cas_mac_interrupt(dev, cp, status))
1813 			goto do_reset;
1814 	}
1815 
1816 	if (status & INTR_MIF_STATUS) {
1817 		if (cas_mif_interrupt(dev, cp, status))
1818 			goto do_reset;
1819 	}
1820 
1821 	if (status & INTR_PCI_ERROR_STATUS) {
1822 		if (cas_pci_interrupt(dev, cp, status))
1823 			goto do_reset;
1824 	}
1825 	return 0;
1826 
1827 do_reset:
1828 #if 1
1829 	atomic_inc(&cp->reset_task_pending);
1830 	atomic_inc(&cp->reset_task_pending_all);
1831 	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1832 	schedule_work(&cp->reset_task);
1833 #else
1834 	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1835 	netdev_err(dev, "reset called in cas_abnormal_irq\n");
1836 	schedule_work(&cp->reset_task);
1837 #endif
1838 	return 1;
1839 }
1840 
1841 /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1842  *       determining whether to do a netif_stop/wakeup
1843  */
1844 #define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1845 #define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1846 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1847 				  const int len)
1848 {
1849 	unsigned long off = addr + len;
1850 
1851 	if (CAS_TABORT(cp) == 1)
1852 		return 0;
1853 	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1854 		return 0;
1855 	return TX_TARGET_ABORT_LEN;
1856 }
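
/* Worked example of the check above (illustrative offsets; the real
 * threshold is TX_TARGET_ABORT_LEN from cassini.h): with 4K pages and
 * a hypothetical threshold of 0x20, a buffer ending at page offset
 * 0xff8 leaves 8 bytes to the boundary, so its final 0x20 bytes get
 * bounced through a tiny buffer; one ending at offset 0xf00 leaves
 * 0x100 bytes and is transmitted in place.
 */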
1857 
1858 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1859 {
1860 	struct cas_tx_desc *txds;
1861 	struct sk_buff **skbs;
1862 	struct net_device *dev = cp->dev;
1863 	int entry, count;
1864 
1865 	spin_lock(&cp->tx_lock[ring]);
1866 	txds = cp->init_txds[ring];
1867 	skbs = cp->tx_skbs[ring];
1868 	entry = cp->tx_old[ring];
1869 
1870 	count = TX_BUFF_COUNT(ring, entry, limit);
1871 	while (entry != limit) {
1872 		struct sk_buff *skb = skbs[entry];
1873 		dma_addr_t daddr;
1874 		u32 dlen;
1875 		int frag;
1876 
1877 		if (!skb) {
1878 			/* this should never occur */
1879 			entry = TX_DESC_NEXT(ring, entry);
1880 			continue;
1881 		}
1882 
1883 		/* however, we might get only a partial skb release. */
1884 		count -= skb_shinfo(skb)->nr_frags +
1885 			cp->tx_tiny_use[ring][entry].nbufs + 1;
1886 		if (count < 0)
1887 			break;
1888 
1889 		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1890 			     "tx[%d] done, slot %d\n", ring, entry);
1891 
1892 		skbs[entry] = NULL;
1893 		cp->tx_tiny_use[ring][entry].nbufs = 0;
1894 
1895 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1896 			struct cas_tx_desc *txd = txds + entry;
1897 
1898 			daddr = le64_to_cpu(txd->buffer);
1899 			dlen = CAS_VAL(TX_DESC_BUFLEN,
1900 				       le64_to_cpu(txd->control));
1901 			pci_unmap_page(cp->pdev, daddr, dlen,
1902 				       PCI_DMA_TODEVICE);
1903 			entry = TX_DESC_NEXT(ring, entry);
1904 
1905 			/* tiny buffer may follow */
1906 			if (cp->tx_tiny_use[ring][entry].used) {
1907 				cp->tx_tiny_use[ring][entry].used = 0;
1908 				entry = TX_DESC_NEXT(ring, entry);
1909 			}
1910 		}
1911 
1912 		spin_lock(&cp->stat_lock[ring]);
1913 		cp->net_stats[ring].tx_packets++;
1914 		cp->net_stats[ring].tx_bytes += skb->len;
1915 		spin_unlock(&cp->stat_lock[ring]);
1916 		dev_kfree_skb_irq(skb);
1917 	}
1918 	cp->tx_old[ring] = entry;
1919 
1920 	/* this is wrong for multiple tx rings. the net device needs
1921 	 * multiple queues for this to do the right thing.  we wait
1922 	 * for 2*packets to be available when using tiny buffers
1923 	 */
1924 	if (netif_queue_stopped(dev) &&
1925 	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1926 		netif_wake_queue(dev);
1927 	spin_unlock(&cp->tx_lock[ring]);
1928 }
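
/* Worked example of the partial-release accounting above: count is the
 * number of descriptors the hardware has consumed since tx_old. if the
 * writeback limit advanced by only 3 slots but the next pending skb
 * occupies 4 (head + 2 frags + 1 tiny buffer), count goes negative and
 * the loop stops -- the skb is reclaimed only once every one of its
 * descriptors has completed.
 */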
1929 
1930 static void cas_tx(struct net_device *dev, struct cas *cp,
1931 		   u32 status)
1932 {
1933 	int limit, ring;
1934 #ifdef USE_TX_COMPWB
1935 	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#else
	u64 compwb = 0;	/* no writeback configured; keep the printk below buildable */
1936 #endif
1937 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1938 		     "tx interrupt, status: 0x%x, %llx\n",
1939 		     status, (unsigned long long)compwb);
1940 	/* process all the rings */
1941 	for (ring = 0; ring < N_TX_RINGS; ring++) {
1942 #ifdef USE_TX_COMPWB
1943 		/* use the completion writeback registers */
1944 		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1945 			CAS_VAL(TX_COMPWB_LSB, compwb);
1946 		compwb = TX_COMPWB_NEXT(compwb);
1947 #else
1948 		limit = readl(cp->regs + REG_TX_COMPN(ring));
1949 #endif
1950 		if (cp->tx_old[ring] != limit)
1951 			cas_tx_ringN(cp, ring, limit);
1952 	}
1953 }
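
/* Note on the writeback decode above: the 64-bit tx_compwb word holds
 * one consumer index per ring. ring 0's limit is rebuilt as
 * (MSB << 8) | LSB from the low field, and TX_COMPWB_NEXT() shifts the
 * word so the same extraction serves the remaining rings in turn.
 */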
1954 
1955 
1956 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1957 			      int entry, const u64 *words,
1958 			      struct sk_buff **skbref)
1959 {
1960 	int dlen, hlen, len, i, alloclen;
1961 	int off, swivel = RX_SWIVEL_OFF_VAL;
1962 	struct cas_page *page;
1963 	struct sk_buff *skb;
1964 	void *addr, *crcaddr;
1965 	__sum16 csum;
1966 	char *p;
1967 
1968 	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1969 	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1970 	len  = hlen + dlen;
1971 
1972 	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1973 		alloclen = len;
1974 	else
1975 		alloclen = max(hlen, RX_COPY_MIN);
1976 
1977 	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1978 	if (skb == NULL)
1979 		return -1;
1980 
1981 	*skbref = skb;
1982 	skb_reserve(skb, swivel);
1983 
1984 	p = skb->data;
1985 	addr = crcaddr = NULL;
1986 	if (hlen) { /* always copy header pages */
1987 		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1988 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1989 		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1990 			swivel;
1991 
1992 		i = hlen;
1993 		if (!dlen) /* attach FCS */
1994 			i += cp->crc_size;
1995 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1996 				    PCI_DMA_FROMDEVICE);
1997 		addr = cas_page_map(page->buffer);
1998 		memcpy(p, addr + off, i);
1999 		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2000 				    PCI_DMA_FROMDEVICE);
2001 		cas_page_unmap(addr);
2002 		RX_USED_ADD(page, 0x100);
2003 		p += hlen;
2004 		swivel = 0;
2005 	}
2006 
2007 
2008 	if (alloclen < (hlen + dlen)) {
2009 		skb_frag_t *frag = skb_shinfo(skb)->frags;
2010 
2011 		/* normal or jumbo packets. we use frags */
2012 		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2013 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2014 		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2015 
2016 		hlen = min(cp->page_size - off, dlen);
2017 		if (hlen < 0) {
2018 			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2019 				     "rx page overflow: %d\n", hlen);
2020 			dev_kfree_skb_irq(skb);
2021 			return -1;
2022 		}
2023 		i = hlen;
2024 		if (i == dlen)  /* attach FCS */
2025 			i += cp->crc_size;
2026 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2027 				    PCI_DMA_FROMDEVICE);
2028 
2029 		/* make sure we always copy a header */
2030 		swivel = 0;
2031 		if (p == (char *) skb->data) { /* not split */
2032 			addr = cas_page_map(page->buffer);
2033 			memcpy(p, addr + off, RX_COPY_MIN);
2034 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2035 					PCI_DMA_FROMDEVICE);
2036 			cas_page_unmap(addr);
2037 			off += RX_COPY_MIN;
2038 			swivel = RX_COPY_MIN;
2039 			RX_USED_ADD(page, cp->mtu_stride);
2040 		} else {
2041 			RX_USED_ADD(page, hlen);
2042 		}
2043 		skb_put(skb, alloclen);
2044 
2045 		skb_shinfo(skb)->nr_frags++;
2046 		skb->data_len += hlen - swivel;
2047 		skb->truesize += hlen - swivel;
2048 		skb->len      += hlen - swivel;
2049 
2050 		__skb_frag_set_page(frag, page->buffer);
2051 		__skb_frag_ref(frag);
2052 		frag->page_offset = off;
2053 		skb_frag_size_set(frag, hlen - swivel);
2054 
2055 		/* any more data? */
2056 		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2057 			hlen = dlen;
2058 			off = 0;
2059 
2060 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2061 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2062 			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2063 					    hlen + cp->crc_size,
2064 					    PCI_DMA_FROMDEVICE);
2065 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2066 					    hlen + cp->crc_size,
2067 					    PCI_DMA_FROMDEVICE);
2068 
2069 			skb_shinfo(skb)->nr_frags++;
2070 			skb->data_len += hlen;
2071 			skb->len      += hlen;
2072 			frag++;
2073 
2074 			__skb_frag_set_page(frag, page->buffer);
2075 			__skb_frag_ref(frag);
2076 			frag->page_offset = 0;
2077 			skb_frag_size_set(frag, hlen);
2078 			RX_USED_ADD(page, hlen + cp->crc_size);
2079 		}
2080 
2081 		if (cp->crc_size) {
2082 			addr = cas_page_map(page->buffer);
2083 			crcaddr  = addr + off + hlen;
2084 		}
2085 
2086 	} else {
2087 		/* copying packet */
2088 		if (!dlen)
2089 			goto end_copy_pkt;
2090 
2091 		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2092 		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2093 		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2094 		hlen = min(cp->page_size - off, dlen);
2095 		if (hlen < 0) {
2096 			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2097 				     "rx page overflow: %d\n", hlen);
2098 			dev_kfree_skb_irq(skb);
2099 			return -1;
2100 		}
2101 		i = hlen;
2102 		if (i == dlen) /* attach FCS */
2103 			i += cp->crc_size;
2104 		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2105 				    PCI_DMA_FROMDEVICE);
2106 		addr = cas_page_map(page->buffer);
2107 		memcpy(p, addr + off, i);
2108 		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2109 				    PCI_DMA_FROMDEVICE);
2110 		cas_page_unmap(addr);
2111 		if (p == (char *) skb->data) /* not split */
2112 			RX_USED_ADD(page, cp->mtu_stride);
2113 		else
2114 			RX_USED_ADD(page, i);
2115 
2116 		/* any more data? */
2117 		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2118 			p += hlen;
2119 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2120 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2121 			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2122 					    dlen + cp->crc_size,
2123 					    PCI_DMA_FROMDEVICE);
2124 			addr = cas_page_map(page->buffer);
2125 			memcpy(p, addr, dlen + cp->crc_size);
2126 			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2127 					    dlen + cp->crc_size,
2128 					    PCI_DMA_FROMDEVICE);
2129 			cas_page_unmap(addr);
2130 			RX_USED_ADD(page, dlen + cp->crc_size);
2131 		}
2132 end_copy_pkt:
2133 		if (cp->crc_size) {
2134 			addr    = NULL;
2135 			crcaddr = skb->data + alloclen;
2136 		}
2137 		skb_put(skb, alloclen);
2138 	}
2139 
2140 	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2141 	if (cp->crc_size) {
2142 		/* checksum includes FCS. strip it out. */
2143 		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2144 					      csum_unfold(csum)));
2145 		if (addr)
2146 			cas_page_unmap(addr);
2147 	}
2148 	skb->protocol = eth_type_trans(skb, cp->dev);
2149 	if (skb->protocol == htons(ETH_P_IP)) {
2150 		skb->csum = csum_unfold(~csum);
2151 		skb->ip_summed = CHECKSUM_COMPLETE;
2152 	} else
2153 		skb_checksum_none_assert(skb);
2154 	return len;
2155 }
2156 
2157 
2158 /* we can handle up to 64 rx flows at a time. we do the same thing
2159  * as nonreassm except that we batch up the buffers.
2160  * NOTE: we currently just treat each flow as a bunch of packets that
2161  *       we pass up. a better way would be to coalesce the packets
2162  *       into a jumbo packet. to do that, we need to do the following:
2163  *       1) the first packet will have a clean split between header and
2164  *          data. save both.
2165  *       2) each time the next flow packet comes in, extend the
2166  *          data length and merge the checksums.
2167  *       3) on flow release, fix up the header.
2168  *       4) make sure the higher layer doesn't care.
2169  * because packets get coalesced, we shouldn't run into fragment count
2170  * issues.
2171  */
2172 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2173 				   struct sk_buff *skb)
2174 {
2175 	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2176 	struct sk_buff_head *flow = &cp->rx_flows[flowid];
2177 
2178 	/* this is protected at a higher layer, so no need to
2179 	 * do any additional locking here. stick the buffer
2180 	 * at the end.
2181 	 */
2182 	__skb_queue_tail(flow, skb);
2183 	if (words[0] & RX_COMP1_RELEASE_FLOW) {
2184 		while ((skb = __skb_dequeue(flow))) {
2185 			cas_skb_release(skb);
2186 		}
2187 	}
2188 }
2189 
2190 /* put rx descriptor back on ring. if the buffer is still in use by a
2191  * higher layer, a replacement page is swapped in.
2192  */
2193 static void cas_post_page(struct cas *cp, const int ring, const int index)
2194 {
2195 	cas_page_t *new;
2196 	int entry;
2197 
2198 	entry = cp->rx_old[ring];
2199 
2200 	new = cas_page_swap(cp, ring, index);
2201 	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2202 	cp->init_rxds[ring][entry].index  =
2203 		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2204 			    CAS_BASE(RX_INDEX_RING, ring));
2205 
2206 	entry = RX_DESC_ENTRY(ring, entry + 1);
2207 	cp->rx_old[ring] = entry;
2208 
2209 	if (entry % 4)
2210 		return;
2211 
2212 	if (ring == 0)
2213 		writel(entry, cp->regs + REG_RX_KICK);
2214 	else if ((N_RX_DESC_RINGS > 1) &&
2215 		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2216 		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2217 }
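
/* Note on the kick batching above: descriptors are handed back to the
 * chip only in aligned groups of four. posting entries 5, 6 and 7
 * writes nothing; only when entry advances to 8 is the kick register
 * written, marking descriptors up to (but not including) 8 as valid
 * again.
 */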
2218 
2219 
2220 /* only when things are bad */
2221 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2222 {
2223 	unsigned int entry, last, count, released;
2224 	int cluster;
2225 	cas_page_t **page = cp->rx_pages[ring];
2226 
2227 	entry = cp->rx_old[ring];
2228 
2229 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2230 		     "rxd[%d] interrupt, done: %d\n", ring, entry);
2231 
2232 	cluster = -1;
2233 	count = entry & 0x3;
2234 	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2235 	released = 0;
2236 	while (entry != last) {
2237 		/* make a new buffer if it's still in use */
2238 		if (page_count(page[entry]->buffer) > 1) {
2239 			cas_page_t *new = cas_page_dequeue(cp);
2240 			if (!new) {
2241 				/* let the timer know that we need to
2242 				 * do this again
2243 				 */
2244 				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2245 				if (!timer_pending(&cp->link_timer))
2246 					mod_timer(&cp->link_timer, jiffies +
2247 						  CAS_LINK_FAST_TIMEOUT);
2248 				cp->rx_old[ring]  = entry;
2249 				cp->rx_last[ring] = num ? num - released : 0;
2250 				return -ENOMEM;
2251 			}
2252 			spin_lock(&cp->rx_inuse_lock);
2253 			list_add(&page[entry]->list, &cp->rx_inuse_list);
2254 			spin_unlock(&cp->rx_inuse_lock);
2255 			cp->init_rxds[ring][entry].buffer =
2256 				cpu_to_le64(new->dma_addr);
2257 			page[entry] = new;
2258 
2259 		}
2260 
2261 		if (++count == 4) {
2262 			cluster = entry;
2263 			count = 0;
2264 		}
2265 		released++;
2266 		entry = RX_DESC_ENTRY(ring, entry + 1);
2267 	}
2268 	cp->rx_old[ring] = entry;
2269 
2270 	if (cluster < 0)
2271 		return 0;
2272 
2273 	if (ring == 0)
2274 		writel(cluster, cp->regs + REG_RX_KICK);
2275 	else if ((N_RX_DESC_RINGS > 1) &&
2276 		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2277 		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2278 	return 0;
2279 }
2280 
2281 
2282 /* process a completion ring. packets are set up in three basic ways:
2283  * small packets: header and data are copied into a single buffer.
2284  * large packets: header and data in a single buffer.
2285  * split packets: header in a separate buffer from data.
2286  *                data may be in multiple pages. data may be > 256
2287  *                bytes but in a single page.
2288  *
2289  * NOTE: RX page posting is done in this routine as well. while there's
2290  *       the capability of using multiple RX completion rings, it isn't
2291  *       really worthwhile due to the fact that the page posting will
2292  *       force serialization on the single descriptor ring.
2293  */
2294 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2295 {
2296 	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2297 	int entry, drops;
2298 	int npackets = 0;
2299 
2300 	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2301 		     "rx[%d] interrupt, done: %d/%d\n",
2302 		     ring,
2303 		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2304 
2305 	entry = cp->rx_new[ring];
2306 	drops = 0;
2307 	while (1) {
2308 		struct cas_rx_comp *rxc = rxcs + entry;
2309 		struct sk_buff *uninitialized_var(skb);
2310 		int type, len;
2311 		u64 words[4];
2312 		int i, dring;
2313 
2314 		words[0] = le64_to_cpu(rxc->word1);
2315 		words[1] = le64_to_cpu(rxc->word2);
2316 		words[2] = le64_to_cpu(rxc->word3);
2317 		words[3] = le64_to_cpu(rxc->word4);
2318 
2319 		/* don't touch if still owned by hw */
2320 		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2321 		if (type == 0)
2322 			break;
2323 
2324 		/* hw hasn't cleared the zero bit yet */
2325 		if (words[3] & RX_COMP4_ZERO) {
2326 			break;
2327 		}
2328 
2329 		/* get info on the packet */
2330 		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2331 			spin_lock(&cp->stat_lock[ring]);
2332 			cp->net_stats[ring].rx_errors++;
2333 			if (words[3] & RX_COMP4_LEN_MISMATCH)
2334 				cp->net_stats[ring].rx_length_errors++;
2335 			if (words[3] & RX_COMP4_BAD)
2336 				cp->net_stats[ring].rx_crc_errors++;
2337 			spin_unlock(&cp->stat_lock[ring]);
2338 
2339 			/* We'll just return it to Cassini. */
2340 		drop_it:
2341 			spin_lock(&cp->stat_lock[ring]);
2342 			++cp->net_stats[ring].rx_dropped;
2343 			spin_unlock(&cp->stat_lock[ring]);
2344 			goto next;
2345 		}
2346 
2347 		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2348 		if (len < 0) {
2349 			++drops;
2350 			goto drop_it;
2351 		}
2352 
2353 		/* see if it's a flow re-assembly or not. the driver
2354 		 * itself handles releasing flows back up the stack.
2355 		 */
2356 		if (RX_DONT_BATCH || (type == 0x2)) {
2357 			/* non-reassm: these always get released */
2358 			cas_skb_release(skb);
2359 		} else {
2360 			cas_rx_flow_pkt(cp, words, skb);
2361 		}
2362 
2363 		spin_lock(&cp->stat_lock[ring]);
2364 		cp->net_stats[ring].rx_packets++;
2365 		cp->net_stats[ring].rx_bytes += len;
2366 		spin_unlock(&cp->stat_lock[ring]);
2367 
2368 	next:
2369 		npackets++;
2370 
2371 		/* should it be released? */
2372 		if (words[0] & RX_COMP1_RELEASE_HDR) {
2373 			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2374 			dring = CAS_VAL(RX_INDEX_RING, i);
2375 			i = CAS_VAL(RX_INDEX_NUM, i);
2376 			cas_post_page(cp, dring, i);
2377 		}
2378 
2379 		if (words[0] & RX_COMP1_RELEASE_DATA) {
2380 			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2381 			dring = CAS_VAL(RX_INDEX_RING, i);
2382 			i = CAS_VAL(RX_INDEX_NUM, i);
2383 			cas_post_page(cp, dring, i);
2384 		}
2385 
2386 		if (words[0] & RX_COMP1_RELEASE_NEXT) {
2387 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2388 			dring = CAS_VAL(RX_INDEX_RING, i);
2389 			i = CAS_VAL(RX_INDEX_NUM, i);
2390 			cas_post_page(cp, dring, i);
2391 		}
2392 
2393 		/* skip to the next entry */
2394 		entry = RX_COMP_ENTRY(ring, entry + 1 +
2395 				      CAS_VAL(RX_COMP1_SKIP, words[0]));
2396 #ifdef USE_NAPI
2397 		if (budget && (npackets >= budget))
2398 			break;
2399 #endif
2400 	}
2401 	cp->rx_new[ring] = entry;
2402 
2403 	if (drops)
2404 		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2405 	return npackets;
2406 }
2407 
2408 
2409 /* put completion entries back on the ring */
2410 static void cas_post_rxcs_ringN(struct net_device *dev,
2411 				struct cas *cp, int ring)
2412 {
2413 	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2414 	int last, entry;
2415 
2416 	last = cp->rx_cur[ring];
2417 	entry = cp->rx_new[ring];
2418 	netif_printk(cp, intr, KERN_DEBUG, dev,
2419 		     "rxc[%d] interrupt, done: %d/%d\n",
2420 		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2421 
2422 	/* zero and re-mark descriptors */
2423 	while (last != entry) {
2424 		cas_rxc_init(rxc + last);
2425 		last = RX_COMP_ENTRY(ring, last + 1);
2426 	}
2427 	cp->rx_cur[ring] = last;
2428 
2429 	if (ring == 0)
2430 		writel(last, cp->regs + REG_RX_COMP_TAIL);
2431 	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2432 		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2433 }
2434 
2435 
2436 
2437 /* cassini can use all four PCI interrupts for the completion rings.
2438  * the third and fourth rings (INTC/INTD, indices 2 and 3) are identical
2439  */
2440 #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2441 static inline void cas_handle_irqN(struct net_device *dev,
2442 				   struct cas *cp, const u32 status,
2443 				   const int ring)
2444 {
2445 	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2446 		cas_post_rxcs_ringN(dev, cp, ring);
2447 }
2448 
2449 static irqreturn_t cas_interruptN(int irq, void *dev_id)
2450 {
2451 	struct net_device *dev = dev_id;
2452 	struct cas *cp = netdev_priv(dev);
2453 	unsigned long flags;
2454 	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2455 	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2456 
2457 	/* check for shared irq */
2458 	if (status == 0)
2459 		return IRQ_NONE;
2460 
2461 	spin_lock_irqsave(&cp->lock, flags);
2462 	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2463 #ifdef USE_NAPI
2464 		cas_mask_intr(cp);
2465 		napi_schedule(&cp->napi);
2466 #else
2467 		cas_rx_ringN(cp, ring, 0);
2468 #endif
2469 		status &= ~INTR_RX_DONE_ALT;
2470 	}
2471 
2472 	if (status)
2473 		cas_handle_irqN(dev, cp, status, ring);
2474 	spin_unlock_irqrestore(&cp->lock, flags);
2475 	return IRQ_HANDLED;
2476 }
2477 #endif
2478 
2479 #ifdef USE_PCI_INTB
2480 /* everything but rx packets */
2481 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2482 {
2483 	if (status & INTR_RX_BUF_UNAVAIL_1) {
2484 		/* Frame arrived, no free RX buffers available.
2485 		 * NOTE: we can get this on a link transition. */
2486 		cas_post_rxds_ringN(cp, 1, 0);
2487 		spin_lock(&cp->stat_lock[1]);
2488 		cp->net_stats[1].rx_dropped++;
2489 		spin_unlock(&cp->stat_lock[1]);
2490 	}
2491 
2492 	if (status & INTR_RX_BUF_AE_1)
2493 		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2494 				    RX_AE_FREEN_VAL(1));
2495 
2496 	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2497 		cas_post_rxcs_ringN(cp, 1);
2498 }
2499 
2500 /* the second ring (INTB, index 1) handles a few more events than the
 * third and fourth */
2501 static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2502 {
2503 	struct net_device *dev = dev_id;
2504 	struct cas *cp = netdev_priv(dev);
2505 	unsigned long flags;
2506 	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2507 
2508 	/* check for shared interrupt */
2509 	if (status == 0)
2510 		return IRQ_NONE;
2511 
2512 	spin_lock_irqsave(&cp->lock, flags);
2513 	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2514 #ifdef USE_NAPI
2515 		cas_mask_intr(cp);
2516 		napi_schedule(&cp->napi);
2517 #else
2518 		cas_rx_ringN(cp, 1, 0);
2519 #endif
2520 		status &= ~INTR_RX_DONE_ALT;
2521 	}
2522 	if (status)
2523 		cas_handle_irq1(cp, status);
2524 	spin_unlock_irqrestore(&cp->lock, flags);
2525 	return IRQ_HANDLED;
2526 }
2527 #endif
2528 
2529 static inline void cas_handle_irq(struct net_device *dev,
2530 				  struct cas *cp, const u32 status)
2531 {
2532 	/* housekeeping interrupts */
2533 	if (status & INTR_ERROR_MASK)
2534 		cas_abnormal_irq(dev, cp, status);
2535 
2536 	if (status & INTR_RX_BUF_UNAVAIL) {
2537 		/* Frame arrived, no free RX buffers available.
2538 		 * NOTE: we can get this on a link transition.
2539 		 */
2540 		cas_post_rxds_ringN(cp, 0, 0);
2541 		spin_lock(&cp->stat_lock[0]);
2542 		cp->net_stats[0].rx_dropped++;
2543 		spin_unlock(&cp->stat_lock[0]);
2544 	} else if (status & INTR_RX_BUF_AE) {
2545 		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2546 				    RX_AE_FREEN_VAL(0));
2547 	}
2548 
2549 	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2550 		cas_post_rxcs_ringN(dev, cp, 0);
2551 }
2552 
2553 static irqreturn_t cas_interrupt(int irq, void *dev_id)
2554 {
2555 	struct net_device *dev = dev_id;
2556 	struct cas *cp = netdev_priv(dev);
2557 	unsigned long flags;
2558 	u32 status = readl(cp->regs + REG_INTR_STATUS);
2559 
2560 	if (status == 0)
2561 		return IRQ_NONE;
2562 
2563 	spin_lock_irqsave(&cp->lock, flags);
2564 	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2565 		cas_tx(dev, cp, status);
2566 		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2567 	}
2568 
2569 	if (status & INTR_RX_DONE) {
2570 #ifdef USE_NAPI
2571 		cas_mask_intr(cp);
2572 		napi_schedule(&cp->napi);
2573 #else
2574 		cas_rx_ringN(cp, 0, 0);
2575 #endif
2576 		status &= ~INTR_RX_DONE;
2577 	}
2578 
2579 	if (status)
2580 		cas_handle_irq(dev, cp, status);
2581 	spin_unlock_irqrestore(&cp->lock, flags);
2582 	return IRQ_HANDLED;
2583 }
2584 
2585 
2586 #ifdef USE_NAPI
2587 static int cas_poll(struct napi_struct *napi, int budget)
2588 {
2589 	struct cas *cp = container_of(napi, struct cas, napi);
2590 	struct net_device *dev = cp->dev;
2591 	int i, enable_intr, credits;
2592 	u32 status = readl(cp->regs + REG_INTR_STATUS);
2593 	unsigned long flags;
2594 
2595 	spin_lock_irqsave(&cp->lock, flags);
2596 	cas_tx(dev, cp, status);
2597 	spin_unlock_irqrestore(&cp->lock, flags);
2598 
2599 	/* NAPI rx packets. we spread the credits across all of the
2600 	 * rxc rings
2601 	 *
2602 	 * to be fair with the work, we loop over the rings up to
2603 	 * N_RX_COMP_RINGS times, requesting budget / N_RX_COMP_RINGS
2604 	 * packets from each ring per pass
2605 	 */
2606 	enable_intr = 1;
2607 	credits = 0;
2608 	for (i = 0; i < N_RX_COMP_RINGS; i++) {
2609 		int j;
2610 		for (j = 0; j < N_RX_COMP_RINGS; j++) {
2611 			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2612 			if (credits >= budget) {
2613 				enable_intr = 0;
2614 				goto rx_comp;
2615 			}
2616 		}
2617 	}
2618 
2619 rx_comp:
2620 	/* final rx completion */
2621 	spin_lock_irqsave(&cp->lock, flags);
2622 	if (status)
2623 		cas_handle_irq(dev, cp, status);
2624 
2625 #ifdef USE_PCI_INTB
2626 	if (N_RX_COMP_RINGS > 1) {
2627 		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2628 		if (status)
2629 			cas_handle_irq1(cp, status);
2630 	}
2631 #endif
2632 
2633 #ifdef USE_PCI_INTC
2634 	if (N_RX_COMP_RINGS > 2) {
2635 		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2636 		if (status)
2637 			cas_handle_irqN(dev, cp, status, 2);
2638 	}
2639 #endif
2640 
2641 #ifdef USE_PCI_INTD
2642 	if (N_RX_COMP_RINGS > 3) {
2643 		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2644 		if (status)
2645 			cas_handle_irqN(dev, cp, status, 3);
2646 	}
2647 #endif
2648 	spin_unlock_irqrestore(&cp->lock, flags);
2649 	if (enable_intr) {
2650 		napi_complete(napi);
2651 		cas_unmask_intr(cp);
2652 	}
2653 	return credits;
2654 }
2655 #endif
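
/* Worked example of the credit spreading in cas_poll() (assuming a
 * NAPI budget of 64 and N_RX_COMP_RINGS == 4): each cas_rx_ringN()
 * call is asked for 16 packets, the double loop makes at most 16 such
 * calls, and the poll bails out early -- leaving interrupts masked for
 * another pass -- once 64 credits have been consumed.
 */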
2656 
2657 #ifdef CONFIG_NET_POLL_CONTROLLER
2658 static void cas_netpoll(struct net_device *dev)
2659 {
2660 	struct cas *cp = netdev_priv(dev);
2661 
2662 	cas_disable_irq(cp, 0);
2663 	cas_interrupt(cp->pdev->irq, dev);
2664 	cas_enable_irq(cp, 0);
2665 
2666 #ifdef USE_PCI_INTB
2667 	if (N_RX_COMP_RINGS > 1) {
2668 		/* cas_interrupt1(); */
2669 	}
2670 #endif
2671 #ifdef USE_PCI_INTC
2672 	if (N_RX_COMP_RINGS > 2) {
2673 		/* cas_interruptN(); */
2674 	}
2675 #endif
2676 #ifdef USE_PCI_INTD
2677 	if (N_RX_COMP_RINGS > 3) {
2678 		/* cas_interruptN(); */
2679 	}
2680 #endif
2681 }
2682 #endif
2683 
2684 static void cas_tx_timeout(struct net_device *dev)
2685 {
2686 	struct cas *cp = netdev_priv(dev);
2687 
2688 	netdev_err(dev, "transmit timed out, resetting\n");
2689 	if (!cp->hw_running) {
2690 		netdev_err(dev, "hrm.. hw not running!\n");
2691 		return;
2692 	}
2693 
2694 	netdev_err(dev, "MIF_STATE[%08x]\n",
2695 		   readl(cp->regs + REG_MIF_STATE_MACHINE));
2696 
2697 	netdev_err(dev, "MAC_STATE[%08x]\n",
2698 		   readl(cp->regs + REG_MAC_STATE_MACHINE));
2699 
2700 	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2701 		   readl(cp->regs + REG_TX_CFG),
2702 		   readl(cp->regs + REG_MAC_TX_STATUS),
2703 		   readl(cp->regs + REG_MAC_TX_CFG),
2704 		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2705 		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2706 		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
2707 		   readl(cp->regs + REG_TX_SM_1),
2708 		   readl(cp->regs + REG_TX_SM_2));
2709 
2710 	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2711 		   readl(cp->regs + REG_RX_CFG),
2712 		   readl(cp->regs + REG_MAC_RX_STATUS),
2713 		   readl(cp->regs + REG_MAC_RX_CFG));
2714 
2715 	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2716 		   readl(cp->regs + REG_HP_STATE_MACHINE),
2717 		   readl(cp->regs + REG_HP_STATUS0),
2718 		   readl(cp->regs + REG_HP_STATUS1),
2719 		   readl(cp->regs + REG_HP_STATUS2));
2720 
2721 #if 1
2722 	atomic_inc(&cp->reset_task_pending);
2723 	atomic_inc(&cp->reset_task_pending_all);
2724 	schedule_work(&cp->reset_task);
2725 #else
2726 	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2727 	schedule_work(&cp->reset_task);
2728 #endif
2729 }
2730 
2731 static inline int cas_intme(int ring, int entry)
2732 {
2733 	/* Algorithm: IRQ every 1/2 of descriptors. */
2734 	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2735 		return 1;
2736 	return 0;
2737 }
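
/* Worked example with a hypothetical ring size: if
 * TX_DESC_RINGN_SIZE(ring) == 128, the mask is (128 >> 1) - 1 == 63,
 * so cas_intme() requests an interrupt only at entries 0 and 64 --
 * twice per trip around the ring.
 */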
2738 
2739 
2740 static void cas_write_txd(struct cas *cp, int ring, int entry,
2741 			  dma_addr_t mapping, int len, u64 ctrl, int last)
2742 {
2743 	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2744 
2745 	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2746 	if (cas_intme(ring, entry))
2747 		ctrl |= TX_DESC_INTME;
2748 	if (last)
2749 		ctrl |= TX_DESC_EOF;
2750 	txd->control = cpu_to_le64(ctrl);
2751 	txd->buffer = cpu_to_le64(mapping);
2752 }
2753 
2754 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2755 				const int entry)
2756 {
2757 	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2758 }
2759 
2760 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2761 				     const int entry, const int tentry)
2762 {
2763 	cp->tx_tiny_use[ring][tentry].nbufs++;
2764 	cp->tx_tiny_use[ring][entry].used = 1;
2765 	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2766 }
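
/* Note on the bookkeeping above: the two fields deliberately live in
 * different slots. nbufs is accumulated on the skb's first descriptor
 * (tentry) so cas_tx_ringN() can count the extra slots in one place,
 * while used is set on the tiny slot itself (entry) so the reclaim
 * loop knows to step over it.
 */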
2767 
2768 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2769 				    struct sk_buff *skb)
2770 {
2771 	struct net_device *dev = cp->dev;
2772 	int entry, nr_frags, frag, tabort, tentry;
2773 	dma_addr_t mapping;
2774 	unsigned long flags;
2775 	u64 ctrl;
2776 	u32 len;
2777 
2778 	spin_lock_irqsave(&cp->tx_lock[ring], flags);
2779 
2780 	/* This is a hard error, log it. */
2781 	if (TX_BUFFS_AVAIL(cp, ring) <=
2782 	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2783 		netif_stop_queue(dev);
2784 		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2785 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2786 		return 1;
2787 	}
2788 
2789 	ctrl = 0;
2790 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2791 		const u64 csum_start_off = skb_checksum_start_offset(skb);
2792 		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2793 
2794 		ctrl =  TX_DESC_CSUM_EN |
2795 			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2796 			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2797 	}
2798 
2799 	entry = cp->tx_new[ring];
2800 	cp->tx_skbs[ring][entry] = skb;
2801 
2802 	nr_frags = skb_shinfo(skb)->nr_frags;
2803 	len = skb_headlen(skb);
2804 	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2805 			       offset_in_page(skb->data), len,
2806 			       PCI_DMA_TODEVICE);
2807 
2808 	tentry = entry;
2809 	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2810 	if (unlikely(tabort)) {
2811 		/* NOTE: len is always > tabort */
2812 		cas_write_txd(cp, ring, entry, mapping, len - tabort,
2813 			      ctrl | TX_DESC_SOF, 0);
2814 		entry = TX_DESC_NEXT(ring, entry);
2815 
2816 		skb_copy_from_linear_data_offset(skb, len - tabort,
2817 			      tx_tiny_buf(cp, ring, entry), tabort);
2818 		mapping = tx_tiny_map(cp, ring, entry, tentry);
2819 		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2820 			      (nr_frags == 0));
2821 	} else {
2822 		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2823 			      TX_DESC_SOF, (nr_frags == 0));
2824 	}
2825 	entry = TX_DESC_NEXT(ring, entry);
2826 
2827 	for (frag = 0; frag < nr_frags; frag++) {
2828 		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2829 
2830 		len = skb_frag_size(fragp);
2831 		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2832 					   DMA_TO_DEVICE);
2833 
2834 		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2835 		if (unlikely(tabort)) {
2836 			void *addr;
2837 
2838 			/* NOTE: len is always > tabort */
2839 			cas_write_txd(cp, ring, entry, mapping, len - tabort,
2840 				      ctrl, 0);
2841 			entry = TX_DESC_NEXT(ring, entry);
2842 
2843 			addr = cas_page_map(skb_frag_page(fragp));
2844 			memcpy(tx_tiny_buf(cp, ring, entry),
2845 			       addr + fragp->page_offset + len - tabort,
2846 			       tabort);
2847 			cas_page_unmap(addr);
2848 			mapping = tx_tiny_map(cp, ring, entry, tentry);
2849 			len     = tabort;
2850 		}
2851 
2852 		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2853 			      (frag + 1 == nr_frags));
2854 		entry = TX_DESC_NEXT(ring, entry);
2855 	}
2856 
2857 	cp->tx_new[ring] = entry;
2858 	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2859 		netif_stop_queue(dev);
2860 
2861 	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2862 		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2863 		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2864 	writel(entry, cp->regs + REG_TX_KICKN(ring));
2865 	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2866 	return 0;
2867 }
2868 
2869 static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2870 {
2871 	struct cas *cp = netdev_priv(dev);
2872 
2873 	/* this is only used as a load-balancing hint, so it doesn't
2874 	 * need to be SMP safe
2875 	 */
2876 	static int ring;
2877 
2878 	if (skb_padto(skb, cp->min_frame_size))
2879 		return NETDEV_TX_OK;
2880 
2881 	/* XXX: we need some higher-level QoS hooks to steer packets to
2882 	 *      individual queues.
2883 	 */
2884 	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2885 		return NETDEV_TX_BUSY;
2886 	return NETDEV_TX_OK;
2887 }
2888 
2889 static void cas_init_tx_dma(struct cas *cp)
2890 {
2891 	u64 desc_dma = cp->block_dvma;
2892 	unsigned long off;
2893 	u32 val;
2894 	int i;
2895 
2896 	/* set up tx completion writeback registers. must be 8-byte aligned */
2897 #ifdef USE_TX_COMPWB
2898 	off = offsetof(struct cas_init_block, tx_compwb);
2899 	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2900 	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2901 #endif
2902 
2903 	/* enable completion writebacks, enable paced mode,
2904 	 * disable read pipe, and disable pre-interrupt compwbs
2905 	 */
2906 	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2907 		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2908 		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2909 		TX_CFG_INTR_COMPWB_DIS;
2910 
2911 	/* write out tx ring info and tx desc bases */
2912 	for (i = 0; i < MAX_TX_RINGS; i++) {
2913 		off = (unsigned long) cp->init_txds[i] -
2914 			(unsigned long) cp->init_block;
2915 
2916 		val |= CAS_TX_RINGN_BASE(i);
2917 		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2918 		writel((desc_dma + off) & 0xffffffff, cp->regs +
2919 		       REG_TX_DBN_LOW(i));
2920 		/* don't zero out the kick register here as the system
2921 		 * will wedge
2922 		 */
2923 	}
2924 	writel(val, cp->regs + REG_TX_CFG);
2925 
2926 	/* program max burst sizes. these numbers should be different
2927 	 * if doing QoS.
2928 	 */
2929 #ifdef USE_QOS
2930 	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2931 	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2932 	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2933 	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2934 #else
2935 	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2936 	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2937 	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2938 	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2939 #endif
2940 }
2941 
2942 /* Must be invoked under cp->lock. */
2943 static inline void cas_init_dma(struct cas *cp)
2944 {
2945 	cas_init_tx_dma(cp);
2946 	cas_init_rx_dma(cp);
2947 }
2948 
2949 static void cas_process_mc_list(struct cas *cp)
2950 {
2951 	u16 hash_table[16];
2952 	u32 crc;
2953 	struct netdev_hw_addr *ha;
2954 	int i = 1;
2955 
2956 	memset(hash_table, 0, sizeof(hash_table));
2957 	netdev_for_each_mc_addr(ha, cp->dev) {
2958 		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2959 			/* use the alternate mac address registers for the
2960 			 * first 15 multicast addresses
2961 			 */
2962 			writel((ha->addr[4] << 8) | ha->addr[5],
2963 			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
2964 			writel((ha->addr[2] << 8) | ha->addr[3],
2965 			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
2966 			writel((ha->addr[0] << 8) | ha->addr[1],
2967 			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
2968 			i++;
2969 		} else {
2971 			/* use hw hash table for the next series of
2972 			 * multicast addresses
2973 			 */
2974 			crc = ether_crc_le(ETH_ALEN, ha->addr);
2975 			crc >>= 24;
2976 			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2977 		}
2978 	}
2979 	for (i = 0; i < 16; i++)
2980 		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2981 }
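
/* Worked example of the hash placement above: if ether_crc_le() yields
 * 0x5a000000 for some address, crc >>= 24 leaves 0x5a, so the entry
 * lands in hash_table[0x5a >> 4] == hash_table[5] at bit
 * 15 - (0x5a & 0xf) == 5, matching the bit order of the 16 hash-table
 * registers written below.
 */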
2982 
2983 /* Must be invoked under cp->lock. */
2984 static u32 cas_setup_multicast(struct cas *cp)
2985 {
2986 	u32 rxcfg = 0;
2987 	int i;
2988 
2989 	if (cp->dev->flags & IFF_PROMISC) {
2990 		rxcfg |= MAC_RX_CFG_PROMISC_EN;
2991 
2992 	} else if (cp->dev->flags & IFF_ALLMULTI) {
2993 		for (i = 0; i < 16; i++)
2994 			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2995 		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2996 
2997 	} else {
2998 		cas_process_mc_list(cp);
2999 		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3000 	}
3001 
3002 	return rxcfg;
3003 }
3004 
3005 /* must be invoked under cp->stat_lock[N_TX_RINGS] */
3006 static void cas_clear_mac_err(struct cas *cp)
3007 {
3008 	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3009 	writel(0, cp->regs + REG_MAC_COLL_FIRST);
3010 	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3011 	writel(0, cp->regs + REG_MAC_COLL_LATE);
3012 	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3013 	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3014 	writel(0, cp->regs + REG_MAC_RECV_FRAME);
3015 	writel(0, cp->regs + REG_MAC_LEN_ERR);
3016 	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3017 	writel(0, cp->regs + REG_MAC_FCS_ERR);
3018 	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3019 }
3020 
3021 
3022 static void cas_mac_reset(struct cas *cp)
3023 {
3024 	int i;
3025 
3026 	/* do both TX and RX reset */
3027 	writel(0x1, cp->regs + REG_MAC_TX_RESET);
3028 	writel(0x1, cp->regs + REG_MAC_RX_RESET);
3029 
3030 	/* wait for TX */
3031 	i = STOP_TRIES;
3032 	while (i-- > 0) {
3033 		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3034 			break;
3035 		udelay(10);
3036 	}
3037 
3038 	/* wait for RX */
3039 	i = STOP_TRIES;
3040 	while (i-- > 0) {
3041 		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3042 			break;
3043 		udelay(10);
3044 	}
3045 
3046 	if (readl(cp->regs + REG_MAC_TX_RESET) |
3047 	    readl(cp->regs + REG_MAC_RX_RESET))
3048 		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3049 			   readl(cp->regs + REG_MAC_TX_RESET),
3050 			   readl(cp->regs + REG_MAC_RX_RESET),
3051 			   readl(cp->regs + REG_MAC_STATE_MACHINE));
3052 }
3053 
3054 
3055 /* Must be invoked under cp->lock. */
3056 static void cas_init_mac(struct cas *cp)
3057 {
3058 	unsigned char *e = &cp->dev->dev_addr[0];
3059 	int i;
3060 	cas_mac_reset(cp);
3061 
3062 	/* setup core arbitration weight register */
3063 	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3064 
3065 	/* XXX Use pci_dma_burst_advice() */
3066 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3067 	/* set the infinite burst register for chips that don't have
3068 	 * pci issues.
3069 	 */
3070 	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3071 		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3072 #endif
3073 
3074 	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3075 
3076 	writel(0x00, cp->regs + REG_MAC_IPG0);
3077 	writel(0x08, cp->regs + REG_MAC_IPG1);
3078 	writel(0x04, cp->regs + REG_MAC_IPG2);
3079 
3080 	/* change later for 802.3z */
3081 	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3082 
3083 	/* min frame + FCS */
3084 	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3085 
3086 	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3087 	 * specify the maximum frame size to prevent RX tag errors on
3088 	 * oversized frames.
3089 	 */
3090 	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3091 	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3092 			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3093 	       cp->regs + REG_MAC_FRAMESIZE_MAX);
3094 
3095 	/* NOTE: crc_size is used as a surrogate for half-duplex.
3096 	 * workaround saturn half-duplex issue by increasing preamble
3097 	 * size to 65 bytes.
3098 	 */
3099 	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3100 		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3101 	else
3102 		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3103 	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3104 	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3105 	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3106 
3107 	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3108 
3109 	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3110 	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3111 	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3112 	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3113 	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3114 
3115 	/* setup mac address in perfect filter array */
3116 	for (i = 0; i < 45; i++)
3117 		writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3118 
3119 	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3120 	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3121 	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3122 
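	/* also match the 802.3x flow-control address 01:80:c2:00:00:01,
	 * packed low/mid/high in the same order as the unicast address
	 * above
	 */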
3123 	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3124 	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3125 	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3126 
3127 	cp->mac_rx_cfg = cas_setup_multicast(cp);
3128 
3129 	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3130 	cas_clear_mac_err(cp);
3131 	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3132 
3133 	/* Setup MAC interrupts.  We want to get all of the interesting
3134 	 * counter expiration events, but we do not want to hear about
3135 	 * normal rx/tx as the DMA engine tells us that.
3136 	 */
3137 	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3138 	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3139 
3140 	/* Don't enable even the PAUSE interrupts for now, we
3141 	 * make no use of those events other than to record them.
3142 	 */
3143 	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3144 }
3145 
3146 /* Must be invoked under cp->lock. */
3147 static void cas_init_pause_thresholds(struct cas *cp)
3148 {
3149 	/* Calculate pause thresholds.  Setting the OFF threshold to the
3150 	 * full RX fifo size effectively disables PAUSE generation
3151 	 */
3152 	if (cp->rx_fifo_size <= (2 * 1024)) {
3153 		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3154 	} else {
3155 		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3156 		if (max_frame * 3 > cp->rx_fifo_size) {
3157 			cp->rx_pause_off = 7104;
3158 			cp->rx_pause_on  = 960;
3159 		} else {
3160 			int off = (cp->rx_fifo_size - (max_frame * 2));
3161 			int on = off - max_frame;
3162 			cp->rx_pause_off = off;
3163 			cp->rx_pause_on = on;
3164 		}
3165 	}
3166 }
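
/* Worked example of the thresholds above (assuming, for illustration,
 * a 16KB RX FIFO and a 1500-byte MTU): max_frame = (1500 + 14 + 4 + 4
 * + 64) & ~63 == 1536, the frame size rounded up to a 64-byte
 * multiple. 3 * 1536 fits in 16384, so off = 16384 - 2 * 1536 == 13312
 * and on = 13312 - 1536 == 11776: PAUSE is asserted when two frames of
 * FIFO space remain and released one frame's worth later.
 */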
3167 
3168 static int cas_vpd_match(const void __iomem *p, const char *str)
3169 {
3170 	int len = strlen(str) + 1;
3171 	int i;
3172 
3173 	for (i = 0; i < len; i++) {
3174 		if (readb(p + i) != str[i])
3175 			return 0;
3176 	}
3177 	return 1;
3178 }
3179 
3180 
3181 /* get the mac address by reading the vpd information in the rom.
3182  * also get the phy type and determine if there's an entropy generator.
3183  * NOTE: this is a bit convoluted for the following reasons:
3184  *  1) vpd info has order-dependent mac addresses for multinic cards
3185  *  2) the only way to determine the nic order is to use the slot
3186  *     number.
3187  *  3) fiber cards don't have bridges, so their slot numbers don't
3188  *     mean anything.
3189  *  4) we don't actually know we have a fiber card until after
3190  *     the mac addresses are parsed.
3191  */
3192 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3193 			    const int offset)
3194 {
3195 	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3196 	void __iomem *base, *kstart;
3197 	int i, len;
3198 	int found = 0;
3199 #define VPD_FOUND_MAC        0x01
3200 #define VPD_FOUND_PHY        0x02
3201 
3202 	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3203 	int mac_off  = 0;
3204 
3205 #if defined(CONFIG_SPARC)
3206 	const unsigned char *addr;
3207 #endif
3208 
3209 	/* give us access to the PROM */
3210 	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3211 	       cp->regs + REG_BIM_LOCAL_DEV_EN);
3212 
3213 	/* check for an expansion rom */
3214 	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3215 		goto use_random_mac_addr;
3216 
3217 	/* search for beginning of vpd */
3218 	base = NULL;
3219 	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3220 		/* check for PCIR */
3221 		if ((readb(p + i + 0) == 0x50) &&
3222 		    (readb(p + i + 1) == 0x43) &&
3223 		    (readb(p + i + 2) == 0x49) &&
3224 		    (readb(p + i + 3) == 0x52)) {
3225 			base = p + (readb(p + i + 8) |
3226 				    (readb(p + i + 9) << 8));
3227 			break;
3228 		}
3229 	}
3230 
3231 	if (!base || (readb(base) != 0x82))
3232 		goto use_random_mac_addr;
3233 
3234 	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3235 	while (i < EXPANSION_ROM_SIZE) {
3236 		if (readb(base + i) != 0x90) /* no vpd found */
3237 			goto use_random_mac_addr;
3238 
3239 		/* found a vpd field */
3240 		len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3241 
3242 		/* extract keywords */
3243 		kstart = base + i + 3;
3244 		p = kstart;
3245 		while ((p - kstart) < len) {
3246 			int klen = readb(p + 2);
3247 			int j;
3248 			char type;
3249 
3250 			p += 3;
3251 
3252 			/* look for the following things:
3253 			 * -- correct length == 29
3254 			 * 3 (type) + 2 (size) +
3255 			 * 18 (strlen("local-mac-address") + 1) +
3256 			 * 6 (mac addr)
3257 			 * -- VPD Instance 'I'
3258 			 * -- VPD Type Bytes 'B'
3259 			 * -- VPD data length == 6
3260 			 * -- property string == local-mac-address
3261 			 *
3262 			 * -- correct length == 24
3263 			 * 3 (type) + 2 (size) +
3264 			 * 12 (strlen("entropy-dev") + 1) +
3265 			 * 7 (strlen("vms110") + 1)
3266 			 * -- VPD Instance 'I'
3267 			 * -- VPD Type String 'S'
3268 			 * -- VPD data length == 7
3269 			 * -- property string == entropy-dev
3270 			 *
3271 			 * -- correct length == 18
3272 			 * 3 (type) + 2 (size) +
3273 			 * 9 (strlen("phy-type") + 1) +
3274 			 * 4 (strlen("pcs") + 1)
3275 			 * -- VPD Instance 'I'
3276 			 * -- VPD Type String 'S'
3277 			 * -- VPD data length == 4
3278 			 * -- property string == phy-type
3279 			 *
3280 			 * -- correct length == 23
3281 			 * 3 (type) + 2 (size) +
3282 			 * 14 (strlen("phy-interface") + 1) +
3283 			 * 4 (strlen("pcs") + 1)
3284 			 * -- VPD Instance 'I'
3285 			 * -- VPD Type String 'S'
3286 			 * -- VPD data length == 4
3287 			 * -- property string == phy-interface
3288 			 */
3289 			if (readb(p) != 'I')
3290 				goto next;
3291 
3292 			/* finally, check string and length */
3293 			type = readb(p + 3);
3294 			if (type == 'B') {
3295 				if ((klen == 29) && readb(p + 4) == 6 &&
3296 				    cas_vpd_match(p + 5,
3297 						  "local-mac-address")) {
3298 					if (mac_off++ > offset)
3299 						goto next;
3300 
3301 					/* set mac address */
3302 					for (j = 0; j < 6; j++)
3303 						dev_addr[j] =
3304 							readb(p + 23 + j);
3305 					goto found_mac;
3306 				}
3307 			}
3308 
3309 			if (type != 'S')
3310 				goto next;
3311 
3312 #ifdef USE_ENTROPY_DEV
3313 			if ((klen == 24) &&
3314 			    cas_vpd_match(p + 5, "entropy-dev") &&
3315 			    cas_vpd_match(p + 17, "vms110")) {
3316 				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3317 				goto next;
3318 			}
3319 #endif
3320 
3321 			if (found & VPD_FOUND_PHY)
3322 				goto next;
3323 
3324 			if ((klen == 18) && readb(p + 4) == 4 &&
3325 			    cas_vpd_match(p + 5, "phy-type")) {
3326 				if (cas_vpd_match(p + 14, "pcs")) {
3327 					phy_type = CAS_PHY_SERDES;
3328 					goto found_phy;
3329 				}
3330 			}
3331 
3332 			if ((klen == 23) && readb(p + 4) == 4 &&
3333 			    cas_vpd_match(p + 5, "phy-interface")) {
3334 				if (cas_vpd_match(p + 19, "pcs")) {
3335 					phy_type = CAS_PHY_SERDES;
3336 					goto found_phy;
3337 				}
3338 			}
3339 found_mac:
3340 			found |= VPD_FOUND_MAC;
3341 			goto next;
3342 
3343 found_phy:
3344 			found |= VPD_FOUND_PHY;
3345 
3346 next:
3347 			p += klen;
3348 		}
3349 		i += len + 3;
3350 	}
3351 
3352 use_random_mac_addr:
3353 	if (found & VPD_FOUND_MAC)
3354 		goto done;
3355 
3356 #if defined(CONFIG_SPARC)
3357 	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3358 	if (addr != NULL) {
3359 		memcpy(dev_addr, addr, 6);
3360 		goto done;
3361 	}
3362 #endif
3363 
3364 	/* Sun MAC prefix then 3 random bytes. */
3365 	pr_info("MAC address not found in ROM VPD\n");
3366 	dev_addr[0] = 0x08;
3367 	dev_addr[1] = 0x00;
3368 	dev_addr[2] = 0x20;
3369 	get_random_bytes(dev_addr + 3, 3);
3370 
3371 done:
3372 	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3373 	return phy_type;
3374 }
3375 
3376 /* check pci invariants */
3377 static void cas_check_pci_invariants(struct cas *cp)
3378 {
3379 	struct pci_dev *pdev = cp->pdev;
3380 
3381 	cp->cas_flags = 0;
3382 	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3383 	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3384 		if (pdev->revision >= CAS_ID_REVPLUS)
3385 			cp->cas_flags |= CAS_FLAG_REG_PLUS;
3386 		if (pdev->revision < CAS_ID_REVPLUS02u)
3387 			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3388 
3389 		/* Original Cassini supports HW CSUM, but it's not
3390 		 * enabled by default as it can trigger TX hangs.
3391 		 */
3392 		if (pdev->revision < CAS_ID_REV2)
3393 			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3394 	} else {
3395 		/* Only sun has original cassini chips.  */
3396 		cp->cas_flags |= CAS_FLAG_REG_PLUS;
3397 
3398 		/* We use a flag because the same phy might be externally
3399 		 * connected.
3400 		 */
3401 		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3402 		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3403 			cp->cas_flags |= CAS_FLAG_SATURN;
3404 	}
3405 }
3406 
3407 
3408 static int cas_check_invariants(struct cas *cp)
3409 {
3410 	struct pci_dev *pdev = cp->pdev;
3411 	u32 cfg;
3412 	int i;
3413 
3414 	/* get page size for rx buffers. */
3415 	cp->page_order = 0;
3416 #ifdef USE_PAGE_ORDER
3417 	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3418 		/* see if we can allocate larger pages */
3419 		struct page *page = alloc_pages(GFP_ATOMIC,
3420 						CAS_JUMBO_PAGE_SHIFT -
3421 						PAGE_SHIFT);
3422 		if (page) {
3423 			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3424 			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3425 		} else {
3426 			pr_info("MTU limited to %d bytes\n", CAS_MAX_MTU);
3427 		}
3428 	}
3429 #endif
3430 	cp->page_size = (PAGE_SIZE << cp->page_order);
3431 
3432 	/* Fetch the FIFO configurations. */
3433 	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3434 	cp->rx_fifo_size = RX_FIFO_SIZE;
3435 
3436 	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
3437 	 * they're both connected.
3438 	 */
3439 	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3440 					PCI_SLOT(pdev->devfn));
3441 	if (cp->phy_type & CAS_PHY_SERDES) {
3442 		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3443 		return 0; /* no more checking needed */
3444 	}
3445 
3446 	/* MII */
3447 	cfg = readl(cp->regs + REG_MIF_CFG);
3448 	if (cfg & MIF_CFG_MDIO_1) {
3449 		cp->phy_type = CAS_PHY_MII_MDIO1;
3450 	} else if (cfg & MIF_CFG_MDIO_0) {
3451 		cp->phy_type = CAS_PHY_MII_MDIO0;
3452 	}
3453 
3454 	cas_mif_poll(cp, 0);
3455 	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3456 
3457 	for (i = 0; i < 32; i++) {
3458 		u32 phy_id;
3459 		int j;
3460 
3461 		for (j = 0; j < 3; j++) {
3462 			cp->phy_addr = i;
3463 			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3464 			phy_id |= cas_phy_read(cp, MII_PHYSID2);
3465 			if (phy_id && (phy_id != 0xFFFFFFFF)) {
3466 				cp->phy_id = phy_id;
3467 				goto done;
3468 			}
3469 		}
3470 	}
3471 	pr_err("MII phy did not respond [%08x]\n",
3472 	       readl(cp->regs + REG_MIF_STATE_MACHINE));
3473 	return -1;
3474 
3475 done:
3476 	/* see if we can do gigabit */
3477 	cfg = cas_phy_read(cp, MII_BMSR);
3478 	if ((cfg & CAS_BMSR_1000_EXTEND) &&
3479 	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
3480 		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3481 	return 0;
3482 }
3483 
3484 /* Must be invoked under cp->lock. */
3485 static inline void cas_start_dma(struct cas *cp)
3486 {
3487 	int i;
3488 	u32 val;
3489 	int txfailed = 0;
3490 
3491 	/* enable dma */
3492 	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3493 	writel(val, cp->regs + REG_TX_CFG);
3494 	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3495 	writel(val, cp->regs + REG_RX_CFG);
3496 
3497 	/* enable the mac */
3498 	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3499 	writel(val, cp->regs + REG_MAC_TX_CFG);
3500 	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3501 	writel(val, cp->regs + REG_MAC_RX_CFG);
3502 
3503 	i = STOP_TRIES;
3504 	while (i-- > 0) {
3505 		val = readl(cp->regs + REG_MAC_TX_CFG);
3506 		if ((val & MAC_TX_CFG_EN))
3507 			break;
3508 		udelay(10);
3509 	}
3510 	if (i < 0)
		txfailed = 1;
3511 	i = STOP_TRIES;
3512 	while (i-- > 0) {
3513 		val = readl(cp->regs + REG_MAC_RX_CFG);
3514 		if ((val & MAC_RX_CFG_EN)) {
3515 			if (txfailed) {
3516 				netdev_err(cp->dev,
3517 					   "enabling mac failed [tx:%08x:%08x]\n",
3518 					   readl(cp->regs + REG_MIF_STATE_MACHINE),
3519 					   readl(cp->regs + REG_MAC_STATE_MACHINE));
3520 			}
3521 			goto enable_rx_done;
3522 		}
3523 		udelay(10);
3524 	}
3525 	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3526 		   (txfailed ? "tx,rx" : "rx"),
3527 		   readl(cp->regs + REG_MIF_STATE_MACHINE),
3528 		   readl(cp->regs + REG_MAC_STATE_MACHINE));
3529 
3530 enable_rx_done:
3531 	cas_unmask_intr(cp); /* enable interrupts */
3532 	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3533 	writel(0, cp->regs + REG_RX_COMP_TAIL);
3534 
3535 	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3536 		if (N_RX_DESC_RINGS > 1)
3537 			writel(RX_DESC_RINGN_SIZE(1) - 4,
3538 			       cp->regs + REG_PLUS_RX_KICK1);
3539 
3540 		for (i = 1; i < N_RX_COMP_RINGS; i++)
3541 			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3542 	}
3543 }
3544 
3545 /* Must be invoked under cp->lock. */
3546 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3547 				   int *pause)
3548 {
3549 	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3550 	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
3551 	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3552 	if (val & PCS_MII_LPA_ASYM_PAUSE)
3553 		*pause |= 0x10;
3554 	*spd = 1000;
3555 }
3556 
3557 /* Must be invoked under cp->lock. */
3558 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3559 				   int *pause)
3560 {
3561 	u32 val;
3562 
3563 	*fd = 0;
3564 	*spd = 10;
3565 	*pause = 0;
3566 
3567 	/* use GMII registers */
3568 	val = cas_phy_read(cp, MII_LPA);
3569 	if (val & CAS_LPA_PAUSE)
3570 		*pause = 0x01;
3571 
3572 	if (val & CAS_LPA_ASYM_PAUSE)
3573 		*pause |= 0x10;
3574 
3575 	if (val & LPA_DUPLEX)
3576 		*fd = 1;
3577 	if (val & LPA_100)
3578 		*spd = 100;
3579 
3580 	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3581 		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3582 		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3583 			*spd = 1000;
3584 		if (val & CAS_LPA_1000FULL)
3585 			*fd = 1;
3586 	}
3587 }
3588 
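/* Both link-mode readers above encode the negotiated pause mode as a
 * small bitmask that cas_set_link_modes() decodes:
 *
 *	pause & 0x01	-- symmetric pause negotiated
 *	pause & 0x10	-- asymmetric pause negotiated
 *
 * e.g. a link partner advertising both yields pause == 0x11.
 */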
3589 /* A link-up condition has occurred, initialize and enable the
3590  * rest of the chip.
3591  *
3592  * Must be invoked under cp->lock.
3593  */
3594 static void cas_set_link_modes(struct cas *cp)
3595 {
3596 	u32 val;
3597 	int full_duplex, speed, pause;
3598 
3599 	full_duplex = 0;
3600 	speed = 10;
3601 	pause = 0;
3602 
3603 	if (CAS_PHY_MII(cp->phy_type)) {
3604 		cas_mif_poll(cp, 0);
3605 		val = cas_phy_read(cp, MII_BMCR);
3606 		if (val & BMCR_ANENABLE) {
3607 			cas_read_mii_link_mode(cp, &full_duplex, &speed,
3608 					       &pause);
3609 		} else {
3610 			if (val & BMCR_FULLDPLX)
3611 				full_duplex = 1;
3612 
3613 			if (val & BMCR_SPEED100)
3614 				speed = 100;
3615 			else if (val & CAS_BMCR_SPEED1000)
3616 				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3617 					1000 : 100;
3618 		}
3619 		cas_mif_poll(cp, 1);
3620 
3621 	} else {
3622 		val = readl(cp->regs + REG_PCS_MII_CTRL);
3623 		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3624 		if ((val & PCS_MII_AUTONEG_EN) == 0) {
3625 			if (val & PCS_MII_CTRL_DUPLEX)
3626 				full_duplex = 1;
3627 		}
3628 	}
3629 
3630 	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3631 		   speed, full_duplex ? "full" : "half");
3632 
3633 	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3634 	if (CAS_PHY_MII(cp->phy_type)) {
3635 		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3636 		if (!full_duplex)
3637 			val |= MAC_XIF_DISABLE_ECHO;
3638 	}
3639 	if (full_duplex)
3640 		val |= MAC_XIF_FDPLX_LED;
3641 	if (speed == 1000)
3642 		val |= MAC_XIF_GMII_MODE;
3643 	writel(val, cp->regs + REG_MAC_XIF_CFG);
3644 
3645 	/* deal with carrier and collision detect. */
3646 	val = MAC_TX_CFG_IPG_EN;
3647 	if (full_duplex) {
3648 		val |= MAC_TX_CFG_IGNORE_CARRIER;
3649 		val |= MAC_TX_CFG_IGNORE_COLL;
3650 	} else {
3651 #ifndef USE_CSMA_CD_PROTO
3652 		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3653 		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3654 #endif
3655 	}
3656 	/* val now set up for REG_MAC_TX_CFG */
3657 
3658 	/* If gigabit and half-duplex, enable carrier extension
3659 	 * mode.  increase slot time to 512 bytes as well.
3660 	 * else, disable it and make sure slot time is 64 bytes.
3661 	 * also activate checksum bug workaround
3662 	 */
3663 	if ((speed == 1000) && !full_duplex) {
3664 		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3665 		       cp->regs + REG_MAC_TX_CFG);
3666 
3667 		val = readl(cp->regs + REG_MAC_RX_CFG);
3668 		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3669 		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3670 		       cp->regs + REG_MAC_RX_CFG);
3671 
3672 		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3673 
3674 		cp->crc_size = 4;
3675 		/* minimum size gigabit frame at half duplex */
3676 		cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3677 
3678 	} else {
3679 		writel(val, cp->regs + REG_MAC_TX_CFG);
3680 
3681 		/* checksum bug workaround. don't strip FCS when in
3682 		 * half-duplex mode
3683 		 */
3684 		val = readl(cp->regs + REG_MAC_RX_CFG);
3685 		if (full_duplex) {
3686 			val |= MAC_RX_CFG_STRIP_FCS;
3687 			cp->crc_size = 0;
3688 			cp->min_frame_size = CAS_MIN_MTU;
3689 		} else {
3690 			val &= ~MAC_RX_CFG_STRIP_FCS;
3691 			cp->crc_size = 4;
3692 			cp->min_frame_size = CAS_MIN_FRAME;
3693 		}
3694 		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3695 		       cp->regs + REG_MAC_RX_CFG);
3696 		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3697 	}
3698 
3699 	if (netif_msg_link(cp)) {
3700 		if (pause & 0x01) {
3701 			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3702 				    cp->rx_fifo_size,
3703 				    cp->rx_pause_off,
3704 				    cp->rx_pause_on);
3705 		} else if (pause & 0x10) {
3706 			netdev_info(cp->dev, "TX pause enabled\n");
3707 		} else {
3708 			netdev_info(cp->dev, "Pause is disabled\n");
3709 		}
3710 	}
3711 
3712 	val = readl(cp->regs + REG_MAC_CTRL_CFG);
3713 	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3714 	if (pause) { /* symmetric or asymmetric pause */
3715 		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3716 		if (pause & 0x01) { /* symmetric pause */
3717 			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3718 		}
3719 	}
3720 	writel(val, cp->regs + REG_MAC_CTRL_CFG);
3721 	cas_start_dma(cp);
3722 }
3723 
3724 /* Must be invoked under cp->lock. */
3725 static void cas_init_hw(struct cas *cp, int restart_link)
3726 {
3727 	if (restart_link)
3728 		cas_phy_init(cp);
3729 
3730 	cas_init_pause_thresholds(cp);
3731 	cas_init_mac(cp);
3732 	cas_init_dma(cp);
3733 
3734 	if (restart_link) {
3735 		/* Default aneg parameters */
3736 		cp->timer_ticks = 0;
3737 		cas_begin_auto_negotiation(cp, NULL);
3738 	} else if (cp->lstate == link_up) {
3739 		cas_set_link_modes(cp);
3740 		netif_carrier_on(cp->dev);
3741 	}
3742 }
3743 
3744 /* Must be invoked under cp->lock. on earlier cassini boards,
3745  * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3746  * let it settle out, and then restore pci state.
3747  */
3748 static void cas_hard_reset(struct cas *cp)
3749 {
3750 	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3751 	udelay(20);
3752 	pci_restore_state(cp->pdev);
3753 }
3754 
3755 
3756 static void cas_global_reset(struct cas *cp, int blkflag)
3757 {
3758 	int limit;
3759 
3760 	/* issue a global reset. don't use RSTOUT. */
3761 	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3762 		/* For PCS, when the blkflag is set, we should set the
3763 		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3764 		 * the last autonegotiation from being cleared.  We'll
3765 		 * need some special handling if the chip is set into a
3766 		 * loopback mode.
3767 		 */
3768 		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3769 		       cp->regs + REG_SW_RESET);
3770 	} else {
3771 		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3772 	}
3773 
3774 	/* need to wait at least 3ms before polling register */
3775 	mdelay(3);
3776 
3777 	limit = STOP_TRIES;
3778 	while (limit-- > 0) {
3779 		u32 val = readl(cp->regs + REG_SW_RESET);
3780 		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3781 			goto done;
3782 		udelay(10);
3783 	}
3784 	netdev_err(cp->dev, "sw reset failed\n");
3785 
3786 done:
3787 	/* enable various BIM interrupts */
3788 	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3789 	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3790 
3791 	/* clear out pci error status mask for handled errors.
3792 	 * we don't deal with DMA counter overflows as they happen
3793 	 * all the time.
3794 	 */
3795 	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3796 			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3797 			       PCI_ERR_BIM_DMA_READ), cp->regs +
3798 	       REG_PCI_ERR_STATUS_MASK);
3799 
3800 	/* set up for MII by default to address mac rx reset timeout
3801 	 * issue
3802 	 */
3803 	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3804 }
3805 
3806 static void cas_reset(struct cas *cp, int blkflag)
3807 {
3808 	u32 val;
3809 
3810 	cas_mask_intr(cp);
3811 	cas_global_reset(cp, blkflag);
3812 	cas_mac_reset(cp);
3813 	cas_entropy_reset(cp);
3814 
3815 	/* disable dma engines. */
3816 	val = readl(cp->regs + REG_TX_CFG);
3817 	val &= ~TX_CFG_DMA_EN;
3818 	writel(val, cp->regs + REG_TX_CFG);
3819 
3820 	val = readl(cp->regs + REG_RX_CFG);
3821 	val &= ~RX_CFG_DMA_EN;
3822 	writel(val, cp->regs + REG_RX_CFG);
3823 
3824 	/* program header parser */
3825 	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3826 	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3827 		cas_load_firmware(cp, CAS_HP_FIRMWARE);
3828 	} else {
3829 		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3830 	}
3831 
3832 	/* clear out error registers */
3833 	spin_lock(&cp->stat_lock[N_TX_RINGS]);
3834 	cas_clear_mac_err(cp);
3835 	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3836 }
3837 
3838 /* Shut down the chip, must be called with pm_mutex held.  */
3839 static void cas_shutdown(struct cas *cp)
3840 {
3841 	unsigned long flags;
3842 
3843 	/* Make us not-running to avoid timers respawning */
3844 	cp->hw_running = 0;
3845 
3846 	del_timer_sync(&cp->link_timer);
3847 
3848 	/* Stop the reset task */
3849 #if 0
3850 	while (atomic_read(&cp->reset_task_pending_mtu) ||
3851 	       atomic_read(&cp->reset_task_pending_spare) ||
3852 	       atomic_read(&cp->reset_task_pending_all))
3853 		schedule();
3854 
3855 #else
3856 	while (atomic_read(&cp->reset_task_pending))
3857 		schedule();
3858 #endif
3859 	/* Actually stop the chip */
3860 	cas_lock_all_save(cp, flags);
3861 	cas_reset(cp, 0);
3862 	if (cp->cas_flags & CAS_FLAG_SATURN)
3863 		cas_phy_powerdown(cp);
3864 	cas_unlock_all_restore(cp, flags);
3865 }
3866 
3867 static int cas_change_mtu(struct net_device *dev, int new_mtu)
3868 {
3869 	struct cas *cp = netdev_priv(dev);
3870 
3871 	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3872 		return -EINVAL;
3873 
3874 	dev->mtu = new_mtu;
3875 	if (!netif_running(dev) || !netif_device_present(dev))
3876 		return 0;
3877 
3878 	/* let the reset task handle it */
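	/* a SERDES link goes through the full reset path so that the
	 * subsequent cas_init_hw() restarts autonegotiation; a copper
	 * PHY only needs the ring/MTU portion of the reset.
	 */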
3879 #if 1
3880 	atomic_inc(&cp->reset_task_pending);
3881 	if ((cp->phy_type & CAS_PHY_SERDES)) {
3882 		atomic_inc(&cp->reset_task_pending_all);
3883 	} else {
3884 		atomic_inc(&cp->reset_task_pending_mtu);
3885 	}
3886 	schedule_work(&cp->reset_task);
3887 #else
3888 	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3889 		   CAS_RESET_ALL : CAS_RESET_MTU);
3890 	pr_err("reset called in cas_change_mtu\n");
3891 	schedule_work(&cp->reset_task);
3892 #endif
3893 
3894 	flush_work_sync(&cp->reset_task);
3895 	return 0;
3896 }
3897 
3898 static void cas_clean_txd(struct cas *cp, int ring)
3899 {
3900 	struct cas_tx_desc *txd = cp->init_txds[ring];
3901 	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3902 	u64 daddr, dlen;
3903 	int i, size;
3904 
3905 	size = TX_DESC_RINGN_SIZE(ring);
3906 	for (i = 0; i < size; i++) {
3907 		int frag;
3908 
3909 		if (skbs[i] == NULL)
3910 			continue;
3911 
3912 		skb = skbs[i];
3913 		skbs[i] = NULL;
3914 
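		/* a packet occupies one descriptor for the skb head plus
		 * one per page fragment, hence nr_frags + 1 iterations;
		 * interleaved tiny buffers are skipped below.
		 */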
3915 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
3916 			int ent = i & (size - 1);
3917 
3918 			/* first buffer is never a tiny buffer and so
3919 			 * needs to be unmapped.
3920 			 */
3921 			daddr = le64_to_cpu(txd[ent].buffer);
3922 			dlen  =  CAS_VAL(TX_DESC_BUFLEN,
3923 					 le64_to_cpu(txd[ent].control));
3924 			pci_unmap_page(cp->pdev, daddr, dlen,
3925 				       PCI_DMA_TODEVICE);
3926 
3927 			if (frag != skb_shinfo(skb)->nr_frags) {
3928 				i++;
3929 
3930 				/* next buffer might be a tiny buffer.
3931 				 * skip past it.
3932 				 */
3933 				ent = i & (size - 1);
3934 				if (cp->tx_tiny_use[ring][ent].used)
3935 					i++;
3936 			}
3937 		}
3938 		dev_kfree_skb_any(skb);
3939 	}
3940 
3941 	/* zero out tiny buf usage */
3942 	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3943 }
3944 
3945 /* freed on close */
3946 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3947 {
3948 	cas_page_t **page = cp->rx_pages[ring];
3949 	int i, size;
3950 
3951 	size = RX_DESC_RINGN_SIZE(ring);
3952 	for (i = 0; i < size; i++) {
3953 		if (page[i]) {
3954 			cas_page_free(cp, page[i]);
3955 			page[i] = NULL;
3956 		}
3957 	}
3958 }
3959 
3960 static void cas_free_rxds(struct cas *cp)
3961 {
3962 	int i;
3963 
3964 	for (i = 0; i < N_RX_DESC_RINGS; i++)
3965 		cas_free_rx_desc(cp, i);
3966 }
3967 
3968 /* Must be invoked under cp->lock. */
3969 static void cas_clean_rings(struct cas *cp)
3970 {
3971 	int i;
3972 
3973 	/* need to clean all tx rings */
3974 	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3975 	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3976 	for (i = 0; i < N_TX_RINGS; i++)
3977 		cas_clean_txd(cp, i);
3978 
3979 	/* zero out init block */
3980 	memset(cp->init_block, 0, sizeof(struct cas_init_block));
3981 	cas_clean_rxds(cp);
3982 	cas_clean_rxcs(cp);
3983 }
3984 
3985 /* allocated on open */
3986 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3987 {
3988 	cas_page_t **page = cp->rx_pages[ring];
3989 	int size, i = 0;
3990 
3991 	size = RX_DESC_RINGN_SIZE(ring);
3992 	for (i = 0; i < size; i++) {
3993 		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3994 			return -1;
3995 	}
3996 	return 0;
3997 }
3998 
3999 static int cas_alloc_rxds(struct cas *cp)
4000 {
4001 	int i;
4002 
4003 	for (i = 0; i < N_RX_DESC_RINGS; i++) {
4004 		if (cas_alloc_rx_desc(cp, i) < 0) {
4005 			cas_free_rxds(cp);
4006 			return -1;
4007 		}
4008 	}
4009 	return 0;
4010 }
4011 
4012 static void cas_reset_task(struct work_struct *work)
4013 {
4014 	struct cas *cp = container_of(work, struct cas, reset_task);
4015 #if 0
4016 	int pending = atomic_read(&cp->reset_task_pending);
4017 #else
4018 	int pending_all = atomic_read(&cp->reset_task_pending_all);
4019 	int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4020 	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4021 
4022 	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4023 		/* We can have more tasks scheduled than actually
4024 		 * needed.
4025 		 */
4026 		atomic_dec(&cp->reset_task_pending);
4027 		return;
4028 	}
4029 #endif
4030 	/* The link went down, we reset the ring, but keep
4031 	 * DMA stopped. Use this function for reset
4032 	 * on error as well.
4033 	 */
4034 	if (cp->hw_running) {
4035 		unsigned long flags;
4036 
4037 		/* Make sure we don't get interrupts or tx packets */
4038 		netif_device_detach(cp->dev);
4039 		cas_lock_all_save(cp, flags);
4040 
4041 		if (cp->opened) {
4042 			/* We call cas_spare_recover when we call cas_open.
4043 			/* cas_spare_recover is also called from cas_open,
4044 			 * but the lists it uses are not initialized until
4045 			 * cas_open runs, so only recover spares once opened.
4046 			cas_spare_recover(cp, GFP_ATOMIC);
4047 		}
4048 #if 1
4049 		/* test => only pending_spare set */
4050 		if (!pending_all && !pending_mtu)
4051 			goto done;
4052 #else
4053 		if (pending == CAS_RESET_SPARE)
4054 			goto done;
4055 #endif
4056 		/* when pending == CAS_RESET_ALL, the following
4057 		 * call to cas_init_hw will restart auto negotiation.
4058 		 * Setting the second argument of cas_reset to
4059 		 * !(pending == CAS_RESET_ALL) will set this argument
4060 		 * to 1 (avoiding reinitializing the PHY for the normal
4061 		 * PCS case) when auto negotiation is not restarted.
4062 		 */
4063 #if 1
4064 		cas_reset(cp, !(pending_all > 0));
4065 		if (cp->opened)
4066 			cas_clean_rings(cp);
4067 		cas_init_hw(cp, (pending_all > 0));
4068 #else
4069 		cas_reset(cp, !(pending == CAS_RESET_ALL));
4070 		if (cp->opened)
4071 			cas_clean_rings(cp);
4072 		cas_init_hw(cp, pending == CAS_RESET_ALL);
4073 #endif
4074 
4075 done:
4076 		cas_unlock_all_restore(cp, flags);
4077 		netif_device_attach(cp->dev);
4078 	}
4079 #if 1
4080 	atomic_sub(pending_all, &cp->reset_task_pending_all);
4081 	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4082 	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4083 	atomic_dec(&cp->reset_task_pending);
4084 #else
4085 	atomic_set(&cp->reset_task_pending, 0);
4086 #endif
4087 }
4088 
4089 static void cas_link_timer(unsigned long data)
4090 {
4091 	struct cas *cp = (struct cas *) data;
4092 	int mask, pending = 0, reset = 0;
4093 	unsigned long flags;
4094 
4095 	if (link_transition_timeout != 0 &&
4096 	    cp->link_transition_jiffies_valid &&
4097 	    ((jiffies - cp->link_transition_jiffies) >
4098 	      (link_transition_timeout))) {
4099 		/* One-second counter so link-down workaround doesn't
4100 		 * cause resets to occur so fast as to fool the switch
4101 		 * into thinking the link is down.
4102 		 */
4103 		cp->link_transition_jiffies_valid = 0;
4104 	}
4105 
4106 	if (!cp->hw_running)
4107 		return;
4108 
4109 	spin_lock_irqsave(&cp->lock, flags);
4110 	cas_lock_tx(cp);
4111 	cas_entropy_gather(cp);
4112 
4113 	/* If the reset task is still pending, we just
4114 	 * reschedule the link timer
4115 	 */
4116 #if 1
4117 	if (atomic_read(&cp->reset_task_pending_all) ||
4118 	    atomic_read(&cp->reset_task_pending_spare) ||
4119 	    atomic_read(&cp->reset_task_pending_mtu))
4120 		goto done;
4121 #else
4122 	if (atomic_read(&cp->reset_task_pending))
4123 		goto done;
4124 #endif
4125 
4126 	/* check for rx cleaning */
4127 	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4128 		int i, rmask;
4129 
4130 		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4131 			rmask = CAS_FLAG_RXD_POST(i);
4132 			if ((mask & rmask) == 0)
4133 				continue;
4134 
4135 			/* post_rxds will do a mod_timer */
4136 			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4137 				pending = 1;
4138 				continue;
4139 			}
4140 			cp->cas_flags &= ~rmask;
4141 		}
4142 	}
4143 
4144 	if (CAS_PHY_MII(cp->phy_type)) {
4145 		u16 bmsr;
4146 		cas_mif_poll(cp, 0);
4147 		bmsr = cas_phy_read(cp, MII_BMSR);
4148 		/* WTZ: Solaris driver reads this twice, but that
4149 		 * may be due to the PCS case and the use of a
4150 		 * common implementation. Read it twice here to be
4151 		 * safe.
4152 		 */
4153 		bmsr = cas_phy_read(cp, MII_BMSR);
4154 		cas_mif_poll(cp, 1);
4155 		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4156 		reset = cas_mii_link_check(cp, bmsr);
4157 	} else {
4158 		reset = cas_pcs_link_check(cp);
4159 	}
4160 
4161 	if (reset)
4162 		goto done;
4163 
4164 	/* check for tx state machine confusion */
4165 	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4166 		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4167 		u32 wptr, rptr;
4168 		int tlm  = CAS_VAL(MAC_SM_TLM, val);
4169 
4170 		if (((tlm == 0x5) || (tlm == 0x3)) &&
4171 		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4172 			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4173 				     "tx err: MAC_STATE[%08x]\n", val);
4174 			reset = 1;
4175 			goto done;
4176 		}
4177 
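		/* sanity check: a zero packet count while the FIFO write
		 * and read pointers disagree means the FIFO is holding
		 * data the state machine has lost track of -- treat the
		 * transmitter as wedged.
		 */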
4178 		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4179 		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4180 		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4181 		if ((val == 0) && (wptr != rptr)) {
4182 			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4183 				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4184 				     val, wptr, rptr);
4185 			reset = 1;
4186 		}
4187 
4188 		if (reset)
4189 			cas_hard_reset(cp);
4190 	}
4191 
4192 done:
4193 	if (reset) {
4194 #if 1
4195 		atomic_inc(&cp->reset_task_pending);
4196 		atomic_inc(&cp->reset_task_pending_all);
4197 		schedule_work(&cp->reset_task);
4198 #else
4199 		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4200 		pr_err("reset called in cas_link_timer\n");
4201 		schedule_work(&cp->reset_task);
4202 #endif
4203 	}
4204 
4205 	if (!pending)
4206 		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4207 	cas_unlock_tx(cp);
4208 	spin_unlock_irqrestore(&cp->lock, flags);
4209 }
4210 
4211 /* tiny buffers are used to avoid target abort issues with
4212  * older cassini chips.
4213  */
4214 static void cas_tx_tiny_free(struct cas *cp)
4215 {
4216 	struct pci_dev *pdev = cp->pdev;
4217 	int i;
4218 
4219 	for (i = 0; i < N_TX_RINGS; i++) {
4220 		if (!cp->tx_tiny_bufs[i])
4221 			continue;
4222 
4223 		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4224 				    cp->tx_tiny_bufs[i],
4225 				    cp->tx_tiny_dvma[i]);
4226 		cp->tx_tiny_bufs[i] = NULL;
4227 	}
4228 }
4229 
4230 static int cas_tx_tiny_alloc(struct cas *cp)
4231 {
4232 	struct pci_dev *pdev = cp->pdev;
4233 	int i;
4234 
4235 	for (i = 0; i < N_TX_RINGS; i++) {
4236 		cp->tx_tiny_bufs[i] =
4237 			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4238 					     &cp->tx_tiny_dvma[i]);
4239 		if (!cp->tx_tiny_bufs[i]) {
4240 			cas_tx_tiny_free(cp);
4241 			return -1;
4242 		}
4243 	}
4244 	return 0;
4245 }
4246 
4247 
4248 static int cas_open(struct net_device *dev)
4249 {
4250 	struct cas *cp = netdev_priv(dev);
4251 	int hw_was_up, err;
4252 	unsigned long flags;
4253 
4254 	mutex_lock(&cp->pm_mutex);
4255 
4256 	hw_was_up = cp->hw_running;
4257 
4258 	/* The power-management mutex protects the hw_running
4259 	 * etc. state so it is safe to do this bit without cp->lock
4260 	 */
4261 	if (!cp->hw_running) {
4262 		/* Reset the chip */
4263 		cas_lock_all_save(cp, flags);
4264 		/* We set the second arg to cas_reset to zero
4265 		 * because cas_init_hw below will have its second
4266 		 * argument set to non-zero, which will force
4267 		 * autonegotiation to start.
4268 		 */
4269 		cas_reset(cp, 0);
4270 		cp->hw_running = 1;
4271 		cas_unlock_all_restore(cp, flags);
4272 	}
4273 
4274 	err = -ENOMEM;
4275 	if (cas_tx_tiny_alloc(cp) < 0)
4276 		goto err_unlock;
4277 
4278 	/* alloc rx descriptors */
4279 	if (cas_alloc_rxds(cp) < 0)
4280 		goto err_tx_tiny;
4281 
4282 	/* allocate spares */
4283 	cas_spare_init(cp);
4284 	cas_spare_recover(cp, GFP_KERNEL);
4285 
4286 	/* We can now request the interrupt as we know it's masked
4287 	 * on the controller. cassini+ has up to 4 interrupts
4288 	 * that can be used, but you need to do explicit pci interrupt
4289 	 * mapping to expose them
4290 	 */
4291 	if (request_irq(cp->pdev->irq, cas_interrupt,
4292 			IRQF_SHARED, dev->name, (void *) dev)) {
4293 		netdev_err(cp->dev, "failed to request irq!\n");
4294 		err = -EAGAIN;
4295 		goto err_spare;
4296 	}
4297 
4298 #ifdef USE_NAPI
4299 	napi_enable(&cp->napi);
4300 #endif
4301 	/* init hw */
4302 	cas_lock_all_save(cp, flags);
4303 	cas_clean_rings(cp);
4304 	cas_init_hw(cp, !hw_was_up);
4305 	cp->opened = 1;
4306 	cas_unlock_all_restore(cp, flags);
4307 
4308 	netif_start_queue(dev);
4309 	mutex_unlock(&cp->pm_mutex);
4310 	return 0;
4311 
4312 err_spare:
4313 	cas_spare_free(cp);
4314 	cas_free_rxds(cp);
4315 err_tx_tiny:
4316 	cas_tx_tiny_free(cp);
4317 err_unlock:
4318 	mutex_unlock(&cp->pm_mutex);
4319 	return err;
4320 }
4321 
4322 static int cas_close(struct net_device *dev)
4323 {
4324 	unsigned long flags;
4325 	struct cas *cp = netdev_priv(dev);
4326 
4327 #ifdef USE_NAPI
4328 	napi_disable(&cp->napi);
4329 #endif
4330 	/* Make sure we don't get distracted by suspend/resume */
4331 	mutex_lock(&cp->pm_mutex);
4332 
4333 	netif_stop_queue(dev);
4334 
4335 	/* Stop traffic, mark us closed */
4336 	cas_lock_all_save(cp, flags);
4337 	cp->opened = 0;
4338 	cas_reset(cp, 0);
4339 	cas_phy_init(cp);
4340 	cas_begin_auto_negotiation(cp, NULL);
4341 	cas_clean_rings(cp);
4342 	cas_unlock_all_restore(cp, flags);
4343 
4344 	free_irq(cp->pdev->irq, (void *) dev);
4345 	cas_spare_free(cp);
4346 	cas_free_rxds(cp);
4347 	cas_tx_tiny_free(cp);
4348 	mutex_unlock(&cp->pm_mutex);
4349 	return 0;
4350 }
4351 
4352 static struct {
4353 	const char name[ETH_GSTRING_LEN];
4354 } ethtool_cassini_statnames[] = {
4355 	{"collisions"},
4356 	{"rx_bytes"},
4357 	{"rx_crc_errors"},
4358 	{"rx_dropped"},
4359 	{"rx_errors"},
4360 	{"rx_fifo_errors"},
4361 	{"rx_frame_errors"},
4362 	{"rx_length_errors"},
4363 	{"rx_over_errors"},
4364 	{"rx_packets"},
4365 	{"tx_aborted_errors"},
4366 	{"tx_bytes"},
4367 	{"tx_dropped"},
4368 	{"tx_errors"},
4369 	{"tx_fifo_errors"},
4370 	{"tx_packets"}
4371 };
4372 #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4373 
4374 static struct {
4375 	const int offsets;	/* neg. values for 2nd arg to cas_phy_read */
4376 } ethtool_register_table[] = {
4377 	{-MII_BMSR},
4378 	{-MII_BMCR},
4379 	{REG_CAWR},
4380 	{REG_INF_BURST},
4381 	{REG_BIM_CFG},
4382 	{REG_RX_CFG},
4383 	{REG_HP_CFG},
4384 	{REG_MAC_TX_CFG},
4385 	{REG_MAC_RX_CFG},
4386 	{REG_MAC_CTRL_CFG},
4387 	{REG_MAC_XIF_CFG},
4388 	{REG_MIF_CFG},
4389 	{REG_PCS_CFG},
4390 	{REG_SATURN_PCFG},
4391 	{REG_PCS_MII_STATUS},
4392 	{REG_PCS_STATE_MACHINE},
4393 	{REG_MAC_COLL_EXCESS},
4394 	{REG_MAC_COLL_LATE}
4395 };
4396 #define CAS_REG_LEN 	ARRAY_SIZE(ethtool_register_table)
4397 #define CAS_MAX_REGS 	(sizeof (u32)*CAS_REG_LEN)
4398 
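/* negative offsets in the table above denote MII PHY registers:
 * cas_read_regs() negates them and reads via cas_phy_read() instead of
 * a memory-mapped readl().
 */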
4399 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4400 {
4401 	u8 *p;
4402 	int i;
4403 	unsigned long flags;
4404 
4405 	spin_lock_irqsave(&cp->lock, flags);
4406 	for (i = 0, p = ptr; i < len; i++, p += sizeof(u32)) {
4407 		u16 hval;
4408 		u32 val;
4409 		if (ethtool_register_table[i].offsets < 0) {
4410 			hval = cas_phy_read(cp,
4411 				    -ethtool_register_table[i].offsets);
4412 			val = hval;
4413 		} else {
4414 			val = readl(cp->regs + ethtool_register_table[i].offsets);
4415 		}
4416 		memcpy(p, (u8 *)&val, sizeof(u32));
4417 	}
4418 	spin_unlock_irqrestore(&cp->lock, flags);
4419 }
4420 
4421 static struct net_device_stats *cas_get_stats(struct net_device *dev)
4422 {
4423 	struct cas *cp = netdev_priv(dev);
4424 	struct net_device_stats *stats = cp->net_stats;
4425 	unsigned long flags;
4426 	int i;
4427 	unsigned long tmp;
4428 
4429 	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4430 	if (!cp->hw_running)
4431 		return stats + N_TX_RINGS;
4432 
4433 	/* collect outstanding stats */
4434 	/* WTZ: the Cassini spec gives these as 16-bit counters but
4435 	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
4436 	 * in case the chip somehow puts any garbage in the other bits.
4437 	 * Also, counter usage didn't seem to match what Adrian did
4438 	 * in the parts of the code that set these quantities. Made
4439 	 * that consistent.
4440 	 */
4441 	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4442 	stats[N_TX_RINGS].rx_crc_errors +=
4443 		readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4444 	stats[N_TX_RINGS].rx_frame_errors +=
4445 		readl(cp->regs + REG_MAC_ALIGN_ERR) & 0xffff;
4446 	stats[N_TX_RINGS].rx_length_errors +=
4447 		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4448 #if 1
4449 	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4450 		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4451 	stats[N_TX_RINGS].tx_aborted_errors += tmp;
4452 	stats[N_TX_RINGS].collisions +=
4453 		tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4454 #else
4455 	stats[N_TX_RINGS].tx_aborted_errors +=
4456 		readl(cp->regs + REG_MAC_COLL_EXCESS);
4457 	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4458 		readl(cp->regs + REG_MAC_COLL_LATE);
4459 #endif
4460 	cas_clear_mac_err(cp);
4461 
4462 	/* saved bits that are unique to ring 0 */
4463 	spin_lock(&cp->stat_lock[0]);
4464 	stats[N_TX_RINGS].collisions        += stats[0].collisions;
4465 	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
4466 	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
4467 	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
4468 	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4469 	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
4470 	spin_unlock(&cp->stat_lock[0]);
4471 
4472 	for (i = 0; i < N_TX_RINGS; i++) {
4473 		spin_lock(&cp->stat_lock[i]);
4474 		stats[N_TX_RINGS].rx_length_errors +=
4475 			stats[i].rx_length_errors;
4476 		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4477 		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
4478 		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
4479 		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
4480 		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
4481 		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
4482 		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
4483 		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
4484 		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
4485 		memset(stats + i, 0, sizeof(struct net_device_stats));
4486 		spin_unlock(&cp->stat_lock[i]);
4487 	}
4488 	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4489 	return stats + N_TX_RINGS;
4490 }
4491 
4492 
4493 static void cas_set_multicast(struct net_device *dev)
4494 {
4495 	struct cas *cp = netdev_priv(dev);
4496 	u32 rxcfg, rxcfg_new;
4497 	unsigned long flags;
4498 	int limit = STOP_TRIES;
4499 
4500 	if (!cp->hw_running)
4501 		return;
4502 
4503 	spin_lock_irqsave(&cp->lock, flags);
4504 	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4505 
4506 	/* disable RX MAC and wait for completion */
4507 	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4508 	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4509 		if (!limit--)
4510 			break;
4511 		udelay(10);
4512 	}
4513 
4514 	/* disable hash filter and wait for completion */
4515 	limit = STOP_TRIES;
4516 	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4517 	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4518 	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4519 		if (!limit--)
4520 			break;
4521 		udelay(10);
4522 	}
4523 
4524 	/* program hash filters */
4525 	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4526 	rxcfg |= rxcfg_new;
4527 	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4528 	spin_unlock_irqrestore(&cp->lock, flags);
4529 }
4530 
4531 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4532 {
4533 	struct cas *cp = netdev_priv(dev);
4534 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4535 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4536 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4537 	info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4538 		cp->casreg_len : CAS_MAX_REGS;
4539 	info->n_stats = CAS_NUM_STAT_KEYS;
4540 }
4541 
4542 static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4543 {
4544 	struct cas *cp = netdev_priv(dev);
4545 	u16 bmcr;
4546 	int full_duplex, speed, pause;
4547 	unsigned long flags;
4548 	enum link_state linkstate = link_up;
4549 
4550 	cmd->advertising = 0;
4551 	cmd->supported = SUPPORTED_Autoneg;
4552 	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4553 		cmd->supported |= SUPPORTED_1000baseT_Full;
4554 		cmd->advertising |= ADVERTISED_1000baseT_Full;
4555 	}
4556 
4557 	/* Record PHY settings if HW is on. */
4558 	spin_lock_irqsave(&cp->lock, flags);
4559 	bmcr = 0;
4560 	linkstate = cp->lstate;
4561 	if (CAS_PHY_MII(cp->phy_type)) {
4562 		cmd->port = PORT_MII;
4563 		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4564 			XCVR_INTERNAL : XCVR_EXTERNAL;
4565 		cmd->phy_address = cp->phy_addr;
4566 		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4567 			ADVERTISED_10baseT_Half |
4568 			ADVERTISED_10baseT_Full |
4569 			ADVERTISED_100baseT_Half |
4570 			ADVERTISED_100baseT_Full;
4571 
4572 		cmd->supported |=
4573 			(SUPPORTED_10baseT_Half |
4574 			 SUPPORTED_10baseT_Full |
4575 			 SUPPORTED_100baseT_Half |
4576 			 SUPPORTED_100baseT_Full |
4577 			 SUPPORTED_TP | SUPPORTED_MII);
4578 
4579 		if (cp->hw_running) {
4580 			cas_mif_poll(cp, 0);
4581 			bmcr = cas_phy_read(cp, MII_BMCR);
4582 			cas_read_mii_link_mode(cp, &full_duplex,
4583 					       &speed, &pause);
4584 			cas_mif_poll(cp, 1);
4585 		}
4586 
4587 	} else {
4588 		cmd->port = PORT_FIBRE;
4589 		cmd->transceiver = XCVR_INTERNAL;
4590 		cmd->phy_address = 0;
4591 		cmd->supported   |= SUPPORTED_FIBRE;
4592 		cmd->advertising |= ADVERTISED_FIBRE;
4593 
4594 		if (cp->hw_running) {
4595 			/* pcs uses the same bits as mii */
4596 			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4597 			cas_read_pcs_link_mode(cp, &full_duplex,
4598 					       &speed, &pause);
4599 		}
4600 	}
4601 	spin_unlock_irqrestore(&cp->lock, flags);
4602 
4603 	if (bmcr & BMCR_ANENABLE) {
4604 		cmd->advertising |= ADVERTISED_Autoneg;
4605 		cmd->autoneg = AUTONEG_ENABLE;
4606 		ethtool_cmd_speed_set(cmd, ((speed == 10) ?
4607 					    SPEED_10 :
4608 					    ((speed == 1000) ?
4609 					     SPEED_1000 : SPEED_100)));
4610 		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4611 	} else {
4612 		cmd->autoneg = AUTONEG_DISABLE;
4613 		ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
4614 					    SPEED_1000 :
4615 					    ((bmcr & BMCR_SPEED100) ?
4616 					     SPEED_100 : SPEED_10)));
4617 		cmd->duplex =
4618 			(bmcr & BMCR_FULLDPLX) ?
4619 			DUPLEX_FULL : DUPLEX_HALF;
4620 	}
4621 	if (linkstate != link_up) {
4622 		/* Force these to "unknown" if the link is not up and
4623 		 * autonegotiation is enabled. We can set the link
4624 		 * speed to 0, but not cmd->duplex,
4625 		 * because its legal values are 0 and 1.  Ethtool will
4626 		 * print the value reported in parentheses after the
4627 		 * word "Unknown" for unrecognized values.
4628 		 *
4629 		 * If in forced mode, we report the speed and duplex
4630 		 * settings that we configured.
4631 		 */
4632 		if (cp->link_cntl & BMCR_ANENABLE) {
4633 			ethtool_cmd_speed_set(cmd, 0);
4634 			cmd->duplex = 0xff;
4635 		} else {
4636 			ethtool_cmd_speed_set(cmd, SPEED_10);
4637 			if (cp->link_cntl & BMCR_SPEED100) {
4638 				ethtool_cmd_speed_set(cmd, SPEED_100);
4639 			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4640 				ethtool_cmd_speed_set(cmd, SPEED_1000);
4641 			}
4642 			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4643 				DUPLEX_FULL : DUPLEX_HALF;
4644 		}
4645 	}
4646 	return 0;
4647 }
4648 
4649 static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4650 {
4651 	struct cas *cp = netdev_priv(dev);
4652 	unsigned long flags;
4653 	u32 speed = ethtool_cmd_speed(cmd);
4654 
4655 	/* Verify the settings we care about. */
4656 	if (cmd->autoneg != AUTONEG_ENABLE &&
4657 	    cmd->autoneg != AUTONEG_DISABLE)
4658 		return -EINVAL;
4659 
4660 	if (cmd->autoneg == AUTONEG_DISABLE &&
4661 	    ((speed != SPEED_1000 &&
4662 	      speed != SPEED_100 &&
4663 	      speed != SPEED_10) ||
4664 	     (cmd->duplex != DUPLEX_HALF &&
4665 	      cmd->duplex != DUPLEX_FULL)))
4666 		return -EINVAL;
4667 
4668 	/* Apply settings and restart link process. */
4669 	spin_lock_irqsave(&cp->lock, flags);
4670 	cas_begin_auto_negotiation(cp, cmd);
4671 	spin_unlock_irqrestore(&cp->lock, flags);
4672 	return 0;
4673 }
4674 
4675 static int cas_nway_reset(struct net_device *dev)
4676 {
4677 	struct cas *cp = netdev_priv(dev);
4678 	unsigned long flags;
4679 
4680 	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4681 		return -EINVAL;
4682 
4683 	/* Restart link process. */
4684 	spin_lock_irqsave(&cp->lock, flags);
4685 	cas_begin_auto_negotiation(cp, NULL);
4686 	spin_unlock_irqrestore(&cp->lock, flags);
4687 
4688 	return 0;
4689 }
4690 
4691 static u32 cas_get_link(struct net_device *dev)
4692 {
4693 	struct cas *cp = netdev_priv(dev);
4694 	return cp->lstate == link_up;
4695 }
4696 
4697 static u32 cas_get_msglevel(struct net_device *dev)
4698 {
4699 	struct cas *cp = netdev_priv(dev);
4700 	return cp->msg_enable;
4701 }
4702 
4703 static void cas_set_msglevel(struct net_device *dev, u32 value)
4704 {
4705 	struct cas *cp = netdev_priv(dev);
4706 	cp->msg_enable = value;
4707 }
4708 
4709 static int cas_get_regs_len(struct net_device *dev)
4710 {
4711 	struct cas *cp = netdev_priv(dev);
4712 	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
4713 }
4714 
4715 static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4716 			     void *p)
4717 {
4718 	struct cas *cp = netdev_priv(dev);
4719 	regs->version = 0;
4720 	/* cas_read_regs handles locks (cp->lock).  */
4721 	cas_read_regs(cp, p, regs->len / sizeof(u32));
4722 }
4723 
4724 static int cas_get_sset_count(struct net_device *dev, int sset)
4725 {
4726 	switch (sset) {
4727 	case ETH_SS_STATS:
4728 		return CAS_NUM_STAT_KEYS;
4729 	default:
4730 		return -EOPNOTSUPP;
4731 	}
4732 }
4733 
4734 static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4735 {
4736 	memcpy(data, &ethtool_cassini_statnames,
4737 	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4738 }
4739 
4740 static void cas_get_ethtool_stats(struct net_device *dev,
4741 				      struct ethtool_stats *estats, u64 *data)
4742 {
4743 	struct cas *cp = netdev_priv(dev);
4744 	struct net_device_stats *stats = cas_get_stats(cp->dev);
4745 	int i = 0;
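	/* ordering here must match ethtool_cassini_statnames[] above;
	 * the BUG_ON at the end only catches a count mismatch.
	 */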
4746 	data[i++] = stats->collisions;
4747 	data[i++] = stats->rx_bytes;
4748 	data[i++] = stats->rx_crc_errors;
4749 	data[i++] = stats->rx_dropped;
4750 	data[i++] = stats->rx_errors;
4751 	data[i++] = stats->rx_fifo_errors;
4752 	data[i++] = stats->rx_frame_errors;
4753 	data[i++] = stats->rx_length_errors;
4754 	data[i++] = stats->rx_over_errors;
4755 	data[i++] = stats->rx_packets;
4756 	data[i++] = stats->tx_aborted_errors;
4757 	data[i++] = stats->tx_bytes;
4758 	data[i++] = stats->tx_dropped;
4759 	data[i++] = stats->tx_errors;
4760 	data[i++] = stats->tx_fifo_errors;
4761 	data[i++] = stats->tx_packets;
4762 	BUG_ON(i != CAS_NUM_STAT_KEYS);
4763 }
4764 
4765 static const struct ethtool_ops cas_ethtool_ops = {
4766 	.get_drvinfo		= cas_get_drvinfo,
4767 	.get_settings		= cas_get_settings,
4768 	.set_settings		= cas_set_settings,
4769 	.nway_reset		= cas_nway_reset,
4770 	.get_link		= cas_get_link,
4771 	.get_msglevel		= cas_get_msglevel,
4772 	.set_msglevel		= cas_set_msglevel,
4773 	.get_regs_len		= cas_get_regs_len,
4774 	.get_regs		= cas_get_regs,
4775 	.get_sset_count		= cas_get_sset_count,
4776 	.get_strings		= cas_get_strings,
4777 	.get_ethtool_stats	= cas_get_ethtool_stats,
4778 };
4779 
4780 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4781 {
4782 	struct cas *cp = netdev_priv(dev);
4783 	struct mii_ioctl_data *data = if_mii(ifr);
4784 	unsigned long flags;
4785 	int rc = -EOPNOTSUPP;
4786 
4787 	/* Hold the PM mutex while doing ioctl's or we may collide
4788 	 * with open/close and power management and oops.
4789 	 */
4790 	mutex_lock(&cp->pm_mutex);
4791 	switch (cmd) {
4792 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
4793 		data->phy_id = cp->phy_addr;
4794 		/* Fallthrough... */
4795 
4796 	case SIOCGMIIREG:		/* Read MII PHY register. */
4797 		spin_lock_irqsave(&cp->lock, flags);
4798 		cas_mif_poll(cp, 0);
4799 		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4800 		cas_mif_poll(cp, 1);
4801 		spin_unlock_irqrestore(&cp->lock, flags);
4802 		rc = 0;
4803 		break;
4804 
4805 	case SIOCSMIIREG:		/* Write MII PHY register. */
4806 		spin_lock_irqsave(&cp->lock, flags);
4807 		cas_mif_poll(cp, 0);
4808 		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4809 		cas_mif_poll(cp, 1);
4810 		spin_unlock_irqrestore(&cp->lock, flags);
4811 		break;
4812 	default:
4813 		break;
4814 	}
4815 
4816 	mutex_unlock(&cp->pm_mutex);
4817 	return rc;
4818 }
4819 
4820 /* When this chip sits underneath an Intel 31154 bridge, it is the
4821  * only subordinate device and we can tweak the bridge settings to
4822  * reflect that fact.
4823  */
4824 static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
4825 {
4826 	struct pci_dev *pdev = cas_pdev->bus->self;
4827 	u32 val;
4828 
4829 	if (!pdev)
4830 		return;
4831 
4832 	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4833 		return;
4834 
4835 	/* Clear bit 10 (Bus Parking Control) in the Secondary
4836 	 * Arbiter Control/Status Register which lives at offset
4837 	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
4838 	 * is much simpler so that's how we do this.
4839 	 */
4840 	pci_read_config_dword(pdev, 0x40, &val);
4841 	val &= ~0x00040000;
4842 	pci_write_config_dword(pdev, 0x40, val);
4843 
4844 	/* Max out the Multi-Transaction Timer settings since
4845 	 * Cassini is the only device present.
4846 	 *
4847 	 * The register is 16-bit and lives at 0x50.  When the
4848 	 * settings are enabled, it extends the GRANT# signal
4849 	 * for a requestor after a transaction is complete.  This
4850 	 * allows the next request to run without first needing
4851 	 * to negotiate the GRANT# signal back.
4852 	 *
4853 	 * Bits 12:10 define the grant duration:
4854 	 *
4855 	 *	1	--	16 clocks
4856 	 *	2	--	32 clocks
4857 	 *	3	--	64 clocks
4858 	 *	4	--	128 clocks
4859 	 *	5	--	256 clocks
4860 	 *
4861 	 * All other values are illegal.
4862 	 *
4863 	 * Bits 09:00 define which REQ/GNT signal pairs get the
4864 	 * GRANT# signal treatment.  We set them all.
4865 	 */
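	/* i.e. 0x17ff: 256-clock grant extension on all ten REQ/GNT
	 * pairs.
	 */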
4866 	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4867 
4868 	/* The Read Prefetch Policy register is 16-bit and sits at
4869 	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
4870 	 * enable it and max out all of the settings since only one
4871 	 * device is sitting underneath and thus bandwidth sharing is
4872 	 * not an issue.
4873 	 *
4874 	 * The register has several 3-bit fields, each indicating a
4875 	 * multiplier applied to the base amount of prefetching the
4876 	 * chip would do.  These fields are at:
4877 	 *
4878 	 *	15:13	---	ReRead Primary Bus
4879 	 *	12:10	---	FirstRead Primary Bus
4880 	 *	09:07	---	ReRead Secondary Bus
4881 	 *	06:04	---	FirstRead Secondary Bus
4882 	 *
4883 	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
4884 	 * get enabled on.  Bit 3 is a grouped enabler which controls
4885 	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
4886 	 * the individual REQ/GNT pairs [2:0].
4887 	 */
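	/* i.e. 0xffff: maximum prefetch multiplier in every field with
	 * all REQ/GNT pairs enabled.
	 */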
4888 	pci_write_config_word(pdev, 0x52,
4889 			      (0x7 << 13) |
4890 			      (0x7 << 10) |
4891 			      (0x7 <<  7) |
4892 			      (0x7 <<  4) |
4893 			      (0xf <<  0));
4894 
4895 	/* Force cacheline size to 0x8 */
4896 	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4897 
4898 	/* Force latency timer to maximum setting so Cassini can
4899 	 * sit on the bus as long as it likes.
4900 	 */
4901 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4902 }
4903 
4904 static const struct net_device_ops cas_netdev_ops = {
4905 	.ndo_open		= cas_open,
4906 	.ndo_stop		= cas_close,
4907 	.ndo_start_xmit		= cas_start_xmit,
4908 	.ndo_get_stats 		= cas_get_stats,
4909 	.ndo_set_rx_mode	= cas_set_multicast,
4910 	.ndo_do_ioctl		= cas_ioctl,
4911 	.ndo_tx_timeout		= cas_tx_timeout,
4912 	.ndo_change_mtu		= cas_change_mtu,
4913 	.ndo_set_mac_address	= eth_mac_addr,
4914 	.ndo_validate_addr	= eth_validate_addr,
4915 #ifdef CONFIG_NET_POLL_CONTROLLER
4916 	.ndo_poll_controller	= cas_netpoll,
4917 #endif
4918 };
4919 
4920 static int __devinit cas_init_one(struct pci_dev *pdev,
4921 				  const struct pci_device_id *ent)
4922 	static int cas_version_printed;
4923 	static int cas_version_printed = 0;
4924 	unsigned long casreg_len;
4925 	struct net_device *dev;
4926 	struct cas *cp;
4927 	int i, err, pci_using_dac;
4928 	u16 pci_cmd;
4929 	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4930 
4931 	if (cas_version_printed++ == 0)
4932 		pr_info("%s", version);
4933 
4934 	err = pci_enable_device(pdev);
4935 	if (err) {
4936 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4937 		return err;
4938 	}
4939 
4940 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4941 		dev_err(&pdev->dev, "Cannot find proper PCI device "
4942 		       "base address, aborting\n");
4943 		err = -ENODEV;
4944 		goto err_out_disable_pdev;
4945 	}
4946 
4947 	dev = alloc_etherdev(sizeof(*cp));
4948 	if (!dev) {
4949 		err = -ENOMEM;
4950 		goto err_out_disable_pdev;
4951 	}
4952 	SET_NETDEV_DEV(dev, &pdev->dev);
4953 
4954 	err = pci_request_regions(pdev, dev->name);
4955 	if (err) {
4956 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4957 		goto err_out_free_netdev;
4958 	}
4959 	pci_set_master(pdev);
4960 
4961 	/* we must always turn on parity response or else parity
4962 	 * doesn't get generated properly. disable SERR/PERR as well.
4963 	 * in addition, we want to turn MWI on.
4964 	 */
4965 	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4966 	pci_cmd &= ~PCI_COMMAND_SERR;
4967 	pci_cmd |= PCI_COMMAND_PARITY;
4968 	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4969 	if (pci_try_set_mwi(pdev))
4970 		pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
4971 
4972 	cas_program_bridge(pdev);
4973 
4974 	/*
4975 	 * On some architectures, the default cache line size set
4976 	 * by pci_try_set_mwi reduces performance.  We have to increase
4977 	 * it for this case, raising it toward CAS_PREF_CACHELINE_SIZE
4978 	 * (capped at SMP_CACHE_BYTES) when the original is smaller.
4979 	 */
4980 #if 1
4981 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4982 			     &orig_cacheline_size);
4983 	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4984 		cas_cacheline_size =
4985 			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4986 			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4987 		if (pci_write_config_byte(pdev,
4988 					  PCI_CACHE_LINE_SIZE,
4989 					  cas_cacheline_size)) {
4990 			dev_err(&pdev->dev, "Could not set PCI cache "
4991 			       "line size\n");
4992 			goto err_write_cacheline;
4993 		}
4994 	}
4995 #endif
4996 
4997 
4998 	/* Configure DMA attributes. */
4999 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5000 		pci_using_dac = 1;
5001 		err = pci_set_consistent_dma_mask(pdev,
5002 						  DMA_BIT_MASK(64));
5003 		if (err < 0) {
5004 			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
5005 			       "for consistent allocations\n");
5006 			goto err_out_free_res;
5007 		}
5008 
5009 	} else {
5010 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5011 		if (err) {
5012 			dev_err(&pdev->dev, "No usable DMA configuration, "
5013 			       "aborting\n");
5014 			goto err_out_free_res;
5015 		}
5016 		pci_using_dac = 0;
5017 	}
5018 
5019 	casreg_len = pci_resource_len(pdev, 0);
5020 
5021 	cp = netdev_priv(dev);
5022 	cp->pdev = pdev;
5023 #if 1
5024 	/* A value of 0 indicates we never explicitly set it */
5025 	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
5026 #endif
5027 	cp->dev = dev;
5028 	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5029 	  cassini_debug;
5030 
5031 #if defined(CONFIG_SPARC)
5032 	cp->of_node = pci_device_to_OF_node(pdev);
5033 #endif
5034 
5035 	cp->link_transition = LINK_TRANSITION_UNKNOWN;
5036 	cp->link_transition_jiffies_valid = 0;
5037 
5038 	spin_lock_init(&cp->lock);
5039 	spin_lock_init(&cp->rx_inuse_lock);
5040 	spin_lock_init(&cp->rx_spare_lock);
5041 	for (i = 0; i < N_TX_RINGS; i++) {
5042 		spin_lock_init(&cp->stat_lock[i]);
5043 		spin_lock_init(&cp->tx_lock[i]);
5044 	}
5045 	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
5046 	mutex_init(&cp->pm_mutex);
5047 
5048 	init_timer(&cp->link_timer);
5049 	cp->link_timer.function = cas_link_timer;
5050 	cp->link_timer.data = (unsigned long) cp;
5051 
5052 #if 1
5053 	/* Just in case the implementation of atomic operations
5054 	 * changes so that an explicit initialization is necessary.
5055 	 */
5056 	atomic_set(&cp->reset_task_pending, 0);
5057 	atomic_set(&cp->reset_task_pending_all, 0);
5058 	atomic_set(&cp->reset_task_pending_spare, 0);
5059 	atomic_set(&cp->reset_task_pending_mtu, 0);
5060 #endif
5061 	INIT_WORK(&cp->reset_task, cas_reset_task);
5062 
5063 	/* Default link parameters */
5064 	if (link_mode >= 0 && link_mode < 6)
5065 		cp->link_cntl = link_modes[link_mode];
5066 	else
5067 		cp->link_cntl = BMCR_ANENABLE;
5068 	cp->lstate = link_down;
5069 	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5070 	netif_carrier_off(cp->dev);
5071 	cp->timer_ticks = 0;
5072 
5073 	/* give us access to cassini registers */
5074 	cp->regs = pci_iomap(pdev, 0, casreg_len);
5075 	if (!cp->regs) {
5076 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5077 		goto err_out_free_res;
5078 	}
5079 	cp->casreg_len = casreg_len;
5080 
5081 	pci_save_state(pdev);
5082 	cas_check_pci_invariants(cp);
5083 	cas_hard_reset(cp);
5084 	cas_reset(cp, 0);
5085 	if (cas_check_invariants(cp))
5086 		goto err_out_iounmap;
5087 	if (cp->cas_flags & CAS_FLAG_SATURN)
5088 		if (cas_saturn_firmware_init(cp))
5089 			goto err_out_iounmap;
5090 
5091 	cp->init_block = (struct cas_init_block *)
5092 		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5093 				     &cp->block_dvma);
5094 	if (!cp->init_block) {
5095 		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5096 		goto err_out_iounmap;
5097 	}
5098 
5099 	for (i = 0; i < N_TX_RINGS; i++)
5100 		cp->init_txds[i] = cp->init_block->txds[i];
5101 
5102 	for (i = 0; i < N_RX_DESC_RINGS; i++)
5103 		cp->init_rxds[i] = cp->init_block->rxds[i];
5104 
5105 	for (i = 0; i < N_RX_COMP_RINGS; i++)
5106 		cp->init_rxcs[i] = cp->init_block->rxcs[i];
5107 
5108 	for (i = 0; i < N_RX_FLOWS; i++)
5109 		skb_queue_head_init(&cp->rx_flows[i]);
5110 
5111 	dev->netdev_ops = &cas_netdev_ops;
5112 	dev->ethtool_ops = &cas_ethtool_ops;
5113 	dev->watchdog_timeo = CAS_TX_TIMEOUT;
5114 
5115 #ifdef USE_NAPI
5116 	netif_napi_add(dev, &cp->napi, cas_poll, 64);
5117 #endif
5118 	dev->irq = pdev->irq;
5119 	dev->dma = 0;
5120 
5121 	/* Cassini features. */
5122 	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5123 		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5124 
5125 	if (pci_using_dac)
5126 		dev->features |= NETIF_F_HIGHDMA;
5127 
5128 	if (register_netdev(dev)) {
5129 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5130 		goto err_out_free_consistent;
5131 	}
5132 
5133 	i = readl(cp->regs + REG_BIM_CFG);
5134 	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5135 		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5136 		    (i & BIM_CFG_32BIT) ? "32" : "64",
5137 		    (i & BIM_CFG_66MHZ) ? "66" : "33",
5138 		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5139 		    dev->dev_addr);
5140 
5141 	pci_set_drvdata(pdev, dev);
5142 	cp->hw_running = 1;
5143 	cas_entropy_reset(cp);
5144 	cas_phy_init(cp);
5145 	cas_begin_auto_negotiation(cp, NULL);
5146 	return 0;
5147 
5148 err_out_free_consistent:
5149 	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5150 			    cp->init_block, cp->block_dvma);
5151 
5152 err_out_iounmap:
5153 	mutex_lock(&cp->pm_mutex);
5154 	if (cp->hw_running)
5155 		cas_shutdown(cp);
5156 	mutex_unlock(&cp->pm_mutex);
5157 
5158 	pci_iounmap(pdev, cp->regs);
5159 
5160 
5161 err_out_free_res:
5162 	pci_release_regions(pdev);
5163 
5164 err_write_cacheline:
5165 	/* Try to restore it in case the error occurred after we
5166 	 * set it.
5167 	 */
5168 	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5169 
5170 err_out_free_netdev:
5171 	free_netdev(dev);
5172 
5173 err_out_disable_pdev:
5174 	pci_disable_device(pdev);
5175 	pci_set_drvdata(pdev, NULL);
5176 	return -ENODEV;
5177 }
5178 
5179 static void __devexit cas_remove_one(struct pci_dev *pdev)
5180 {
5181 	struct net_device *dev = pci_get_drvdata(pdev);
5182 	struct cas *cp;
5183 	if (!dev)
5184 		return;
5185 
5186 	cp = netdev_priv(dev);
5187 	unregister_netdev(dev);
5188 
5189 	if (cp->fw_data)
5190 		vfree(cp->fw_data);
5191 
5192 	mutex_lock(&cp->pm_mutex);
5193 	cancel_work_sync(&cp->reset_task);
5194 	if (cp->hw_running)
5195 		cas_shutdown(cp);
5196 	mutex_unlock(&cp->pm_mutex);
5197 
5198 #if 1
5199 	if (cp->orig_cacheline_size) {
5200 		/* Restore the cache line size if we had modified
5201 		 * it.
5202 		 */
5203 		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5204 				      cp->orig_cacheline_size);
5205 	}
5206 #endif
5207 	pci_free_consistent(pdev, sizeof(struct cas_init_block),
5208 			    cp->init_block, cp->block_dvma);
5209 	pci_iounmap(pdev, cp->regs);
5210 	free_netdev(dev);
5211 	pci_release_regions(pdev);
5212 	pci_disable_device(pdev);
5213 	pci_set_drvdata(pdev, NULL);
5214 }
5215 
5216 #ifdef CONFIG_PM
5217 static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5218 {
5219 	struct net_device *dev = pci_get_drvdata(pdev);
5220 	struct cas *cp = netdev_priv(dev);
5221 	unsigned long flags;
5222 
5223 	mutex_lock(&cp->pm_mutex);
5224 
5225 	/* If the driver is opened, we stop the DMA */
5226 	if (cp->opened) {
5227 		netif_device_detach(dev);
5228 
5229 		cas_lock_all_save(cp, flags);
5230 
5231 		/* We can set the second arg of cas_reset to 0
5232 		 * because on resume, we'll call cas_init_hw with
5233 		 * its second arg set so that autonegotiation is
5234 		 * restarted.
5235 		 */
5236 		cas_reset(cp, 0);
5237 		cas_clean_rings(cp);
5238 		cas_unlock_all_restore(cp, flags);
5239 	}
5240 
5241 	if (cp->hw_running)
5242 		cas_shutdown(cp);
5243 	mutex_unlock(&cp->pm_mutex);
5244 
5245 	return 0;
5246 }
5247 
5248 static int cas_resume(struct pci_dev *pdev)
5249 {
5250 	struct net_device *dev = pci_get_drvdata(pdev);
5251 	struct cas *cp = netdev_priv(dev);
5252 
5253 	netdev_info(dev, "resuming\n");
5254 
5255 	mutex_lock(&cp->pm_mutex);
5256 	cas_hard_reset(cp);
5257 	if (cp->opened) {
5258 		unsigned long flags;
5259 		cas_lock_all_save(cp, flags);
5260 		cas_reset(cp, 0);
5261 		cp->hw_running = 1;
5262 		cas_clean_rings(cp);
5263 		cas_init_hw(cp, 1);
5264 		cas_unlock_all_restore(cp, flags);
5265 
5266 		netif_device_attach(dev);
5267 	}
5268 	mutex_unlock(&cp->pm_mutex);
5269 	return 0;
5270 }
5271 #endif /* CONFIG_PM */
5272 
5273 static struct pci_driver cas_driver = {
5274 	.name		= DRV_MODULE_NAME,
5275 	.id_table	= cas_pci_tbl,
5276 	.probe		= cas_init_one,
5277 	.remove		= __devexit_p(cas_remove_one),
5278 #ifdef CONFIG_PM
5279 	.suspend	= cas_suspend,
5280 	.resume		= cas_resume
5281 #endif
5282 };
5283 
5284 static int __init cas_init(void)
5285 {
5286 	if (linkdown_timeout > 0)
5287 		link_transition_timeout = linkdown_timeout * HZ;
5288 	else
5289 		link_transition_timeout = 0;
5290 
5291 	return pci_register_driver(&cas_driver);
5292 }
5293 
5294 static void __exit cas_cleanup(void)
5295 {
5296 	pci_unregister_driver(&cas_driver);
5297 }
5298 
5299 module_init(cas_init);
5300 module_exit(cas_cleanup);
5301