1 // SPDX-License-Identifier: GPL-2.0
2 /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
3  *
4  * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
5  */
6 
7 #include <linux/module.h>
8 
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/fcntl.h>
12 #include <linux/interrupt.h>
13 #include <linux/ioport.h>
14 #include <linux/in.h>
15 #include <linux/string.h>
16 #include <linux/delay.h>
17 #include <linux/crc32.h>
18 #include <linux/errno.h>
19 #include <linux/ethtool.h>
20 #include <linux/mii.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/bitops.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/of.h>
27 #include <linux/of_device.h>
28 #include <linux/gfp.h>
29 
30 #include <asm/auxio.h>
31 #include <asm/byteorder.h>
32 #include <asm/dma.h>
33 #include <asm/idprom.h>
34 #include <asm/io.h>
35 #include <asm/openprom.h>
36 #include <asm/oplib.h>
37 #include <asm/pgtable.h>
38 
39 #include "sunbmac.h"
40 
41 #define DRV_NAME	"sunbmac"
42 #define DRV_VERSION	"2.1"
43 #define DRV_RELDATE	"August 26, 2008"
44 #define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"
45 
46 static char version[] =
47 	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
48 
49 MODULE_VERSION(DRV_VERSION);
50 MODULE_AUTHOR(DRV_AUTHOR);
51 MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
52 MODULE_LICENSE("GPL");
53 
54 #undef DEBUG_PROBE
55 #undef DEBUG_TX
56 #undef DEBUG_IRQ
57 
58 #ifdef DEBUG_PROBE
59 #define DP(x)  printk x
60 #else
61 #define DP(x)
62 #endif
63 
64 #ifdef DEBUG_TX
65 #define DTX(x)  printk x
66 #else
67 #define DTX(x)
68 #endif
69 
70 #ifdef DEBUG_IRQ
71 #define DIRQ(x)  printk x
72 #else
73 #define DIRQ(x)
74 #endif
75 
76 #define DEFAULT_JAMSIZE    4 /* Toe jam */
77 
78 #define QEC_RESET_TRIES 200
79 
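/* A short note on the reset sequence below: the write sets the
 * self-clearing GLOB_CTRL_RESET bit, and the loop polls until it
 * drops; 200 tries of 20us each gives roughly a 4ms timeout.
 */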
80 static int qec_global_reset(void __iomem *gregs)
81 {
82 	int tries = QEC_RESET_TRIES;
83 
84 	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
85 	while (--tries) {
86 		if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
87 			udelay(20);
88 			continue;
89 		}
90 		break;
91 	}
92 	if (tries)
93 		return 0;
94 	printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
95 	return -1;
96 }
97 
98 static void qec_init(struct bigmac *bp)
99 {
100 	struct platform_device *qec_op = bp->qec_op;
101 	void __iomem *gregs = bp->gregs;
102 	u8 bsizes = bp->bigmac_bursts;
103 	u32 regval;
104 
105 	/* 64-byte bursts do not work at the moment; do
106 	 * not even try to enable them.  -DaveM
107 	 */
108 	if (bsizes & DMA_BURST32)
109 		regval = GLOB_CTRL_B32;
110 	else
111 		regval = GLOB_CTRL_B16;
112 	sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
113 	sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
114 
115 	/* All of memsize is given to bigmac. */
116 	sbus_writel(resource_size(&qec_op->resource[1]),
117 		    gregs + GLOB_MSIZE);
118 
119 	/* Half to the transmitter, half to the receiver. */
120 	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
121 		    gregs + GLOB_TSIZE);
122 	sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
123 		    gregs + GLOB_RSIZE);
124 }
125 
126 #define TX_RESET_TRIES     32
127 #define RX_RESET_TRIES     32
128 
129 static void bigmac_tx_reset(void __iomem *bregs)
130 {
131 	int tries = TX_RESET_TRIES;
132 
133 	sbus_writel(0, bregs + BMAC_TXCFG);
134 
135 	/* The fifo threshold bit is read-only and does
136 	 * not clear.  -DaveM
137 	 */
138 	while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
139 	       --tries != 0)
140 		udelay(20);
141 
142 	if (!tries) {
143 		printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
144 		printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
145 		       sbus_readl(bregs + BMAC_TXCFG));
146 	}
147 }
148 
149 static void bigmac_rx_reset(void __iomem *bregs)
150 {
151 	int tries = RX_RESET_TRIES;
152 
153 	sbus_writel(0, bregs + BMAC_RXCFG);
154 	while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
155 		udelay(20);
156 
157 	if (!tries) {
158 		printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
159 		printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
160 		       sbus_readl(bregs + BMAC_RXCFG));
161 	}
162 }
163 
164 /* Reset the transmitter and receiver. */
165 static void bigmac_stop(struct bigmac *bp)
166 {
167 	bigmac_tx_reset(bp->bregs);
168 	bigmac_rx_reset(bp->bregs);
169 }
170 
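/* Fold the chip's hardware error counters into the net_device
 * statistics and clear them, so each read starts a fresh window.
 */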
171 static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
172 {
173 	struct net_device_stats *stats = &bp->dev->stats;
174 
175 	stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
176 	sbus_writel(0, bregs + BMAC_RCRCECTR);
177 
178 	stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
179 	sbus_writel(0, bregs + BMAC_UNALECTR);
180 
181 	stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
182 	sbus_writel(0, bregs + BMAC_GLECTR);
183 
184 	stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
185 
186 	stats->collisions +=
187 		(sbus_readl(bregs + BMAC_EXCTR) +
188 		 sbus_readl(bregs + BMAC_LTCTR));
189 	sbus_writel(0, bregs + BMAC_EXCTR);
190 	sbus_writel(0, bregs + BMAC_LTCTR);
191 }
192 
193 static void bigmac_clean_rings(struct bigmac *bp)
194 {
195 	int i;
196 
197 	for (i = 0; i < RX_RING_SIZE; i++) {
198 		if (bp->rx_skbs[i] != NULL) {
199 			dev_kfree_skb_any(bp->rx_skbs[i]);
200 			bp->rx_skbs[i] = NULL;
201 		}
202 	}
203 
204 	for (i = 0; i < TX_RING_SIZE; i++) {
205 		if (bp->tx_skbs[i] != NULL) {
206 			dev_kfree_skb_any(bp->tx_skbs[i]);
207 			bp->tx_skbs[i] = NULL;
208 		}
209 	}
210 }
211 
212 static void bigmac_init_rings(struct bigmac *bp, int from_irq)
213 {
214 	struct bmac_init_block *bb = bp->bmac_block;
215 	int i;
216 	gfp_t gfp_flags = GFP_KERNEL;
217 
218 	if (from_irq || in_interrupt())
219 		gfp_flags = GFP_ATOMIC;
220 
221 	bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
222 
223 	/* Free any skippy bufs left around in the rings. */
224 	bigmac_clean_rings(bp);
225 
226 	/* Now get new skbufs for the receive ring. */
227 	for (i = 0; i < RX_RING_SIZE; i++) {
228 		struct sk_buff *skb;
229 
230 		skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
231 		if (!skb)
232 			continue;
233 
234 		bp->rx_skbs[i] = skb;
235 
236 		/* Because we reserve afterwards. */
237 		skb_put(skb, ETH_FRAME_LEN);
238 		skb_reserve(skb, 34);
239 
240 		bb->be_rxd[i].rx_addr =
241 			dma_map_single(&bp->bigmac_op->dev,
242 				       skb->data,
243 				       RX_BUF_ALLOC_SIZE - 34,
244 				       DMA_FROM_DEVICE);
245 		bb->be_rxd[i].rx_flags =
246 			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
247 	}
248 
249 	for (i = 0; i < TX_RING_SIZE; i++)
250 		bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
251 }
252 
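/* The PHY is reached by bit-banging MII management frames through the
 * TCVR_MPAL register: MGMT_PAL_DCLOCK is the management clock, while
 * the MGMT_PAL_INT_MDIO and MGMT_PAL_EXT_MDIO bits carry the data for
 * the internal and external transceivers respectively (see
 * write_tcvr_bit() and read_tcvr_bit() below).
 */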
253 #define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
254 #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
255 
256 static void idle_transceiver(void __iomem *tregs)
257 {
258 	int i = 20;
259 
260 	while (i--) {
261 		sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
262 		sbus_readl(tregs + TCVR_MPAL);
263 		sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
264 		sbus_readl(tregs + TCVR_MPAL);
265 	}
266 }
267 
268 static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
269 {
270 	if (bp->tcvr_type == internal) {
271 		bit = (bit & 1) << 3;
272 		sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
273 			    tregs + TCVR_MPAL);
274 		sbus_readl(tregs + TCVR_MPAL);
275 		sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
276 			    tregs + TCVR_MPAL);
277 		sbus_readl(tregs + TCVR_MPAL);
278 	} else if (bp->tcvr_type == external) {
279 		bit = (bit & 1) << 2;
280 		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
281 			    tregs + TCVR_MPAL);
282 		sbus_readl(tregs + TCVR_MPAL);
283 		sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
284 			    tregs + TCVR_MPAL);
285 		sbus_readl(tregs + TCVR_MPAL);
286 	} else {
287 		printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
288 	}
289 }
290 
291 static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
292 {
293 	int retval = 0;
294 
295 	if (bp->tcvr_type == internal) {
296 		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
297 		sbus_readl(tregs + TCVR_MPAL);
298 		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
299 			    tregs + TCVR_MPAL);
300 		sbus_readl(tregs + TCVR_MPAL);
301 		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
302 	} else if (bp->tcvr_type == external) {
303 		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
304 		sbus_readl(tregs + TCVR_MPAL);
305 		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
306 		sbus_readl(tregs + TCVR_MPAL);
307 		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
308 	} else {
309 		printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
310 	}
311 	return retval;
312 }
313 
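/* Like read_tcvr_bit(), but samples the data line before toggling the
 * clock rather than after; bigmac_tcvr_read() uses this variant for
 * the external transceiver.
 */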
314 static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
315 {
316 	int retval = 0;
317 
318 	if (bp->tcvr_type == internal) {
319 		sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
320 		sbus_readl(tregs + TCVR_MPAL);
321 		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
322 		sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
323 		sbus_readl(tregs + TCVR_MPAL);
324 	} else if (bp->tcvr_type == external) {
325 		sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
326 		sbus_readl(tregs + TCVR_MPAL);
327 		retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
328 		sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
329 		sbus_readl(tregs + TCVR_MPAL);
330 	} else {
331 		printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
332 	}
333 	return retval;
334 }
335 
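/* Despite the name, this clocks out only the low five bits (MSB
 * first), the width of the PHY and register address fields in an MII
 * management frame.
 */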
336 static void put_tcvr_byte(struct bigmac *bp,
337 			  void __iomem *tregs,
338 			  unsigned int byte)
339 {
340 	int shift = 4;
341 
342 	do {
343 		write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
344 		shift -= 1;
345 	} while (shift >= 0);
346 }
347 
348 static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
349 			      int reg, unsigned short val)
350 {
351 	int shift;
352 
353 	reg &= 0xff;
354 	val &= 0xffff;
355 	switch (bp->tcvr_type) {
356 	case internal:
357 	case external:
358 		break;
359 
360 	default:
361 		printk(KERN_ERR "bigmac_tcvr_write: Whoops, no known transceiver type.\n");
362 		return;
363 	}
364 
365 	idle_transceiver(tregs);
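	/* MII management frame: start delimiter (01), then the write opcode (01). */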
366 	write_tcvr_bit(bp, tregs, 0);
367 	write_tcvr_bit(bp, tregs, 1);
368 	write_tcvr_bit(bp, tregs, 0);
369 	write_tcvr_bit(bp, tregs, 1);
370 
371 	put_tcvr_byte(bp, tregs,
372 		      ((bp->tcvr_type == internal) ?
373 		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
374 
375 	put_tcvr_byte(bp, tregs, reg);
376 
377 	write_tcvr_bit(bp, tregs, 1);
378 	write_tcvr_bit(bp, tregs, 0);
379 
380 	shift = 15;
381 	do {
382 		write_tcvr_bit(bp, tregs, (val >> shift) & 1);
383 		shift -= 1;
384 	} while (shift >= 0);
385 }
386 
387 static unsigned short bigmac_tcvr_read(struct bigmac *bp,
388 				       void __iomem *tregs,
389 				       int reg)
390 {
391 	unsigned short retval = 0;
392 
393 	reg &= 0xff;
394 	switch (bp->tcvr_type) {
395 	case internal:
396 	case external:
397 		break;
398 
399 	default:
400 		printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
401 		return 0xffff;
402 	}
403 
404 	idle_transceiver(tregs);
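	/* MII management frame: start delimiter (01), then the read opcode (10). */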
405 	write_tcvr_bit(bp, tregs, 0);
406 	write_tcvr_bit(bp, tregs, 1);
407 	write_tcvr_bit(bp, tregs, 1);
408 	write_tcvr_bit(bp, tregs, 0);
409 
410 	put_tcvr_byte(bp, tregs,
411 		      ((bp->tcvr_type == internal) ?
412 		       BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
413 
414 	put_tcvr_byte(bp, tregs, reg);
415 
416 	if (bp->tcvr_type == external) {
417 		int shift = 15;
418 
419 		(void) read_tcvr_bit2(bp, tregs);
420 		(void) read_tcvr_bit2(bp, tregs);
421 
422 		do {
423 			int tmp;
424 
425 			tmp = read_tcvr_bit2(bp, tregs);
426 			retval |= ((tmp & 1) << shift);
427 			shift -= 1;
428 		} while (shift >= 0);
429 
430 		(void) read_tcvr_bit2(bp, tregs);
431 		(void) read_tcvr_bit2(bp, tregs);
432 		(void) read_tcvr_bit2(bp, tregs);
433 	} else {
434 		int shift = 15;
435 
436 		(void) read_tcvr_bit(bp, tregs);
437 		(void) read_tcvr_bit(bp, tregs);
438 
439 		do {
440 			int tmp;
441 
442 			tmp = read_tcvr_bit(bp, tregs);
443 			retval |= ((tmp & 1) << shift);
444 			shift -= 1;
445 		} while (shift >= 0);
446 
447 		(void) read_tcvr_bit(bp, tregs);
448 		(void) read_tcvr_bit(bp, tregs);
449 		(void) read_tcvr_bit(bp, tregs);
450 	}
451 	return retval;
452 }
453 
454 static void bigmac_tcvr_init(struct bigmac *bp)
455 {
456 	void __iomem *tregs = bp->tregs;
457 	u32 mpal;
458 
459 	idle_transceiver(tregs);
460 	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
461 		    tregs + TCVR_MPAL);
462 	sbus_readl(tregs + TCVR_MPAL);
463 
464 	/* Only the bit for the present transceiver (internal or
465 	 * external) will stick; set them both and see what stays.
466 	 */
467 	sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
468 	sbus_readl(tregs + TCVR_MPAL);
469 	udelay(20);
470 
471 	mpal = sbus_readl(tregs + TCVR_MPAL);
472 	if (mpal & MGMT_PAL_EXT_MDIO) {
473 		bp->tcvr_type = external;
474 		sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
475 			    tregs + TCVR_TPAL);
476 		sbus_readl(tregs + TCVR_TPAL);
477 	} else if (mpal & MGMT_PAL_INT_MDIO) {
478 		bp->tcvr_type = internal;
479 		sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
480 			      TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
481 			    tregs + TCVR_TPAL);
482 		sbus_readl(tregs + TCVR_TPAL);
483 	} else {
484 		printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
485 		       "external MDIO available!\n");
486 		printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
487 		       sbus_readl(tregs + TCVR_MPAL),
488 		       sbus_readl(tregs + TCVR_TPAL));
489 	}
490 }
491 
492 static int bigmac_init_hw(struct bigmac *, int);
493 
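/* The link timer calls this when the current speed never produced a
 * link: if we were trying 100baseT, reset the PHY and fall back to
 * 10baseT; return -1 once both speeds have been exhausted.
 */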
494 static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
495 {
496 	if (bp->sw_bmcr & BMCR_SPEED100) {
497 		int timeout;
498 
499 		/* Reset the PHY. */
500 		bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
501 		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
502 		bp->sw_bmcr	= (BMCR_RESET);
503 		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
504 
505 		timeout = 64;
506 		while (--timeout) {
507 			bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
508 			if ((bp->sw_bmcr & BMCR_RESET) == 0)
509 				break;
510 			udelay(20);
511 		}
512 		if (timeout == 0)
513 			printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
514 
515 		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
516 
517 		/* Now we try 10baseT. */
518 		bp->sw_bmcr &= ~(BMCR_SPEED100);
519 		bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
520 		return 0;
521 	}
522 
523 	/* We've tried them all. */
524 	return -1;
525 }
526 
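/* Link poll timer, re-armed every 1.2 seconds while in the ltrywait
 * state.  If no link has appeared after four ticks (about five
 * seconds), try_next_permutation() switches speeds; once both speeds
 * have failed, the whole chip is re-initialized.
 */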
527 static void bigmac_timer(struct timer_list *t)
528 {
529 	struct bigmac *bp = from_timer(bp, t, bigmac_timer);
530 	void __iomem *tregs = bp->tregs;
531 	int restart_timer = 0;
532 
533 	bp->timer_ticks++;
534 	if (bp->timer_state == ltrywait) {
535 		bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, MII_BMSR);
536 		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
537 		if (bp->sw_bmsr & BMSR_LSTATUS) {
538 			printk(KERN_INFO "%s: Link is now up at %s.\n",
539 			       bp->dev->name,
540 			       (bp->sw_bmcr & BMCR_SPEED100) ?
541 			       "100baseT" : "10baseT");
542 			bp->timer_state = asleep;
543 			restart_timer = 0;
544 		} else {
545 			if (bp->timer_ticks >= 4) {
546 				int ret;
547 
548 				ret = try_next_permutation(bp, tregs);
549 				if (ret == -1) {
550 					printk(KERN_ERR "%s: Link down, cable problem?\n",
551 					       bp->dev->name);
552 					ret = bigmac_init_hw(bp, 0);
553 					if (ret) {
554 						printk(KERN_ERR "%s: Error, cannot re-init the "
555 						       "BigMAC.\n", bp->dev->name);
556 					}
557 					return;
558 				}
559 				bp->timer_ticks = 0;
560 				restart_timer = 1;
561 			} else {
562 				restart_timer = 1;
563 			}
564 		}
565 	} else {
566 		/* Can't happen.... */
567 		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
568 		       bp->dev->name);
569 		restart_timer = 0;
570 		bp->timer_ticks = 0;
571 		bp->timer_state = asleep; /* foo on you */
572 	}
573 
574 	if (restart_timer != 0) {
575 		bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
576 		add_timer(&bp->bigmac_timer);
577 	}
578 }
579 
580 /* Well, really we just force the chip into 100baseT then
581  * 10baseT, each time checking for a link status.
582  */
583 static void bigmac_begin_auto_negotiation(struct bigmac *bp)
584 {
585 	void __iomem *tregs = bp->tregs;
586 	int timeout;
587 
588 	/* Grab new software copies of PHY registers. */
589 	bp->sw_bmsr	= bigmac_tcvr_read(bp, tregs, MII_BMSR);
590 	bp->sw_bmcr	= bigmac_tcvr_read(bp, tregs, MII_BMCR);
591 
592 	/* Reset the PHY. */
593 	bp->sw_bmcr	= (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
594 	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
595 	bp->sw_bmcr	= (BMCR_RESET);
596 	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
597 
598 	timeout = 64;
599 	while (--timeout) {
600 		bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
601 		if ((bp->sw_bmcr & BMCR_RESET) == 0)
602 			break;
603 		udelay(20);
604 	}
605 	if (timeout == 0)
606 		printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
607 
608 	bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, MII_BMCR);
609 
610 	/* First we try 100baseT. */
611 	bp->sw_bmcr |= BMCR_SPEED100;
612 	bigmac_tcvr_write(bp, tregs, MII_BMCR, bp->sw_bmcr);
613 
614 	bp->timer_state = ltrywait;
615 	bp->timer_ticks = 0;
616 	bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
617 	add_timer(&bp->bigmac_timer);
618 }
619 
620 static int bigmac_init_hw(struct bigmac *bp, int from_irq)
621 {
622 	void __iomem *gregs        = bp->gregs;
623 	void __iomem *cregs        = bp->creg;
624 	void __iomem *bregs        = bp->bregs;
625 	__u32 bblk_dvma = (__u32)bp->bblock_dvma;
626 	unsigned char *e = &bp->dev->dev_addr[0];
627 
628 	/* Latch current counters into statistics. */
629 	bigmac_get_counters(bp, bregs);
630 
631 	/* Reset QEC. */
632 	qec_global_reset(gregs);
633 
634 	/* Init QEC. */
635 	qec_init(bp);
636 
637 	/* Alloc and reset the tx/rx descriptor chains. */
638 	bigmac_init_rings(bp, from_irq);
639 
640 	/* Initialize the PHY. */
641 	bigmac_tcvr_init(bp);
642 
643 	/* Stop transmitter and receiver. */
644 	bigmac_stop(bp);
645 
646 	/* Set hardware ethernet address. */
647 	sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
648 	sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
649 	sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
650 
651 	/* Clear the hash table until mc upload occurs. */
652 	sbus_writel(0, bregs + BMAC_HTABLE3);
653 	sbus_writel(0, bregs + BMAC_HTABLE2);
654 	sbus_writel(0, bregs + BMAC_HTABLE1);
655 	sbus_writel(0, bregs + BMAC_HTABLE0);
656 
657 	/* Enable Big Mac hash table filter. */
658 	sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
659 		    bregs + BMAC_RXCFG);
660 	udelay(20);
661 
662 	/* Ok, configure the Big Mac transmitter. */
663 	sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
664 
665 	/* The HME docs recommend using the 10 LSBs of our MAC address here. */
666 	sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
667 		    bregs + BMAC_RSEED);
668 
669 	/* Enable the output drivers no matter what. */
670 	sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
671 		    bregs + BMAC_XIFCFG);
672 
673 	/* Tell the QEC where the ring descriptors are. */
674 	sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
675 		    cregs + CREG_RXDS);
676 	sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
677 		    cregs + CREG_TXDS);
678 
679 	/* Setup the FIFO pointers into QEC local memory. */
680 	sbus_writel(0, cregs + CREG_RXRBUFPTR);
681 	sbus_writel(0, cregs + CREG_RXWBUFPTR);
682 	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
683 		    cregs + CREG_TXRBUFPTR);
684 	sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
685 		    cregs + CREG_TXWBUFPTR);
686 
687 	/* Tell bigmac what interrupts we don't want to hear about. */
688 	sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
689 		    bregs + BMAC_IMASK);
690 
691 	/* Enable the various other irq's. */
692 	sbus_writel(0, cregs + CREG_RIMASK);
693 	sbus_writel(0, cregs + CREG_TIMASK);
694 	sbus_writel(0, cregs + CREG_QMASK);
695 	sbus_writel(0, cregs + CREG_BMASK);
696 
697 	/* Set jam size to a reasonable default. */
698 	sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
699 
700 	/* Clear collision counter. */
701 	sbus_writel(0, cregs + CREG_CCNT);
702 
703 	/* Enable transmitter and receiver. */
704 	sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
705 		    bregs + BMAC_TXCFG);
706 	sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
707 		    bregs + BMAC_RXCFG);
708 
709 	/* Ok, start detecting link speed/duplex. */
710 	bigmac_begin_auto_negotiation(bp);
711 
712 	/* Success. */
713 	return 0;
714 }
715 
716 /* Error interrupts get sent here. */
717 static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
718 {
719 	printk(KERN_ERR "bigmac_is_medium_rare: ");
720 	if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
721 		if (qec_status & GLOB_STAT_ER)
722 			printk("QEC_ERROR, ");
723 		if (qec_status & GLOB_STAT_BM)
724 			printk("QEC_BMAC_ERROR, ");
725 	}
726 	if (bmac_status & CREG_STAT_ERRORS) {
727 		if (bmac_status & CREG_STAT_BERROR)
728 			printk("BMAC_ERROR, ");
729 		if (bmac_status & CREG_STAT_TXDERROR)
730 			printk("TXD_ERROR, ");
731 		if (bmac_status & CREG_STAT_TXLERR)
732 			printk("TX_LATE_ERROR, ");
733 		if (bmac_status & CREG_STAT_TXPERR)
734 			printk("TX_PARITY_ERROR, ");
735 		if (bmac_status & CREG_STAT_TXSERR)
736 			printk("TX_SBUS_ERROR, ");
737 
738 		if (bmac_status & CREG_STAT_RXDROP)
739 			printk("RX_DROP_ERROR, ");
740 
741 		if (bmac_status & CREG_STAT_RXSMALL)
742 			printk("RX_SMALL_ERROR, ");
743 		if (bmac_status & CREG_STAT_RXLERR)
744 			printk("RX_LATE_ERROR, ");
745 		if (bmac_status & CREG_STAT_RXPERR)
746 			printk("RX_PARITY_ERROR, ");
747 		if (bmac_status & CREG_STAT_RXSERR)
748 			printk("RX_SBUS_ERROR, ");
749 	}
750 
751 	printk(" RESET\n");
752 	bigmac_init_hw(bp, 1);
753 }
754 
755 /* BigMAC transmit complete service routines. */
756 static void bigmac_tx(struct bigmac *bp)
757 {
758 	struct be_txd *txbase = &bp->bmac_block->be_txd[0];
759 	struct net_device *dev = bp->dev;
760 	int elem;
761 
762 	spin_lock(&bp->lock);
763 
764 	elem = bp->tx_old;
765 	DTX(("bigmac_tx: tx_old[%d] ", elem));
766 	while (elem != bp->tx_new) {
767 		struct sk_buff *skb;
768 		struct be_txd *this = &txbase[elem];
769 
770 		DTX(("this(%p) [flags(%08x)addr(%08x)]",
771 		     this, this->tx_flags, this->tx_addr));
772 
773 		if (this->tx_flags & TXD_OWN)
774 			break;
775 		skb = bp->tx_skbs[elem];
776 		dev->stats.tx_packets++;
777 		dev->stats.tx_bytes += skb->len;
778 		dma_unmap_single(&bp->bigmac_op->dev,
779 				 this->tx_addr, skb->len,
780 				 DMA_TO_DEVICE);
781 
782 		DTX(("skb(%p) ", skb));
783 		bp->tx_skbs[elem] = NULL;
784 		dev_kfree_skb_irq(skb);
785 
786 		elem = NEXT_TX(elem);
787 	}
788 	DTX((" DONE, tx_old=%d\n", elem));
789 	bp->tx_old = elem;
790 
791 	if (netif_queue_stopped(dev) &&
792 	    TX_BUFFS_AVAIL(bp) > 0)
793 		netif_wake_queue(bp->dev);
794 
795 	spin_unlock(&bp->lock);
796 }
797 
798 /* BigMAC receive complete service routines. */
799 static void bigmac_rx(struct bigmac *bp)
800 {
801 	struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
802 	struct be_rxd *this;
803 	int elem = bp->rx_new, drops = 0;
804 	u32 flags;
805 
806 	this = &rxbase[elem];
807 	while (!((flags = this->rx_flags) & RXD_OWN)) {
808 		struct sk_buff *skb;
809 		int len = (flags & RXD_LENGTH); /* FCS not included */
810 
811 		/* Check for errors. */
812 		if (len < ETH_ZLEN) {
813 			bp->dev->stats.rx_errors++;
814 			bp->dev->stats.rx_length_errors++;
815 
816 	drop_it:
817 			/* Return it to the BigMAC. */
818 			bp->dev->stats.rx_dropped++;
819 			this->rx_flags =
820 				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
821 			goto next;
822 		}
823 		skb = bp->rx_skbs[elem];
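		/* Large frames keep their ring buffer and are handed straight up
		 * the stack, with a fresh skb mapped into the ring in their place;
		 * small frames are copied so the ring buffer can be reused as-is.
		 */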
824 		if (len > RX_COPY_THRESHOLD) {
825 			struct sk_buff *new_skb;
826 
827 			/* Now refill the entry, if we can. */
828 			new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
829 			if (new_skb == NULL) {
830 				drops++;
831 				goto drop_it;
832 			}
833 			dma_unmap_single(&bp->bigmac_op->dev,
834 					 this->rx_addr,
835 					 RX_BUF_ALLOC_SIZE - 34,
836 					 DMA_FROM_DEVICE);
837 			bp->rx_skbs[elem] = new_skb;
838 			skb_put(new_skb, ETH_FRAME_LEN);
839 			skb_reserve(new_skb, 34);
840 			this->rx_addr =
841 				dma_map_single(&bp->bigmac_op->dev,
842 					       new_skb->data,
843 					       RX_BUF_ALLOC_SIZE - 34,
844 					       DMA_FROM_DEVICE);
845 			this->rx_flags =
846 				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
847 
848 			/* Trim the original skb for the netif. */
849 			skb_trim(skb, len);
850 		} else {
851 			struct sk_buff *copy_skb = netdev_alloc_skb(bp->dev, len + 2);
852 
853 			if (copy_skb == NULL) {
854 				drops++;
855 				goto drop_it;
856 			}
857 			skb_reserve(copy_skb, 2);
858 			skb_put(copy_skb, len);
859 			dma_sync_single_for_cpu(&bp->bigmac_op->dev,
860 						this->rx_addr, len,
861 						DMA_FROM_DEVICE);
862 			skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
863 			dma_sync_single_for_device(&bp->bigmac_op->dev,
864 						   this->rx_addr, len,
865 						   DMA_FROM_DEVICE);
866 
867 			/* Reuse original ring buffer. */
868 			this->rx_flags =
869 				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
870 
871 			skb = copy_skb;
872 		}
873 
874 		/* No checksums done by the BigMAC ;-( */
875 		skb->protocol = eth_type_trans(skb, bp->dev);
876 		netif_rx(skb);
877 		bp->dev->stats.rx_packets++;
878 		bp->dev->stats.rx_bytes += len;
879 	next:
880 		elem = NEXT_RX(elem);
881 		this = &rxbase[elem];
882 	}
883 	bp->rx_new = elem;
884 	if (drops)
885 		printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", bp->dev->name);
886 }
887 
888 static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
889 {
890 	struct bigmac *bp = (struct bigmac *) dev_id;
891 	u32 qec_status, bmac_status;
892 
893 	DIRQ(("bigmac_interrupt: "));
894 
895 	/* Latch status registers now. */
896 	bmac_status = sbus_readl(bp->creg + CREG_STAT);
897 	qec_status = sbus_readl(bp->gregs + GLOB_STAT);
898 
899 	DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
900 	if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
901 	    (bmac_status & CREG_STAT_ERRORS))
902 		bigmac_is_medium_rare(bp, qec_status, bmac_status);
903 
904 	if (bmac_status & CREG_STAT_TXIRQ)
905 		bigmac_tx(bp);
906 
907 	if (bmac_status & CREG_STAT_RXIRQ)
908 		bigmac_rx(bp);
909 
910 	return IRQ_HANDLED;
911 }
912 
913 static int bigmac_open(struct net_device *dev)
914 {
915 	struct bigmac *bp = netdev_priv(dev);
916 	int ret;
917 
918 	ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
919 	if (ret) {
920 		printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
921 		return ret;
922 	}
923 	timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
924 	ret = bigmac_init_hw(bp, 0);
925 	if (ret)
926 		free_irq(dev->irq, bp);
927 	return ret;
928 }
929 
930 static int bigmac_close(struct net_device *dev)
931 {
932 	struct bigmac *bp = netdev_priv(dev);
933 
934 	del_timer(&bp->bigmac_timer);
935 	bp->timer_state = asleep;
936 	bp->timer_ticks = 0;
937 
938 	bigmac_stop(bp);
939 	bigmac_clean_rings(bp);
940 	free_irq(dev->irq, bp);
941 	return 0;
942 }
943 
944 static void bigmac_tx_timeout(struct net_device *dev)
945 {
946 	struct bigmac *bp = netdev_priv(dev);
947 
948 	bigmac_init_hw(bp, 0);
949 	netif_wake_queue(dev);
950 }
951 
952 /* Put a packet on the wire. */
953 static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
954 {
955 	struct bigmac *bp = netdev_priv(dev);
956 	int len, entry;
957 	u32 mapping;
958 
959 	len = skb->len;
960 	mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
961 				 len, DMA_TO_DEVICE);
962 
963 	/* Avoid a race... */
964 	spin_lock_irq(&bp->lock);
965 	entry = bp->tx_new;
966 	DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
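	/* Fill in the descriptor while the OWN bit is still clear; ownership
	 * passes to the chip only with the final tx_flags write below.
	 */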
967 	bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
968 	bp->tx_skbs[entry] = skb;
969 	bp->bmac_block->be_txd[entry].tx_addr = mapping;
970 	bp->bmac_block->be_txd[entry].tx_flags =
971 		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
972 	bp->tx_new = NEXT_TX(entry);
973 	if (TX_BUFFS_AVAIL(bp) <= 0)
974 		netif_stop_queue(dev);
975 	spin_unlock_irq(&bp->lock);
976 
977 	/* Get it going. */
978 	sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
979 
980 
981 	return NETDEV_TX_OK;
982 }
983 
984 static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
985 {
986 	struct bigmac *bp = netdev_priv(dev);
987 
988 	bigmac_get_counters(bp, bp->bregs);
989 	return &dev->stats;
990 }
991 
992 static void bigmac_set_multicast(struct net_device *dev)
993 {
994 	struct bigmac *bp = netdev_priv(dev);
995 	void __iomem *bregs = bp->bregs;
996 	struct netdev_hw_addr *ha;
997 	u32 tmp, crc;
998 
999 	/* Disable the receiver.  The bit self-clears when
1000 	 * the operation is complete.
1001 	 */
1002 	tmp = sbus_readl(bregs + BMAC_RXCFG);
1003 	tmp &= ~(BIGMAC_RXCFG_ENABLE);
1004 	sbus_writel(tmp, bregs + BMAC_RXCFG);
1005 	while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
1006 		udelay(20);
1007 
1008 	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1009 		sbus_writel(0xffff, bregs + BMAC_HTABLE0);
1010 		sbus_writel(0xffff, bregs + BMAC_HTABLE1);
1011 		sbus_writel(0xffff, bregs + BMAC_HTABLE2);
1012 		sbus_writel(0xffff, bregs + BMAC_HTABLE3);
1013 	} else if (dev->flags & IFF_PROMISC) {
1014 		tmp = sbus_readl(bregs + BMAC_RXCFG);
1015 		tmp |= BIGMAC_RXCFG_PMISC;
1016 		sbus_writel(tmp, bregs + BMAC_RXCFG);
1017 	} else {
1018 		u16 hash_table[4] = { 0 };
1019 
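		/* The upper 6 bits of the little-endian CRC of each address pick
		 * one of 64 filter bits, spread across four 16-bit HTABLE words.
		 */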
1020 		netdev_for_each_mc_addr(ha, dev) {
1021 			crc = ether_crc_le(6, ha->addr);
1022 			crc >>= 26;
1023 			hash_table[crc >> 4] |= 1 << (crc & 0xf);
1024 		}
1025 		sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
1026 		sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
1027 		sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
1028 		sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
1029 	}
1030 
1031 	/* Re-enable the receiver. */
1032 	tmp = sbus_readl(bregs + BMAC_RXCFG);
1033 	tmp |= BIGMAC_RXCFG_ENABLE;
1034 	sbus_writel(tmp, bregs + BMAC_RXCFG);
1035 }
1036 
1037 /* Ethtool support... */
1038 static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1039 {
1040 	strlcpy(info->driver, "sunbmac", sizeof(info->driver));
1041 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1042 }
1043 
1044 static u32 bigmac_get_link(struct net_device *dev)
1045 {
1046 	struct bigmac *bp = netdev_priv(dev);
1047 
1048 	spin_lock_irq(&bp->lock);
1049 	bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, MII_BMSR);
1050 	spin_unlock_irq(&bp->lock);
1051 
1052 	return (bp->sw_bmsr & BMSR_LSTATUS);
1053 }
1054 
1055 static const struct ethtool_ops bigmac_ethtool_ops = {
1056 	.get_drvinfo		= bigmac_get_drvinfo,
1057 	.get_link		= bigmac_get_link,
1058 };
1059 
1060 static const struct net_device_ops bigmac_ops = {
1061 	.ndo_open		= bigmac_open,
1062 	.ndo_stop		= bigmac_close,
1063 	.ndo_start_xmit		= bigmac_start_xmit,
1064 	.ndo_get_stats		= bigmac_get_stats,
1065 	.ndo_set_rx_mode	= bigmac_set_multicast,
1066 	.ndo_tx_timeout		= bigmac_tx_timeout,
1067 	.ndo_set_mac_address	= eth_mac_addr,
1068 	.ndo_validate_addr	= eth_validate_addr,
1069 };
1070 
1071 static int bigmac_ether_init(struct platform_device *op,
1072 			     struct platform_device *qec_op)
1073 {
1074 	static int version_printed;
1075 	struct net_device *dev;
1076 	u8 bsizes, bsizes_more;
1077 	struct bigmac *bp;
1078 	int i;
1079 
1080 	/* Get a new device struct for this interface. */
1081 	dev = alloc_etherdev(sizeof(struct bigmac));
1082 	if (!dev)
1083 		return -ENOMEM;
1084 
1085 	if (version_printed++ == 0)
1086 		printk(KERN_INFO "%s", version);
1087 
1088 	for (i = 0; i < 6; i++)
1089 		dev->dev_addr[i] = idprom->id_ethaddr[i];
1090 
1091 	/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
1092 	bp = netdev_priv(dev);
1093 	bp->qec_op = qec_op;
1094 	bp->bigmac_op = op;
1095 
1096 	SET_NETDEV_DEV(dev, &op->dev);
1097 
1098 	spin_lock_init(&bp->lock);
1099 
1100 	/* Map in QEC global control registers. */
1101 	bp->gregs = of_ioremap(&qec_op->resource[0], 0,
1102 			       GLOB_REG_SIZE, "BigMAC QEC Global Regs");
1103 	if (!bp->gregs) {
1104 		printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
1105 		goto fail_and_cleanup;
1106 	}
1107 
1108 	/* Make sure QEC is in BigMAC mode. */
1109 	if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
1110 		printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
1111 		goto fail_and_cleanup;
1112 	}
1113 
1114 	/* Reset the QEC. */
1115 	if (qec_global_reset(bp->gregs))
1116 		goto fail_and_cleanup;
1117 
1118 	/* Get supported SBUS burst sizes. */
1119 	bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1120 	bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
1121 
1122 	bsizes &= 0xff;
1123 	if (bsizes_more != 0xff)
1124 		bsizes &= bsizes_more;
1125 	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
1126 	    (bsizes & DMA_BURST32) == 0)
1127 		bsizes = (DMA_BURST32 - 1);
1128 	bp->bigmac_bursts = bsizes;
1129 
1130 	/* Perform QEC initialization. */
1131 	qec_init(bp);
1132 
1133 	/* Map in the BigMAC channel registers. */
1134 	bp->creg = of_ioremap(&op->resource[0], 0,
1135 			      CREG_REG_SIZE, "BigMAC QEC Channel Regs");
1136 	if (!bp->creg) {
1137 		printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
1138 		goto fail_and_cleanup;
1139 	}
1140 
1141 	/* Map in the BigMAC control registers. */
1142 	bp->bregs = of_ioremap(&op->resource[1], 0,
1143 			       BMAC_REG_SIZE, "BigMAC Primary Regs");
1144 	if (!bp->bregs) {
1145 		printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
1146 		goto fail_and_cleanup;
1147 	}
1148 
1149 	/* Map in the BigMAC transceiver registers; this is how you poke at
1150 	 * the BigMAC's PHY.
1151 	 */
1152 	bp->tregs = of_ioremap(&op->resource[2], 0,
1153 			       TCVR_REG_SIZE, "BigMAC Transceiver Regs");
1154 	if (!bp->tregs) {
1155 		printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
1156 		goto fail_and_cleanup;
1157 	}
1158 
1159 	/* Stop the BigMAC. */
1160 	bigmac_stop(bp);
1161 
1162 	/* Allocate transmit/receive descriptor DVMA block. */
1163 	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
1164 					    PAGE_SIZE,
1165 					    &bp->bblock_dvma, GFP_ATOMIC);
1166 	if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
1167 		goto fail_and_cleanup;
1168 
1169 	/* Get the board revision of this BigMAC. */
1170 	bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
1171 					      "board-version", 1);
1172 
1173 	/* Init auto-negotiation timer state. */
1174 	timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
1175 	bp->timer_state = asleep;
1176 	bp->timer_ticks = 0;
1177 
1178 	/* Backlink to generic net device struct. */
1179 	bp->dev = dev;
1180 
1181 	/* Set links to our BigMAC open and close routines. */
1182 	dev->ethtool_ops = &bigmac_ethtool_ops;
1183 	dev->netdev_ops = &bigmac_ops;
1184 	dev->watchdog_timeo = 5*HZ;
1185 
1186 	/* Finish net device registration. */
1187 	dev->irq = bp->bigmac_op->archdata.irqs[0];
1188 	dev->dma = 0;
1189 
1190 	if (register_netdev(dev)) {
1191 		printk(KERN_ERR "BIGMAC: Cannot register device.\n");
1192 		goto fail_and_cleanup;
1193 	}
1194 
1195 	dev_set_drvdata(&bp->bigmac_op->dev, bp);
1196 
1197 	printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
1198 	       dev->name, dev->dev_addr);
1199 
1200 	return 0;
1201 
1202 fail_and_cleanup:
1203 	/* Something went wrong, undo whatever we did so far. */
1204 	/* Free register mappings if any. */
1205 	if (bp->gregs)
1206 		of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
1207 	if (bp->creg)
1208 		of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
1209 	if (bp->bregs)
1210 		of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
1211 	if (bp->tregs)
1212 		of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
1213 
1214 	if (bp->bmac_block)
1215 		dma_free_coherent(&bp->bigmac_op->dev,
1216 				  PAGE_SIZE,
1217 				  bp->bmac_block,
1218 				  bp->bblock_dvma);
1219 
1220 	/* This also frees the co-located private data */
1221 	free_netdev(dev);
1222 	return -ENODEV;
1223 }
1224 
1225 /* QEC can be the parent of either QuadEthernet or a BigMAC.  We want
1226  * the latter.
1227  */
1228 static int bigmac_sbus_probe(struct platform_device *op)
1229 {
1230 	struct device *parent = op->dev.parent;
1231 	struct platform_device *qec_op;
1232 
1233 	qec_op = to_platform_device(parent);
1234 
1235 	return bigmac_ether_init(op, qec_op);
1236 }
1237 
1238 static int bigmac_sbus_remove(struct platform_device *op)
1239 {
1240 	struct bigmac *bp = platform_get_drvdata(op);
1241 	struct device *parent = op->dev.parent;
1242 	struct net_device *net_dev = bp->dev;
1243 	struct platform_device *qec_op;
1244 
1245 	qec_op = to_platform_device(parent);
1246 
1247 	unregister_netdev(net_dev);
1248 
1249 	of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
1250 	of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
1251 	of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
1252 	of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
1253 	dma_free_coherent(&op->dev,
1254 			  PAGE_SIZE,
1255 			  bp->bmac_block,
1256 			  bp->bblock_dvma);
1257 
1258 	free_netdev(net_dev);
1259 
1260 	return 0;
1261 }
1262 
1263 static const struct of_device_id bigmac_sbus_match[] = {
1264 	{
1265 		.name = "be",
1266 	},
1267 	{},
1268 };
1269 
1270 MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
1271 
1272 static struct platform_driver bigmac_sbus_driver = {
1273 	.driver = {
1274 		.name = "sunbmac",
1275 		.of_match_table = bigmac_sbus_match,
1276 	},
1277 	.probe		= bigmac_sbus_probe,
1278 	.remove		= bigmac_sbus_remove,
1279 };
1280 
1281 module_platform_driver(bigmac_sbus_driver);
1282