xref: /titanic_41/usr/src/grub/grub-0.97/netboot/tg3.c (revision 9b214d32697277d03ed2e5d98c4a7bfef16dcf4d)
1 /* $Id: tg3.c,v 1.5 2003/03/19 21:26:20 gbaum Exp $
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002 Jeff Garzik (jgarzik@mandrakesoft.com)
6  * Copyright (C) 2003 Eric Biederman (ebiederman@lnxi.com)  [etherboot port]
7  */
8 
9 /* 11-13-2003	timlegge	Fix Issue with NetGear GA302T
10  * 11-18-2003   ebiederm        Generalize NetGear Fix to what the code was supposed to be.
11  */
12 
13 #include "etherboot.h"
14 #include "nic.h"
15 #include "pci.h"
16 #include "timer.h"
17 /*#include "string.h"*/
18 #include "tg3.h"
19 
20 #define SUPPORT_COPPER_PHY  1
21 #define SUPPORT_FIBER_PHY   1
22 #define SUPPORT_LINK_REPORT 1
23 #define SUPPORT_PARTNO_STR  1
24 #define SUPPORT_PHY_STR     1
25 
/* State for the single NIC instance this driver manages. */
struct tg3 tg3;
27 
28 /* Dummy defines for error handling */
29 #define EBUSY  1
30 #define ENODEV 2
31 #define EINVAL 3
32 #define ENOMEM 4
33 
34 
35 /* These numbers seem to be hard coded in the NIC firmware somehow.
36  * You can't change the ring sizes, but you can change where you place
37  * them in the NIC onboard memory.
38  */
39 #define TG3_RX_RING_SIZE		512
40 #define TG3_DEF_RX_RING_PENDING		20	/* RX_RING_PENDING seems to be o.k. at 20 and 200 */
41 #define TG3_RX_RCB_RING_SIZE	1024
42 
43 /*	(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ? \
44 	 512 : 1024) */
45  #define TG3_TX_RING_SIZE		512
46 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
47 
48 #define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RING_SIZE)
49 #define TG3_RX_RCB_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_RCB_RING_SIZE)
50 
51 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * TG3_TX_RING_SIZE)
52 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
53 #define PREV_TX(N)		(((N) - 1) & (TG3_TX_RING_SIZE - 1))
54 
55 #define RX_PKT_BUF_SZ		(1536 + 2 + 64)
56 
57 
/* All rings, the hardware status/statistics blocks, and the receive
 * packet buffers are allocated statically in BSS, so the driver never
 * needs dynamic memory allocation.
 */
static struct bss {
	struct tg3_rx_buffer_desc rx_std[TG3_RX_RING_SIZE];	/* standard RX producer ring */
	struct tg3_rx_buffer_desc rx_rcb[TG3_RX_RCB_RING_SIZE];	/* RX return (completion) ring */
	struct tg3_tx_buffer_desc tx_ring[TG3_TX_RING_SIZE];	/* TX descriptor ring */
	struct tg3_hw_status      hw_status;	/* status block written by the NIC */
	struct tg3_hw_stats       hw_stats;	/* statistics block written by the NIC */
	unsigned char             rx_bufs[TG3_DEF_RX_RING_PENDING][RX_PKT_BUF_SZ];	/* RX packet buffers */
} tg3_bss;
66 
67 /**
68  * pci_save_state - save the PCI configuration space of a device before suspending
69  * @dev: - PCI device that we're dealing with
70  * @buffer: - buffer to hold config space context
71  *
72  * @buffer must be large enough to hold the entire PCI 2.2 config space
73  * (>= 64 bytes).
74  */
static int pci_save_state(struct pci_device *dev, uint32_t *buffer)
{
	unsigned int reg;

	/* Snapshot the first 64 bytes (16 dwords) of config space. */
	for (reg = 0; reg < 16; reg++) {
		pci_read_config_dword(dev, reg * 4, &buffer[reg]);
	}
	return 0;
}
82 
83 /**
84  * pci_restore_state - Restore the saved state of a PCI device
85  * @dev: - PCI device that we're dealing with
86  * @buffer: - saved PCI config space
87  *
88  */
static int pci_restore_state(struct pci_device *dev, uint32_t *buffer)
{
	unsigned int reg;

	/* Write the 16 saved dwords back into config space. */
	for (reg = 0; reg < 16; reg++) {
		pci_write_config_dword(dev, reg * 4, buffer[reg]);
	}
	return 0;
}
97 
/* Write a chip register through the PCI config-space indirect window:
 * the register offset goes into TG3PCI_REG_BASE_ADDR, the value into
 * TG3PCI_REG_DATA.  The two writes must stay in this order.
 */
static void tg3_write_indirect_reg32(uint32_t off, uint32_t val)
{
	pci_write_config_dword(tg3.pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tg3.pdev, TG3PCI_REG_DATA, val);
}
103 
/* Register access helpers.  tw32() goes through the indirect
 * config-space window above; all the others access the memory-mapped
 * register space at tg3.regs directly.
 */
#define tw32(reg,val)		tg3_write_indirect_reg32((reg),(val))
#define tw32_mailbox(reg, val)	writel(((val) & 0xffffffff), tg3.regs + (reg))
#define tw16(reg,val)		writew(((val) & 0xffff), tg3.regs + (reg))
#define tw8(reg,val)		writeb(((val) & 0xff), tg3.regs + (reg))
#define tr32(reg)		readl(tg3.regs + (reg))
#define tr16(reg)		readw(tg3.regs + (reg))
#define tr8(reg)		readb(tg3.regs + (reg))
111 
/* Write a register, read it back to flush the posted write, and give
 * the chip time to act on it before the caller continues.
 */
static void tw32_carefully(uint32_t reg, uint32_t val)
{
	tw32(reg, val);
	tr32(reg);	/* read-back flushes the write */
	udelay(100);
}
118 
/* Mailbox write followed by a read-back so the write is posted
 * before we continue.
 */
static void tw32_mailbox2(uint32_t reg, uint32_t val)
{
	tw32_mailbox(reg, val);
	tr32(reg);
}
124 
/* Write a word of NIC on-board memory (SRAM) through the config-space
 * memory window: select the address, write the data, then park the
 * window base back at zero.
 */
static void tg3_write_mem(uint32_t off, uint32_t val)
{
	pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
133 
/* Read a word of NIC on-board memory (SRAM) through the config-space
 * memory window; counterpart of tg3_write_mem().
 */
static void tg3_read_mem(uint32_t off, uint32_t *val)
{
	pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_read_config_dword(tg3.pdev, TG3PCI_MEM_WIN_DATA, val);

	/* Always leave this as zero. */
	pci_write_config_dword(tg3.pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
142 
/* Stop the chip from raising PCI interrupts: set the mask bit in
 * MISC_HOST_CTRL and write a non-zero tag into interrupt mailbox 0.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox2(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
149 
150 static void tg3_switch_clocks(struct tg3 *tp)
151 {
152 	uint32_t orig_clock_ctrl, clock_ctrl;
153 
154 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
155 
156 	orig_clock_ctrl = clock_ctrl;
157 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE | 0x1f);
158 	tp->pci_clock_ctrl = clock_ctrl;
159 
160 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
161 		(orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE)!=0) {
162 		tw32_carefully(TG3PCI_CLOCK_CTRL,
163 			clock_ctrl | (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
164 		tw32_carefully(TG3PCI_CLOCK_CTRL,
165 			clock_ctrl | (CLOCK_CTRL_ALTCLK));
166 	}
167 	tw32_carefully(TG3PCI_CLOCK_CTRL, clock_ctrl);
168 }
169 
170 #define PHY_BUSY_LOOPS	5000
171 
172 static int tg3_readphy(struct tg3 *tp, int reg, uint32_t *val)
173 {
174 	uint32_t frame_val;
175 	int loops, ret;
176 
177 	tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
178 
179 	*val = 0xffffffff;
180 
181 	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
182 		      MI_COM_PHY_ADDR_MASK);
183 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
184 		      MI_COM_REG_ADDR_MASK);
185 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
186 
187 	tw32_carefully(MAC_MI_COM, frame_val);
188 
189 	loops = PHY_BUSY_LOOPS;
190 	while (loops-- > 0) {
191 		udelay(10);
192 		frame_val = tr32(MAC_MI_COM);
193 
194 		if ((frame_val & MI_COM_BUSY) == 0) {
195 			udelay(5);
196 			frame_val = tr32(MAC_MI_COM);
197 			break;
198 		}
199 	}
200 
201 	ret = -EBUSY;
202 	if (loops > 0) {
203 		*val = frame_val & MI_COM_DATA_MASK;
204 		ret = 0;
205 	}
206 
207 	tw32_carefully(MAC_MI_MODE, tp->mi_mode);
208 
209 	return ret;
210 }
211 
212 static int tg3_writephy(struct tg3 *tp, int reg, uint32_t val)
213 {
214 	uint32_t frame_val;
215 	int loops, ret;
216 
217 	tw32_carefully(MAC_MI_MODE, tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
218 
219 	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
220 		      MI_COM_PHY_ADDR_MASK);
221 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
222 		      MI_COM_REG_ADDR_MASK);
223 	frame_val |= (val & MI_COM_DATA_MASK);
224 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
225 
226 	tw32_carefully(MAC_MI_COM, frame_val);
227 
228 	loops = PHY_BUSY_LOOPS;
229 	while (loops-- > 0) {
230 		udelay(10);
231 		frame_val = tr32(MAC_MI_COM);
232 		if ((frame_val & MI_COM_BUSY) == 0) {
233 			udelay(5);
234 			frame_val = tr32(MAC_MI_COM);
235 			break;
236 		}
237 	}
238 
239 	ret = -EBUSY;
240 	if (loops > 0)
241 		ret = 0;
242 
243 	tw32_carefully(MAC_MI_MODE, tp->mi_mode);
244 
245 	return ret;
246 }
247 
248 static int tg3_writedsp(struct tg3 *tp, uint16_t addr, uint16_t val)
249 {
250 	int err;
251 	err  = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, addr);
252 	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
253 	return err;
254 }
255 
256 
257 static void tg3_phy_set_wirespeed(struct tg3 *tp)
258 {
259 	uint32_t val;
260 
261 	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
262 		return;
263 
264 	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
265 	tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
266 	tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
267 }
268 
269 static int tg3_bmcr_reset(struct tg3 *tp)
270 {
271 	uint32_t phy_control;
272 	int limit, err;
273 
274 	/* OK, reset it, and poll the BMCR_RESET bit until it
275 	 * clears or we time out.
276 	 */
277 	phy_control = BMCR_RESET;
278 	err = tg3_writephy(tp, MII_BMCR, phy_control);
279 	if (err != 0)
280 		return -EBUSY;
281 
282 	limit = 5000;
283 	while (limit--) {
284 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
285 		if (err != 0)
286 			return -EBUSY;
287 
288 		if ((phy_control & BMCR_RESET) == 0) {
289 			udelay(40);
290 			break;
291 		}
292 		udelay(10);
293 	}
294 	if (limit <= 0)
295 		return -EBUSY;
296 
297 	return 0;
298 }
299 
300 static int tg3_wait_macro_done(struct tg3 *tp)
301 {
302 	int limit = 100;
303 
304 	while (limit--) {
305 		uint32_t tmp32;
306 
307 		tg3_readphy(tp, 0x16, &tmp32);
308 		if ((tmp32 & 0x1000) == 0)
309 			break;
310 	}
311 	if (limit <= 0)
312 		return -EBUSY;
313 
314 	return 0;
315 }
316 
/* Write a known test pattern into each of the PHY's four DSP channels
 * and read it back to verify.  On any mismatch or macro timeout the
 * function requests another PHY reset via *resetp and returns -EBUSY;
 * returns 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test words: pairs of (low, high) values, three
	 * pairs per channel.
	 */
	static const uint32_t test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			(chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				test_pat[chan][i]);

		/* Commit the pattern and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare the three (low, high) pairs. */
		for (i = 0; i < 6; i += 2) {
			uint32_t low, high;

			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
			if (tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP recovery sequence
				 * and report failure (no reset requested).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
382 
383 static int tg3_phy_reset_chanpat(struct tg3 *tp)
384 {
385 	int chan;
386 
387 	for (chan = 0; chan < 4; chan++) {
388 		int i;
389 
390 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
391 			     (chan * 0x2000) | 0x0200);
392 		tg3_writephy(tp, 0x16, 0x0002);
393 		for (i = 0; i < 6; i++)
394 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
395 		tg3_writephy(tp, 0x16, 0x0202);
396 		if (tg3_wait_macro_done(tp))
397 			return -EBUSY;
398 	}
399 
400 	return 0;
401 }
402 
/* PHY reset workaround for 5703/5704/5705 chips: reset the PHY, force
 * 1000/full master mode, and retry the DSP test-pattern check up to
 * ten times (re-resetting the PHY whenever the check asks for it).
 * Afterwards the test pattern is cleared and the original register
 * state is restored.  Returns 0 on success, non-zero error otherwise.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	uint32_t reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
		tg3_writephy(tp, MII_TG3_CTRL,
			(MII_TG3_CTRL_AS_MASTER |
				MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);
	/* NOTE(review): if all retries fail, err is non-zero here but the
	 * cleanup below still runs and its result is what gets returned.
	 */

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access again. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	/* Turn SM_DSP clock back off. */
	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	/* Restore the saved 1000BASE-T control register. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
	reg32 &= ~0x3000;
	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

	return err;
}
465 
466 /* This will reset the tigon3 PHY if there is no valid
467  * link.
468  */
469 static int tg3_phy_reset(struct tg3 *tp)
470 {
471 	uint32_t phy_status;
472 	int err;
473 
474 	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
475 	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
476 	if (err != 0)
477 		return -EBUSY;
478 
479 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
480 		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
481 		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
482 		err = tg3_phy_reset_5703_4_5(tp);
483 		if (err)
484 			return err;
485 		goto out;
486 	}
487 	err = tg3_bmcr_reset(tp);
488 	if (err)
489 		return err;
490  out:
491 	tg3_phy_set_wirespeed(tp);
492 	return 0;
493 }
494 
/* Put the device into PCI power state D0 via its power-management
 * capability and restore the GRC local control register.
 */
static void tg3_set_power_state_0(struct tg3 *tp)
{
	uint16_t power_control;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,  TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);

	/* Clear pending PME status, then select state D0 (encoded as 0,
	 * hence the no-op OR kept for clarity).
	 */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	power_control |= 0;
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);

	tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);

	return;
}
516 
517 
518 #if SUPPORT_LINK_REPORT
519 static void tg3_link_report(struct tg3 *tp)
520 {
521 	if (!tp->carrier_ok) {
522 		printf("Link is down.\n");
523 	} else {
524 		printf("Link is up at %d Mbps, %s duplex. %s %s %s\n",
525 			(tp->link_config.active_speed == SPEED_1000 ?
526 			       1000 :
527 			(tp->link_config.active_speed == SPEED_100 ?
528 				100 : 10)),
529 			(tp->link_config.active_duplex == DUPLEX_FULL ?
530 				"full" : "half"),
531 			(tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "TX" : "",
532 			(tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "RX" : "",
533 			(tp->tg3_flags & (TG3_FLAG_TX_PAUSE |TG3_FLAG_RX_PAUSE)) ? "flow control" : "");
534 	}
535 }
536 #else
537 #define tg3_link_report(tp)
538 #endif
539 
540 static void tg3_setup_flow_control(struct tg3 *tp, uint32_t local_adv, uint32_t remote_adv)
541 {
542 	uint32_t new_tg3_flags = 0;
543 
544 	if (local_adv & ADVERTISE_PAUSE_CAP) {
545 		if (local_adv & ADVERTISE_PAUSE_ASYM) {
546 			if (remote_adv & LPA_PAUSE_CAP)
547 				new_tg3_flags |=
548 					(TG3_FLAG_RX_PAUSE |
549 					 TG3_FLAG_TX_PAUSE);
550 			else if (remote_adv & LPA_PAUSE_ASYM)
551 				new_tg3_flags |=
552 					(TG3_FLAG_RX_PAUSE);
553 		} else {
554 			if (remote_adv & LPA_PAUSE_CAP)
555 				new_tg3_flags |=
556 					(TG3_FLAG_RX_PAUSE |
557 					 TG3_FLAG_TX_PAUSE);
558 		}
559 	} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
560 		if ((remote_adv & LPA_PAUSE_CAP) &&
561 		    (remote_adv & LPA_PAUSE_ASYM))
562 			new_tg3_flags |= TG3_FLAG_TX_PAUSE;
563 	}
564 
565 	tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
566 	tp->tg3_flags |= new_tg3_flags;
567 
568 	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
569 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
570 	else
571 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
572 
573 	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
574 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
575 	else
576 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
577 }
578 
579 #if SUPPORT_COPPER_PHY
/* Decode the PHY AUX status register's speed/duplex field into
 * SPEED_* and DUPLEX_* values via a packed lookup table: each entry
 * holds (speed << 2) | duplex, indexed by the field bits shifted
 * down by 8.  Unlisted indices decode to entry value 0.
 */
static void tg3_aux_stat_to_speed_duplex(
	struct tg3 *tp __unused, uint32_t val, uint8_t *speed, uint8_t *duplex)
{
	static const uint8_t map[] = {
		[0] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
		[MII_TG3_AUX_STAT_10HALF >> 8]   = (SPEED_10 << 2) | DUPLEX_HALF,
		[MII_TG3_AUX_STAT_10FULL >> 8]   = (SPEED_10 << 2) | DUPLEX_FULL,
		[MII_TG3_AUX_STAT_100HALF >> 8]  = (SPEED_100 << 2) | DUPLEX_HALF,
		[MII_TG3_AUX_STAT_100_4 >> 8] = (SPEED_INVALID << 2) | DUPLEX_INVALID,
		[MII_TG3_AUX_STAT_100FULL >> 8]  = (SPEED_100 << 2) | DUPLEX_FULL,
		[MII_TG3_AUX_STAT_1000HALF >> 8] = (SPEED_1000 << 2) | DUPLEX_HALF,
		[MII_TG3_AUX_STAT_1000FULL >> 8] = (SPEED_1000 << 2) | DUPLEX_FULL,
	};
	uint8_t result;
	result = map[(val & MII_TG3_AUX_STAT_SPDMASK) >> 8];
	*speed = result >> 2;
	*duplex = result & 3;
}
598 
599 static int tg3_phy_copper_begin(struct tg3 *tp)
600 {
601 	uint32_t new_adv;
602 
603 	tp->link_config.advertising =
604 		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
605 			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
606 			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
607 			ADVERTISED_Autoneg | ADVERTISED_MII);
608 
609 	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY) {
610 		tp->link_config.advertising &=
611 			~(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
612 	}
613 
614 	new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
615 	if (tp->link_config.advertising & ADVERTISED_10baseT_Half) {
616 		new_adv |= ADVERTISE_10HALF;
617 	}
618 	if (tp->link_config.advertising & ADVERTISED_10baseT_Full) {
619 		new_adv |= ADVERTISE_10FULL;
620 	}
621 	if (tp->link_config.advertising & ADVERTISED_100baseT_Half) {
622 		new_adv |= ADVERTISE_100HALF;
623 	}
624 	if (tp->link_config.advertising & ADVERTISED_100baseT_Full) {
625 		new_adv |= ADVERTISE_100FULL;
626 	}
627 	tg3_writephy(tp, MII_ADVERTISE, new_adv);
628 
629 	if (tp->link_config.advertising &
630 		(ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
631 		new_adv = 0;
632 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half) {
633 			new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
634 		}
635 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full) {
636 			new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
637 		}
638 		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
639 			(tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
640 				tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
641 			new_adv |= (MII_TG3_CTRL_AS_MASTER |
642 				MII_TG3_CTRL_ENABLE_AS_MASTER);
643 		}
644 		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
645 	} else {
646 		tg3_writephy(tp, MII_TG3_CTRL, 0);
647 	}
648 
649 	tg3_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
650 
651 	return 0;
652 }
653 
/* Load the BCM5401 PHY's DSP fix-up values.  The register/value pairs
 * are a fixed vendor-provided sequence.  Returns 0 only if every
 * write succeeded.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);

	err |= tg3_writedsp(tp, 0x0012, 0x1804);
	err |= tg3_writedsp(tp, 0x0013, 0x1204);
	err |= tg3_writedsp(tp, 0x8006, 0x0132);
	err |= tg3_writedsp(tp, 0x8006, 0x0232);
	err |= tg3_writedsp(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
671 
/* Bring up (or re-check) the link on a copper PHY: apply per-chip PHY
 * workarounds, read the negotiated speed/duplex, resolve flow control,
 * restart autonegotiation if the link is down, and program MAC_MODE
 * to match.  Reports link transitions via tg3_link_report().
 * Returns 0 on success or a tg3_init_5401phy_dsp() error.
 */
static int tg3_setup_copper_phy(struct tg3 *tp)
{
	int current_link_up;
	uint32_t bmsr, dummy;
	int i, err;

	/* Clear any stale link-change status bits. */
	tw32_carefully(MAC_STATUS,
		(MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));

	/* Drive the MI interface manually (autopolling off). */
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_carefully(MAC_MI_MODE, tp->mi_mode);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if (	(	(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
			(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
			(tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)) &&
		(tp->carrier_ok)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!(bmsr & BMSR_LSTATUS))
			tg3_phy_reset(tp);
	}

	/* BCM5401 needs its DSP fix-up sequence loaded while the link
	 * is down.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		tg3_readphy(tp, MII_BMSR, &bmsr);

		if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Give the link up to 10ms to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				tg3_readphy(tp, MII_BMSR, &bmsr);
				if (bmsr & BMSR_LSTATUS) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full reset and
			 * another DSP load before the link recovers.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	/* Write all-ones to the PHY interrupt mask register. */
	tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tp->led_mode == led_mode_three_link)
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	else
		tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);

	current_link_up = 0;

	/* Check current link state (two reads: BMSR latches link-down). */
	tg3_readphy(tp, MII_BMSR, &bmsr);
	tg3_readphy(tp, MII_BMSR, &bmsr);

	if (bmsr & BMSR_LSTATUS) {
		uint32_t aux_stat, bmcr;

		/* Wait up to 20ms for AUX status to become non-zero. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
			if (aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
			&tp->link_config.active_speed,
			&tp->link_config.active_duplex);
		tg3_readphy(tp, MII_BMCR, &bmcr);
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			uint32_t gig_ctrl;

			current_link_up = 1;

			/* Force autoneg restart if we are exiting
			 * low power mode.
			 */
			tg3_readphy(tp, MII_TG3_CTRL, &gig_ctrl);
			if (!(gig_ctrl & (MII_TG3_CTRL_ADV_1000_HALF |
				      MII_TG3_CTRL_ADV_1000_FULL))) {
				current_link_up = 0;
			}
		} else {
			current_link_up = 0;
		}
	}

	/* On a full-duplex link, resolve pause/flow-control settings. */
	if (current_link_up == 1 &&
		(tp->link_config.active_duplex == DUPLEX_FULL)) {
		uint32_t local_adv, remote_adv;

		tg3_readphy(tp, MII_ADVERTISE, &local_adv);
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		tg3_readphy(tp, MII_LPA, &remote_adv);
		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}

	/* Link down: restart autonegotiation and re-check. */
	if (current_link_up == 0) {
		uint32_t tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		tg3_readphy(tp, MII_BMSR, &tmp);
		if (tmp & BMSR_LSTATUS)
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_mode == led_mode_link10) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 * With this other PHYs cannot bring up the link
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
		tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_carefully(MAC_MI_MODE, tp->mi_mode);
	}

	tw32_carefully(MAC_MODE, tp->mac_mode);

	/* Link change polled. */
	tw32_carefully(MAC_EVENT, 0);

	/* 5700 at gigabit on a fast PCI/PCI-X bus: notify firmware. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_carefully(MAC_STATUS,
			(MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
		tg3_write_mem(
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Report only actual link-state transitions. */
	if (current_link_up != tp->carrier_ok) {
		tp->carrier_ok = current_link_up;
		tg3_link_report(tp);
	}

	return 0;
}
883 #else
884 #define tg3_setup_copper_phy(TP) (-EINVAL)
885 #endif /* SUPPORT_COPPER_PHY */
886 
887 #if SUPPORT_FIBER_PHY
/* Software state for the fiber autonegotiation state machine
 * (tg3_fiber_aneg_smachine).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	uint32_t flags;	/* MR_* status/control bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (cur_time increments once
	 * per invocation).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many times in a row it
	 * matched (two consecutive matches set ability_match).
	 */
	uint32_t ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	uint32_t txconfig, rxconfig;	/* tx/rx config words, ANEG_CFG_* bits */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Settle threshold, in state-machine ticks (cur_time units). */
#define ANEG_STATE_SETTLE_TIME	10000
951 
952 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
953 				   struct tg3_fiber_aneginfo *ap)
954 {
955 	unsigned long delta;
956 	uint32_t rx_cfg_reg;
957 	int ret;
958 
959 	if (ap->state == ANEG_STATE_UNKNOWN) {
960 		ap->rxconfig = 0;
961 		ap->link_time = 0;
962 		ap->cur_time = 0;
963 		ap->ability_match_cfg = 0;
964 		ap->ability_match_count = 0;
965 		ap->ability_match = 0;
966 		ap->idle_match = 0;
967 		ap->ack_match = 0;
968 	}
969 	ap->cur_time++;
970 
971 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
972 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
973 
974 		if (rx_cfg_reg != ap->ability_match_cfg) {
975 			ap->ability_match_cfg = rx_cfg_reg;
976 			ap->ability_match = 0;
977 			ap->ability_match_count = 0;
978 		} else {
979 			if (++ap->ability_match_count > 1) {
980 				ap->ability_match = 1;
981 				ap->ability_match_cfg = rx_cfg_reg;
982 			}
983 		}
984 		if (rx_cfg_reg & ANEG_CFG_ACK)
985 			ap->ack_match = 1;
986 		else
987 			ap->ack_match = 0;
988 
989 		ap->idle_match = 0;
990 	} else {
991 		ap->idle_match = 1;
992 		ap->ability_match_cfg = 0;
993 		ap->ability_match_count = 0;
994 		ap->ability_match = 0;
995 		ap->ack_match = 0;
996 
997 		rx_cfg_reg = 0;
998 	}
999 
1000 	ap->rxconfig = rx_cfg_reg;
1001 	ret = ANEG_OK;
1002 
1003 	switch(ap->state) {
1004 	case ANEG_STATE_UNKNOWN:
1005 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1006 			ap->state = ANEG_STATE_AN_ENABLE;
1007 
1008 		/* fallthru */
1009 	case ANEG_STATE_AN_ENABLE:
1010 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1011 		if (ap->flags & MR_AN_ENABLE) {
1012 			ap->link_time = 0;
1013 			ap->cur_time = 0;
1014 			ap->ability_match_cfg = 0;
1015 			ap->ability_match_count = 0;
1016 			ap->ability_match = 0;
1017 			ap->idle_match = 0;
1018 			ap->ack_match = 0;
1019 
1020 			ap->state = ANEG_STATE_RESTART_INIT;
1021 		} else {
1022 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
1023 		}
1024 		break;
1025 
1026 	case ANEG_STATE_RESTART_INIT:
1027 		ap->link_time = ap->cur_time;
1028 		ap->flags &= ~(MR_NP_LOADED);
1029 		ap->txconfig = 0;
1030 		tw32(MAC_TX_AUTO_NEG, 0);
1031 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1032 		tw32_carefully(MAC_MODE, tp->mac_mode);
1033 
1034 		ret = ANEG_TIMER_ENAB;
1035 		ap->state = ANEG_STATE_RESTART;
1036 
1037 		/* fallthru */
1038 	case ANEG_STATE_RESTART:
1039 		delta = ap->cur_time - ap->link_time;
1040 		if (delta > ANEG_STATE_SETTLE_TIME) {
1041 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1042 		} else {
1043 			ret = ANEG_TIMER_ENAB;
1044 		}
1045 		break;
1046 
1047 	case ANEG_STATE_DISABLE_LINK_OK:
1048 		ret = ANEG_DONE;
1049 		break;
1050 
1051 	case ANEG_STATE_ABILITY_DETECT_INIT:
1052 		ap->flags &= ~(MR_TOGGLE_TX);
1053 		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1054 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1055 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1056 		tw32_carefully(MAC_MODE, tp->mac_mode);
1057 
1058 		ap->state = ANEG_STATE_ABILITY_DETECT;
1059 		break;
1060 
1061 	case ANEG_STATE_ABILITY_DETECT:
1062 		if (ap->ability_match != 0 && ap->rxconfig != 0) {
1063 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
1064 		}
1065 		break;
1066 
1067 	case ANEG_STATE_ACK_DETECT_INIT:
1068 		ap->txconfig |= ANEG_CFG_ACK;
1069 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1070 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1071 		tw32_carefully(MAC_MODE, tp->mac_mode);
1072 
1073 		ap->state = ANEG_STATE_ACK_DETECT;
1074 
1075 		/* fallthru */
1076 	case ANEG_STATE_ACK_DETECT:
1077 		if (ap->ack_match != 0) {
1078 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1079 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1080 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1081 			} else {
1082 				ap->state = ANEG_STATE_AN_ENABLE;
1083 			}
1084 		} else if (ap->ability_match != 0 &&
1085 			   ap->rxconfig == 0) {
1086 			ap->state = ANEG_STATE_AN_ENABLE;
1087 		}
1088 		break;
1089 
1090 	case ANEG_STATE_COMPLETE_ACK_INIT:
1091 		if (ap->rxconfig & ANEG_CFG_INVAL) {
1092 			ret = ANEG_FAILED;
1093 			break;
1094 		}
1095 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1096 			       MR_LP_ADV_HALF_DUPLEX |
1097 			       MR_LP_ADV_SYM_PAUSE |
1098 			       MR_LP_ADV_ASYM_PAUSE |
1099 			       MR_LP_ADV_REMOTE_FAULT1 |
1100 			       MR_LP_ADV_REMOTE_FAULT2 |
1101 			       MR_LP_ADV_NEXT_PAGE |
1102 			       MR_TOGGLE_RX |
1103 			       MR_NP_RX);
1104 		if (ap->rxconfig & ANEG_CFG_FD)
1105 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1106 		if (ap->rxconfig & ANEG_CFG_HD)
1107 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1108 		if (ap->rxconfig & ANEG_CFG_PS1)
1109 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
1110 		if (ap->rxconfig & ANEG_CFG_PS2)
1111 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1112 		if (ap->rxconfig & ANEG_CFG_RF1)
1113 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1114 		if (ap->rxconfig & ANEG_CFG_RF2)
1115 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1116 		if (ap->rxconfig & ANEG_CFG_NP)
1117 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
1118 
1119 		ap->link_time = ap->cur_time;
1120 
1121 		ap->flags ^= (MR_TOGGLE_TX);
1122 		if (ap->rxconfig & 0x0008)
1123 			ap->flags |= MR_TOGGLE_RX;
1124 		if (ap->rxconfig & ANEG_CFG_NP)
1125 			ap->flags |= MR_NP_RX;
1126 		ap->flags |= MR_PAGE_RX;
1127 
1128 		ap->state = ANEG_STATE_COMPLETE_ACK;
1129 		ret = ANEG_TIMER_ENAB;
1130 		break;
1131 
1132 	case ANEG_STATE_COMPLETE_ACK:
1133 		if (ap->ability_match != 0 &&
1134 		    ap->rxconfig == 0) {
1135 			ap->state = ANEG_STATE_AN_ENABLE;
1136 			break;
1137 		}
1138 		delta = ap->cur_time - ap->link_time;
1139 		if (delta > ANEG_STATE_SETTLE_TIME) {
1140 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1141 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1142 			} else {
1143 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1144 				    !(ap->flags & MR_NP_RX)) {
1145 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1146 				} else {
1147 					ret = ANEG_FAILED;
1148 				}
1149 			}
1150 		}
1151 		break;
1152 
1153 	case ANEG_STATE_IDLE_DETECT_INIT:
1154 		ap->link_time = ap->cur_time;
1155 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1156 		tw32_carefully(MAC_MODE, tp->mac_mode);
1157 
1158 		ap->state = ANEG_STATE_IDLE_DETECT;
1159 		ret = ANEG_TIMER_ENAB;
1160 		break;
1161 
1162 	case ANEG_STATE_IDLE_DETECT:
1163 		if (ap->ability_match != 0 &&
1164 		    ap->rxconfig == 0) {
1165 			ap->state = ANEG_STATE_AN_ENABLE;
1166 			break;
1167 		}
1168 		delta = ap->cur_time - ap->link_time;
1169 		if (delta > ANEG_STATE_SETTLE_TIME) {
1170 			/* XXX another gem from the Broadcom driver :( */
1171 			ap->state = ANEG_STATE_LINK_OK;
1172 		}
1173 		break;
1174 
1175 	case ANEG_STATE_LINK_OK:
1176 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1177 		ret = ANEG_DONE;
1178 		break;
1179 
1180 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1181 		/* ??? unimplemented */
1182 		break;
1183 
1184 	case ANEG_STATE_NEXT_PAGE_WAIT:
1185 		/* ??? unimplemented */
1186 		break;
1187 
1188 	default:
1189 		ret = ANEG_FAILED;
1190 		break;
1191 	};
1192 
1193 	return ret;
1194 }
1195 
/* Bring up the link on a TBI/SERDES (fiber) board.
 *
 * Puts the MAC into TBI port mode, optionally resets and
 * re-initializes the SERDES PHY, then runs the software 802.3z
 * auto-negotiation state machine (tg3_fiber_aneg_smachine) to
 * resolve flow control and link state.  Updates
 * tp->link_config.active_speed/duplex and tp->carrier_ok, and
 * prints a link report when anything changed.
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp)
{
	uint32_t orig_pause_cfg;
	uint16_t orig_active_speed;
	uint8_t orig_active_duplex;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can decide
	 * at the end whether anything changed and is worth reporting.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fiber always runs the MAC in full-duplex TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_carefully(MAC_MODE, tp->mac_mode);

	/* Reset when initting first time or we have a link. */
	if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
	    (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
		/* Set PLL lock range. */
		tg3_writephy(tp, 0x16, 0x8007);

		/* SW reset */
		tg3_writephy(tp, MII_BMCR, BMCR_RESET);

		/* Wait for reset to complete. */
		mdelay(5);

		/* Config mode; select PMA/Ch 1 regs. */
		tg3_writephy(tp, 0x10, 0x8411);

		/* Enable auto-lock and comdet, select txclk for tx. */
		tg3_writephy(tp, 0x11, 0x0a10);

		tg3_writephy(tp, 0x18, 0x00a0);
		tg3_writephy(tp, 0x16, 0x41ff);

		/* Assert and deassert POR. */
		tg3_writephy(tp, 0x13, 0x0400);
		udelay(40);
		tg3_writephy(tp, 0x13, 0x0000);

		tg3_writephy(tp, 0x11, 0x0a50);
		udelay(40);
		tg3_writephy(tp, 0x11, 0x0a10);

		/* Wait for signal to stabilize */
		mdelay(150);

		/* Deselect the channel register so we can read the PHYID
		 * later.
		 */
		tg3_writephy(tp, 0x10, 0x8011);
	}

	/* Disable link change interrupt.  */
	tw32_carefully(MAC_EVENT, 0);

	current_link_up = 0;
	if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
		if (!(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
			/* PCS is synced but flow control has not been
			 * negotiated yet: run the software 802.3z
			 * auto-negotiation state machine.
			 */
			struct tg3_fiber_aneginfo aninfo;
			int status = ANEG_FAILED;
			unsigned int tick;
			uint32_t tmp;

			memset(&aninfo, 0, sizeof(aninfo));
			aninfo.flags |= (MR_AN_ENABLE);

			tw32(MAC_TX_AUTO_NEG, 0);

			/* Drop to GMII port mode while sending config
			 * words, then back to the saved mac_mode with
			 * SEND_CONFIGS raised.
			 */
			tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
			tw32_carefully(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);

			tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);

			/* Step the state machine with a 1us tick for at
			 * most 195000 iterations (~195ms) until it
			 * reports success or failure.
			 */
			aninfo.state = ANEG_STATE_UNKNOWN;
			aninfo.cur_time = 0;
			tick = 0;
			while (++tick < 195000) {
				status = tg3_fiber_aneg_smachine(tp, &aninfo);
				if (status == ANEG_DONE ||
				    status == ANEG_FAILED)
					break;

				udelay(1);
			}

			tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
			tw32_carefully(MAC_MODE, tp->mac_mode);

			if (status == ANEG_DONE &&
			    (aninfo.flags &
			     (MR_AN_COMPLETE | MR_LINK_OK |
			      MR_LP_ADV_FULL_DUPLEX))) {
				uint32_t local_adv, remote_adv;

				/* Translate the negotiated pause bits
				 * into MII-advertisement form for
				 * tg3_setup_flow_control().
				 */
				local_adv = ADVERTISE_PAUSE_CAP;
				remote_adv = 0;
				if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
					remote_adv |= LPA_PAUSE_CAP;
				if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
					remote_adv |= LPA_PAUSE_ASYM;

				tg3_setup_flow_control(tp, local_adv, remote_adv);

				tp->tg3_flags |=
					TG3_FLAG_GOT_SERDES_FLOWCTL;
				current_link_up = 1;
			}
			/* Ack SYNC/CFG change events until they stop
			 * re-asserting (up to 60 * 20us).
			 */
			for (i = 0; i < 60; i++) {
				udelay(20);
				tw32_carefully(MAC_STATUS,
					(MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
				if ((tr32(MAC_STATUS) &
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED)) == 0)
					break;
			}
			/* Even if autoneg did not complete, treat a
			 * synced PCS as link up.
			 */
			if (current_link_up == 0 &&
			    (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
				current_link_up = 1;
			}
		} else {
			/* Forcing 1000FD link up. */
			current_link_up = 1;
		}
	}

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_carefully(MAC_MODE, tp->mac_mode);

	/* Clear the link-change bit in the status block while keeping
	 * the UPDATED bit set.
	 */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Final ack loop for SYNC/CFG change events (up to 100 * 20us). */
	for (i = 0; i < 100; i++) {
		udelay(20);
		tw32_carefully(MAC_STATUS,
			(MAC_STATUS_SYNC_CHANGED | MAC_STATUS_CFG_CHANGED));
		if ((tr32(MAC_STATUS) &
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	/* If the PCS lost sync along the way the link is not up. */
	if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
		current_link_up = 0;

	/* Fiber only ever runs 1000/full. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
	}

	/* Report if the carrier state flipped, or if it stayed the same
	 * but pause/speed/duplex differ from what we started with.
	 */
	if (current_link_up != tp->carrier_ok) {
		tp->carrier_ok = current_link_up;
		tg3_link_report(tp);
	} else {
		uint32_t now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	/* With no PCS sync, pulse LINK_POLARITY; the second write only
	 * happens after initial init is complete.
	 */
	if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
		tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
		if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
			tw32_carefully(MAC_MODE, tp->mac_mode);
		}
	}

	return 0;
}
1377 #else
1378 #define tg3_setup_fiber_phy(TP) (-EINVAL)
1379 #endif /* SUPPORT_FIBER_PHY */
1380 
1381 static int tg3_setup_phy(struct tg3 *tp)
1382 {
1383 	int err;
1384 
1385 	if (tp->phy_id == PHY_ID_SERDES) {
1386 		err = tg3_setup_fiber_phy(tp);
1387 	} else {
1388 		err = tg3_setup_copper_phy(tp);
1389 	}
1390 
1391 	if (tp->link_config.active_speed == SPEED_1000 &&
1392 	    tp->link_config.active_duplex == DUPLEX_HALF)
1393 		tw32(MAC_TX_LENGTHS,
1394 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1395 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1396 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1397 	else
1398 		tw32(MAC_TX_LENGTHS,
1399 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1400 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1401 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1402 
1403 	return err;
1404 }
1405 
1406 
1407 #define MAX_WAIT_CNT 1000
1408 
1409 /* To stop a block, clear the enable bit and poll till it
1410  * clears.
1411  */
1412 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, uint32_t enable_bit)
1413 {
1414 	unsigned int i;
1415 	uint32_t val;
1416 
1417 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1418 		switch(ofs) {
1419 		case RCVLSC_MODE:
1420 		case DMAC_MODE:
1421 		case MBFREE_MODE:
1422 		case BUFMGR_MODE:
1423 		case MEMARB_MODE:
1424 			/* We can't enable/disable these bits of the
1425 			 * 5705, just say success.
1426 			 */
1427 			return 0;
1428 		default:
1429 			break;
1430 		}
1431 	}
1432 	val = tr32(ofs);
1433 	val &= ~enable_bit;
1434 	tw32(ofs, val);
1435 	tr32(ofs);
1436 
1437 	for (i = 0; i < MAX_WAIT_CNT; i++) {
1438 		udelay(100);
1439 		val = tr32(ofs);
1440 		if ((val & enable_bit) == 0)
1441 			break;
1442 	}
1443 
1444 	if (i == MAX_WAIT_CNT) {
1445 		printf("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
1446 		       ofs, enable_bit);
1447 		return -ENODEV;
1448 	}
1449 
1450 	return 0;
1451 }
1452 
/* Quiesce the whole chip: disable interrupts and the receiver,
 * then stop every receive, send and DMA engine in dependency
 * order, reset the FTQs, and finally clear the host status block.
 *
 * Returns 0 on success or the (OR-accumulated) negative result of
 * the tg3_stop_block() calls; bails out early if the TX MAC will
 * not disable.
 */
static int tg3_abort_hw(struct tg3 *tp)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Shut off the receiver first so no new frames enter. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_carefully(MAC_RX_MODE, tp->rx_mode);

	/* Stop the receive-path engines, then the send-path engines.
	 * Errors are OR-ed together; any failure aborts the sequence.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE,   RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE,   RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE,   RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE,  RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE,   RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE,    RCVCC_MODE_ENABLE);

	err |= tg3_stop_block(tp, SNDBDS_MODE,   SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE,   SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE,    RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE,   SNDBDC_MODE_ENABLE);
	if (err)
		goto out;

	/* Disable the transmitter in both the MAC mode and TX mode
	 * registers, then poll until TX_MODE_ENABLE reads back clear.
	 */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_carefully(MAC_MODE, tp->mac_mode);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_carefully(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printf("tg3_abort_hw timed out TX_MODE_ENABLE will not clear MAC_TX_MODE=%x\n",
			tr32(MAC_TX_MODE));
		return -ENODEV;
	}

	/* Stop the host coalescing, write-DMA and mbuf-free engines. */
	err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE,  WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	/* Pulse a reset through all flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	/* The buffer manager and memory arbiter go down last. */
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
	if (err)
		goto out;

	/* Wipe the host status block now that nothing writes to it. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

out:
	return err;
}
1512 
/* Perform a core-clock reset of the chip and restore enough PCI
 * state afterwards for register access to work again.  Statement
 * order here is critical: after the reset is triggered only PCI
 * config space is usable until indirect access is re-enabled.
 */
static void tg3_chip_reset(struct tg3 *tp)
{
	uint32_t val;

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
		/* Force NVRAM to settle.
		 * This deals with a chip bug which can result in EEPROM
		 * corruption.
		 */
		if (tp->tg3_flags & TG3_FLAG_NVRAM) {
			int i;

			/* Acquire the NVRAM arbitration grant (or give
			 * up after ~1s) before resetting.
			 */
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 100000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(10);
			}
		}
	}
	/* In Etherboot we don't need to worry about the 5701
	 * REG_WRITE_BUG because we do all register writes indirectly.
	 */

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably.  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the PCI config registers saved at probe time. */
	pci_restore_state(tp->pdev, tp->pci_cfg_state);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	/* The memory arbiter must run before SRAM/mailboxes work. */
	tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* Mini-PCI 5705 boards need CLKRUN forced back on after reset. */
	if (((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0) &&
		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
		tp->pci_clock_ctrl |=
			(CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE);
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
1581 
1582 static void tg3_stop_fw(struct tg3 *tp)
1583 {
1584 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
1585 		uint32_t val;
1586 		int i;
1587 
1588 		tg3_write_mem(NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1589 		val = tr32(GRC_RX_CPU_EVENT);
1590 		val |= (1 << 14);
1591 		tw32(GRC_RX_CPU_EVENT, val);
1592 
1593 		/* Wait for RX cpu to ACK the event.  */
1594 		for (i = 0; i < 100; i++) {
1595 			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
1596 				break;
1597 			udelay(1);
1598 		}
1599 	}
1600 }
1601 
1602 static int tg3_restart_fw(struct tg3 *tp, uint32_t state)
1603 {
1604 	uint32_t val;
1605 	int i;
1606 
1607 	tg3_write_mem(NIC_SRAM_FIRMWARE_MBOX,
1608 		NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1609 	/* Wait for firmware initialization to complete. */
1610 	for (i = 0; i < 100000; i++) {
1611 		tg3_read_mem(NIC_SRAM_FIRMWARE_MBOX, &val);
1612 		if (val == (uint32_t) ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1613 			break;
1614 		udelay(10);
1615 	}
1616 	if (i >= 100000 &&
1617 		    !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
1618 		printf("Firmware will not restart magic=%x\n",
1619 			val);
1620 		return -ENODEV;
1621 	}
1622 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1623 		state = DRV_STATE_SUSPEND;
1624 	}
1625 	tg3_write_mem(NIC_SRAM_FW_DRV_STATE_MBOX, state);
1626 	return 0;
1627 }
1628 
/* Fully halt the chip: pause any ASF firmware, stop all hardware
 * engines, reset the core, and restart the boot firmware in the
 * driver-unloaded state.  Returns tg3_restart_fw()'s result.
 */
static int tg3_halt(struct tg3 *tp)
{
	tg3_stop_fw(tp);
	tg3_abort_hw(tp);
	tg3_chip_reset(tp);
	return tg3_restart_fw(tp, DRV_STATE_UNLOAD);
}
1636 
1637 static void __tg3_set_mac_addr(struct tg3 *tp)
1638 {
1639 	uint32_t addr_high, addr_low;
1640 	int i;
1641 
1642 	addr_high = ((tp->nic->node_addr[0] << 8) |
1643 		     tp->nic->node_addr[1]);
1644 	addr_low = ((tp->nic->node_addr[2] << 24) |
1645 		    (tp->nic->node_addr[3] << 16) |
1646 		    (tp->nic->node_addr[4] <<  8) |
1647 		    (tp->nic->node_addr[5] <<  0));
1648 	for (i = 0; i < 4; i++) {
1649 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1650 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
1651 	}
1652 
1653 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
1654 		(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
1655 		(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)) {
1656 		for(i = 0; i < 12; i++) {
1657 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1658 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
1659 		}
1660 	}
1661 	addr_high = (tp->nic->node_addr[0] +
1662 		     tp->nic->node_addr[1] +
1663 		     tp->nic->node_addr[2] +
1664 		     tp->nic->node_addr[3] +
1665 		     tp->nic->node_addr[4] +
1666 		     tp->nic->node_addr[5]) &
1667 		TX_BACKOFF_SEED_MASK;
1668 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
1669 }
1670 
1671 static void tg3_set_bdinfo(struct tg3 *tp, uint32_t bdinfo_addr,
1672 			   dma_addr_t mapping, uint32_t maxlen_flags,
1673 			   uint32_t nic_addr)
1674 {
1675 	tg3_write_mem((bdinfo_addr +
1676 		       TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1677 		      ((uint64_t) mapping >> 32));
1678 	tg3_write_mem((bdinfo_addr +
1679 		       TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1680 		      ((uint64_t) mapping & 0xffffffff));
1681 	tg3_write_mem((bdinfo_addr +
1682 		       TG3_BDINFO_MAXLEN_FLAGS),
1683 		       maxlen_flags);
1684 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1685 		tg3_write_mem((bdinfo_addr + TG3_BDINFO_NIC_ADDR), nic_addr);
1686 	}
1687 }
1688 
1689 
/* Reset all driver ring/status state to a clean slate and build
 * the invariant part of the standard receive ring: descriptor
 * length, flags, opaque index, and the address of the receive
 * buffer backing each entry.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	unsigned i;

	/* Zero out the tg3 variables */
	memset(&tg3_bss, 0, sizeof(tg3_bss));
	tp->rx_std    = &tg3_bss.rx_std[0];
	tp->rx_rcb    = &tg3_bss.rx_rcb[0];
	tp->tx_ring   = &tg3_bss.tx_ring[0];
	tp->hw_status = &tg3_bss.hw_status;
	tp->hw_stats  = &tg3_bss.hw_stats;
	tp->mac_mode  = 0;


	/* Initialize tx/rx rings for packet processing.
	 *
	 * The chip has been shut down and the driver detached from
	 * the networking, so no interrupts or new tx packets will
	 * end up in the driver.
	 */

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* RX_PKT_BUF_SZ is 1536 + 2 + 64, so the usable
		 * descriptor length is exactly 1536 bytes.
		 */
		rxd->idx_len = (RX_PKT_BUF_SZ - 2 - 64)	<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT));

		/* Note where the receive buffer for the ring is placed */
		/* Only TG3_DEF_RX_RING_PENDING real buffers exist, so
		 * ring entries share them modulo that count; the +2
		 * byte offset presumably aligns the IP header — TODO
		 * confirm against the tg3_bss buffer layout.
		 */
		rxd->addr_hi = 0;
		rxd->addr_lo = virt_to_bus(
			&tg3_bss.rx_bufs[i%TG3_DEF_RX_RING_PENDING][2]);
	}
}
1729 
/* Write a table of (register, value) pairs to the chip.  TABLE
 * must be a uint32_t array whose even-indexed entries are register
 * offsets and odd-indexed entries the values stored there.
 */
#define TG3_WRITE_SETTINGS(TABLE) \
do { \
	const uint32_t *_table, *_end; \
	_table = TABLE; \
	_end = _table + sizeof(TABLE)/sizeof(TABLE[0]);  \
	for(; _table < _end; _table += 2) { \
		tw32(_table[0], _table[1]); \
	} \
} while(0)
1739 
1740 
1741 /* initialize/reset the tg3 */
1742 static int tg3_setup_hw(struct tg3 *tp)
1743 {
1744 	uint32_t val, rdmac_mode;
1745 	int i, err, limit;
1746 
1747 	/* Simply don't support setups with extremly buggy firmware in etherboot */
1748 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
1749 		printf("Error 5701_A0 firmware bug detected\n");
1750 		return -EINVAL;
1751 	}
1752 
1753 	tg3_disable_ints(tp);
1754 
1755 	/* Originally this was all in tg3_init_hw */
1756 
1757 	/* Force the chip into D0. */
1758 	tg3_set_power_state_0(tp);
1759 
1760 	tg3_switch_clocks(tp);
1761 
1762 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
1763 
1764 
1765 	/* Originally this was all in tg3_reset_hw */
1766 
1767 	tg3_stop_fw(tp);
1768 
1769 	/* No need to call tg3_abort_hw here, it is called before tg3_setup_hw. */
1770 
1771 	tg3_chip_reset(tp);
1772 
1773 	tw32(GRC_MODE, tp->grc_mode);  /* Redundant? */
1774 
1775 	err = tg3_restart_fw(tp, DRV_STATE_START);
1776 	if (err)
1777 		return err;
1778 
1779 	if (tp->phy_id == PHY_ID_SERDES) {
1780 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
1781 	}
1782 	tw32_carefully(MAC_MODE, tp->mac_mode);
1783 
1784 
1785 	/* This works around an issue with Athlon chipsets on
1786 	 * B3 tigon3 silicon.  This bit has no effect on any
1787 	 * other revision.
1788 	 */
1789 	tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1790 	tw32_carefully(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1791 
1792 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1793 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
1794 		val = tr32(TG3PCI_PCISTATE);
1795 		val |= PCISTATE_RETRY_SAME_DMA;
1796 		tw32(TG3PCI_PCISTATE, val);
1797 	}
1798 
1799 	/* Descriptor ring init may make accesses to the
1800 	 * NIC SRAM area to setup the TX descriptors, so we
1801 	 * can only do this after the hardware has been
1802 	 * successfully reset.
1803 	 */
1804 	tg3_init_rings(tp);
1805 
1806 	/* Clear statistics/status block in chip */
1807 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1808 		for (i = NIC_SRAM_STATS_BLK;
1809 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
1810 		     i += sizeof(uint32_t)) {
1811 			tg3_write_mem(i, 0);
1812 			udelay(40);
1813 		}
1814 	}
1815 
1816 	/* This value is determined during the probe time DMA
1817 	 * engine test, tg3_setup_dma.
1818 	 */
1819 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
1820 
1821 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
1822 			  GRC_MODE_4X_NIC_SEND_RINGS |
1823 			  GRC_MODE_NO_TX_PHDR_CSUM |
1824 			  GRC_MODE_NO_RX_PHDR_CSUM);
1825 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
1826 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1827 	tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
1828 
1829 	tw32(GRC_MODE,
1830 		tp->grc_mode |
1831 		(GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
1832 
1833 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
1834 	tw32(GRC_MISC_CFG,
1835 	     (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
1836 
1837 	/* Initialize MBUF/DESC pool. */
1838 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1839 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
1840 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
1841 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
1842 		else
1843 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
1844 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
1845 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
1846 	}
1847 	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
1848 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
1849 		     tp->bufmgr_config.mbuf_read_dma_low_water);
1850 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
1851 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
1852 		tw32(BUFMGR_MB_HIGH_WATER,
1853 		     tp->bufmgr_config.mbuf_high_water);
1854 	} else {
1855 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
1856 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
1857 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
1858 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
1859 		tw32(BUFMGR_MB_HIGH_WATER,
1860 		     tp->bufmgr_config.mbuf_high_water_jumbo);
1861 	}
1862 	tw32(BUFMGR_DMA_LOW_WATER,
1863 	     tp->bufmgr_config.dma_low_water);
1864 	tw32(BUFMGR_DMA_HIGH_WATER,
1865 	     tp->bufmgr_config.dma_high_water);
1866 
1867 	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
1868 	for (i = 0; i < 2000; i++) {
1869 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
1870 			break;
1871 		udelay(10);
1872 	}
1873 	if (i >= 2000) {
1874 		printf("tg3_setup_hw cannot enable BUFMGR\n");
1875 		return -ENODEV;
1876 	}
1877 
1878 	tw32(FTQ_RESET, 0xffffffff);
1879 	tw32(FTQ_RESET, 0x00000000);
1880 	for (i = 0; i < 2000; i++) {
1881 		if (tr32(FTQ_RESET) == 0x00000000)
1882 			break;
1883 		udelay(10);
1884 	}
1885 	if (i >= 2000) {
1886 		printf("tg3_setup_hw cannot reset FTQ\n");
1887 		return -ENODEV;
1888 	}
1889 
1890 	/* Initialize TG3_BDINFO's at:
1891 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
1892 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
1893 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
1894 	 *
1895 	 * like so:
1896 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
1897 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
1898 	 *                              ring attribute flags
1899 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
1900 	 *
1901 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
1902 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
1903 	 *
1904 	 * ??? No space allocated for mini receive ring? :(
1905 	 *
1906 	 * The size of each ring is fixed in the firmware, but the location is
1907 	 * configurable.
1908 	 */
1909 	{
1910 		static const uint32_t table_all[] = {
1911 			/* Setup replenish thresholds. */
1912 			RCVBDI_STD_THRESH, TG3_DEF_RX_RING_PENDING / 8,
1913 
1914 			/* Etherboot lives below 4GB */
1915 			RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
1916 			RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, NIC_SRAM_RX_BUFFER_DESC,
1917 		};
1918 		static const uint32_t table_not_5705[] = {
1919 			/* Buffer maximum length */
1920 			RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
1921 
1922 			/* Disable the mini frame rx ring */
1923 			RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,	BDINFO_FLAGS_DISABLED,
1924 
1925 			/* Disable the jumbo frame rx ring */
1926 			RCVBDI_JUMBO_THRESH, 0,
1927 			RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED,
1928 
1929 
1930 		};
1931 		TG3_WRITE_SETTINGS(table_all);
1932 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
1933 			virt_to_bus(tp->rx_std));
1934 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1935 			tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
1936 				RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
1937 		} else {
1938 			TG3_WRITE_SETTINGS(table_not_5705);
1939 		}
1940 	}
1941 
1942 
1943 	/* There is only one send ring on 5705, no need to explicitly
1944 	 * disable the others.
1945 	 */
1946 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1947 		/* Clear out send RCB ring in SRAM. */
1948 		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
1949 			tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED);
1950 	}
1951 
1952 	tp->tx_prod = 0;
1953 	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
1954 	tw32_mailbox2(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
1955 
1956 	tg3_set_bdinfo(tp,
1957 		NIC_SRAM_SEND_RCB,
1958 		virt_to_bus(tp->tx_ring),
1959 		(TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
1960 		NIC_SRAM_TX_BUFFER_DESC);
1961 
1962 	/* There is only one receive return ring on 5705, no need to explicitly
1963 	 * disable the others.
1964 	 */
1965 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
1966 		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK; i += TG3_BDINFO_SIZE) {
1967 			tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS,
1968 				BDINFO_FLAGS_DISABLED);
1969 		}
1970 	}
1971 
1972 	tp->rx_rcb_ptr = 0;
1973 	tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
1974 
1975 	tg3_set_bdinfo(tp,
1976 		NIC_SRAM_RCV_RET_RCB,
1977 		virt_to_bus(tp->rx_rcb),
1978 		(TG3_RX_RCB_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
1979 		0);
1980 
1981 	tp->rx_std_ptr = TG3_DEF_RX_RING_PENDING;
1982 	tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
1983 		     tp->rx_std_ptr);
1984 
1985 	tw32_mailbox2(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 0);
1986 
1987 	/* Initialize MAC address and backoff seed. */
1988 	__tg3_set_mac_addr(tp);
1989 
1990 	/* Calculate RDMAC_MODE setting early, we need it to determine
1991 	 * the RCVLPC_STATE_ENABLE mask.
1992 	 */
1993 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
1994 		RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
1995 		RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
1996 		RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
1997 		RDMAC_MODE_LNGREAD_ENAB);
1998 	if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
1999 		rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
2000 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2001 		if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2002 			if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2003 				!(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2004 				rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2005 			}
2006 		}
2007 	}
2008 
2009 	/* Setup host coalescing engine. */
2010 	tw32(HOSTCC_MODE, 0);
2011 	for (i = 0; i < 2000; i++) {
2012 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2013 			break;
2014 		udelay(10);
2015 	}
2016 
2017 	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2018 		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2019 	tw32_carefully(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2020 
2021 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
2022 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
2023 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2024 				       GRC_LCLCTRL_GPIO_OUTPUT1);
2025 	tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2026 
2027 	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
2028 	tr32(MAILBOX_INTERRUPT_0);
2029 
2030 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2031 		tw32_carefully(DMAC_MODE, DMAC_MODE_ENABLE);
2032 	}
2033 
2034 	val = (	WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2035 		WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2036 		WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2037 		WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2038 		WDMAC_MODE_LNGREAD_ENAB);
2039 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
2040 		((tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0) &&
2041 		!(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
2042 		val |= WDMAC_MODE_RX_ACCEL;
2043 	}
2044 	tw32_carefully(WDMAC_MODE, val);
2045 
2046 	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
2047 		val = tr32(TG3PCI_X_CAPS);
2048 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2049 			val &= PCIX_CAPS_BURST_MASK;
2050 			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2051 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2052 			val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
2053 			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
2054 			if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
2055 				val |= (tp->split_mode_max_reqs <<
2056 					PCIX_CAPS_SPLIT_SHIFT);
2057 		}
2058 		tw32(TG3PCI_X_CAPS, val);
2059 	}
2060 
2061 	tw32_carefully(RDMAC_MODE, rdmac_mode);
2062 	{
2063 		static const uint32_t table_all[] = {
2064 			/* MTU + ethernet header + FCS + optional VLAN tag */
2065 			MAC_RX_MTU_SIZE, ETH_MAX_MTU + ETH_HLEN + 8,
2066 
2067 			/* The slot time is changed by tg3_setup_phy if we
2068 			 * run at gigabit with half duplex.
2069 			 */
2070 			MAC_TX_LENGTHS,
2071 			(2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2072 			(6 << TX_LENGTHS_IPG_SHIFT) |
2073 			(32 << TX_LENGTHS_SLOT_TIME_SHIFT),
2074 
2075 			/* Receive rules. */
2076 			MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS,
2077 			RCVLPC_CONFIG, 0x0181,
2078 
2079 			/* Receive/send statistics. */
2080 			RCVLPC_STATS_ENABLE, 0xffffff,
2081 			RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE,
2082 			SNDDATAI_STATSENAB, 0xffffff,
2083 			SNDDATAI_STATSCTRL, (SNDDATAI_SCTRL_ENABLE |SNDDATAI_SCTRL_FASTUPD),
2084 
2085 			/* Host coalescing engine */
2086 			HOSTCC_RXCOL_TICKS, 0,
2087 			HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS,
2088 			HOSTCC_RXMAX_FRAMES, 1,
2089 			HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES,
2090 			HOSTCC_RXCOAL_MAXF_INT, 1,
2091 			HOSTCC_TXCOAL_MAXF_INT, 0,
2092 
2093 			/* Status/statistics block address. */
2094 			/* Etherboot lives below 4GB, so HIGH == 0 */
2095 			HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2096 
2097 			/* No need to enable 32byte coalesce mode. */
2098 			HOSTCC_MODE, HOSTCC_MODE_ENABLE | 0,
2099 
2100 			RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE,
2101 			RCVLPC_MODE, RCVLPC_MODE_ENABLE,
2102 
2103 			RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE,
2104 
2105 			SNDDATAC_MODE, SNDDATAC_MODE_ENABLE,
2106 			SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE,
2107 			RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB,
2108 			RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ,
2109 			SNDDATAI_MODE, SNDDATAI_MODE_ENABLE,
2110 			SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE,
2111 			SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE,
2112 
2113 			/* Accept all multicast frames. */
2114 			MAC_HASH_REG_0, 0xffffffff,
2115 			MAC_HASH_REG_1, 0xffffffff,
2116 			MAC_HASH_REG_2, 0xffffffff,
2117 			MAC_HASH_REG_3, 0xffffffff,
2118 		};
2119 		static const uint32_t table_not_5705[] = {
2120 			/* Host coalescing engine */
2121 			HOSTCC_RXCOAL_TICK_INT, 0,
2122 			HOSTCC_TXCOAL_TICK_INT, 0,
2123 
2124 			/* Status/statistics block address. */
2125 			/* Etherboot lives below 4GB, so HIGH == 0 */
2126 			HOSTCC_STAT_COAL_TICKS, DEFAULT_STAT_COAL_TICKS,
2127 			HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
2128 			HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK,
2129 			HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK,
2130 
2131 			RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE,
2132 
2133 			MBFREE_MODE, MBFREE_MODE_ENABLE,
2134 		};
2135 		TG3_WRITE_SETTINGS(table_all);
2136 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2137 			virt_to_bus(tp->hw_stats));
2138 		tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
2139 			virt_to_bus(tp->hw_status));
2140 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2141 			TG3_WRITE_SETTINGS(table_not_5705);
2142 		}
2143 	}
2144 
2145 	tp->tx_mode = TX_MODE_ENABLE;
2146 	tw32_carefully(MAC_TX_MODE, tp->tx_mode);
2147 
2148 	tp->rx_mode = RX_MODE_ENABLE;
2149 	tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2150 
2151 	tp->mi_mode = MAC_MI_MODE_BASE;
2152 	tw32_carefully(MAC_MI_MODE, tp->mi_mode);
2153 
2154 	tw32(MAC_LED_CTRL, 0);
2155 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2156 	if (tp->phy_id == PHY_ID_SERDES) {
2157 		tw32_carefully(MAC_RX_MODE, RX_MODE_RESET);
2158 	}
2159 	tp->rx_mode |= RX_MODE_KEEP_VLAN_TAG; /* drop tagged vlan packets */
2160 	tw32_carefully(MAC_RX_MODE, tp->rx_mode);
2161 
2162 	if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2163 		tw32(MAC_SERDES_CFG, 0x616000);
2164 
2165 	/* Prevent chip from dropping frames when flow control
2166 	 * is enabled.
2167 	 */
2168 	tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
2169 	tr32(MAC_LOW_WMARK_MAX_RX_FRAME);
2170 
2171 	err = tg3_setup_phy(tp);
2172 
2173 	/* Ignore CRC stats */
2174 
2175 	/* Initialize receive rules. */
2176 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
2177 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2178 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
2179 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2180 
2181 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
2182 		limit = 8;
2183 	else
2184 		limit = 16;
2185 	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
2186 		limit -= 4;
2187 	switch (limit) {
2188 	case 16:	tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
2189 	case 15:	tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
2190 	case 14:	tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
2191 	case 13:	tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
2192 	case 12:	tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
2193 	case 11:	tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
2194 	case 10:	tw32(MAC_RCV_RULE_9,  0);  tw32(MAC_RCV_VALUE_9,  0);
2195 	case 9:		tw32(MAC_RCV_RULE_8,  0);  tw32(MAC_RCV_VALUE_8,  0);
2196 	case 8:		tw32(MAC_RCV_RULE_7,  0);  tw32(MAC_RCV_VALUE_7,  0);
2197 	case 7:		tw32(MAC_RCV_RULE_6,  0);  tw32(MAC_RCV_VALUE_6,  0);
2198 	case 6:		tw32(MAC_RCV_RULE_5,  0);  tw32(MAC_RCV_VALUE_5,  0);
2199 	case 5:		tw32(MAC_RCV_RULE_4,  0);  tw32(MAC_RCV_VALUE_4,  0);
2200 	case 4:		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
2201 	case 3:		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
2202 	case 2:
2203 	case 1:
2204 	default:
2205 		break;
2206 	};
2207 
2208 	return err;
2209 }
2210 
2211 
2212 
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Reset the EEPROM access state machine, enable auto-SEEPROM access,
 * and detect whether this chip has the NVRAM interface (anything
 * newer than 5700/5701), setting TG3_FLAG_NVRAM and, for buffered
 * flash parts, TG3_FLAG_NVRAM_BUFFERED so that reads can translate
 * offsets appropriately.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM FSM and program the default clock period. */
	tw32(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Give the state machine reset time to take effect. */
	mdelay(1);

	/* Enable seeprom accesses. */
	tw32_carefully(GRC_LOCAL_CTRL,
		tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		uint32_t nvcfg1 = tr32(NVRAM_CFG1);

		tp->tg3_flags |= TG3_FLAG_NVRAM;
		if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
			/* Flash interface: note buffered mode so
			 * tg3_nvram_read can do page translation.
			 */
			if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		} else {
			/* Not flash: make sure compat bypass is off. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
		}

	} else {
		/* 5700/5701: legacy seeprom access only. */
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
	}
}
2244 
2245 
/* Read one 32-bit word from the serial EEPROM (legacy access path
 * used when the chip has no NVRAM interface).  'offset' must be
 * 32-bit aligned and within the EEPROM address range.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY if the
 * access never completes.
 */
static int tg3_nvram_read_using_eeprom(
	struct tg3 *tp __unused, uint32_t offset, uint32_t *val)
{
	uint32_t tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
		(offset % 4) != 0) {
		return -EINVAL;
	}

	/* Preserve the clock-period bits; clear address, device id
	 * and the read bit before starting a new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Kick off a read of 'offset' from EEPROM device 0. */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to 10000 * 100us = 1 second. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE)) {
		return -EBUSY;
	}

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
2281 
2282 static int tg3_nvram_read(struct tg3 *tp, uint32_t offset, uint32_t *val)
2283 {
2284 	int i, saw_done_clear;
2285 
2286 	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2287 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2288 
2289 	if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
2290 		offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
2291 			  NVRAM_BUFFERED_PAGE_POS) +
2292 			(offset % NVRAM_BUFFERED_PAGE_SIZE);
2293 
2294 	if (offset > NVRAM_ADDR_MSK)
2295 		return -EINVAL;
2296 
2297 	tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2298 	for (i = 0; i < 1000; i++) {
2299 		if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2300 			break;
2301 		udelay(20);
2302 	}
2303 
2304 	tw32(NVRAM_ADDR, offset);
2305 	tw32(NVRAM_CMD,
2306 	     NVRAM_CMD_RD | NVRAM_CMD_GO |
2307 	     NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2308 
2309 	/* Wait for done bit to clear then set again. */
2310 	saw_done_clear = 0;
2311 	for (i = 0; i < 1000; i++) {
2312 		udelay(10);
2313 		if (!saw_done_clear &&
2314 		    !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2315 			saw_done_clear = 1;
2316 		else if (saw_done_clear &&
2317 			 (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
2318 			break;
2319 	}
2320 	if (i >= 1000) {
2321 		tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2322 		return -EBUSY;
2323 	}
2324 
2325 	*val = bswap_32(tr32(NVRAM_RDDATA));
2326 	tw32(NVRAM_SWARB, 0x20);
2327 
2328 	return 0;
2329 }
2330 
/* Maps a PCI subsystem (vendor, device) id pair to the PHY id
 * expected on that board; consulted by tg3_phy_probe when the PHY
 * cannot be identified directly over MII.
 */
struct subsys_tbl_ent {
	uint16_t subsys_vendor, subsys_devid;
	uint32_t phy_id;
};
2335 
/* Board-specific PHY id table, keyed by PCI subsystem vendor/device
 * id.  Entries with no known PHY are left commented out.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ 0x14e4, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ 0x14e4, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ 0x14e4, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ 0x14e4, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
	{ 0x14e4, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ 0x14e4, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ 0x14e4, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
	{ 0x14e4, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ 0x14e4, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ 0x14e4, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
	{ 0x14e4, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	/* { PCI_VENDOR_ID_3COM, 0x1002, PHY_ID_XXX },     3C996CT */
	/* { PCI_VENDOR_ID_3COM, 0x1003, PHY_ID_XXX },     3C997T */
	{ PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
	/* { PCI_VENDOR_ID_3COM, 0x1005, PHY_ID_XXX },     3C997SZ */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }  /* NC7780_2 */
};
2373 
/* Identify the attached PHY and record it in tp->phy_id.
 *
 * Precedence: the id read from the PHY itself over MII wins; failing
 * that, the subsystem-id lookup table; failing that, the id stored in
 * the NIC SRAM config area written by firmware.  Also resets the PHY,
 * applies chip-revision specific workarounds, selects the LED mode,
 * and sets the default advertising mask for serdes and 10/100-only
 * devices.
 *
 * Returns 0 on success, -ENODEV if no usable PHY id could be
 * determined, or a PHY access error code.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	uint32_t eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
	uint32_t hw_phy_id, hw_phy_id_masked;
	enum phy_led_mode eeprom_led_mode;
	uint32_t val;
	unsigned i;
	int eeprom_signature_found, err;

	tp->phy_id = PHY_ID_INVALID;

	/* Seed phy_id from the board table, if this board is known. */
	for (i = 0; i < sizeof(subsys_id_to_phy_id)/sizeof(subsys_id_to_phy_id[0]); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor == tp->subsystem_vendor) &&
			(subsys_id_to_phy_id[i].subsys_devid == tp->subsystem_device)) {
			tp->phy_id = subsys_id_to_phy_id[i].phy_id;
			break;
		}
	}

	/* Parse the firmware-written config area in NIC SRAM, if its
	 * signature checks out.
	 */
	eeprom_phy_id = PHY_ID_INVALID;
	eeprom_led_mode = led_mode_auto;
	eeprom_signature_found = 0;
	tg3_read_mem(NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		uint32_t nic_cfg;

		tg3_read_mem(NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		eeprom_signature_found = 1;

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
			eeprom_phy_id = PHY_ID_SERDES;
		} else {
			uint32_t nic_phy_id;

			tg3_read_mem(NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
			if (nic_phy_id != 0) {
				uint32_t id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
				uint32_t id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

				/* Pack into the same internal layout as
				 * hw_phy_id is assembled below.
				 */
				eeprom_phy_id  = (id1 >> 16) << 10;
				eeprom_phy_id |= (id2 & 0xfc00) << 16;
				eeprom_phy_id |= (id2 & 0x03ff) <<  0;
			}
		}

		switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
		case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
			eeprom_led_mode = led_mode_three_link;
			break;

		case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
			eeprom_led_mode = led_mode_link10;
			break;

		default:
			eeprom_led_mode = led_mode_auto;
			break;
		};
		/* Record the eeprom write-protect setting on chips that
		 * honor it.
		 */
		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
			(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
			(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
			(nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
	}

	/* Now read the physical PHY_ID from the chip and verify
	 * that it is sane.  If it doesn't look good, we fall back
	 * to either the hard-coded table based PHY_ID and failing
	 * that the value found in the eeprom area.
	 */
	err  = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
	err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

	hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
	hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
	hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

	hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
	} else {
		/* phy_id currently holds the value found in the
		 * subsys_id_to_phy_id[] table or PHY_ID_INVALID
		 * if a match was not found there.
		 */
		if (tp->phy_id == PHY_ID_INVALID) {
			if (!eeprom_signature_found ||
			    !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
				return -ENODEV;
			tp->phy_id = eeprom_phy_id;
		}
	}

	err = tg3_phy_reset(tp);
	if (err)
		return err;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		uint32_t mii_tg3_ctrl;

		/* These chips, when reset, only advertise 10Mb
		 * capabilities.  Fix that.
		 */
		err  = tg3_writephy(tp, MII_ADVERTISE,
				    (ADVERTISE_CSMA |
				     ADVERTISE_PAUSE_CAP |
				     ADVERTISE_10HALF |
				     ADVERTISE_10FULL |
				     ADVERTISE_100HALF |
				     ADVERTISE_100FULL));
		mii_tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				MII_TG3_CTRL_ADV_1000_FULL |
				MII_TG3_CTRL_AS_MASTER |
				MII_TG3_CTRL_ENABLE_AS_MASTER);
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mii_tg3_ctrl = 0;

		err |= tg3_writephy(tp, MII_TG3_CTRL, mii_tg3_ctrl);
		err |= tg3_writephy(tp, MII_BMCR,
				    (BMCR_ANRESTART | BMCR_ANENABLE));
	}

	/* 5703-specific DSP workaround (magic values from the vendor
	 * driver).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writedsp(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
	}

	/* NOTE(review): the double write to register 0x1c looks like a
	 * deliberate vendor workaround for the 5704 -- confirm against
	 * the reference driver before changing.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}

	/* Enable Ethernet@WireSpeed */
	tg3_phy_set_wirespeed(tp);

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Determine the PHY led mode.
	 * Be careful if this gets set wrong it can result in an inability to
	 * establish a link.
	 */
	if (tp->phy_id == PHY_ID_SERDES) {
		tp->led_mode = led_mode_three_link;
	}
	else if (tp->subsystem_vendor == PCI_VENDOR_ID_DELL) {
		tp->led_mode = led_mode_link10;
	} else {
		tp->led_mode = led_mode_three_link;
		if (eeprom_signature_found &&
		    eeprom_led_mode != led_mode_auto)
			tp->led_mode = eeprom_led_mode;
	}

	/* Serdes links only do gigabit over fibre. */
	if (tp->phy_id == PHY_ID_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
2554 
2555 #if SUPPORT_PARTNO_STR
2556 static void tg3_read_partno(struct tg3 *tp)
2557 {
2558 	unsigned char vpd_data[256];
2559 	int i;
2560 
2561 	for (i = 0; i < 256; i += 4) {
2562 		uint32_t tmp;
2563 
2564 		if (tg3_nvram_read(tp, 0x100 + i, &tmp))
2565 			goto out_not_found;
2566 
2567 		vpd_data[i + 0] = ((tmp >>  0) & 0xff);
2568 		vpd_data[i + 1] = ((tmp >>  8) & 0xff);
2569 		vpd_data[i + 2] = ((tmp >> 16) & 0xff);
2570 		vpd_data[i + 3] = ((tmp >> 24) & 0xff);
2571 	}
2572 
2573 	/* Now parse and find the part number. */
2574 	for (i = 0; i < 256; ) {
2575 		unsigned char val = vpd_data[i];
2576 		int block_end;
2577 
2578 		if (val == 0x82 || val == 0x91) {
2579 			i = (i + 3 +
2580 			     (vpd_data[i + 1] +
2581 			      (vpd_data[i + 2] << 8)));
2582 			continue;
2583 		}
2584 
2585 		if (val != 0x90)
2586 			goto out_not_found;
2587 
2588 		block_end = (i + 3 +
2589 			     (vpd_data[i + 1] +
2590 			      (vpd_data[i + 2] << 8)));
2591 		i += 3;
2592 		while (i < block_end) {
2593 			if (vpd_data[i + 0] == 'P' &&
2594 			    vpd_data[i + 1] == 'N') {
2595 				int partno_len = vpd_data[i + 2];
2596 
2597 				if (partno_len > 24)
2598 					goto out_not_found;
2599 
2600 				memcpy(tp->board_part_number,
2601 				       &vpd_data[i + 3],
2602 				       partno_len);
2603 
2604 				/* Success. */
2605 				return;
2606 			}
2607 		}
2608 
2609 		/* Part number not found. */
2610 		goto out_not_found;
2611 	}
2612 
2613 out_not_found:
2614 	memcpy(tp->board_part_number, "none", sizeof("none"));
2615 }
2616 #else
2617 #define tg3_read_partno(TP) ((TP)->board_part_number[0] = '\0')
2618 #endif
2619 
/* Discover the chip revision and board configuration, apply the PCI
 * and chip-revision workarounds Etherboot needs, force the device
 * into D0, initialize the NVRAM interface, and probe the PHY.
 * Must run before any other hardware setup.  Returns the PHY probe
 * result (0 on success).
 */
static int tg3_get_invariants(struct tg3 *tp)
{
	uint32_t misc_ctrl_reg;
	uint32_t pci_state_reg, grc_misc_cfg;
	uint16_t pci_cmd;
	uint8_t  pci_latency;
	int err;

	/* Read the subsystem vendor and device ids */
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);

	/* The sun_5704 handling needs infrastructure Etherboot does not
	 * have; ignore it for now.
	 */

	/* If we have an AMD 762 or Intel ICH/ICH0 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 *
	 * TG3_FLAG_MBOX_WRITE_REORDER has been forced on.
	 */

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundry
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	/* Also, force SERR#/PERR# in PCI command. */
	pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, &misc_ctrl_reg);

	/* The chip revision lives in the top bits of MISC_HOST_CTRL. */
	tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Make sure the latency timer is at least 64 PCI clocks. */
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency < 64) {
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 64);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg);

	/* If this is a 5700 BX chipset, and we are in PCI-X
	 * mode, enable register write workaround.
	 *
	 * The workaround is to use indirect register accesses
	 * for all chip writes not to mailbox registers.
	 *
	 * In etherboot to simplify things we just always use this work around.
	 */
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
	}
	/* Back to back register writes can cause problems on the 5701,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs.
	 * In etherboot we always use indirect register accesses so
	 * we don't see this.
	 */

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Force the chip into D0. */
	tg3_set_power_state_0(tp);

	/* Etherboot does not ask the tg3 to do checksums */
	/* Etherboot does not ask the tg3 to do jumbo frames */
	/* Etherboot does not ask the tg3 to use WakeOnLan. */

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
		((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
			(tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
			(tp->pci_chip_rev_id != CHIPREV_ID_5705_A1))) {
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
	}

	/* Avoid tagged irq status etherboot does not use irqs */

	/* Only 5701 and later support tagged irq status mode.
	 * Also, 5788 chips cannot use tagged irq status.
	 *
	 * However, since etherboot does not use irqs avoid tagged irqs
	 * status  because the interrupt condition is more difficult to
	 * fully clear in that mode.
	 */

	/* Since some 5700_AX && 5700_BX have problems with 32BYTE
	 * coalesce_mode, and the rest work fine anything set.
	 * Don't enable HOST_CC_MODE_32BYTE in etherboot.
	 */

	/* Initialize MAC MI mode, polling disabled. */
	tw32_carefully(MAC_MI_MODE, tp->mi_mode);

	/* Initialize data/descriptor byte/word swapping. */
	tw32(GRC_MODE, tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Etherboot does not need to check if the PCIX_TARGET_HWBUG
	 * is needed.  It always uses it.
	 */

	udelay(50);
	tg3_nvram_init(tp);

	/* The TX descriptors will reside in main memory.
	 */

	/* See which board we are using.
	 */
	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Dual-port 5704 CIOBE boards need PCI-X split transactions. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	/* these are limited to 10/100 only */
	if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) &&
		    ((grc_misc_cfg == 0x8000) || (grc_misc_cfg == 0x4000))) ||
		((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
			(tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM) &&
			((tp->pdev->dev_id == PCI_DEVICE_ID_TIGON3_5901) ||
				(tp->pdev->dev_id == PCI_DEVICE_ID_TIGON3_5901_2)))) {
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
	}

	err = tg3_phy_probe(tp);
	if (err) {
		printf("phy probe failed, err %d\n", err);
	}

	tg3_read_partno(tp);


	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 * In etherboot we do this unconditionally to simplify things.
	 */

	/* 5700 chips can get confused if TX buffers straddle the
	 * 4GB address boundary in some cases.
	 *
	 * In etherboot we can ignore the problem as etherboot lives below 4GB.
	 */

	/* In etherboot wake-on-lan is unconditionally disabled */
	return err;
}
2810 
/* Determine the station MAC address and store it in nic->node_addr.
 * Tries, in order: the firmware MAC address mailbox in NIC SRAM
 * (tagged with 0x484b, ASCII "HK", in the high word), the NVRAM
 * (offset depends on PCI function, for dual-port boards), and
 * finally the live MAC_ADDR_0 registers.  Always returns 0.
 */
static int  tg3_get_device_address(struct tg3 *tp)
{
	struct nic *nic = tp->nic;
	uint32_t hi, lo, mac_offset;

	/* The second PCI function keeps its MAC address at 0xcc. */
	if (PCI_FUNC(tp->pdev->devfn) == 0)
		mac_offset = 0x7c;
	else
		mac_offset = 0xcc;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		nic->node_addr[0] = (hi >>  8) & 0xff;
		nic->node_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		nic->node_addr[2] = (lo >> 24) & 0xff;
		nic->node_addr[3] = (lo >> 16) & 0xff;
		nic->node_addr[4] = (lo >>  8) & 0xff;
		nic->node_addr[5] = (lo >>  0) & 0xff;
	}
	/* Next, try NVRAM. */
	else if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
		nic->node_addr[0] = ((hi >> 16) & 0xff);
		nic->node_addr[1] = ((hi >> 24) & 0xff);
		nic->node_addr[2] = ((lo >>  0) & 0xff);
		nic->node_addr[3] = ((lo >>  8) & 0xff);
		nic->node_addr[4] = ((lo >> 16) & 0xff);
		nic->node_addr[5] = ((lo >> 24) & 0xff);
	}
	/* Finally just fetch it out of the MAC control regs. */
	else {
		hi = tr32(MAC_ADDR_0_HIGH);
		lo = tr32(MAC_ADDR_0_LOW);

		nic->node_addr[5] = lo & 0xff;
		nic->node_addr[4] = (lo >> 8) & 0xff;
		nic->node_addr[3] = (lo >> 16) & 0xff;
		nic->node_addr[2] = (lo >> 24) & 0xff;
		nic->node_addr[1] = hi & 0xff;
		nic->node_addr[0] = (hi >> 8) & 0xff;
	}

	return 0;
}
2858 
2859 
/* Program TG3PCI_DMA_RW_CTRL with read/write watermarks and PCI
 * command codes appropriate for the bus mode (conventional PCI vs
 * PCI-X) and chip revision, working around several known chip bugs.
 * Always returns 0.
 */
static int tg3_setup_dma(struct tg3 *tp)
{
	tw32(TG3PCI_CLOCK_CTRL, 0);

	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
		/* Conventional PCI: aggressive watermarks. */
		tp->dma_rwctrl =
			(0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			(0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
			(0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
			(0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
			(0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			/* 5705 does not want a minimum DMA size. */
			tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
		}
	} else {
		/* PCI-X: watermarks differ by ASIC revision. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tp->dma_rwctrl =
				(0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
				(0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
		else
			tp->dma_rwctrl =
				(0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
				(0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);

		/* Wheee, some more chip bugs... */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
			(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
			uint32_t ccval = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

			/* Certain core clock ratios require one DMA at
			 * a time.
			 */
			if ((ccval == 0x6) || (ccval == 0x7)) {
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
			}
		}
	}

	/* 5703/5704 must not use a minimum DMA size in either mode. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)) {
		tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA << DMA_RWCTRL_MIN_DMA_SHIFT);
	}

	tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	return 0;
}
2912 
2913 static void tg3_init_link_config(struct tg3 *tp)
2914 {
2915 	tp->link_config.advertising =
2916 		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
2917 		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
2918 		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
2919 		 ADVERTISED_Autoneg | ADVERTISED_MII);
2920 	tp->carrier_ok = 0;
2921 	tp->link_config.active_speed = SPEED_INVALID;
2922 	tp->link_config.active_duplex = DUPLEX_INVALID;
2923 }
2924 
2925 
2926 #if SUPPORT_PHY_STR
2927 static const char * tg3_phy_string(struct tg3 *tp)
2928 {
2929 	switch (tp->phy_id & PHY_ID_MASK) {
2930 	case PHY_ID_BCM5400:	return "5400";
2931 	case PHY_ID_BCM5401:	return "5401";
2932 	case PHY_ID_BCM5411:	return "5411";
2933 	case PHY_ID_BCM5701:	return "5701";
2934 	case PHY_ID_BCM5703:	return "5703";
2935 	case PHY_ID_BCM5704:	return "5704";
2936 	case PHY_ID_BCM8002:	return "8002";
2937 	case PHY_ID_SERDES:	return "serdes";
2938 	default:		return "unknown";
2939 	};
2940 }
2941 #else
2942 #define tg3_phy_string(TP) "?"
2943 #endif
2944 
2945 
/* Check MAC_STATUS for a link state change and rerun PHY setup when
 * one is seen.  For serdes the trigger differs by current state:
 * loss of link when carrier is up, PCS sync when it is down.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	uint32_t mac_stat;

	mac_stat = tr32(MAC_STATUS);
	if (tp->phy_id == PHY_ID_SERDES) {
		if (tp->carrier_ok?
			(mac_stat & MAC_STATUS_LNKSTATE_CHANGED):
			(mac_stat & MAC_STATUS_PCS_SYNCED)) {
			/* Bounce the port mode bits before re-running
			 * link setup.
			 */
			tw32_carefully(MAC_MODE, tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK);
			tw32_carefully(MAC_MODE, tp->mac_mode);

			tg3_setup_phy(tp);
		}
	}
	else {
		if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED) {
			tg3_setup_phy(tp);
		}
	}
}
2967 
2968 /**************************************************************************
2969 POLL - Wait for a frame
2970 ***************************************************************************/
/* Acknowledge a pending status-block update: clear the chip's
 * interrupt condition via the interrupt mailbox and then clear the
 * SD_STATUS_UPDATED flag in the host-memory status block.
 */
static void tg3_ack_irqs(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			0x00000001);
		/*
		 * Flush PCI write.  This also guarantees that our
		 * status block has been flushed to host memory.
		 */
		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		tp->hw_status->status &= ~SD_STATUS_UPDATED;
	}
}
2991 
/* Etherboot poll hook: check for (and optionally retrieve) one received
 * frame from the RX return ring.  Returns 1 when a packet was copied
 * into nic->packet (or, when retrieve is 0, when one is waiting),
 * 0 otherwise.
 */
static int tg3_poll(struct nic *nic, int retrieve)
{
	/* return true if there's an ethernet packet ready to read */
	/* nic->packet should contain data on return */
	/* nic->packetlen should contain length of data */

	struct tg3 *tp = &tg3;
	int result;

	result = 0;

	/* With retrieve == 0 the caller only asks whether a packet is
	 * pending; report it without consuming the descriptor.
	 */
	if ( (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) && !retrieve )
	  return 1;

	tg3_ack_irqs(tp);

	/* The hardware producer index running ahead of our consumer
	 * index (rx_rcb_ptr) means at least one completed descriptor
	 * is waiting on the RX return ring.
	 */
	if (tp->hw_status->idx[0].rx_producer != tp->rx_rcb_ptr) {
		struct tg3_rx_buffer_desc *desc;
		unsigned int len;
		desc = &tp->rx_rcb[tp->rx_rcb_ptr];
		/* Only descriptors from the standard ring are handled;
		 * anything else (jumbo/mini) is dropped by falling
		 * through with result still 0.
		 */
		if ((desc->opaque & RXD_OPAQUE_RING_MASK) == RXD_OPAQUE_RING_STD) {
			len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

			nic->packetlen = len;
			memcpy(nic->packet, bus_to_virt(desc->addr_lo), len);
			result = 1;
		}
		tp->rx_rcb_ptr = (tp->rx_rcb_ptr + 1) % TG3_RX_RCB_RING_SIZE;

		/* ACK the status ring */
		tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, tp->rx_rcb_ptr);

		/* Refill RX ring. */
		if (result) {
			tp->rx_std_ptr = (tp->rx_std_ptr + 1) % TG3_RX_RING_SIZE;
			tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, tp->rx_std_ptr);
		}
	}
	/* Piggy-back link supervision on every poll. */
	tg3_poll_link(tp);
	return result;
}
3033 
3034 /**************************************************************************
3035 TRANSMIT - Transmit a frame
3036 ***************************************************************************/
#if 0
/* NOTE: compiled out — tg3_transmit() below fills its TX descriptor
 * inline instead of calling this helper.  Kept for reference.
 *
 * Fill TX ring slot 'entry' with a single-fragment descriptor.
 * The low bit of mss_and_is_end marks the last descriptor of a frame.
 */
static void tg3_set_txd(struct tg3 *tp, int entry,
	dma_addr_t mapping, int len, uint32_t flags,
	uint32_t mss_and_is_end)
{
	struct tg3_tx_buffer_desc *txd =  &tp->tx_ring[entry];
	int is_end = (mss_and_is_end & 0x1);
	if (is_end) {
		flags |= TXD_FLAG_END;
	}

	txd->addr_hi   = 0;
	txd->addr_lo   = mapping & 0xffffffff;
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag  = 0 << TXD_VLAN_TAG_SHIFT;
}
#endif
3054 
/* Etherboot transmit hook: build an ethernet frame around 'packet' and
 * hand it to the NIC via the TX ring.  Blocks (up to ~5s) until the
 * ring has room; on timeout the chip is reset and the frame is dropped.
 */
static void tg3_transmit(struct nic *nic, const char *dst_addr,
	unsigned int type, unsigned int size, const char *packet)
{
	/* Two static frame buffers, used alternately, so the NIC can
	 * still be DMA-ing the previous frame while we build this one.
	 */
	static struct eth_frame {
		uint8_t  dst_addr[ETH_ALEN];
		uint8_t  src_addr[ETH_ALEN];
		uint16_t type;
		uint8_t  data [ETH_FRAME_LEN - ETH_HLEN];
	} frame[2];
	static int frame_idx;

	/* send the packet to destination */
	struct tg3_tx_buffer_desc *txd;
	struct tg3 *tp;
	uint32_t entry;
	int i;

	/* Wait until there is a free packet frame: proceed only once
	 * the consumer index has (almost) caught up with our producer.
	 */
	tp = &tg3;
	i = 0;
	entry = tp->tx_prod;
	while((tp->hw_status->idx[0].tx_consumer != entry) &&
		(tp->hw_status->idx[0].tx_consumer != PREV_TX(entry))) {
		mdelay(10);	/* give the NIC a chance */
		poll_interruptions();
		if (++i > 500) { /* timeout 5s for transmit */
			printf("transmit timed out\n");
			/* Reset and reinitialize the chip to recover. */
			tg3_halt(tp);
			tg3_setup_hw(tp);
			return;
		}
	}
	if (i != 0) {
		printf("#");
	}

	/* Copy the packet to the our local buffer */
	memcpy(&frame[frame_idx].dst_addr, dst_addr, ETH_ALEN);
	memcpy(&frame[frame_idx].src_addr, nic->node_addr, ETH_ALEN);
	frame[frame_idx].type = htons(type);
	memset(&frame[frame_idx].data, 0, sizeof(frame[frame_idx].data));
	memcpy(&frame[frame_idx].data, packet, size);

	/* Setup the ring buffer entry to transmit */
	txd            = &tp->tx_ring[entry];
	txd->addr_hi   = 0; /* Etherboot runs under 4GB */
	txd->addr_lo   = virt_to_bus(&frame[frame_idx]);
	txd->len_flags = ((size + ETH_HLEN) << TXD_LEN_SHIFT) | TXD_FLAG_END;
	txd->vlan_tag  = 0 << TXD_VLAN_TAG_SHIFT;

	/* Advance to the next entry */
	entry = NEXT_TX(entry);
	frame_idx ^= 1;

	/* Packets are ready, update Tx producer idx local and on card */
	/* NOTE(review): the producer index is written to both mailbox
	 * apertures — presumably a chip-revision workaround; confirm
	 * before removing either write.
	 */
	tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
	tw32_mailbox2((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
	tp->tx_prod = entry;
}
3114 
3115 /**************************************************************************
3116 DISABLE - Turn off ethernet interface
3117 ***************************************************************************/
3118 static void tg3_disable(struct dev *dev __unused)
3119 {
3120 	struct tg3 *tp = &tg3;
3121 	/* put the card in its initial state */
3122 	/* This function serves 3 purposes.
3123 	 * This disables DMA and interrupts so we don't receive
3124 	 *  unexpected packets or interrupts from the card after
3125 	 *  etherboot has finished.
3126 	 * This frees resources so etherboot may use
3127 	 *  this driver on another interface
3128 	 * This allows etherboot to reinitialize the interface
3129 	 *  if something is something goes wrong.
3130 	 */
3131 	tg3_halt(tp);
3132 	tp->tg3_flags &= ~(TG3_FLAG_INIT_COMPLETE|TG3_FLAG_GOT_SERDES_FLOWCTL);
3133 	tp->carrier_ok = 0;
3134 	iounmap((void *)tp->regs);
3135 }
3136 
3137 /**************************************************************************
3138 IRQ - Enable, Disable, or Force interrupts
3139 ***************************************************************************/
3140 static void tg3_irq(struct nic *nic __unused, irq_action_t action __unused)
3141 {
3142   switch ( action ) {
3143   case DISABLE :
3144     break;
3145   case ENABLE :
3146     break;
3147   case FORCE :
3148     break;
3149   }
3150 }
3151 
3152 /**************************************************************************
3153 PROBE - Look for an adapter, this routine's visible to the outside
3154 You should omit the last argument struct pci_device * for a non-PCI NIC
3155 ***************************************************************************/
/* Probe and initialize one Tigon3 adapter.  Fills in the nic/dev hooks
 * on success and returns 1; returns 0 on any failure (resources mapped
 * so far are released first).  The bring-up order below (config-space
 * setup, register mapping, invariants, MAC address, DMA, hardware
 * setup, link wait) mirrors the Linux tg3 driver and must be kept.
 */
static int tg3_probe(struct dev *dev, struct pci_device *pdev)
{
	struct nic *nic = (struct nic *)dev;
	struct tg3 *tp = &tg3;
	unsigned long tg3reg_base, tg3reg_len;
	int i, err, pm_cap;

	if (pdev == 0)
		return 0;

	/* Start from a clean slate; tg3 is a single static instance. */
	memset(tp, 0, sizeof(*tp));

	adjust_pci_device(pdev);

	nic->irqno  = 0;
	/* Mask off the low BAR flag bits to get the I/O base. */
	nic->ioaddr = pdev->ioaddr & ~3;

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printf("Cannot find PowerManagement capability, aborting.\n");
		return 0;
	}
	tg3reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
	if (tg3reg_base == -1UL) {
		printf("Unuseable bar\n");
		return 0;
	}
	tg3reg_len  = pci_bar_size(pdev,  PCI_BASE_ADDRESS_0);

	tp->pdev       = pdev;
	tp->nic        = nic;
	tp->pm_cap     = pm_cap;
	tp->rx_mode    = 0;
	tp->tx_mode    = 0;
	tp->mi_mode    = MAC_MI_MODE_BASE;
	/* Evaluates to 0; spelled this way to make explicit that
	 * INIT_COMPLETE starts out clear.
	 */
	tp->tg3_flags  = 0 & ~TG3_FLAG_INIT_COMPLETE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#if __BYTE_ORDER == __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printf("Cannot map device registers, aborting\n");
		return 0;
	}

	tg3_init_link_config(tp);

	err = tg3_get_invariants(tp);
	if (err) {
		printf("Problem fetching invariants of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printf("Could not obtain valid ethernet address, aborting.\n");
		goto err_out_iounmap;
	}
	/* %! is Etherboot's printf specifier for a MAC address. */
	printf("Ethernet addr: %!\n", nic->node_addr);

	tg3_setup_dma(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev, tp->pci_cfg_state);

	printf("Tigon3 [partno(%s) rev %hx PHY(%s)] (PCI%s:%s:%s)\n",
		tp->board_part_number,
		tp->pci_chip_rev_id,
		tg3_phy_string(tp),
		((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
		((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
			((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
			((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
		((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"));


	err = tg3_setup_hw(tp);
	if (err) {
		goto err_out_disable;
	}
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;

	/* Wait for a reasonable time for the link to come up */
	tg3_poll_link(tp);
	for(i = 0; !tp->carrier_ok && (i < VALID_LINK_TIMEOUT*100); i++) {
		mdelay(1);
		tg3_poll_link(tp);
	}
	if (!tp->carrier_ok){
		printf("Valid link not established\n");
		goto err_out_disable;
	}

	/* Hand our entry points to the Etherboot core. */
	dev->disable  = tg3_disable;
	nic->poll     = tg3_poll;
	nic->transmit = tg3_transmit;
	nic->irq      = tg3_irq;

	return 1;

 err_out_iounmap:
	iounmap((void *)tp->regs);
	return 0;
 err_out_disable:
	/* tg3_disable() halts the chip and unmaps the registers. */
	tg3_disable(dev);
	return 0;
}
3286 
/* PCI vendor/device IDs handled by this driver: Broadcom Tigon3
 * variants plus Syskonnect and Altima boards built on the same chip.
 */
static struct pci_id tg3_nics[] = {
PCI_ROM(0x14e4, 0x1644, "tg3-5700",        "Broadcom Tigon 3 5700"),
PCI_ROM(0x14e4, 0x1645, "tg3-5701",        "Broadcom Tigon 3 5701"),
PCI_ROM(0x14e4, 0x1646, "tg3-5702",        "Broadcom Tigon 3 5702"),
PCI_ROM(0x14e4, 0x1647, "tg3-5703",        "Broadcom Tigon 3 5703"),
PCI_ROM(0x14e4, 0x1648, "tg3-5704",        "Broadcom Tigon 3 5704"),
PCI_ROM(0x14e4, 0x164d, "tg3-5702FE",      "Broadcom Tigon 3 5702FE"),
PCI_ROM(0x14e4, 0x1653, "tg3-5705",        "Broadcom Tigon 3 5705"),
PCI_ROM(0x14e4, 0x1654, "tg3-5705_2",      "Broadcom Tigon 3 5705_2"),
PCI_ROM(0x14e4, 0x165d, "tg3-5705M",       "Broadcom Tigon 3 5705M"),
PCI_ROM(0x14e4, 0x165e, "tg3-5705M_2",     "Broadcom Tigon 3 5705M_2"),
PCI_ROM(0x14e4, 0x1696, "tg3-5782",        "Broadcom Tigon 3 5782"),
PCI_ROM(0x14e4, 0x169c, "tg3-5788",        "Broadcom Tigon 3 5788"),
PCI_ROM(0x14e4, 0x16a6, "tg3-5702X",       "Broadcom Tigon 3 5702X"),
PCI_ROM(0x14e4, 0x16a7, "tg3-5703X",       "Broadcom Tigon 3 5703X"),
PCI_ROM(0x14e4, 0x16a8, "tg3-5704S",       "Broadcom Tigon 3 5704S"),
PCI_ROM(0x14e4, 0x16c6, "tg3-5702A3",      "Broadcom Tigon 3 5702A3"),
PCI_ROM(0x14e4, 0x16c7, "tg3-5703A3",      "Broadcom Tigon 3 5703A3"),
PCI_ROM(0x14e4, 0x170d, "tg3-5901",        "Broadcom Tigon 3 5901"),
PCI_ROM(0x14e4, 0x170e, "tg3-5901_2",      "Broadcom Tigon 3 5901_2"),
PCI_ROM(0x1148, 0x4400, "tg3-9DXX",        "Syskonnect 9DXX"),
PCI_ROM(0x1148, 0x4500, "tg3-9MXX",        "Syskonnect 9MXX"),
PCI_ROM(0x173b, 0x03e8, "tg3-ac1000",      "Altima AC1000"),
PCI_ROM(0x173b, 0x03e9, "tg3-ac1001",      "Altima AC1001"),
PCI_ROM(0x173b, 0x03ea, "tg3-ac9100",      "Altima AC9100"),
PCI_ROM(0x173b, 0x03eb, "tg3-ac1003",      "Altima AC1003"),
};
3314 
/* Driver registration record consumed by the Etherboot PCI core:
 * binds the ID table above to the probe entry point.
 */
struct pci_driver tg3_driver = {
	.type	  = NIC_DRIVER,
	.name	  = "TG3",
	.probe	  = tg3_probe,
	.ids	  = tg3_nics,
	.id_count = sizeof(tg3_nics)/sizeof(tg3_nics[0]),
	.class    = 0,
};
3323