/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
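
/* Usage sketch (illustrative, not part of the original source): the
 * flag name is token-pasted onto TG3_FLAG_, so callers pass only the
 * suffix, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 */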

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			123
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"March 21, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
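
/* Illustration (not from the original source): because
 * TG3_TX_RING_SIZE is a power of two, the mask above is equivalent
 * to a modulo, e.g. NEXT_TX(5) == 6 and NEXT_TX(511) == 0.
 */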

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* This driver uses the new build_skb() API, providing a frag as skb->head.
 * This strategy permits better GRO aggregation, better TCP coalescing, and
 * better splice() implementation (avoids a copy from head to a page), at
 * minimal memory cost.
 * In this 2048-byte block, we have enough room to store the MTU=1500 frame
 * and the struct skb_shared_info.
 */
#define TG3_FRAGSIZE 2048

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
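/* Worked example (not part of the original comment): with the default
 * tx_pending of TG3_DEF_TX_RING_PENDING (511), integer division gives
 * a threshold of 127 free descriptors before the queue is woken.
 */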
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

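/* Indirect register access: the target offset is latched through the
 * TG3PCI_REG_BASE_ADDR config-space window and the data then moves
 * through TG3PCI_REG_DATA.  indirect_lock serializes the two config
 * cycles so concurrent callers cannot interleave them.
 */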
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
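
/* The shorthand accessors above assume a local "struct tg3 *tp" is in
 * scope; e.g. tw32_f(MAC_MODE, tp->mac_mode) performs a write that is
 * immediately flushed by reading the same register back.
 */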

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
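		/* fall through */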
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
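		/* fall through */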
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
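
/* Each busy-poll iteration below delays 10 usec, so PHY_BUSY_LOOPS
 * bounds a single MDIO transaction to roughly 50 ms before -EBUSY
 * is returned.
 */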

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

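/* Clause 45 register access via the clause 22 MMD indirection
 * registers: select the device address in MII_TG3_MMD_CTRL, latch the
 * register address through MII_TG3_MMD_ADDRESS, switch the control
 * register to no-increment data mode, then transfer the data word.
 */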
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

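/* Pack four 32-bit words of PHY status for the firmware mailbox:
 * BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR, each
 * register pair stored as the high/low halves of one word.
 */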
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

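/* Resolve the 1000BASE-X pause negotiation (IEEE 802.3 annex 28B):
 * symmetric pause advertised on both ends enables flow control in
 * both directions; when only asymmetric pause is in common, the side
 * that also advertised symmetric pause determines the single
 * permitted direction.
 */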
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
1913 		phydev->autoneg = tp->link_config.autoneg;
1914 		phydev->advertising = tp->link_config.advertising;
1915 	}
1916 
1917 	phy_start(phydev);
1918 
1919 	phy_start_aneg(phydev);
1920 }
1921 
1922 static void tg3_phy_stop(struct tg3 *tp)
1923 {
1924 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1925 		return;
1926 
1927 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1928 }
1929 
1930 static void tg3_phy_fini(struct tg3 *tp)
1931 {
1932 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1933 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1934 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1935 	}
1936 }
1937 
1938 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1939 {
1940 	int err;
1941 	u32 val;
1942 
1943 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1944 		return 0;
1945 
1946 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1947 		/* Cannot do read-modify-write on 5401 */
1948 		err = tg3_phy_auxctl_write(tp,
1949 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1950 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1951 					   0x4c20);
1952 		goto done;
1953 	}
1954 
1955 	err = tg3_phy_auxctl_read(tp,
1956 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1957 	if (err)
1958 		return err;
1959 
1960 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1961 	err = tg3_phy_auxctl_write(tp,
1962 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1963 
1964 done:
1965 	return err;
1966 }
1967 
1968 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1969 {
1970 	u32 phytest;
1971 
1972 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1973 		u32 phy;
1974 
1975 		tg3_writephy(tp, MII_TG3_FET_TEST,
1976 			     phytest | MII_TG3_FET_SHADOW_EN);
1977 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1978 			if (enable)
1979 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1980 			else
1981 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1982 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1983 		}
1984 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1985 	}
1986 }
1987 
1988 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1989 {
1990 	u32 reg;
1991 
1992 	if (!tg3_flag(tp, 5705_PLUS) ||
1993 	    (tg3_flag(tp, 5717_PLUS) &&
1994 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1995 		return;
1996 
1997 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1998 		tg3_phy_fet_toggle_apd(tp, enable);
1999 		return;
2000 	}
2001 
2002 	reg = MII_TG3_MISC_SHDW_WREN |
2003 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2004 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2005 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2006 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2007 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2008 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2009 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2010 
2011 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2012 
2013 
2014 	reg = MII_TG3_MISC_SHDW_WREN |
2015 	      MII_TG3_MISC_SHDW_APD_SEL |
2016 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2017 	if (enable)
2018 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2019 
2020 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2021 }
2022 
2023 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2024 {
2025 	u32 phy;
2026 
2027 	if (!tg3_flag(tp, 5705_PLUS) ||
2028 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2029 		return;
2030 
2031 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2032 		u32 ephy;
2033 
2034 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2035 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2036 
2037 			tg3_writephy(tp, MII_TG3_FET_TEST,
2038 				     ephy | MII_TG3_FET_SHADOW_EN);
2039 			if (!tg3_readphy(tp, reg, &phy)) {
2040 				if (enable)
2041 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2042 				else
2043 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2044 				tg3_writephy(tp, reg, phy);
2045 			}
2046 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2047 		}
2048 	} else {
2049 		int ret;
2050 
2051 		ret = tg3_phy_auxctl_read(tp,
2052 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2053 		if (!ret) {
2054 			if (enable)
2055 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2056 			else
2057 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2058 			tg3_phy_auxctl_write(tp,
2059 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2060 		}
2061 	}
2062 }
2063 
2064 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2065 {
2066 	int ret;
2067 	u32 val;
2068 
2069 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2070 		return;
2071 
2072 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2073 	if (!ret)
2074 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2075 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2076 }
2077 
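/* Apply the per-device values packed into tp->phy_otp (read from the
 * chip's one-time-programmable area) to the PHY DSP tap, filter and
 * amplitude registers.
 */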
2078 static void tg3_phy_apply_otp(struct tg3 *tp)
2079 {
2080 	u32 otp, phy;
2081 
2082 	if (!tp->phy_otp)
2083 		return;
2084 
2085 	otp = tp->phy_otp;
2086 
2087 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2088 		return;
2089 
2090 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2091 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2092 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2093 
2094 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2095 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2096 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2097 
2098 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2099 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2100 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2101 
2102 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2103 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2104 
2105 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2106 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2107 
2108 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2109 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2110 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2111 
2112 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2113 }
2114 
2115 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2116 {
2117 	u32 val;
2118 
2119 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2120 		return;
2121 
2122 	tp->setlpicnt = 0;
2123 
2124 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2125 	    current_link_up == 1 &&
2126 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2127 	    (tp->link_config.active_speed == SPEED_100 ||
2128 	     tp->link_config.active_speed == SPEED_1000)) {
2129 		u32 eeectl;
2130 
2131 		if (tp->link_config.active_speed == SPEED_1000)
2132 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2133 		else
2134 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2135 
2136 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2137 
2138 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2139 				  TG3_CL45_D7_EEERES_STAT, &val);
2140 
2141 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2142 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2143 			tp->setlpicnt = 2;
2144 	}
2145 
2146 	if (!tp->setlpicnt) {
2147 		if (current_link_up == 1 &&
2148 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2149 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2150 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2151 		}
2152 
2153 		val = tr32(TG3_CPMU_EEE_MODE);
2154 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2155 	}
2156 }
2157 
2158 static void tg3_phy_eee_enable(struct tg3 *tp)
2159 {
2160 	u32 val;
2161 
2162 	if (tp->link_config.active_speed == SPEED_1000 &&
2163 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2164 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2165 	     tg3_flag(tp, 57765_CLASS)) &&
2166 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2167 		val = MII_TG3_DSP_TAP26_ALNOKO |
2168 		      MII_TG3_DSP_TAP26_RMRXSTO;
2169 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2170 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2171 	}
2172 
2173 	val = tr32(TG3_CPMU_EEE_MODE);
2174 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2175 }
2176 
2177 static int tg3_wait_macro_done(struct tg3 *tp)
2178 {
2179 	int limit = 100;
2180 
2181 	while (limit--) {
2182 		u32 tmp32;
2183 
2184 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2185 			if ((tmp32 & 0x1000) == 0)
2186 				break;
2187 		}
2188 	}
2189 	if (limit < 0)
2190 		return -EBUSY;
2191 
2192 	return 0;
2193 }
2194 
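/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify the PHY is healthy.  A stuck DSP macro sets
 * *resetp so the caller retries after another PHY reset; a read-back
 * mismatch fails with -EBUSY.
 */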
2195 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2196 {
2197 	static const u32 test_pat[4][6] = {
2198 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2199 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2200 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2201 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2202 	};
2203 	int chan;
2204 
2205 	for (chan = 0; chan < 4; chan++) {
2206 		int i;
2207 
2208 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209 			     (chan * 0x2000) | 0x0200);
2210 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2211 
2212 		for (i = 0; i < 6; i++)
2213 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2214 				     test_pat[chan][i]);
2215 
2216 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2217 		if (tg3_wait_macro_done(tp)) {
2218 			*resetp = 1;
2219 			return -EBUSY;
2220 		}
2221 
2222 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2223 			     (chan * 0x2000) | 0x0200);
2224 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2225 		if (tg3_wait_macro_done(tp)) {
2226 			*resetp = 1;
2227 			return -EBUSY;
2228 		}
2229 
2230 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2231 		if (tg3_wait_macro_done(tp)) {
2232 			*resetp = 1;
2233 			return -EBUSY;
2234 		}
2235 
2236 		for (i = 0; i < 6; i += 2) {
2237 			u32 low, high;
2238 
2239 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2240 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2241 			    tg3_wait_macro_done(tp)) {
2242 				*resetp = 1;
2243 				return -EBUSY;
2244 			}
2245 			low &= 0x7fff;
2246 			high &= 0x000f;
2247 			if (low != test_pat[chan][i] ||
2248 			    high != test_pat[chan][i+1]) {
2249 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2250 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2251 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2252 
2253 				return -EBUSY;
2254 			}
2255 		}
2256 	}
2257 
2258 	return 0;
2259 }
2260 
2261 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2262 {
2263 	int chan;
2264 
2265 	for (chan = 0; chan < 4; chan++) {
2266 		int i;
2267 
2268 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2269 			     (chan * 0x2000) | 0x0200);
2270 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2271 		for (i = 0; i < 6; i++)
2272 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2273 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2274 		if (tg3_wait_macro_done(tp))
2275 			return -EBUSY;
2276 	}
2277 
2278 	return 0;
2279 }
2280 
2281 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2282 {
2283 	u32 reg32, phy9_orig;
2284 	int retries, do_phy_reset, err;
2285 
2286 	retries = 10;
2287 	do_phy_reset = 1;
2288 	do {
2289 		if (do_phy_reset) {
2290 			err = tg3_bmcr_reset(tp);
2291 			if (err)
2292 				return err;
2293 			do_phy_reset = 0;
2294 		}
2295 
2296 		/* Disable transmitter and interrupt.  */
2297 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2298 			continue;
2299 
2300 		reg32 |= 0x3000;
2301 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2302 
2303 		/* Set full-duplex, 1000 Mb/s.  */
2304 		tg3_writephy(tp, MII_BMCR,
2305 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2306 
2307 		/* Set to master mode.  */
2308 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2309 			continue;
2310 
2311 		tg3_writephy(tp, MII_CTRL1000,
2312 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2313 
2314 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2315 		if (err)
2316 			return err;
2317 
2318 		/* Block the PHY control access.  */
2319 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2320 
2321 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2322 		if (!err)
2323 			break;
2324 	} while (--retries);
2325 
2326 	err = tg3_phy_reset_chanpat(tp);
2327 	if (err)
2328 		return err;
2329 
2330 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2331 
2332 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2333 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2334 
2335 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2336 
2337 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2338 
2339 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2340 		reg32 &= ~0x3000;
2341 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2342 	} else if (!err)
2343 		err = -EBUSY;
2344 
2345 	return err;
2346 }
2347 
2348 /* Reset the tigon3 PHY back to a known good state and reapply the
2349  * chip- and PHY-specific workarounds.
2350  */
2351 static int tg3_phy_reset(struct tg3 *tp)
2352 {
2353 	u32 val, cpmuctrl;
2354 	int err;
2355 
2356 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2357 		val = tr32(GRC_MISC_CFG);
2358 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2359 		udelay(40);
2360 	}
2361 	err  = tg3_readphy(tp, MII_BMSR, &val);
2362 	err |= tg3_readphy(tp, MII_BMSR, &val);
2363 	if (err != 0)
2364 		return -EBUSY;
2365 
2366 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2367 		netif_carrier_off(tp->dev);
2368 		tg3_link_report(tp);
2369 	}
2370 
2371 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2372 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2373 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2374 		err = tg3_phy_reset_5703_4_5(tp);
2375 		if (err)
2376 			return err;
2377 		goto out;
2378 	}
2379 
2380 	cpmuctrl = 0;
2381 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2382 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2383 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2384 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2385 			tw32(TG3_CPMU_CTRL,
2386 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2387 	}
2388 
2389 	err = tg3_bmcr_reset(tp);
2390 	if (err)
2391 		return err;
2392 
2393 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2394 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2395 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2396 
2397 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2398 	}
2399 
2400 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2401 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2402 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2403 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2404 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2405 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2406 			udelay(40);
2407 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2408 		}
2409 	}
2410 
2411 	if (tg3_flag(tp, 5717_PLUS) &&
2412 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2413 		return 0;
2414 
2415 	tg3_phy_apply_otp(tp);
2416 
2417 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2418 		tg3_phy_toggle_apd(tp, true);
2419 	else
2420 		tg3_phy_toggle_apd(tp, false);
2421 
2422 out:
2423 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2424 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2425 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2426 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2427 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2428 	}
2429 
2430 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2431 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2432 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2433 	}
2434 
2435 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2436 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2437 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2438 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2439 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2440 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2441 		}
2442 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2443 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2444 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2445 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2446 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2447 				tg3_writephy(tp, MII_TG3_TEST1,
2448 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2449 			} else
2450 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2451 
2452 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2453 		}
2454 	}
2455 
2456 	/* Set the extended packet length bit (bit 14) on all chips
2457 	 * that support jumbo frames. */
2458 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2459 		/* Cannot do read-modify-write on 5401 */
2460 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2461 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2462 		/* Set bit 14 with read-modify-write to preserve other bits */
2463 		err = tg3_phy_auxctl_read(tp,
2464 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2465 		if (!err)
2466 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2467 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2468 	}
2469 
2470 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2471 	 * jumbo frame transmission.
2472 	 */
2473 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2474 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2475 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2476 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2477 	}
2478 
2479 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2480 		/* adjust output voltage */
2481 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2482 	}
2483 
2484 	tg3_phy_toggle_automdix(tp, 1);
2485 	tg3_phy_set_wirespeed(tp);
2486 	return 0;
2487 }
2488 
2489 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2490 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2491 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2492 					  TG3_GPIO_MSG_NEED_VAUX)
2493 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2494 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2495 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2496 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2497 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2498 
2499 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2500 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2501 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2502 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2503 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2504 
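/* Each PCI function owns a 4-bit field in the shared GPIO message word
 * (function N at bits 4*N..4*N+3 above TG3_APE_GPIO_MSG_SHIFT).
 * Replace this function's field with newstat and return the updated
 * word, normalized so function 0's field starts at bit 0.
 */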
2505 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2506 {
2507 	u32 status, shift;
2508 
2509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2511 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2512 	else
2513 		status = tr32(TG3_CPMU_DRV_STATUS);
2514 
2515 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2516 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2517 	status |= (newstat << shift);
2518 
2519 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2521 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2522 	else
2523 		tw32(TG3_CPMU_DRV_STATUS, status);
2524 
2525 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2526 }
2527 
2528 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2529 {
2530 	if (!tg3_flag(tp, IS_NIC))
2531 		return 0;
2532 
2533 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2534 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2535 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2536 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2537 			return -EIO;
2538 
2539 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2540 
2541 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2542 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2543 
2544 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2545 	} else {
2546 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2547 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2548 	}
2549 
2550 	return 0;
2551 }
2552 
2553 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2554 {
2555 	u32 grc_local_ctrl;
2556 
2557 	if (!tg3_flag(tp, IS_NIC) ||
2558 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2560 		return;
2561 
2562 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2563 
2564 	tw32_wait_f(GRC_LOCAL_CTRL,
2565 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 
2568 	tw32_wait_f(GRC_LOCAL_CTRL,
2569 		    grc_local_ctrl,
2570 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2571 
2572 	tw32_wait_f(GRC_LOCAL_CTRL,
2573 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2574 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2575 }
2576 
2577 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2578 {
2579 	if (!tg3_flag(tp, IS_NIC))
2580 		return;
2581 
2582 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2583 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2584 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2585 			    (GRC_LCLCTRL_GPIO_OE0 |
2586 			     GRC_LCLCTRL_GPIO_OE1 |
2587 			     GRC_LCLCTRL_GPIO_OE2 |
2588 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2589 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2590 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2591 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2592 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2593 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2594 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2595 				     GRC_LCLCTRL_GPIO_OE1 |
2596 				     GRC_LCLCTRL_GPIO_OE2 |
2597 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2598 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2599 				     tp->grc_local_ctrl;
2600 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2602 
2603 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2604 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2605 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2606 
2607 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2608 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2609 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2610 	} else {
2611 		u32 no_gpio2;
2612 		u32 grc_local_ctrl = 0;
2613 
2614 		/* Workaround to prevent drawing too much current. */
2615 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2616 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2617 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2618 				    grc_local_ctrl,
2619 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2620 		}
2621 
2622 		/* On 5753 and variants, GPIO2 cannot be used. */
2623 		no_gpio2 = tp->nic_sram_data_cfg &
2624 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2625 
2626 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2627 				  GRC_LCLCTRL_GPIO_OE1 |
2628 				  GRC_LCLCTRL_GPIO_OE2 |
2629 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2630 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2631 		if (no_gpio2) {
2632 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2633 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2634 		}
2635 		tw32_wait_f(GRC_LOCAL_CTRL,
2636 			    tp->grc_local_ctrl | grc_local_ctrl,
2637 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2638 
2639 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2640 
2641 		tw32_wait_f(GRC_LOCAL_CTRL,
2642 			    tp->grc_local_ctrl | grc_local_ctrl,
2643 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2644 
2645 		if (!no_gpio2) {
2646 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2647 			tw32_wait_f(GRC_LOCAL_CTRL,
2648 				    tp->grc_local_ctrl | grc_local_ctrl,
2649 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2650 		}
2651 	}
2652 }
2653 
2654 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2655 {
2656 	u32 msg = 0;
2657 
2658 	/* Serialize power state transitions */
2659 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2660 		return;
2661 
2662 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2663 		msg = TG3_GPIO_MSG_NEED_VAUX;
2664 
2665 	msg = tg3_set_function_status(tp, msg);
2666 
2667 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2668 		goto done;
2669 
2670 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2671 		tg3_pwrsrc_switch_to_vaux(tp);
2672 	else
2673 		tg3_pwrsrc_die_with_vmain(tp);
2674 
2675 done:
2676 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2677 }
2678 
2679 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2680 {
2681 	bool need_vaux = false;
2682 
2683 	/* The GPIOs do something completely different on 57765. */
2684 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2685 		return;
2686 
2687 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2688 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2689 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2690 		tg3_frob_aux_power_5717(tp, include_wol ?
2691 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2692 		return;
2693 	}
2694 
2695 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2696 		struct net_device *dev_peer;
2697 
2698 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2699 
2700 		/* remove_one() may have been run on the peer. */
2701 		if (dev_peer) {
2702 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2703 
2704 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2705 				return;
2706 
2707 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2708 			    tg3_flag(tp_peer, ENABLE_ASF))
2709 				need_vaux = true;
2710 		}
2711 	}
2712 
2713 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2714 	    tg3_flag(tp, ENABLE_ASF))
2715 		need_vaux = true;
2716 
2717 	if (need_vaux)
2718 		tg3_pwrsrc_switch_to_vaux(tp);
2719 	else
2720 		tg3_pwrsrc_die_with_vmain(tp);
2721 }
2722 
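/* Decide whether MAC_MODE_LINK_POLARITY should be set for this link
 * speed: always in the PHY_2 LED mode, above 10 Mb/s on a BCM5411,
 * and only at 10 Mb/s on everything else.
 */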
2723 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2724 {
2725 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2726 		return 1;
2727 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2728 		if (speed != SPEED_10)
2729 			return 1;
2730 	} else if (speed == SPEED_10)
2731 		return 1;
2732 
2733 	return 0;
2734 }
2735 
2736 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2737 {
2738 	u32 val;
2739 
2740 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2741 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2742 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2743 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2744 
2745 			sg_dig_ctrl |=
2746 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2747 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2748 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2749 		}
2750 		return;
2751 	}
2752 
2753 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2754 		tg3_bmcr_reset(tp);
2755 		val = tr32(GRC_MISC_CFG);
2756 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2757 		udelay(40);
2758 		return;
2759 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2760 		u32 phytest;
2761 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2762 			u32 phy;
2763 
2764 			tg3_writephy(tp, MII_ADVERTISE, 0);
2765 			tg3_writephy(tp, MII_BMCR,
2766 				     BMCR_ANENABLE | BMCR_ANRESTART);
2767 
2768 			tg3_writephy(tp, MII_TG3_FET_TEST,
2769 				     phytest | MII_TG3_FET_SHADOW_EN);
2770 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2771 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2772 				tg3_writephy(tp,
2773 					     MII_TG3_FET_SHDW_AUXMODE4,
2774 					     phy);
2775 			}
2776 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2777 		}
2778 		return;
2779 	} else if (do_low_power) {
2780 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2781 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2782 
2783 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2784 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2785 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2786 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2787 	}
2788 
2789 	/* The PHY should not be powered down on some chips because
2790 	 * of bugs.
2791 	 */
2792 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2794 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2795 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2796 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2797 	     !tp->pci_fn))
2798 		return;
2799 
2800 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2801 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2802 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2803 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2804 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2805 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806 	}
2807 
2808 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 }
2810 
2811 /* tp->lock is held. */
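/* Acquire the hardware NVRAM arbitration semaphore (SWARB), polling
 * the grant bit for up to ~160 ms (8000 iterations x 20 us).  The
 * lock nests: nvram_lock_cnt ensures only the outermost caller
 * touches the hardware.
 */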
2812 static int tg3_nvram_lock(struct tg3 *tp)
2813 {
2814 	if (tg3_flag(tp, NVRAM)) {
2815 		int i;
2816 
2817 		if (tp->nvram_lock_cnt == 0) {
2818 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2819 			for (i = 0; i < 8000; i++) {
2820 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2821 					break;
2822 				udelay(20);
2823 			}
2824 			if (i == 8000) {
2825 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2826 				return -ENODEV;
2827 			}
2828 		}
2829 		tp->nvram_lock_cnt++;
2830 	}
2831 	return 0;
2832 }
2833 
2834 /* tp->lock is held. */
2835 static void tg3_nvram_unlock(struct tg3 *tp)
2836 {
2837 	if (tg3_flag(tp, NVRAM)) {
2838 		if (tp->nvram_lock_cnt > 0)
2839 			tp->nvram_lock_cnt--;
2840 		if (tp->nvram_lock_cnt == 0)
2841 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2842 	}
2843 }
2844 
2845 /* tp->lock is held. */
2846 static void tg3_enable_nvram_access(struct tg3 *tp)
2847 {
2848 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2849 		u32 nvaccess = tr32(NVRAM_ACCESS);
2850 
2851 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2852 	}
2853 }
2854 
2855 /* tp->lock is held. */
2856 static void tg3_disable_nvram_access(struct tg3 *tp)
2857 {
2858 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2859 		u32 nvaccess = tr32(NVRAM_ACCESS);
2860 
2861 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2862 	}
2863 }
2864 
2865 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2866 					u32 offset, u32 *val)
2867 {
2868 	u32 tmp;
2869 	int i;
2870 
2871 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872 		return -EINVAL;
2873 
2874 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2875 					EEPROM_ADDR_DEVID_MASK |
2876 					EEPROM_ADDR_READ);
2877 	tw32(GRC_EEPROM_ADDR,
2878 	     tmp |
2879 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2880 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2881 	      EEPROM_ADDR_ADDR_MASK) |
2882 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2883 
2884 	for (i = 0; i < 1000; i++) {
2885 		tmp = tr32(GRC_EEPROM_ADDR);
2886 
2887 		if (tmp & EEPROM_ADDR_COMPLETE)
2888 			break;
2889 		msleep(1);
2890 	}
2891 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2892 		return -EBUSY;
2893 
2894 	tmp = tr32(GRC_EEPROM_DATA);
2895 
2896 	/*
2897 	 * The data will always be opposite the native endian
2898 	 * format.  Perform a blind byteswap to compensate.
2899 	 */
2900 	*val = swab32(tmp);
2901 
2902 	return 0;
2903 }
2904 
2905 #define NVRAM_CMD_TIMEOUT 10000
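/* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE every 10 us, so this
 * timeout is a worst-case wait of roughly 100 ms.
 */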
2906 
2907 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2908 {
2909 	int i;
2910 
2911 	tw32(NVRAM_CMD, nvram_cmd);
2912 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2913 		udelay(10);
2914 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2915 			udelay(10);
2916 			break;
2917 		}
2918 	}
2919 
2920 	if (i == NVRAM_CMD_TIMEOUT)
2921 		return -EBUSY;
2922 
2923 	return 0;
2924 }
2925 
2926 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2927 {
2928 	if (tg3_flag(tp, NVRAM) &&
2929 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2930 	    tg3_flag(tp, FLASH) &&
2931 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2933 
2934 		addr = ((addr / tp->nvram_pagesize) <<
2935 			ATMEL_AT45DB0X1B_PAGE_POS) +
2936 		       (addr % tp->nvram_pagesize);
2937 
2938 	return addr;
2939 }
2940 
2941 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2942 {
2943 	if (tg3_flag(tp, NVRAM) &&
2944 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2945 	    tg3_flag(tp, FLASH) &&
2946 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2947 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2948 
2949 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2950 			tp->nvram_pagesize) +
2951 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2952 
2953 	return addr;
2954 }
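/* Worked example for the two translations above (a sketch, assuming
 * the AT45DB0x1B's 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *   logical 1000 -> page 3, offset 208 -> physical (3 << 9) + 208 = 1744
 *   physical 1744 -> page 3, offset 208 -> logical 3 * 264 + 208 = 1000
 *
 * i.e. tg3_nvram_logical_addr() inverts tg3_nvram_phys_addr().
 */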
2955 
2956 /* NOTE: Data read in from NVRAM is byteswapped according to
2957  * the byteswapping settings for all other register accesses.
2958  * tg3 devices are BE devices, so on a BE machine, the data
2959  * returned will be exactly as it is seen in NVRAM.  On a LE
2960  * machine, the 32-bit value will be byteswapped.
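 * For example, a word stored as the bytes 12 34 56 78 in NVRAM would
 * read back as 0x78563412 on a little-endian host.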
2961  */
2962 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2963 {
2964 	int ret;
2965 
2966 	if (!tg3_flag(tp, NVRAM))
2967 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2968 
2969 	offset = tg3_nvram_phys_addr(tp, offset);
2970 
2971 	if (offset > NVRAM_ADDR_MSK)
2972 		return -EINVAL;
2973 
2974 	ret = tg3_nvram_lock(tp);
2975 	if (ret)
2976 		return ret;
2977 
2978 	tg3_enable_nvram_access(tp);
2979 
2980 	tw32(NVRAM_ADDR, offset);
2981 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2982 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2983 
2984 	if (ret == 0)
2985 		*val = tr32(NVRAM_RDDATA);
2986 
2987 	tg3_disable_nvram_access(tp);
2988 
2989 	tg3_nvram_unlock(tp);
2990 
2991 	return ret;
2992 }
2993 
2994 /* Ensures NVRAM data is in bytestream format. */
2995 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2996 {
2997 	u32 v;
2998 	int res = tg3_nvram_read(tp, offset, &v);
2999 	if (!res)
3000 		*val = cpu_to_be32(v);
3001 	return res;
3002 }
3003 
3004 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3005 				    u32 offset, u32 len, u8 *buf)
3006 {
3007 	int i, j, rc = 0;
3008 	u32 val;
3009 
3010 	for (i = 0; i < len; i += 4) {
3011 		u32 addr;
3012 		__be32 data;
3013 
3014 		addr = offset + i;
3015 
3016 		memcpy(&data, buf + i, 4);
3017 
3018 		/*
3019 		 * The SEEPROM interface expects the data to always be opposite
3020 		 * the native endian format.  We accomplish this by reversing
3021 		 * all the operations that would have been performed on the
3022 		 * data from a call to tg3_nvram_read_be32().
3023 		 */
3024 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3025 
3026 		val = tr32(GRC_EEPROM_ADDR);
3027 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3028 
3029 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3030 			EEPROM_ADDR_READ);
3031 		tw32(GRC_EEPROM_ADDR, val |
3032 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3033 			(addr & EEPROM_ADDR_ADDR_MASK) |
3034 			EEPROM_ADDR_START |
3035 			EEPROM_ADDR_WRITE);
3036 
3037 		for (j = 0; j < 1000; j++) {
3038 			val = tr32(GRC_EEPROM_ADDR);
3039 
3040 			if (val & EEPROM_ADDR_COMPLETE)
3041 				break;
3042 			msleep(1);
3043 		}
3044 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3045 			rc = -EBUSY;
3046 			break;
3047 		}
3048 	}
3049 
3050 	return rc;
3051 }
3052 
3053 /* offset and length are dword aligned */
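/* Unbuffered flash can only be rewritten a full page at a time: read
 * the page into a bounce buffer, merge in the new bytes, issue a
 * write-enable, erase the page, write-enable again, then program the
 * page back one dword at a time with FIRST/LAST framing.
 */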
3054 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3055 		u8 *buf)
3056 {
3057 	int ret = 0;
3058 	u32 pagesize = tp->nvram_pagesize;
3059 	u32 pagemask = pagesize - 1;
3060 	u32 nvram_cmd;
3061 	u8 *tmp;
3062 
3063 	tmp = kmalloc(pagesize, GFP_KERNEL);
3064 	if (tmp == NULL)
3065 		return -ENOMEM;
3066 
3067 	while (len) {
3068 		int j;
3069 		u32 phy_addr, page_off, size;
3070 
3071 		phy_addr = offset & ~pagemask;
3072 
3073 		for (j = 0; j < pagesize; j += 4) {
3074 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3075 						  (__be32 *) (tmp + j));
3076 			if (ret)
3077 				break;
3078 		}
3079 		if (ret)
3080 			break;
3081 
3082 		page_off = offset & pagemask;
3083 		size = pagesize;
3084 		if (len < size)
3085 			size = len;
3086 
3087 		len -= size;
3088 
3089 		memcpy(tmp + page_off, buf, size);
3090 
3091 		offset = offset + (pagesize - page_off);
3092 
3093 		tg3_enable_nvram_access(tp);
3094 
3095 		/*
3096 		 * Before we can erase the flash page, we need
3097 		 * to issue a special "write enable" command.
3098 		 */
3099 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3100 
3101 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3102 			break;
3103 
3104 		/* Erase the target page */
3105 		tw32(NVRAM_ADDR, phy_addr);
3106 
3107 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3108 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3109 
3110 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3111 			break;
3112 
3113 		/* Issue another write enable to start the write. */
3114 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3115 
3116 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3117 			break;
3118 
3119 		for (j = 0; j < pagesize; j += 4) {
3120 			__be32 data;
3121 
3122 			data = *((__be32 *) (tmp + j));
3123 
3124 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3125 
3126 			tw32(NVRAM_ADDR, phy_addr + j);
3127 
3128 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3129 				NVRAM_CMD_WR;
3130 
3131 			if (j == 0)
3132 				nvram_cmd |= NVRAM_CMD_FIRST;
3133 			else if (j == (pagesize - 4))
3134 				nvram_cmd |= NVRAM_CMD_LAST;
3135 
3136 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3137 			if (ret)
3138 				break;
3139 		}
3140 		if (ret)
3141 			break;
3142 	}
3143 
3144 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3145 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3146 
3147 	kfree(tmp);
3148 
3149 	return ret;
3150 }
3151 
3152 /* offset and length are dword aligned */
3153 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3154 		u8 *buf)
3155 {
3156 	int i, ret = 0;
3157 
3158 	for (i = 0; i < len; i += 4, offset += 4) {
3159 		u32 page_off, phy_addr, nvram_cmd;
3160 		__be32 data;
3161 
3162 		memcpy(&data, buf + i, 4);
3163 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3164 
3165 		page_off = offset % tp->nvram_pagesize;
3166 
3167 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3168 
3169 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3170 
3171 		if (page_off == 0 || i == 0)
3172 			nvram_cmd |= NVRAM_CMD_FIRST;
3173 		if (page_off == (tp->nvram_pagesize - 4))
3174 			nvram_cmd |= NVRAM_CMD_LAST;
3175 
3176 		if (i == (len - 4))
3177 			nvram_cmd |= NVRAM_CMD_LAST;
3178 
3179 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3180 		    !tg3_flag(tp, FLASH) ||
3181 		    !tg3_flag(tp, 57765_PLUS))
3182 			tw32(NVRAM_ADDR, phy_addr);
3183 
3184 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3185 		    !tg3_flag(tp, 5755_PLUS) &&
3186 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3187 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3188 			u32 cmd;
3189 
3190 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3191 			ret = tg3_nvram_exec_cmd(tp, cmd);
3192 			if (ret)
3193 				break;
3194 		}
3195 		if (!tg3_flag(tp, FLASH)) {
3196 			/* We always do complete word writes to eeprom. */
3197 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3198 		}
3199 
3200 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3201 		if (ret)
3202 			break;
3203 	}
3204 	return ret;
3205 }
3206 
3207 /* offset and length are dword aligned */
3208 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3209 {
3210 	int ret;
3211 
3212 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3213 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3214 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3215 		udelay(40);
3216 	}
3217 
3218 	if (!tg3_flag(tp, NVRAM)) {
3219 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3220 	} else {
3221 		u32 grc_mode;
3222 
3223 		ret = tg3_nvram_lock(tp);
3224 		if (ret)
3225 			return ret;
3226 
3227 		tg3_enable_nvram_access(tp);
3228 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3229 			tw32(NVRAM_WRITE1, 0x406);
3230 
3231 		grc_mode = tr32(GRC_MODE);
3232 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3233 
3234 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3235 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3236 				buf);
3237 		} else {
3238 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3239 				buf);
3240 		}
3241 
3242 		grc_mode = tr32(GRC_MODE);
3243 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3244 
3245 		tg3_disable_nvram_access(tp);
3246 		tg3_nvram_unlock(tp);
3247 	}
3248 
3249 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3250 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3251 		udelay(40);
3252 	}
3253 
3254 	return ret;
3255 }
3256 
3257 #define RX_CPU_SCRATCH_BASE	0x30000
3258 #define RX_CPU_SCRATCH_SIZE	0x04000
3259 #define TX_CPU_SCRATCH_BASE	0x34000
3260 #define TX_CPU_SCRATCH_SIZE	0x04000
3261 
3262 /* tp->lock is held. */
3263 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3264 {
3265 	int i;
3266 
3267 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3268 
3269 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3270 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3271 
3272 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3273 		return 0;
3274 	}
3275 	if (offset == RX_CPU_BASE) {
3276 		for (i = 0; i < 10000; i++) {
3277 			tw32(offset + CPU_STATE, 0xffffffff);
3278 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3279 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3280 				break;
3281 		}
3282 
3283 		tw32(offset + CPU_STATE, 0xffffffff);
3284 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3285 		udelay(10);
3286 	} else {
3287 		for (i = 0; i < 10000; i++) {
3288 			tw32(offset + CPU_STATE, 0xffffffff);
3289 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3290 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3291 				break;
3292 		}
3293 	}
3294 
3295 	if (i >= 10000) {
3296 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3297 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3298 		return -ENODEV;
3299 	}
3300 
3301 	/* Clear firmware's nvram arbitration. */
3302 	if (tg3_flag(tp, NVRAM))
3303 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3304 	return 0;
3305 }
3306 
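/* Describes a firmware image bound for an on-chip CPU: fw_base is the
 * load address from the blob header, fw_len the payload length in
 * bytes, and fw_data the payload itself as big-endian words.
 */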
3307 struct fw_info {
3308 	unsigned int fw_base;
3309 	unsigned int fw_len;
3310 	const __be32 *fw_data;
3311 };
3312 
3313 /* tp->lock is held. */
3314 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3315 				 u32 cpu_scratch_base, int cpu_scratch_size,
3316 				 struct fw_info *info)
3317 {
3318 	int err, lock_err, i;
3319 	void (*write_op)(struct tg3 *, u32, u32);
3320 
3321 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3322 		netdev_err(tp->dev,
3323 			   "%s: trying to load TX cpu firmware on a 5705-plus chip\n",
3324 			   __func__);
3325 		return -EINVAL;
3326 	}
3327 
3328 	if (tg3_flag(tp, 5705_PLUS))
3329 		write_op = tg3_write_mem;
3330 	else
3331 		write_op = tg3_write_indirect_reg32;
3332 
3333 	/* It is possible that bootcode is still loading at this point.
3334 	 * Get the nvram lock first before halting the cpu.
3335 	 */
3336 	lock_err = tg3_nvram_lock(tp);
3337 	err = tg3_halt_cpu(tp, cpu_base);
3338 	if (!lock_err)
3339 		tg3_nvram_unlock(tp);
3340 	if (err)
3341 		goto out;
3342 
3343 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3344 		write_op(tp, cpu_scratch_base + i, 0);
3345 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3346 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3347 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3348 		write_op(tp, (cpu_scratch_base +
3349 			      (info->fw_base & 0xffff) +
3350 			      (i * sizeof(u32))),
3351 			      be32_to_cpu(info->fw_data[i]));
3352 
3353 	err = 0;
3354 
3355 out:
3356 	return err;
3357 }
3358 
3359 /* tp->lock is held. */
3360 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3361 {
3362 	struct fw_info info;
3363 	const __be32 *fw_data;
3364 	int err, i;
3365 
3366 	fw_data = (void *)tp->fw->data;
3367 
3368 	/* The firmware blob starts with version numbers, followed by the
3369 	 * start address and length.  We use the complete length here:
3370 	 * length = end_address_of_bss - start_address_of_text; the
3371 	 * remainder is the blob to be loaded contiguously
3372 	 * from the start address. */
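	/* Concretely, the 12-byte header looks like this (a sketch of
	 * the layout described above):
	 *   fw_data[0] = firmware version
	 *   fw_data[1] = start (load) address
	 *   fw_data[2] = advertised length
	 * The payload begins at fw_data[3], and the driver trusts the
	 * file size (tp->fw->size - 12) over the advertised length.
	 */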
3373 
3374 	info.fw_base = be32_to_cpu(fw_data[1]);
3375 	info.fw_len = tp->fw->size - 12;
3376 	info.fw_data = &fw_data[3];
3377 
3378 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3379 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3380 				    &info);
3381 	if (err)
3382 		return err;
3383 
3384 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3385 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3386 				    &info);
3387 	if (err)
3388 		return err;
3389 
3390 	/* Now startup only the RX cpu. */
3391 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3392 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3393 
3394 	for (i = 0; i < 5; i++) {
3395 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3396 			break;
3397 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3398 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3399 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3400 		udelay(1000);
3401 	}
3402 	if (i >= 5) {
3403 		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3404 			   "should be %08x\n", __func__,
3405 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3406 		return -ENODEV;
3407 	}
3408 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3409 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3410 
3411 	return 0;
3412 }
3413 
3414 /* tp->lock is held. */
3415 static int tg3_load_tso_firmware(struct tg3 *tp)
3416 {
3417 	struct fw_info info;
3418 	const __be32 *fw_data;
3419 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3420 	int err, i;
3421 
3422 	if (tg3_flag(tp, HW_TSO_1) ||
3423 	    tg3_flag(tp, HW_TSO_2) ||
3424 	    tg3_flag(tp, HW_TSO_3))
3425 		return 0;
3426 
3427 	fw_data = (void *)tp->fw->data;
3428 
3429 	/* The firmware blob starts with version numbers, followed by the
3430 	 * start address and length.  We use the complete length here:
3431 	 * length = end_address_of_bss - start_address_of_text; the
3432 	 * remainder is the blob to be loaded contiguously
3433 	 * from the start address. */
3434 
3435 	info.fw_base = be32_to_cpu(fw_data[1]);
3436 	cpu_scratch_size = tp->fw_len;
3437 	info.fw_len = tp->fw->size - 12;
3438 	info.fw_data = &fw_data[3];
3439 
3440 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3441 		cpu_base = RX_CPU_BASE;
3442 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3443 	} else {
3444 		cpu_base = TX_CPU_BASE;
3445 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3446 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3447 	}
3448 
3449 	err = tg3_load_firmware_cpu(tp, cpu_base,
3450 				    cpu_scratch_base, cpu_scratch_size,
3451 				    &info);
3452 	if (err)
3453 		return err;
3454 
3455 	/* Now startup the cpu. */
3456 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3457 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3458 
3459 	for (i = 0; i < 5; i++) {
3460 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3461 			break;
3462 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3463 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3464 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3465 		udelay(1000);
3466 	}
3467 	if (i >= 5) {
3468 		netdev_err(tp->dev,
3469 			   "%s failed to set CPU PC: is %08x, should be %08x\n",
3470 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3471 		return -ENODEV;
3472 	}
3473 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3474 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3475 	return 0;
3476 }
3477 
3478 
3479 /* tp->lock is held. */
3480 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3481 {
3482 	u32 addr_high, addr_low;
3483 	int i;
3484 
3485 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3486 		     tp->dev->dev_addr[1]);
3487 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3488 		    (tp->dev->dev_addr[3] << 16) |
3489 		    (tp->dev->dev_addr[4] <<  8) |
3490 		    (tp->dev->dev_addr[5] <<  0));
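	/* For example, the MAC address 00:10:18:aa:bb:cc packs as
	 * addr_high = 0x00000010 and addr_low = 0x18aabbcc, matching
	 * the MAC_ADDR_x_{HIGH,LOW} register layout.
	 */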
3491 	for (i = 0; i < 4; i++) {
3492 		if (i == 1 && skip_mac_1)
3493 			continue;
3494 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3495 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3496 	}
3497 
3498 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3499 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3500 		for (i = 0; i < 12; i++) {
3501 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3502 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3503 		}
3504 	}
3505 
3506 	addr_high = (tp->dev->dev_addr[0] +
3507 		     tp->dev->dev_addr[1] +
3508 		     tp->dev->dev_addr[2] +
3509 		     tp->dev->dev_addr[3] +
3510 		     tp->dev->dev_addr[4] +
3511 		     tp->dev->dev_addr[5]) &
3512 		TX_BACKOFF_SEED_MASK;
3513 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3514 }
3515 
3516 static void tg3_enable_register_access(struct tg3 *tp)
3517 {
3518 	/*
3519 	 * Make sure register accesses (indirect or otherwise) will function
3520 	 * correctly.
3521 	 */
3522 	pci_write_config_dword(tp->pdev,
3523 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3524 }
3525 
3526 static int tg3_power_up(struct tg3 *tp)
3527 {
3528 	int err;
3529 
3530 	tg3_enable_register_access(tp);
3531 
3532 	err = pci_set_power_state(tp->pdev, PCI_D0);
3533 	if (!err) {
3534 		/* Switch out of Vaux if it is a NIC */
3535 		tg3_pwrsrc_switch_to_vmain(tp);
3536 	} else {
3537 		netdev_err(tp->dev, "Transition to D0 failed\n");
3538 	}
3539 
3540 	return err;
3541 }
3542 
3543 static int tg3_setup_phy(struct tg3 *, int);
3544 
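/* Quiesce the device for a power-down: mask PCI interrupts, park the
 * PHY (optionally in a low-power state), program the MAC and clocks
 * for wake-on-LAN if requested, then let tg3_frob_aux_power() pick
 * the power source.
 */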
3545 static int tg3_power_down_prepare(struct tg3 *tp)
3546 {
3547 	u32 misc_host_ctrl;
3548 	bool device_should_wake, do_low_power;
3549 
3550 	tg3_enable_register_access(tp);
3551 
3552 	/* Restore the CLKREQ setting. */
3553 	if (tg3_flag(tp, CLKREQ_BUG)) {
3554 		u16 lnkctl;
3555 
3556 		pci_read_config_word(tp->pdev,
3557 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3558 				     &lnkctl);
3559 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3560 		pci_write_config_word(tp->pdev,
3561 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3562 				      lnkctl);
3563 	}
3564 
3565 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3566 	tw32(TG3PCI_MISC_HOST_CTRL,
3567 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3568 
3569 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3570 			     tg3_flag(tp, WOL_ENABLE);
3571 
3572 	if (tg3_flag(tp, USE_PHYLIB)) {
3573 		do_low_power = false;
3574 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3575 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3576 			struct phy_device *phydev;
3577 			u32 phyid, advertising;
3578 
3579 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3580 
3581 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3582 
3583 			tp->link_config.speed = phydev->speed;
3584 			tp->link_config.duplex = phydev->duplex;
3585 			tp->link_config.autoneg = phydev->autoneg;
3586 			tp->link_config.advertising = phydev->advertising;
3587 
3588 			advertising = ADVERTISED_TP |
3589 				      ADVERTISED_Pause |
3590 				      ADVERTISED_Autoneg |
3591 				      ADVERTISED_10baseT_Half;
3592 
3593 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3594 				if (tg3_flag(tp, WOL_SPEED_100MB))
3595 					advertising |=
3596 						ADVERTISED_100baseT_Half |
3597 						ADVERTISED_100baseT_Full |
3598 						ADVERTISED_10baseT_Full;
3599 				else
3600 					advertising |= ADVERTISED_10baseT_Full;
3601 			}
3602 
3603 			phydev->advertising = advertising;
3604 
3605 			phy_start_aneg(phydev);
3606 
3607 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3608 			if (phyid != PHY_ID_BCMAC131) {
3609 				phyid &= PHY_BCM_OUI_MASK;
3610 				if (phyid == PHY_BCM_OUI_1 ||
3611 				    phyid == PHY_BCM_OUI_2 ||
3612 				    phyid == PHY_BCM_OUI_3)
3613 					do_low_power = true;
3614 			}
3615 		}
3616 	} else {
3617 		do_low_power = true;
3618 
3619 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3620 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3621 
3622 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3623 			tg3_setup_phy(tp, 0);
3624 	}
3625 
3626 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3627 		u32 val;
3628 
3629 		val = tr32(GRC_VCPU_EXT_CTRL);
3630 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3631 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3632 		int i;
3633 		u32 val;
3634 
3635 		for (i = 0; i < 200; i++) {
3636 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3637 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3638 				break;
3639 			msleep(1);
3640 		}
3641 	}
3642 	if (tg3_flag(tp, WOL_CAP))
3643 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3644 						     WOL_DRV_STATE_SHUTDOWN |
3645 						     WOL_DRV_WOL |
3646 						     WOL_SET_MAGIC_PKT);
3647 
3648 	if (device_should_wake) {
3649 		u32 mac_mode;
3650 
3651 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3652 			if (do_low_power &&
3653 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3654 				tg3_phy_auxctl_write(tp,
3655 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3656 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3657 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3658 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3659 				udelay(40);
3660 			}
3661 
3662 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3663 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3664 			else
3665 				mac_mode = MAC_MODE_PORT_MODE_MII;
3666 
3667 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3668 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3669 			    ASIC_REV_5700) {
3670 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3671 					     SPEED_100 : SPEED_10;
3672 				if (tg3_5700_link_polarity(tp, speed))
3673 					mac_mode |= MAC_MODE_LINK_POLARITY;
3674 				else
3675 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3676 			}
3677 		} else {
3678 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3679 		}
3680 
3681 		if (!tg3_flag(tp, 5750_PLUS))
3682 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3683 
3684 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3685 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3686 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3687 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3688 
3689 		if (tg3_flag(tp, ENABLE_APE))
3690 			mac_mode |= MAC_MODE_APE_TX_EN |
3691 				    MAC_MODE_APE_RX_EN |
3692 				    MAC_MODE_TDE_ENABLE;
3693 
3694 		tw32_f(MAC_MODE, mac_mode);
3695 		udelay(100);
3696 
3697 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3698 		udelay(10);
3699 	}
3700 
3701 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3702 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3703 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3704 		u32 base_val;
3705 
3706 		base_val = tp->pci_clock_ctrl;
3707 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3708 			     CLOCK_CTRL_TXCLK_DISABLE);
3709 
3710 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3711 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3712 	} else if (tg3_flag(tp, 5780_CLASS) ||
3713 		   tg3_flag(tp, CPMU_PRESENT) ||
3714 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3715 		/* do nothing */
3716 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3717 		u32 newbits1, newbits2;
3718 
3719 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3720 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3721 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3722 				    CLOCK_CTRL_TXCLK_DISABLE |
3723 				    CLOCK_CTRL_ALTCLK);
3724 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3725 		} else if (tg3_flag(tp, 5705_PLUS)) {
3726 			newbits1 = CLOCK_CTRL_625_CORE;
3727 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3728 		} else {
3729 			newbits1 = CLOCK_CTRL_ALTCLK;
3730 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3731 		}
3732 
3733 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3734 			    40);
3735 
3736 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3737 			    40);
3738 
3739 		if (!tg3_flag(tp, 5705_PLUS)) {
3740 			u32 newbits3;
3741 
3742 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3743 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3744 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3745 					    CLOCK_CTRL_TXCLK_DISABLE |
3746 					    CLOCK_CTRL_44MHZ_CORE);
3747 			} else {
3748 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3749 			}
3750 
3751 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3752 				    tp->pci_clock_ctrl | newbits3, 40);
3753 		}
3754 	}
3755 
3756 	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3757 		tg3_power_down_phy(tp, do_low_power);
3758 
3759 	tg3_frob_aux_power(tp, true);
3760 
3761 	/* Workaround for unstable PLL clock */
3762 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3763 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3764 		u32 val = tr32(0x7d00);
3765 
3766 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3767 		tw32(0x7d00, val);
3768 		if (!tg3_flag(tp, ENABLE_ASF)) {
3769 			int err;
3770 
3771 			err = tg3_nvram_lock(tp);
3772 			tg3_halt_cpu(tp, RX_CPU_BASE);
3773 			if (!err)
3774 				tg3_nvram_unlock(tp);
3775 		}
3776 	}
3777 
3778 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3779 
3780 	return 0;
3781 }
3782 
3783 static void tg3_power_down(struct tg3 *tp)
3784 {
3785 	tg3_power_down_prepare(tp);
3786 
3787 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3788 	pci_set_power_state(tp->pdev, PCI_D3hot);
3789 }
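
/* Note on the sequence above: pci_wake_from_d3() arms (or disarms) PME
 * generation from D3 according to the WOL_ENABLE flag, and
 * pci_set_power_state() then moves the device to D3hot.  Any
 * Wake-on-LAN state (magic-packet matching, the WoL mailbox signature)
 * must already have been programmed by tg3_power_down_prepare().
 */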
3790 
3791 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3792 {
3793 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3794 	case MII_TG3_AUX_STAT_10HALF:
3795 		*speed = SPEED_10;
3796 		*duplex = DUPLEX_HALF;
3797 		break;
3798 
3799 	case MII_TG3_AUX_STAT_10FULL:
3800 		*speed = SPEED_10;
3801 		*duplex = DUPLEX_FULL;
3802 		break;
3803 
3804 	case MII_TG3_AUX_STAT_100HALF:
3805 		*speed = SPEED_100;
3806 		*duplex = DUPLEX_HALF;
3807 		break;
3808 
3809 	case MII_TG3_AUX_STAT_100FULL:
3810 		*speed = SPEED_100;
3811 		*duplex = DUPLEX_FULL;
3812 		break;
3813 
3814 	case MII_TG3_AUX_STAT_1000HALF:
3815 		*speed = SPEED_1000;
3816 		*duplex = DUPLEX_HALF;
3817 		break;
3818 
3819 	case MII_TG3_AUX_STAT_1000FULL:
3820 		*speed = SPEED_1000;
3821 		*duplex = DUPLEX_FULL;
3822 		break;
3823 
3824 	default:
3825 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3826 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3827 				 SPEED_10;
3828 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3829 				  DUPLEX_HALF;
3830 			break;
3831 		}
3832 		*speed = SPEED_UNKNOWN;
3833 		*duplex = DUPLEX_UNKNOWN;
3834 		break;
3835 	}
3836 }
3837 
3838 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3839 {
3840 	int err = 0;
3841 	u32 val, new_adv;
3842 
3843 	new_adv = ADVERTISE_CSMA;
3844 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3845 	new_adv |= mii_advertise_flowctrl(flowctrl);
3846 
3847 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3848 	if (err)
3849 		goto done;
3850 
3851 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3852 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3853 
3854 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3855 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3856 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3857 
3858 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3859 		if (err)
3860 			goto done;
3861 	}
3862 
3863 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3864 		goto done;
3865 
3866 	tw32(TG3_CPMU_EEE_MODE,
3867 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3868 
3869 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3870 	if (!err) {
3871 		u32 err2;
3872 
3873 		val = 0;
3874 		/* Advertise 100BASE-TX EEE ability */
3875 		if (advertise & ADVERTISED_100baseT_Full)
3876 			val |= MDIO_AN_EEE_ADV_100TX;
3877 		/* Advertise 1000BASE-T EEE ability */
3878 		if (advertise & ADVERTISED_1000baseT_Full)
3879 			val |= MDIO_AN_EEE_ADV_1000T;
3880 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3881 		if (err)
3882 			val = 0;
3883 
3884 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3885 		case ASIC_REV_5717:
3886 		case ASIC_REV_57765:
3887 		case ASIC_REV_57766:
3888 		case ASIC_REV_5719:
3889 			/* If we advertised any EEE abilities above... */
3890 			if (val)
3891 				val = MII_TG3_DSP_TAP26_ALNOKO |
3892 				      MII_TG3_DSP_TAP26_RMRXSTO |
3893 				      MII_TG3_DSP_TAP26_OPCSINPT;
3894 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3895 			/* Fall through */
3896 		case ASIC_REV_5720:
3897 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3898 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3899 						 MII_TG3_DSP_CH34TP2_HIBW01);
3900 		}
3901 
3902 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3903 		if (!err)
3904 			err = err2;
3905 	}
3906 
3907 done:
3908 	return err;
3909 }
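
/* Illustrative aside (not driver code): a userspace-compilable sketch
 * of the ethtool->MII advertisement mapping that the function above
 * obtains from ethtool_adv_to_mii_adv_t() in <linux/mii.h>.  The
 * constant values mirror the uapi headers; adv_to_mii() is a
 * hypothetical name used only for this sketch.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define ADVERTISED_10baseT_Half		(1u << 0)
#define ADVERTISED_10baseT_Full		(1u << 1)
#define ADVERTISED_100baseT_Half	(1u << 2)
#define ADVERTISED_100baseT_Full	(1u << 3)

#define ADVERTISE_10HALF	0x0020	/* MII_ADVERTISE register bits */
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100

static uint32_t adv_to_mii(uint32_t ethadv)
{
	uint32_t mii = 0;

	if (ethadv & ADVERTISED_10baseT_Half)
		mii |= ADVERTISE_10HALF;
	if (ethadv & ADVERTISED_10baseT_Full)
		mii |= ADVERTISE_10FULL;
	if (ethadv & ADVERTISED_100baseT_Half)
		mii |= ADVERTISE_100HALF;
	if (ethadv & ADVERTISED_100baseT_Full)
		mii |= ADVERTISE_100FULL;
	return mii;
}

int main(void)
{
	/* 10/full + 100/full advertisement becomes 0x0140 on the wire */
	assert(adv_to_mii(ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Full) == 0x0140);
	return 0;
}
#endif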
3910 
3911 static void tg3_phy_copper_begin(struct tg3 *tp)
3912 {
3913 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3914 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3915 		u32 adv, fc;
3916 
3917 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3918 			adv = ADVERTISED_10baseT_Half |
3919 			      ADVERTISED_10baseT_Full;
3920 			if (tg3_flag(tp, WOL_SPEED_100MB))
3921 				adv |= ADVERTISED_100baseT_Half |
3922 				       ADVERTISED_100baseT_Full;
3923 
3924 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3925 		} else {
3926 			adv = tp->link_config.advertising;
3927 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3928 				adv &= ~(ADVERTISED_1000baseT_Half |
3929 					 ADVERTISED_1000baseT_Full);
3930 
3931 			fc = tp->link_config.flowctrl;
3932 		}
3933 
3934 		tg3_phy_autoneg_cfg(tp, adv, fc);
3935 
3936 		tg3_writephy(tp, MII_BMCR,
3937 			     BMCR_ANENABLE | BMCR_ANRESTART);
3938 	} else {
3939 		int i;
3940 		u32 bmcr, orig_bmcr;
3941 
3942 		tp->link_config.active_speed = tp->link_config.speed;
3943 		tp->link_config.active_duplex = tp->link_config.duplex;
3944 
3945 		bmcr = 0;
3946 		switch (tp->link_config.speed) {
3947 		default:
3948 		case SPEED_10:
3949 			break;
3950 
3951 		case SPEED_100:
3952 			bmcr |= BMCR_SPEED100;
3953 			break;
3954 
3955 		case SPEED_1000:
3956 			bmcr |= BMCR_SPEED1000;
3957 			break;
3958 		}
3959 
3960 		if (tp->link_config.duplex == DUPLEX_FULL)
3961 			bmcr |= BMCR_FULLDPLX;
3962 
3963 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3964 		    (bmcr != orig_bmcr)) {
3965 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3966 			for (i = 0; i < 1500; i++) {
3967 				u32 tmp;
3968 
3969 				udelay(10);
3970 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3971 				    tg3_readphy(tp, MII_BMSR, &tmp))
3972 					continue;
3973 				if (!(tmp & BMSR_LSTATUS)) {
3974 					udelay(40);
3975 					break;
3976 				}
3977 			}
3978 			tg3_writephy(tp, MII_BMCR, bmcr);
3979 			udelay(40);
3980 		}
3981 	}
3982 }
3983 
3984 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3985 {
3986 	int err;
3987 
3988 	/* Turn off tap power management and
3989 	 * set the extended packet length bit. */
3990 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3991 
3992 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3993 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3994 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3995 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3996 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3997 
3998 	udelay(40);
3999 
4000 	return err;
4001 }
4002 
4003 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4004 {
4005 	u32 advmsk, tgtadv, advertising;
4006 
4007 	advertising = tp->link_config.advertising;
4008 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4009 
4010 	advmsk = ADVERTISE_ALL;
4011 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4012 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4013 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4014 	}
4015 
4016 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4017 		return false;
4018 
4019 	if ((*lcladv & advmsk) != tgtadv)
4020 		return false;
4021 
4022 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4023 		u32 tg3_ctrl;
4024 
4025 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4026 
4027 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4028 			return false;
4029 
4030 		if (tgtadv &&
4031 		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4032 		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4033 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4034 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4035 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4036 		} else {
4037 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4038 		}
4039 
4040 		if (tg3_ctrl != tgtadv)
4041 			return false;
4042 	}
4043 
4044 	return true;
4045 }
4046 
4047 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4048 {
4049 	u32 lpeth = 0;
4050 
4051 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4052 		u32 val;
4053 
4054 		if (tg3_readphy(tp, MII_STAT1000, &val))
4055 			return false;
4056 
4057 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4058 	}
4059 
4060 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4061 		return false;
4062 
4063 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4064 	tp->link_config.rmt_adv = lpeth;
4065 
4066 	return true;
4067 }
4068 
4069 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4070 {
4071 	int current_link_up;
4072 	u32 bmsr, val;
4073 	u32 lcl_adv, rmt_adv;
4074 	u16 current_speed;
4075 	u8 current_duplex;
4076 	int i, err;
4077 
4078 	tw32(MAC_EVENT, 0);
4079 
4080 	tw32_f(MAC_STATUS,
4081 	     (MAC_STATUS_SYNC_CHANGED |
4082 	      MAC_STATUS_CFG_CHANGED |
4083 	      MAC_STATUS_MI_COMPLETION |
4084 	      MAC_STATUS_LNKSTATE_CHANGED));
4085 	udelay(40);
4086 
4087 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4088 		tw32_f(MAC_MI_MODE,
4089 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4090 		udelay(80);
4091 	}
4092 
4093 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4094 
4095 	/* Some third-party PHYs need to be reset when the link
4096 	 * goes down.
4097 	 */
4098 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4099 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4100 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4101 	    netif_carrier_ok(tp->dev)) {
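		/* BMSR latches link-failure events (IEEE 802.3 clause 22),
		 * so the first read returns the stale latched value and the
		 * second read reflects the current link state.
		 */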
4102 		tg3_readphy(tp, MII_BMSR, &bmsr);
4103 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4104 		    !(bmsr & BMSR_LSTATUS))
4105 			force_reset = 1;
4106 	}
4107 	if (force_reset)
4108 		tg3_phy_reset(tp);
4109 
4110 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4111 		tg3_readphy(tp, MII_BMSR, &bmsr);
4112 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4113 		    !tg3_flag(tp, INIT_COMPLETE))
4114 			bmsr = 0;
4115 
4116 		if (!(bmsr & BMSR_LSTATUS)) {
4117 			err = tg3_init_5401phy_dsp(tp);
4118 			if (err)
4119 				return err;
4120 
4121 			tg3_readphy(tp, MII_BMSR, &bmsr);
4122 			for (i = 0; i < 1000; i++) {
4123 				udelay(10);
4124 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4125 				    (bmsr & BMSR_LSTATUS)) {
4126 					udelay(40);
4127 					break;
4128 				}
4129 			}
4130 
4131 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4132 			    TG3_PHY_REV_BCM5401_B0 &&
4133 			    !(bmsr & BMSR_LSTATUS) &&
4134 			    tp->link_config.active_speed == SPEED_1000) {
4135 				err = tg3_phy_reset(tp);
4136 				if (!err)
4137 					err = tg3_init_5401phy_dsp(tp);
4138 				if (err)
4139 					return err;
4140 			}
4141 		}
4142 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4143 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4144 		/* 5701 {A0,B0} CRC bug workaround */
4145 		tg3_writephy(tp, 0x15, 0x0a75);
4146 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4147 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4148 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4149 	}
4150 
4151 	/* Clear pending interrupts... */
4152 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4153 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4154 
4155 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4156 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4157 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4158 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4159 
4160 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4161 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4162 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4163 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4164 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4165 		else
4166 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4167 	}
4168 
4169 	current_link_up = 0;
4170 	current_speed = SPEED_UNKNOWN;
4171 	current_duplex = DUPLEX_UNKNOWN;
4172 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4173 	tp->link_config.rmt_adv = 0;
4174 
4175 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4176 		err = tg3_phy_auxctl_read(tp,
4177 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4178 					  &val);
4179 		if (!err && !(val & (1 << 10))) {
4180 			tg3_phy_auxctl_write(tp,
4181 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4182 					     val | (1 << 10));
4183 			goto relink;
4184 		}
4185 	}
4186 
4187 	bmsr = 0;
4188 	for (i = 0; i < 100; i++) {
4189 		tg3_readphy(tp, MII_BMSR, &bmsr);
4190 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4191 		    (bmsr & BMSR_LSTATUS))
4192 			break;
4193 		udelay(40);
4194 	}
4195 
4196 	if (bmsr & BMSR_LSTATUS) {
4197 		u32 aux_stat, bmcr;
4198 
4199 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4200 		for (i = 0; i < 2000; i++) {
4201 			udelay(10);
4202 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4203 			    aux_stat)
4204 				break;
4205 		}
4206 
4207 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4208 					     &current_speed,
4209 					     &current_duplex);
4210 
4211 		bmcr = 0;
4212 		for (i = 0; i < 200; i++) {
4213 			tg3_readphy(tp, MII_BMCR, &bmcr);
4214 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4215 				continue;
4216 			if (bmcr && bmcr != 0x7fff)
4217 				break;
4218 			udelay(10);
4219 		}
4220 
4221 		lcl_adv = 0;
4222 		rmt_adv = 0;
4223 
4224 		tp->link_config.active_speed = current_speed;
4225 		tp->link_config.active_duplex = current_duplex;
4226 
4227 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4228 			if ((bmcr & BMCR_ANENABLE) &&
4229 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4230 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4231 				current_link_up = 1;
4232 		} else {
4233 			if (!(bmcr & BMCR_ANENABLE) &&
4234 			    tp->link_config.speed == current_speed &&
4235 			    tp->link_config.duplex == current_duplex &&
4236 			    tp->link_config.flowctrl ==
4237 			    tp->link_config.active_flowctrl) {
4238 				current_link_up = 1;
4239 			}
4240 		}
4241 
4242 		if (current_link_up == 1 &&
4243 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4244 			u32 reg, bit;
4245 
4246 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4247 				reg = MII_TG3_FET_GEN_STAT;
4248 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4249 			} else {
4250 				reg = MII_TG3_EXT_STAT;
4251 				bit = MII_TG3_EXT_STAT_MDIX;
4252 			}
4253 
4254 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4255 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4256 
4257 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4258 		}
4259 	}
4260 
4261 relink:
4262 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4263 		tg3_phy_copper_begin(tp);
4264 
4265 		tg3_readphy(tp, MII_BMSR, &bmsr);
4266 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4267 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4268 			current_link_up = 1;
4269 	}
4270 
4271 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4272 	if (current_link_up == 1) {
4273 		if (tp->link_config.active_speed == SPEED_100 ||
4274 		    tp->link_config.active_speed == SPEED_10)
4275 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4276 		else
4277 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4278 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4279 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4280 	else
4281 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4282 
4283 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4284 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4285 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4286 
4287 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4288 		if (current_link_up == 1 &&
4289 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4290 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4291 		else
4292 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4293 	}
4294 
4295 	/* ??? Without this setting Netgear GA302T PHY does not
4296 	 * ??? send/receive packets...
4297 	 */
4298 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4299 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4300 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4301 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4302 		udelay(80);
4303 	}
4304 
4305 	tw32_f(MAC_MODE, tp->mac_mode);
4306 	udelay(40);
4307 
4308 	tg3_phy_eee_adjust(tp, current_link_up);
4309 
4310 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4311 		/* Polled via timer. */
4312 		tw32_f(MAC_EVENT, 0);
4313 	} else {
4314 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4315 	}
4316 	udelay(40);
4317 
4318 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4319 	    current_link_up == 1 &&
4320 	    tp->link_config.active_speed == SPEED_1000 &&
4321 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4322 		udelay(120);
4323 		tw32_f(MAC_STATUS,
4324 		     (MAC_STATUS_SYNC_CHANGED |
4325 		      MAC_STATUS_CFG_CHANGED));
4326 		udelay(40);
4327 		tg3_write_mem(tp,
4328 			      NIC_SRAM_FIRMWARE_MBOX,
4329 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4330 	}
4331 
4332 	/* Prevent send BD corruption. */
4333 	if (tg3_flag(tp, CLKREQ_BUG)) {
4334 		u16 oldlnkctl, newlnkctl;
4335 
4336 		pci_read_config_word(tp->pdev,
4337 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4338 				     &oldlnkctl);
4339 		if (tp->link_config.active_speed == SPEED_100 ||
4340 		    tp->link_config.active_speed == SPEED_10)
4341 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4342 		else
4343 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4344 		if (newlnkctl != oldlnkctl)
4345 			pci_write_config_word(tp->pdev,
4346 					      pci_pcie_cap(tp->pdev) +
4347 					      PCI_EXP_LNKCTL, newlnkctl);
4348 	}
4349 
4350 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4351 		if (current_link_up)
4352 			netif_carrier_on(tp->dev);
4353 		else
4354 			netif_carrier_off(tp->dev);
4355 		tg3_link_report(tp);
4356 	}
4357 
4358 	return 0;
4359 }
4360 
4361 struct tg3_fiber_aneginfo {
4362 	int state;
4363 #define ANEG_STATE_UNKNOWN		0
4364 #define ANEG_STATE_AN_ENABLE		1
4365 #define ANEG_STATE_RESTART_INIT		2
4366 #define ANEG_STATE_RESTART		3
4367 #define ANEG_STATE_DISABLE_LINK_OK	4
4368 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4369 #define ANEG_STATE_ABILITY_DETECT	6
4370 #define ANEG_STATE_ACK_DETECT_INIT	7
4371 #define ANEG_STATE_ACK_DETECT		8
4372 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4373 #define ANEG_STATE_COMPLETE_ACK		10
4374 #define ANEG_STATE_IDLE_DETECT_INIT	11
4375 #define ANEG_STATE_IDLE_DETECT		12
4376 #define ANEG_STATE_LINK_OK		13
4377 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4378 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4379 
4380 	u32 flags;
4381 #define MR_AN_ENABLE		0x00000001
4382 #define MR_RESTART_AN		0x00000002
4383 #define MR_AN_COMPLETE		0x00000004
4384 #define MR_PAGE_RX		0x00000008
4385 #define MR_NP_LOADED		0x00000010
4386 #define MR_TOGGLE_TX		0x00000020
4387 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4388 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4389 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4390 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4391 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4392 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4393 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4394 #define MR_TOGGLE_RX		0x00002000
4395 #define MR_NP_RX		0x00004000
4396 
4397 #define MR_LINK_OK		0x80000000
4398 
4399 	unsigned long link_time, cur_time;
4400 
4401 	u32 ability_match_cfg;
4402 	int ability_match_count;
4403 
4404 	char ability_match, idle_match, ack_match;
4405 
4406 	u32 txconfig, rxconfig;
4407 #define ANEG_CFG_NP		0x00000080
4408 #define ANEG_CFG_ACK		0x00000040
4409 #define ANEG_CFG_RF2		0x00000020
4410 #define ANEG_CFG_RF1		0x00000010
4411 #define ANEG_CFG_PS2		0x00000001
4412 #define ANEG_CFG_PS1		0x00008000
4413 #define ANEG_CFG_HD		0x00004000
4414 #define ANEG_CFG_FD		0x00002000
4415 #define ANEG_CFG_INVAL		0x00001f06
4416 
4417 };
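
/* The ANEG_CFG_* values above are the 1000BASE-X base-page ability
 * bits (duplex, symmetric/asymmetric pause, remote fault, ack, next
 * page) laid out as they appear in the MAC_TX_AUTO_NEG and
 * MAC_RX_AUTO_NEG registers, which carry the IEEE 802.3 clause-37
 * configuration ordered sets.
 */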
4418 #define ANEG_OK		0
4419 #define ANEG_DONE	1
4420 #define ANEG_TIMER_ENAB	2
4421 #define ANEG_FAILED	(-1)
4422 
4423 #define ANEG_STATE_SETTLE_TIME	10000
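
/* ap->cur_time advances once per state-machine invocation.  The poll
 * loop in fiber_autoneg() below runs the machine roughly once per
 * microsecond, so ANEG_STATE_SETTLE_TIME (10000 ticks) corresponds to
 * about 10 ms and the 195000-iteration budget to roughly 195 ms.
 */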
4424 
4425 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4426 				   struct tg3_fiber_aneginfo *ap)
4427 {
4428 	u16 flowctrl;
4429 	unsigned long delta;
4430 	u32 rx_cfg_reg;
4431 	int ret;
4432 
4433 	if (ap->state == ANEG_STATE_UNKNOWN) {
4434 		ap->rxconfig = 0;
4435 		ap->link_time = 0;
4436 		ap->cur_time = 0;
4437 		ap->ability_match_cfg = 0;
4438 		ap->ability_match_count = 0;
4439 		ap->ability_match = 0;
4440 		ap->idle_match = 0;
4441 		ap->ack_match = 0;
4442 	}
4443 	ap->cur_time++;
4444 
4445 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4446 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4447 
4448 		if (rx_cfg_reg != ap->ability_match_cfg) {
4449 			ap->ability_match_cfg = rx_cfg_reg;
4450 			ap->ability_match = 0;
4451 			ap->ability_match_count = 0;
4452 		} else {
4453 			if (++ap->ability_match_count > 1) {
4454 				ap->ability_match = 1;
4455 				ap->ability_match_cfg = rx_cfg_reg;
4456 			}
4457 		}
4458 		if (rx_cfg_reg & ANEG_CFG_ACK)
4459 			ap->ack_match = 1;
4460 		else
4461 			ap->ack_match = 0;
4462 
4463 		ap->idle_match = 0;
4464 	} else {
4465 		ap->idle_match = 1;
4466 		ap->ability_match_cfg = 0;
4467 		ap->ability_match_count = 0;
4468 		ap->ability_match = 0;
4469 		ap->ack_match = 0;
4470 
4471 		rx_cfg_reg = 0;
4472 	}
4473 
4474 	ap->rxconfig = rx_cfg_reg;
4475 	ret = ANEG_OK;
4476 
4477 	switch (ap->state) {
4478 	case ANEG_STATE_UNKNOWN:
4479 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4480 			ap->state = ANEG_STATE_AN_ENABLE;
4481 
4482 		/* fallthru */
4483 	case ANEG_STATE_AN_ENABLE:
4484 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4485 		if (ap->flags & MR_AN_ENABLE) {
4486 			ap->link_time = 0;
4487 			ap->cur_time = 0;
4488 			ap->ability_match_cfg = 0;
4489 			ap->ability_match_count = 0;
4490 			ap->ability_match = 0;
4491 			ap->idle_match = 0;
4492 			ap->ack_match = 0;
4493 
4494 			ap->state = ANEG_STATE_RESTART_INIT;
4495 		} else {
4496 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4497 		}
4498 		break;
4499 
4500 	case ANEG_STATE_RESTART_INIT:
4501 		ap->link_time = ap->cur_time;
4502 		ap->flags &= ~(MR_NP_LOADED);
4503 		ap->txconfig = 0;
4504 		tw32(MAC_TX_AUTO_NEG, 0);
4505 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4506 		tw32_f(MAC_MODE, tp->mac_mode);
4507 		udelay(40);
4508 
4509 		ret = ANEG_TIMER_ENAB;
4510 		ap->state = ANEG_STATE_RESTART;
4511 
4512 		/* fallthru */
4513 	case ANEG_STATE_RESTART:
4514 		delta = ap->cur_time - ap->link_time;
4515 		if (delta > ANEG_STATE_SETTLE_TIME)
4516 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4517 		else
4518 			ret = ANEG_TIMER_ENAB;
4519 		break;
4520 
4521 	case ANEG_STATE_DISABLE_LINK_OK:
4522 		ret = ANEG_DONE;
4523 		break;
4524 
4525 	case ANEG_STATE_ABILITY_DETECT_INIT:
4526 		ap->flags &= ~(MR_TOGGLE_TX);
4527 		ap->txconfig = ANEG_CFG_FD;
4528 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4529 		if (flowctrl & ADVERTISE_1000XPAUSE)
4530 			ap->txconfig |= ANEG_CFG_PS1;
4531 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4532 			ap->txconfig |= ANEG_CFG_PS2;
4533 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4534 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4535 		tw32_f(MAC_MODE, tp->mac_mode);
4536 		udelay(40);
4537 
4538 		ap->state = ANEG_STATE_ABILITY_DETECT;
4539 		break;
4540 
4541 	case ANEG_STATE_ABILITY_DETECT:
4542 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4543 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4544 		break;
4545 
4546 	case ANEG_STATE_ACK_DETECT_INIT:
4547 		ap->txconfig |= ANEG_CFG_ACK;
4548 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4549 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4550 		tw32_f(MAC_MODE, tp->mac_mode);
4551 		udelay(40);
4552 
4553 		ap->state = ANEG_STATE_ACK_DETECT;
4554 
4555 		/* fallthru */
4556 	case ANEG_STATE_ACK_DETECT:
4557 		if (ap->ack_match != 0) {
4558 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4559 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4560 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4561 			} else {
4562 				ap->state = ANEG_STATE_AN_ENABLE;
4563 			}
4564 		} else if (ap->ability_match != 0 &&
4565 			   ap->rxconfig == 0) {
4566 			ap->state = ANEG_STATE_AN_ENABLE;
4567 		}
4568 		break;
4569 
4570 	case ANEG_STATE_COMPLETE_ACK_INIT:
4571 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4572 			ret = ANEG_FAILED;
4573 			break;
4574 		}
4575 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4576 			       MR_LP_ADV_HALF_DUPLEX |
4577 			       MR_LP_ADV_SYM_PAUSE |
4578 			       MR_LP_ADV_ASYM_PAUSE |
4579 			       MR_LP_ADV_REMOTE_FAULT1 |
4580 			       MR_LP_ADV_REMOTE_FAULT2 |
4581 			       MR_LP_ADV_NEXT_PAGE |
4582 			       MR_TOGGLE_RX |
4583 			       MR_NP_RX);
4584 		if (ap->rxconfig & ANEG_CFG_FD)
4585 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4586 		if (ap->rxconfig & ANEG_CFG_HD)
4587 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4588 		if (ap->rxconfig & ANEG_CFG_PS1)
4589 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4590 		if (ap->rxconfig & ANEG_CFG_PS2)
4591 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4592 		if (ap->rxconfig & ANEG_CFG_RF1)
4593 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4594 		if (ap->rxconfig & ANEG_CFG_RF2)
4595 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4596 		if (ap->rxconfig & ANEG_CFG_NP)
4597 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4598 
4599 		ap->link_time = ap->cur_time;
4600 
4601 		ap->flags ^= (MR_TOGGLE_TX);
4602 		if (ap->rxconfig & 0x0008)
4603 			ap->flags |= MR_TOGGLE_RX;
4604 		if (ap->rxconfig & ANEG_CFG_NP)
4605 			ap->flags |= MR_NP_RX;
4606 		ap->flags |= MR_PAGE_RX;
4607 
4608 		ap->state = ANEG_STATE_COMPLETE_ACK;
4609 		ret = ANEG_TIMER_ENAB;
4610 		break;
4611 
4612 	case ANEG_STATE_COMPLETE_ACK:
4613 		if (ap->ability_match != 0 &&
4614 		    ap->rxconfig == 0) {
4615 			ap->state = ANEG_STATE_AN_ENABLE;
4616 			break;
4617 		}
4618 		delta = ap->cur_time - ap->link_time;
4619 		if (delta > ANEG_STATE_SETTLE_TIME) {
4620 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4621 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4622 			} else {
4623 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4624 				    !(ap->flags & MR_NP_RX)) {
4625 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4626 				} else {
4627 					ret = ANEG_FAILED;
4628 				}
4629 			}
4630 		}
4631 		break;
4632 
4633 	case ANEG_STATE_IDLE_DETECT_INIT:
4634 		ap->link_time = ap->cur_time;
4635 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4636 		tw32_f(MAC_MODE, tp->mac_mode);
4637 		udelay(40);
4638 
4639 		ap->state = ANEG_STATE_IDLE_DETECT;
4640 		ret = ANEG_TIMER_ENAB;
4641 		break;
4642 
4643 	case ANEG_STATE_IDLE_DETECT:
4644 		if (ap->ability_match != 0 &&
4645 		    ap->rxconfig == 0) {
4646 			ap->state = ANEG_STATE_AN_ENABLE;
4647 			break;
4648 		}
4649 		delta = ap->cur_time - ap->link_time;
4650 		if (delta > ANEG_STATE_SETTLE_TIME) {
4651 			/* XXX another gem from the Broadcom driver :( */
4652 			ap->state = ANEG_STATE_LINK_OK;
4653 		}
4654 		break;
4655 
4656 	case ANEG_STATE_LINK_OK:
4657 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4658 		ret = ANEG_DONE;
4659 		break;
4660 
4661 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4662 		/* ??? unimplemented */
4663 		break;
4664 
4665 	case ANEG_STATE_NEXT_PAGE_WAIT:
4666 		/* ??? unimplemented */
4667 		break;
4668 
4669 	default:
4670 		ret = ANEG_FAILED;
4671 		break;
4672 	}
4673 
4674 	return ret;
4675 }
4676 
4677 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4678 {
4679 	int res = 0;
4680 	struct tg3_fiber_aneginfo aninfo;
4681 	int status = ANEG_FAILED;
4682 	unsigned int tick;
4683 	u32 tmp;
4684 
4685 	tw32_f(MAC_TX_AUTO_NEG, 0);
4686 
4687 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4688 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4689 	udelay(40);
4690 
4691 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4692 	udelay(40);
4693 
4694 	memset(&aninfo, 0, sizeof(aninfo));
4695 	aninfo.flags |= MR_AN_ENABLE;
4696 	aninfo.state = ANEG_STATE_UNKNOWN;
4697 	aninfo.cur_time = 0;
4698 	tick = 0;
4699 	while (++tick < 195000) {
4700 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4701 		if (status == ANEG_DONE || status == ANEG_FAILED)
4702 			break;
4703 
4704 		udelay(1);
4705 	}
4706 
4707 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4708 	tw32_f(MAC_MODE, tp->mac_mode);
4709 	udelay(40);
4710 
4711 	*txflags = aninfo.txconfig;
4712 	*rxflags = aninfo.flags;
4713 
4714 	if (status == ANEG_DONE &&
4715 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4716 			     MR_LP_ADV_FULL_DUPLEX)))
4717 		res = 1;
4718 
4719 	return res;
4720 }
4721 
4722 static void tg3_init_bcm8002(struct tg3 *tp)
4723 {
4724 	u32 mac_status = tr32(MAC_STATUS);
4725 	int i;
4726 
4727 	/* Reset when initializing the first time or when we have a link. */
4728 	if (tg3_flag(tp, INIT_COMPLETE) &&
4729 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4730 		return;
4731 
4732 	/* Set PLL lock range. */
4733 	tg3_writephy(tp, 0x16, 0x8007);
4734 
4735 	/* SW reset */
4736 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4737 
4738 	/* Wait for reset to complete. */
4739 	/* XXX schedule_timeout() ... */
4740 	for (i = 0; i < 500; i++)
4741 		udelay(10);
4742 
4743 	/* Config mode; select PMA/Ch 1 regs. */
4744 	tg3_writephy(tp, 0x10, 0x8411);
4745 
4746 	/* Enable auto-lock and comdet, select txclk for tx. */
4747 	tg3_writephy(tp, 0x11, 0x0a10);
4748 
4749 	tg3_writephy(tp, 0x18, 0x00a0);
4750 	tg3_writephy(tp, 0x16, 0x41ff);
4751 
4752 	/* Assert and deassert POR. */
4753 	tg3_writephy(tp, 0x13, 0x0400);
4754 	udelay(40);
4755 	tg3_writephy(tp, 0x13, 0x0000);
4756 
4757 	tg3_writephy(tp, 0x11, 0x0a50);
4758 	udelay(40);
4759 	tg3_writephy(tp, 0x11, 0x0a10);
4760 
4761 	/* Wait for signal to stabilize */
4762 	/* XXX schedule_timeout() ... */
4763 	for (i = 0; i < 15000; i++)
4764 		udelay(10);
4765 
4766 	/* Deselect the channel register so we can read the PHYID
4767 	 * later.
4768 	 */
4769 	tg3_writephy(tp, 0x10, 0x8011);
4770 }
4771 
4772 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4773 {
4774 	u16 flowctrl;
4775 	u32 sg_dig_ctrl, sg_dig_status;
4776 	u32 serdes_cfg, expected_sg_dig_ctrl;
4777 	int workaround, port_a;
4778 	int current_link_up;
4779 
4780 	serdes_cfg = 0;
4781 	expected_sg_dig_ctrl = 0;
4782 	workaround = 0;
4783 	port_a = 1;
4784 	current_link_up = 0;
4785 
4786 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4787 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4788 		workaround = 1;
4789 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4790 			port_a = 0;
4791 
4792 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4793 		/* preserve bits 20-23 for voltage regulator */
4794 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4795 	}
4796 
4797 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4798 
4799 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4800 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4801 			if (workaround) {
4802 				u32 val = serdes_cfg;
4803 
4804 				if (port_a)
4805 					val |= 0xc010000;
4806 				else
4807 					val |= 0x4010000;
4808 				tw32_f(MAC_SERDES_CFG, val);
4809 			}
4810 
4811 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4812 		}
4813 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4814 			tg3_setup_flow_control(tp, 0, 0);
4815 			current_link_up = 1;
4816 		}
4817 		goto out;
4818 	}
4819 
4820 	/* Want auto-negotiation.  */
4821 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4822 
4823 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4824 	if (flowctrl & ADVERTISE_1000XPAUSE)
4825 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4826 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4827 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4828 
4829 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4830 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4831 		    tp->serdes_counter &&
4832 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4833 				    MAC_STATUS_RCVD_CFG)) ==
4834 		     MAC_STATUS_PCS_SYNCED)) {
4835 			tp->serdes_counter--;
4836 			current_link_up = 1;
4837 			goto out;
4838 		}
4839 restart_autoneg:
4840 		if (workaround)
4841 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4842 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4843 		udelay(5);
4844 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4845 
4846 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4847 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4848 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4849 				 MAC_STATUS_SIGNAL_DET)) {
4850 		sg_dig_status = tr32(SG_DIG_STATUS);
4851 		mac_status = tr32(MAC_STATUS);
4852 
4853 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4854 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4855 			u32 local_adv = 0, remote_adv = 0;
4856 
4857 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4858 				local_adv |= ADVERTISE_1000XPAUSE;
4859 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4860 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4861 
4862 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4863 				remote_adv |= LPA_1000XPAUSE;
4864 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4865 				remote_adv |= LPA_1000XPAUSE_ASYM;
4866 
4867 			tp->link_config.rmt_adv =
4868 					   mii_adv_to_ethtool_adv_x(remote_adv);
4869 
4870 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4871 			current_link_up = 1;
4872 			tp->serdes_counter = 0;
4873 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4874 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4875 			if (tp->serdes_counter)
4876 				tp->serdes_counter--;
4877 			else {
4878 				if (workaround) {
4879 					u32 val = serdes_cfg;
4880 
4881 					if (port_a)
4882 						val |= 0xc010000;
4883 					else
4884 						val |= 0x4010000;
4885 
4886 					tw32_f(MAC_SERDES_CFG, val);
4887 				}
4888 
4889 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4890 				udelay(40);
4891 
4892 				/* Link parallel detection: link is up only if
4893 				 * we have PCS_SYNC and are not receiving
4894 				 * config code words. */
4895 				mac_status = tr32(MAC_STATUS);
4896 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4897 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4898 					tg3_setup_flow_control(tp, 0, 0);
4899 					current_link_up = 1;
4900 					tp->phy_flags |=
4901 						TG3_PHYFLG_PARALLEL_DETECT;
4902 					tp->serdes_counter =
4903 						SERDES_PARALLEL_DET_TIMEOUT;
4904 				} else
4905 					goto restart_autoneg;
4906 			}
4907 		}
4908 	} else {
4909 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4910 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4911 	}
4912 
4913 out:
4914 	return current_link_up;
4915 }
4916 
4917 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4918 {
4919 	int current_link_up = 0;
4920 
4921 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4922 		goto out;
4923 
4924 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925 		u32 txflags, rxflags;
4926 		int i;
4927 
4928 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4929 			u32 local_adv = 0, remote_adv = 0;
4930 
4931 			if (txflags & ANEG_CFG_PS1)
4932 				local_adv |= ADVERTISE_1000XPAUSE;
4933 			if (txflags & ANEG_CFG_PS2)
4934 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4935 
4936 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4937 				remote_adv |= LPA_1000XPAUSE;
4938 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4939 				remote_adv |= LPA_1000XPAUSE_ASYM;
4940 
4941 			tp->link_config.rmt_adv =
4942 					   mii_adv_to_ethtool_adv_x(remote_adv);
4943 
4944 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4945 
4946 			current_link_up = 1;
4947 		}
4948 		for (i = 0; i < 30; i++) {
4949 			udelay(20);
4950 			tw32_f(MAC_STATUS,
4951 			       (MAC_STATUS_SYNC_CHANGED |
4952 				MAC_STATUS_CFG_CHANGED));
4953 			udelay(40);
4954 			if ((tr32(MAC_STATUS) &
4955 			     (MAC_STATUS_SYNC_CHANGED |
4956 			      MAC_STATUS_CFG_CHANGED)) == 0)
4957 				break;
4958 		}
4959 
4960 		mac_status = tr32(MAC_STATUS);
4961 		if (current_link_up == 0 &&
4962 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4963 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4964 			current_link_up = 1;
4965 	} else {
4966 		tg3_setup_flow_control(tp, 0, 0);
4967 
4968 		/* Forcing 1000FD link up. */
4969 		current_link_up = 1;
4970 
4971 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4972 		udelay(40);
4973 
4974 		tw32_f(MAC_MODE, tp->mac_mode);
4975 		udelay(40);
4976 	}
4977 
4978 out:
4979 	return current_link_up;
4980 }
4981 
4982 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4983 {
4984 	u32 orig_pause_cfg;
4985 	u16 orig_active_speed;
4986 	u8 orig_active_duplex;
4987 	u32 mac_status;
4988 	int current_link_up;
4989 	int i;
4990 
4991 	orig_pause_cfg = tp->link_config.active_flowctrl;
4992 	orig_active_speed = tp->link_config.active_speed;
4993 	orig_active_duplex = tp->link_config.active_duplex;
4994 
4995 	if (!tg3_flag(tp, HW_AUTONEG) &&
4996 	    netif_carrier_ok(tp->dev) &&
4997 	    tg3_flag(tp, INIT_COMPLETE)) {
4998 		mac_status = tr32(MAC_STATUS);
4999 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5000 			       MAC_STATUS_SIGNAL_DET |
5001 			       MAC_STATUS_CFG_CHANGED |
5002 			       MAC_STATUS_RCVD_CFG);
5003 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5004 				   MAC_STATUS_SIGNAL_DET)) {
5005 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5006 					    MAC_STATUS_CFG_CHANGED));
5007 			return 0;
5008 		}
5009 	}
5010 
5011 	tw32_f(MAC_TX_AUTO_NEG, 0);
5012 
5013 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5014 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5015 	tw32_f(MAC_MODE, tp->mac_mode);
5016 	udelay(40);
5017 
5018 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5019 		tg3_init_bcm8002(tp);
5020 
5021 	/* Enable link change events even while polling the serdes.  */
5022 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5023 	udelay(40);
5024 
5025 	current_link_up = 0;
5026 	tp->link_config.rmt_adv = 0;
5027 	mac_status = tr32(MAC_STATUS);
5028 
5029 	if (tg3_flag(tp, HW_AUTONEG))
5030 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5031 	else
5032 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5033 
5034 	tp->napi[0].hw_status->status =
5035 		(SD_STATUS_UPDATED |
5036 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5037 
5038 	for (i = 0; i < 100; i++) {
5039 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5040 				    MAC_STATUS_CFG_CHANGED));
5041 		udelay(5);
5042 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5043 					 MAC_STATUS_CFG_CHANGED |
5044 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5045 			break;
5046 	}
5047 
5048 	mac_status = tr32(MAC_STATUS);
5049 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5050 		current_link_up = 0;
5051 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5052 		    tp->serdes_counter == 0) {
5053 			tw32_f(MAC_MODE, (tp->mac_mode |
5054 					  MAC_MODE_SEND_CONFIGS));
5055 			udelay(1);
5056 			tw32_f(MAC_MODE, tp->mac_mode);
5057 		}
5058 	}
5059 
5060 	if (current_link_up == 1) {
5061 		tp->link_config.active_speed = SPEED_1000;
5062 		tp->link_config.active_duplex = DUPLEX_FULL;
5063 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5064 				    LED_CTRL_LNKLED_OVERRIDE |
5065 				    LED_CTRL_1000MBPS_ON));
5066 	} else {
5067 		tp->link_config.active_speed = SPEED_UNKNOWN;
5068 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5069 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5070 				    LED_CTRL_LNKLED_OVERRIDE |
5071 				    LED_CTRL_TRAFFIC_OVERRIDE));
5072 	}
5073 
5074 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5075 		if (current_link_up)
5076 			netif_carrier_on(tp->dev);
5077 		else
5078 			netif_carrier_off(tp->dev);
5079 		tg3_link_report(tp);
5080 	} else {
5081 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5082 		if (orig_pause_cfg != now_pause_cfg ||
5083 		    orig_active_speed != tp->link_config.active_speed ||
5084 		    orig_active_duplex != tp->link_config.active_duplex)
5085 			tg3_link_report(tp);
5086 	}
5087 
5088 	return 0;
5089 }
5090 
5091 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5092 {
5093 	int current_link_up, err = 0;
5094 	u32 bmsr, bmcr;
5095 	u16 current_speed;
5096 	u8 current_duplex;
5097 	u32 local_adv, remote_adv;
5098 
5099 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5100 	tw32_f(MAC_MODE, tp->mac_mode);
5101 	udelay(40);
5102 
5103 	tw32(MAC_EVENT, 0);
5104 
5105 	tw32_f(MAC_STATUS,
5106 	     (MAC_STATUS_SYNC_CHANGED |
5107 	      MAC_STATUS_CFG_CHANGED |
5108 	      MAC_STATUS_MI_COMPLETION |
5109 	      MAC_STATUS_LNKSTATE_CHANGED));
5110 	udelay(40);
5111 
5112 	if (force_reset)
5113 		tg3_phy_reset(tp);
5114 
5115 	current_link_up = 0;
5116 	current_speed = SPEED_UNKNOWN;
5117 	current_duplex = DUPLEX_UNKNOWN;
5118 	tp->link_config.rmt_adv = 0;
5119 
5120 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5121 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5122 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5123 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5124 			bmsr |= BMSR_LSTATUS;
5125 		else
5126 			bmsr &= ~BMSR_LSTATUS;
5127 	}
5128 
5129 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5130 
5131 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5132 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5133 		/* do nothing, just check for link up at the end */
5134 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5135 		u32 adv, newadv;
5136 
5137 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5138 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5139 				 ADVERTISE_1000XPAUSE |
5140 				 ADVERTISE_1000XPSE_ASYM |
5141 				 ADVERTISE_SLCT);
5142 
5143 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5144 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5145 
5146 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5147 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5148 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5149 			tg3_writephy(tp, MII_BMCR, bmcr);
5150 
5151 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5152 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5153 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5154 
5155 			return err;
5156 		}
5157 	} else {
5158 		u32 new_bmcr;
5159 
5160 		bmcr &= ~BMCR_SPEED1000;
5161 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5162 
5163 		if (tp->link_config.duplex == DUPLEX_FULL)
5164 			new_bmcr |= BMCR_FULLDPLX;
5165 
5166 		if (new_bmcr != bmcr) {
5167 			/* BMCR_SPEED1000 is a reserved bit that needs
5168 			 * to be set on write.
5169 			 */
5170 			new_bmcr |= BMCR_SPEED1000;
5171 
5172 			/* Force a linkdown */
5173 			if (netif_carrier_ok(tp->dev)) {
5174 				u32 adv;
5175 
5176 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5177 				adv &= ~(ADVERTISE_1000XFULL |
5178 					 ADVERTISE_1000XHALF |
5179 					 ADVERTISE_SLCT);
5180 				tg3_writephy(tp, MII_ADVERTISE, adv);
5181 				tg3_writephy(tp, MII_BMCR, bmcr |
5182 							   BMCR_ANRESTART |
5183 							   BMCR_ANENABLE);
5184 				udelay(10);
5185 				netif_carrier_off(tp->dev);
5186 			}
5187 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5188 			bmcr = new_bmcr;
5189 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5190 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5191 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5192 			    ASIC_REV_5714) {
5193 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5194 					bmsr |= BMSR_LSTATUS;
5195 				else
5196 					bmsr &= ~BMSR_LSTATUS;
5197 			}
5198 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5199 		}
5200 	}
5201 
5202 	if (bmsr & BMSR_LSTATUS) {
5203 		current_speed = SPEED_1000;
5204 		current_link_up = 1;
5205 		if (bmcr & BMCR_FULLDPLX)
5206 			current_duplex = DUPLEX_FULL;
5207 		else
5208 			current_duplex = DUPLEX_HALF;
5209 
5210 		local_adv = 0;
5211 		remote_adv = 0;
5212 
5213 		if (bmcr & BMCR_ANENABLE) {
5214 			u32 common;
5215 
5216 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5217 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5218 			common = local_adv & remote_adv;
5219 			if (common & (ADVERTISE_1000XHALF |
5220 				      ADVERTISE_1000XFULL)) {
5221 				if (common & ADVERTISE_1000XFULL)
5222 					current_duplex = DUPLEX_FULL;
5223 				else
5224 					current_duplex = DUPLEX_HALF;
5225 
5226 				tp->link_config.rmt_adv =
5227 					   mii_adv_to_ethtool_adv_x(remote_adv);
5228 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5229 				/* Link is up via parallel detect */
5230 			} else {
5231 				current_link_up = 0;
5232 			}
5233 		}
5234 	}
5235 
5236 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5237 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5238 
5239 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5240 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5241 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5242 
5243 	tw32_f(MAC_MODE, tp->mac_mode);
5244 	udelay(40);
5245 
5246 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5247 
5248 	tp->link_config.active_speed = current_speed;
5249 	tp->link_config.active_duplex = current_duplex;
5250 
5251 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5252 		if (current_link_up)
5253 			netif_carrier_on(tp->dev);
5254 		else {
5255 			netif_carrier_off(tp->dev);
5256 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5257 		}
5258 		tg3_link_report(tp);
5259 	}
5260 	return err;
5261 }
5262 
5263 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5264 {
5265 	if (tp->serdes_counter) {
5266 		/* Give autoneg time to complete. */
5267 		tp->serdes_counter--;
5268 		return;
5269 	}
5270 
5271 	if (!netif_carrier_ok(tp->dev) &&
5272 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5273 		u32 bmcr;
5274 
5275 		tg3_readphy(tp, MII_BMCR, &bmcr);
5276 		if (bmcr & BMCR_ANENABLE) {
5277 			u32 phy1, phy2;
5278 
5279 			/* Select shadow register 0x1f */
5280 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5281 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5282 
5283 			/* Select expansion interrupt status register */
5284 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5285 					 MII_TG3_DSP_EXP1_INT_STAT);
5286 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5287 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5288 
5289 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5290 				/* We have signal detect and not receiving
5291 				 * config code words, link is up by parallel
5292 				 * detection.
5293 				 */
5294 
5295 				bmcr &= ~BMCR_ANENABLE;
5296 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5297 				tg3_writephy(tp, MII_BMCR, bmcr);
5298 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5299 			}
5300 		}
5301 	} else if (netif_carrier_ok(tp->dev) &&
5302 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5303 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5304 		u32 phy2;
5305 
5306 		/* Select expansion interrupt status register */
5307 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5308 				 MII_TG3_DSP_EXP1_INT_STAT);
5309 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5310 		if (phy2 & 0x20) {
5311 			u32 bmcr;
5312 
5313 			/* Config code words received, turn on autoneg. */
5314 			tg3_readphy(tp, MII_BMCR, &bmcr);
5315 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5316 
5317 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5318 
5319 		}
5320 	}
5321 }
5322 
5323 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5324 {
5325 	u32 val;
5326 	int err;
5327 
5328 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5329 		err = tg3_setup_fiber_phy(tp, force_reset);
5330 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5331 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5332 	else
5333 		err = tg3_setup_copper_phy(tp, force_reset);
5334 
5335 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5336 		u32 scale;
5337 
5338 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5339 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5340 			scale = 65;
5341 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5342 			scale = 6;
5343 		else
5344 			scale = 12;
5345 
5346 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5347 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5348 		tw32(GRC_MISC_CFG, val);
5349 	}
5350 
5351 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5352 	      (6 << TX_LENGTHS_IPG_SHIFT);
5353 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5354 		val |= tr32(MAC_TX_LENGTHS) &
5355 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5356 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5357 
5358 	if (tp->link_config.active_speed == SPEED_1000 &&
5359 	    tp->link_config.active_duplex == DUPLEX_HALF)
5360 		tw32(MAC_TX_LENGTHS, val |
5361 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5362 	else
5363 		tw32(MAC_TX_LENGTHS, val |
5364 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5365 
5366 	if (!tg3_flag(tp, 5705_PLUS)) {
5367 		if (netif_carrier_ok(tp->dev)) {
5368 			tw32(HOSTCC_STAT_COAL_TICKS,
5369 			     tp->coal.stats_block_coalesce_usecs);
5370 		} else {
5371 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5372 		}
5373 	}
5374 
5375 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5376 		val = tr32(PCIE_PWR_MGMT_THRESH);
5377 		if (!netif_carrier_ok(tp->dev))
5378 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5379 			      tp->pwrmgmt_thresh;
5380 		else
5381 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5382 		tw32(PCIE_PWR_MGMT_THRESH, val);
5383 	}
5384 
5385 	return err;
5386 }
5387 
5388 static inline int tg3_irq_sync(struct tg3 *tp)
5389 {
5390 	return tp->irq_sync;
5391 }
5392 
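/* Copy 'len' bytes of registers, starting at register offset 'off',
 * into the dump buffer at the same offset: 'dst' is advanced by 'off'
 * first so the buffer mirrors the chip's register layout.
 */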
5393 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5394 {
5395 	int i;
5396 
5397 	dst = (u32 *)((u8 *)dst + off);
5398 	for (i = 0; i < len; i += sizeof(u32))
5399 		*dst++ = tr32(off + i);
5400 }
5401 
5402 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5403 {
5404 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5405 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5406 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5407 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5408 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5409 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5410 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5411 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5412 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5413 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5414 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5415 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5416 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5417 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5418 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5419 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5420 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5421 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5422 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5423 
5424 	if (tg3_flag(tp, SUPPORT_MSIX))
5425 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5426 
5427 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5428 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5429 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5430 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5431 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5432 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5433 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5434 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5435 
5436 	if (!tg3_flag(tp, 5705_PLUS)) {
5437 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5438 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5439 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5440 	}
5441 
5442 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5443 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5444 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5445 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5446 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5447 
5448 	if (tg3_flag(tp, NVRAM))
5449 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5450 }
5451 
5452 static void tg3_dump_state(struct tg3 *tp)
5453 {
5454 	int i;
5455 	u32 *regs;
5456 
5457 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5458 	if (!regs) {
5459 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5460 		return;
5461 	}
5462 
5463 	if (tg3_flag(tp, PCI_EXPRESS)) {
5464 		/* Read up to but not including private PCI registers */
5465 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5466 			regs[i / sizeof(u32)] = tr32(i);
5467 	} else
5468 		tg3_dump_legacy_regs(tp, regs);
5469 
5470 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5471 		if (!regs[i + 0] && !regs[i + 1] &&
5472 		    !regs[i + 2] && !regs[i + 3])
5473 			continue;
5474 
5475 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5476 			   i * 4,
5477 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5478 	}
5479 
5480 	kfree(regs);
5481 
5482 	for (i = 0; i < tp->irq_cnt; i++) {
5483 		struct tg3_napi *tnapi = &tp->napi[i];
5484 
5485 		/* SW status block */
5486 		netdev_err(tp->dev,
5487 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5488 			   i,
5489 			   tnapi->hw_status->status,
5490 			   tnapi->hw_status->status_tag,
5491 			   tnapi->hw_status->rx_jumbo_consumer,
5492 			   tnapi->hw_status->rx_consumer,
5493 			   tnapi->hw_status->rx_mini_consumer,
5494 			   tnapi->hw_status->idx[0].rx_producer,
5495 			   tnapi->hw_status->idx[0].tx_consumer);
5496 
5497 		netdev_err(tp->dev,
5498 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5499 			   i,
5500 			   tnapi->last_tag, tnapi->last_irq_tag,
5501 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5502 			   tnapi->rx_rcb_ptr,
5503 			   tnapi->prodring.rx_std_prod_idx,
5504 			   tnapi->prodring.rx_std_cons_idx,
5505 			   tnapi->prodring.rx_jmb_prod_idx,
5506 			   tnapi->prodring.rx_jmb_cons_idx);
5507 	}
5508 }
5509 
5510 /* This is called whenever we suspect that the system chipset is re-
5511  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5512  * is bogus tx completions. We try to recover by setting the
5513  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5514  * in the workqueue.
5515  */
5516 static void tg3_tx_recover(struct tg3 *tp)
5517 {
5518 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5519 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5520 
5521 	netdev_warn(tp->dev,
5522 		    "The system may be re-ordering memory-mapped I/O "
5523 		    "cycles to the network device, attempting to recover. "
5524 		    "Please report the problem to the driver maintainer "
5525 		    "and include system chipset information.\n");
5526 
5527 	spin_lock(&tp->lock);
5528 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5529 	spin_unlock(&tp->lock);
5530 }
5531 
5532 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5533 {
5534 	/* Tell compiler to fetch tx indices from memory. */
5535 	barrier();
5536 	return tnapi->tx_pending -
5537 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5538 }
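
/* Illustrative aside (not driver code): the masked subtraction above
 * relies on TG3_TX_RING_SIZE being a power of two, so the in-flight
 * count stays correct even after tx_prod wraps past tx_cons.  A
 * userspace-compilable sketch with a hypothetical ring size:
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define TX_RING_SIZE	512u	/* hypothetical; must be a power of two */

static uint32_t tx_avail(uint32_t pending, uint32_t prod, uint32_t cons)
{
	return pending - ((prod - cons) & (TX_RING_SIZE - 1));
}

int main(void)
{
	/* empty ring: everything is available */
	assert(tx_avail(TX_RING_SIZE, 100, 100) == TX_RING_SIZE);
	/* producer index wrapped around 2^32: 5 descriptors in flight */
	assert(tx_avail(TX_RING_SIZE, 2, 0xfffffffdu) == TX_RING_SIZE - 5);
	return 0;
}
#endif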
5539 
5540 /* Tigon3 never reports partial packet sends.  So we do not
5541  * need special logic to handle SKBs that have not had all
5542  * of their frags sent yet, like SunGEM does.
5543  */
5544 static void tg3_tx(struct tg3_napi *tnapi)
5545 {
5546 	struct tg3 *tp = tnapi->tp;
5547 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5548 	u32 sw_idx = tnapi->tx_cons;
5549 	struct netdev_queue *txq;
5550 	int index = tnapi - tp->napi;
5551 	unsigned int pkts_compl = 0, bytes_compl = 0;
5552 
5553 	if (tg3_flag(tp, ENABLE_TSS))
5554 		index--;
5555 
5556 	txq = netdev_get_tx_queue(tp->dev, index);
5557 
5558 	while (sw_idx != hw_idx) {
5559 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5560 		struct sk_buff *skb = ri->skb;
5561 		int i, tx_bug = 0;
5562 
5563 		if (unlikely(skb == NULL)) {
5564 			tg3_tx_recover(tp);
5565 			return;
5566 		}
5567 
5568 		pci_unmap_single(tp->pdev,
5569 				 dma_unmap_addr(ri, mapping),
5570 				 skb_headlen(skb),
5571 				 PCI_DMA_TODEVICE);
5572 
5573 		ri->skb = NULL;
5574 
5575 		while (ri->fragmented) {
5576 			ri->fragmented = false;
5577 			sw_idx = NEXT_TX(sw_idx);
5578 			ri = &tnapi->tx_buffers[sw_idx];
5579 		}
5580 
5581 		sw_idx = NEXT_TX(sw_idx);
5582 
5583 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5584 			ri = &tnapi->tx_buffers[sw_idx];
5585 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5586 				tx_bug = 1;
5587 
5588 			pci_unmap_page(tp->pdev,
5589 				       dma_unmap_addr(ri, mapping),
5590 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5591 				       PCI_DMA_TODEVICE);
5592 
5593 			while (ri->fragmented) {
5594 				ri->fragmented = false;
5595 				sw_idx = NEXT_TX(sw_idx);
5596 				ri = &tnapi->tx_buffers[sw_idx];
5597 			}
5598 
5599 			sw_idx = NEXT_TX(sw_idx);
5600 		}
5601 
5602 		pkts_compl++;
5603 		bytes_compl += skb->len;
5604 
5605 		dev_kfree_skb(skb);
5606 
5607 		if (unlikely(tx_bug)) {
5608 			tg3_tx_recover(tp);
5609 			return;
5610 		}
5611 	}
5612 
5613 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5614 
5615 	tnapi->tx_cons = sw_idx;
5616 
5617 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5618 	 * before checking for netif_queue_stopped().  Without the
5619 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5620 	 * will miss it and cause the queue to be stopped forever.
5621 	 */
5622 	smp_mb();
5623 
5624 	if (unlikely(netif_tx_queue_stopped(txq) &&
5625 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5626 		__netif_tx_lock(txq, smp_processor_id());
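		/* Re-check under the queue lock: tg3_start_xmit() may
		 * have stopped the queue again after the unlocked test.
		 */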
5627 		if (netif_tx_queue_stopped(txq) &&
5628 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5629 			netif_tx_wake_queue(txq);
5630 		__netif_tx_unlock(txq);
5631 	}
5632 }
5633 
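/* Carve RX buffers out of a page, TG3_FRAGSIZE bytes at a time.  A
 * fresh page is allocated when the current one is exhausted, and its
 * reference count is raised by one per carved chunk so tg3_frag_free()
 * can simply drop a page reference for each chunk returned.
 */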
5634 static void *tg3_frag_alloc(struct tg3_rx_prodring_set *tpr)
5635 {
5636 	void *data;
5637 
5638 	if (tpr->rx_page_size < TG3_FRAGSIZE) {
5639 		struct page *page = alloc_page(GFP_ATOMIC);
5640 
5641 		if (!page)
5642 			return NULL;
5643 		atomic_add((PAGE_SIZE / TG3_FRAGSIZE) - 1, &page->_count);
5644 		tpr->rx_page_addr = page_address(page);
5645 		tpr->rx_page_size = PAGE_SIZE;
5646 	}
5647 	data = tpr->rx_page_addr;
5648 	tpr->rx_page_addr += TG3_FRAGSIZE;
5649 	tpr->rx_page_size -= TG3_FRAGSIZE;
5650 	return data;
5651 }
5652 
5653 static void tg3_frag_free(bool is_frag, void *data)
5654 {
5655 	if (is_frag)
5656 		put_page(virt_to_head_page(data));
5657 	else
5658 		kfree(data);
5659 }
5660 
5661 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5662 {
5663 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5664 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5665 
5666 	if (!ri->data)
5667 		return;
5668 
5669 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5670 			 map_sz, PCI_DMA_FROMDEVICE);
5671 	tg3_frag_free(skb_size <= TG3_FRAGSIZE, ri->data);
5672 	ri->data = NULL;
5673 }
5674 
5676 /* Returns size of skb allocated or < 0 on error.
5677  *
5678  * We only need to fill in the address because the other members
5679  * of the RX descriptor are invariant, see tg3_init_rings.
5680  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5682  * posting buffers we only dirty the first cache line of the RX
5683  * descriptor (containing the address).  Whereas for the RX status
5684  * buffers the cpu only reads the last cacheline of the RX descriptor
5685  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5686  */
5687 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5688 			     u32 opaque_key, u32 dest_idx_unmasked,
5689 			     unsigned int *frag_size)
5690 {
5691 	struct tg3_rx_buffer_desc *desc;
5692 	struct ring_info *map;
5693 	u8 *data;
5694 	dma_addr_t mapping;
5695 	int skb_size, data_size, dest_idx;
5696 
5697 	switch (opaque_key) {
5698 	case RXD_OPAQUE_RING_STD:
5699 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5700 		desc = &tpr->rx_std[dest_idx];
5701 		map = &tpr->rx_std_buffers[dest_idx];
5702 		data_size = tp->rx_pkt_map_sz;
5703 		break;
5704 
5705 	case RXD_OPAQUE_RING_JUMBO:
5706 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5707 		desc = &tpr->rx_jmb[dest_idx].std;
5708 		map = &tpr->rx_jmb_buffers[dest_idx];
5709 		data_size = TG3_RX_JMB_MAP_SZ;
5710 		break;
5711 
5712 	default:
5713 		return -EINVAL;
5714 	}
5715 
5716 	/* Do not overwrite any of the map or rp information
5717 	 * until we are sure we can commit to a new buffer.
5718 	 *
5719 	 * Callers depend upon this behavior and assume that
5720 	 * we leave everything unchanged if we fail.
5721 	 */
5722 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5723 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5724 	if (skb_size <= TG3_FRAGSIZE) {
5725 		data = tg3_frag_alloc(tpr);
5726 		*frag_size = TG3_FRAGSIZE;
5727 	} else {
5728 		data = kmalloc(skb_size, GFP_ATOMIC);
5729 		*frag_size = 0;
5730 	}
5731 	if (!data)
5732 		return -ENOMEM;
5733 
5734 	mapping = pci_map_single(tp->pdev,
5735 				 data + TG3_RX_OFFSET(tp),
5736 				 data_size,
5737 				 PCI_DMA_FROMDEVICE);
5738 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5739 		tg3_frag_free(skb_size <= TG3_FRAGSIZE, data);
5740 		return -EIO;
5741 	}
5742 
5743 	map->data = data;
5744 	dma_unmap_addr_set(map, mapping, mapping);
5745 
5746 	desc->addr_hi = ((u64)mapping >> 32);
5747 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5748 
5749 	return data_size;
5750 }
5751 
5752 /* We only need to move over in the address because the other
5753  * members of the RX descriptor are invariant.  See notes above
5754  * tg3_alloc_rx_data for full details.
5755  */
5756 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5757 			   struct tg3_rx_prodring_set *dpr,
5758 			   u32 opaque_key, int src_idx,
5759 			   u32 dest_idx_unmasked)
5760 {
5761 	struct tg3 *tp = tnapi->tp;
5762 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5763 	struct ring_info *src_map, *dest_map;
5764 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5765 	int dest_idx;
5766 
5767 	switch (opaque_key) {
5768 	case RXD_OPAQUE_RING_STD:
5769 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5770 		dest_desc = &dpr->rx_std[dest_idx];
5771 		dest_map = &dpr->rx_std_buffers[dest_idx];
5772 		src_desc = &spr->rx_std[src_idx];
5773 		src_map = &spr->rx_std_buffers[src_idx];
5774 		break;
5775 
5776 	case RXD_OPAQUE_RING_JUMBO:
5777 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5778 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5779 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5780 		src_desc = &spr->rx_jmb[src_idx].std;
5781 		src_map = &spr->rx_jmb_buffers[src_idx];
5782 		break;
5783 
5784 	default:
5785 		return;
5786 	}
5787 
5788 	dest_map->data = src_map->data;
5789 	dma_unmap_addr_set(dest_map, mapping,
5790 			   dma_unmap_addr(src_map, mapping));
5791 	dest_desc->addr_hi = src_desc->addr_hi;
5792 	dest_desc->addr_lo = src_desc->addr_lo;
5793 
5794 	/* Ensure that the update to the skb happens after the physical
5795 	 * addresses have been transferred to the new BD location.
5796 	 */
5797 	smp_wmb();
5798 
5799 	src_map->data = NULL;
5800 }
5801 
5802 /* The RX ring scheme is composed of multiple rings which post fresh
5803  * buffers to the chip, and one special ring the chip uses to report
5804  * status back to the host.
5805  *
5806  * The special ring reports the status of received packets to the
5807  * host.  The chip does not write into the original descriptor the
5808  * RX buffer was obtained from.  The chip simply takes the original
5809  * descriptor as provided by the host, updates the status and length
5810  * field, then writes this into the next status ring entry.
5811  *
5812  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5814  * it is first placed into the on-chip ram.  When the packet's length
5815  * is known, it walks down the TG3_BDINFO entries to select the ring.
5816  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5817  * which is within the range of the new packet's length is chosen.
5818  *
5819  * The "separate ring for rx status" scheme may sound queer, but it makes
5820  * sense from a cache coherency perspective.  If only the host writes
5821  * to the buffer post rings, and only the chip writes to the rx status
5822  * rings, then cache lines never move beyond shared-modified state.
5823  * If both the host and chip were to write into the same ring, cache line
5824  * eviction could occur since both entities want it in an exclusive state.
5825  */
5826 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5827 {
5828 	struct tg3 *tp = tnapi->tp;
5829 	u32 work_mask, rx_std_posted = 0;
5830 	u32 std_prod_idx, jmb_prod_idx;
5831 	u32 sw_idx = tnapi->rx_rcb_ptr;
5832 	u16 hw_idx;
5833 	int received;
5834 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5835 
5836 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5837 	/*
5838 	 * We need to order the read of hw_idx and the read of
5839 	 * the opaque cookie.
5840 	 */
5841 	rmb();
5842 	work_mask = 0;
5843 	received = 0;
5844 	std_prod_idx = tpr->rx_std_prod_idx;
5845 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5846 	while (sw_idx != hw_idx && budget > 0) {
5847 		struct ring_info *ri;
5848 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5849 		unsigned int len;
5850 		struct sk_buff *skb;
5851 		dma_addr_t dma_addr;
5852 		u32 opaque_key, desc_idx, *post_ptr;
5853 		u8 *data;
5854 
5855 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5856 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5857 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5858 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5859 			dma_addr = dma_unmap_addr(ri, mapping);
5860 			data = ri->data;
5861 			post_ptr = &std_prod_idx;
5862 			rx_std_posted++;
5863 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5864 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5865 			dma_addr = dma_unmap_addr(ri, mapping);
5866 			data = ri->data;
5867 			post_ptr = &jmb_prod_idx;
5868 		} else
5869 			goto next_pkt_nopost;
5870 
5871 		work_mask |= opaque_key;
5872 
5873 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5874 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5875 		drop_it:
5876 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5877 				       desc_idx, *post_ptr);
5878 		drop_it_no_recycle:
			/* Other statistics are tracked by the card. */
5880 			tp->rx_dropped++;
5881 			goto next_pkt;
5882 		}
5883 
5884 		prefetch(data + TG3_RX_OFFSET(tp));
5885 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5886 		      ETH_FCS_LEN;
5887 
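		/* Large packets get the existing buffer handed to the
		 * stack via build_skb() and a fresh buffer posted; small
		 * packets are copied and the old buffer is recycled.
		 */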
5888 		if (len > TG3_RX_COPY_THRESH(tp)) {
5889 			int skb_size;
5890 			unsigned int frag_size;
5891 
5892 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5893 						    *post_ptr, &frag_size);
5894 			if (skb_size < 0)
5895 				goto drop_it;
5896 
5897 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5898 					 PCI_DMA_FROMDEVICE);
5899 
5900 			skb = build_skb(data, frag_size);
5901 			if (!skb) {
5902 				tg3_frag_free(frag_size != 0, data);
5903 				goto drop_it_no_recycle;
5904 			}
5905 			skb_reserve(skb, TG3_RX_OFFSET(tp));
5906 			/* Ensure that the update to the data happens
5907 			 * after the usage of the old DMA mapping.
5908 			 */
5909 			smp_wmb();
5910 
5911 			ri->data = NULL;
5912 
5913 		} else {
5914 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5915 				       desc_idx, *post_ptr);
5916 
5917 			skb = netdev_alloc_skb(tp->dev,
5918 					       len + TG3_RAW_IP_ALIGN);
5919 			if (skb == NULL)
5920 				goto drop_it_no_recycle;
5921 
5922 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
5924 			memcpy(skb->data,
5925 			       data + TG3_RX_OFFSET(tp),
5926 			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);
5928 		}
5929 
5930 		skb_put(skb, len);
5931 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5932 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5933 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5934 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5935 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5936 		else
5937 			skb_checksum_none_assert(skb);
5938 
5939 		skb->protocol = eth_type_trans(skb, tp->dev);
5940 
5941 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5942 		    skb->protocol != htons(ETH_P_8021Q)) {
5943 			dev_kfree_skb(skb);
5944 			goto drop_it_no_recycle;
5945 		}
5946 
5947 		if (desc->type_flags & RXD_FLAG_VLAN &&
5948 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5949 			__vlan_hwaccel_put_tag(skb,
5950 					       desc->err_vlan & RXD_VLAN_MASK);
5951 
5952 		napi_gro_receive(&tnapi->napi, skb);
5953 
5954 		received++;
5955 		budget--;
5956 
5957 next_pkt:
5958 		(*post_ptr)++;
5959 
5960 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5961 			tpr->rx_std_prod_idx = std_prod_idx &
5962 					       tp->rx_std_ring_mask;
5963 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5964 				     tpr->rx_std_prod_idx);
5965 			work_mask &= ~RXD_OPAQUE_RING_STD;
5966 			rx_std_posted = 0;
5967 		}
5968 next_pkt_nopost:
5969 		sw_idx++;
5970 		sw_idx &= tp->rx_ret_ring_mask;
5971 
5972 		/* Refresh hw_idx to see if there is new work */
5973 		if (sw_idx == hw_idx) {
5974 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5975 			rmb();
5976 		}
5977 	}
5978 
5979 	/* ACK the status ring. */
5980 	tnapi->rx_rcb_ptr = sw_idx;
5981 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5982 
5983 	/* Refill RX ring(s). */
5984 	if (!tg3_flag(tp, ENABLE_RSS)) {
5985 		/* Sync BD data before updating mailbox */
5986 		wmb();
5987 
5988 		if (work_mask & RXD_OPAQUE_RING_STD) {
5989 			tpr->rx_std_prod_idx = std_prod_idx &
5990 					       tp->rx_std_ring_mask;
5991 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5992 				     tpr->rx_std_prod_idx);
5993 		}
5994 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5995 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5996 					       tp->rx_jmb_ring_mask;
5997 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5998 				     tpr->rx_jmb_prod_idx);
5999 		}
6000 		mmiowb();
6001 	} else if (work_mask) {
6002 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6003 		 * updated before the producer indices can be updated.
6004 		 */
6005 		smp_wmb();
6006 
6007 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6008 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6009 
6010 		if (tnapi != &tp->napi[1]) {
6011 			tp->rx_refill = true;
6012 			napi_schedule(&tp->napi[1].napi);
6013 		}
6014 	}
6015 
6016 	return received;
6017 }
6018 
6019 static void tg3_poll_link(struct tg3 *tp)
6020 {
6021 	/* handle link change and other phy events */
6022 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6023 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6024 
6025 		if (sblk->status & SD_STATUS_LINK_CHG) {
6026 			sblk->status = SD_STATUS_UPDATED |
6027 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6028 			spin_lock(&tp->lock);
6029 			if (tg3_flag(tp, USE_PHYLIB)) {
6030 				tw32_f(MAC_STATUS,
6031 				     (MAC_STATUS_SYNC_CHANGED |
6032 				      MAC_STATUS_CFG_CHANGED |
6033 				      MAC_STATUS_MI_COMPLETION |
6034 				      MAC_STATUS_LNKSTATE_CHANGED));
6035 				udelay(40);
6036 			} else
6037 				tg3_setup_phy(tp, 0);
6038 			spin_unlock(&tp->lock);
6039 		}
6040 	}
6041 }
6042 
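/* With RSS enabled, each vector recycles RX buffers into its own
 * producer ring set.  Copy those recycled standard and jumbo entries
 * from the source (per-vector) rings to the destination rings that
 * the hardware consumes, preserving the descriptor DMA addresses.
 * Returns -ENOSPC if a destination slot is still occupied.
 */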
6043 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6044 				struct tg3_rx_prodring_set *dpr,
6045 				struct tg3_rx_prodring_set *spr)
6046 {
6047 	u32 si, di, cpycnt, src_prod_idx;
6048 	int i, err = 0;
6049 
6050 	while (1) {
6051 		src_prod_idx = spr->rx_std_prod_idx;
6052 
6053 		/* Make sure updates to the rx_std_buffers[] entries and the
6054 		 * standard producer index are seen in the correct order.
6055 		 */
6056 		smp_rmb();
6057 
6058 		if (spr->rx_std_cons_idx == src_prod_idx)
6059 			break;
6060 
6061 		if (spr->rx_std_cons_idx < src_prod_idx)
6062 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6063 		else
6064 			cpycnt = tp->rx_std_ring_mask + 1 -
6065 				 spr->rx_std_cons_idx;
6066 
6067 		cpycnt = min(cpycnt,
6068 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6069 
6070 		si = spr->rx_std_cons_idx;
6071 		di = dpr->rx_std_prod_idx;
6072 
6073 		for (i = di; i < di + cpycnt; i++) {
6074 			if (dpr->rx_std_buffers[i].data) {
6075 				cpycnt = i - di;
6076 				err = -ENOSPC;
6077 				break;
6078 			}
6079 		}
6080 
6081 		if (!cpycnt)
6082 			break;
6083 
6084 		/* Ensure that updates to the rx_std_buffers ring and the
6085 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6086 		 * ordered correctly WRT the skb check above.
6087 		 */
6088 		smp_rmb();
6089 
6090 		memcpy(&dpr->rx_std_buffers[di],
6091 		       &spr->rx_std_buffers[si],
6092 		       cpycnt * sizeof(struct ring_info));
6093 
6094 		for (i = 0; i < cpycnt; i++, di++, si++) {
6095 			struct tg3_rx_buffer_desc *sbd, *dbd;
6096 			sbd = &spr->rx_std[si];
6097 			dbd = &dpr->rx_std[di];
6098 			dbd->addr_hi = sbd->addr_hi;
6099 			dbd->addr_lo = sbd->addr_lo;
6100 		}
6101 
6102 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6103 				       tp->rx_std_ring_mask;
6104 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6105 				       tp->rx_std_ring_mask;
6106 	}
6107 
6108 	while (1) {
6109 		src_prod_idx = spr->rx_jmb_prod_idx;
6110 
6111 		/* Make sure updates to the rx_jmb_buffers[] entries and
6112 		 * the jumbo producer index are seen in the correct order.
6113 		 */
6114 		smp_rmb();
6115 
6116 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6117 			break;
6118 
6119 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6120 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6121 		else
6122 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6123 				 spr->rx_jmb_cons_idx;
6124 
6125 		cpycnt = min(cpycnt,
6126 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6127 
6128 		si = spr->rx_jmb_cons_idx;
6129 		di = dpr->rx_jmb_prod_idx;
6130 
6131 		for (i = di; i < di + cpycnt; i++) {
6132 			if (dpr->rx_jmb_buffers[i].data) {
6133 				cpycnt = i - di;
6134 				err = -ENOSPC;
6135 				break;
6136 			}
6137 		}
6138 
6139 		if (!cpycnt)
6140 			break;
6141 
6142 		/* Ensure that updates to the rx_jmb_buffers ring and the
6143 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6144 		 * ordered correctly WRT the skb check above.
6145 		 */
6146 		smp_rmb();
6147 
6148 		memcpy(&dpr->rx_jmb_buffers[di],
6149 		       &spr->rx_jmb_buffers[si],
6150 		       cpycnt * sizeof(struct ring_info));
6151 
6152 		for (i = 0; i < cpycnt; i++, di++, si++) {
6153 			struct tg3_rx_buffer_desc *sbd, *dbd;
6154 			sbd = &spr->rx_jmb[si].std;
6155 			dbd = &dpr->rx_jmb[di].std;
6156 			dbd->addr_hi = sbd->addr_hi;
6157 			dbd->addr_lo = sbd->addr_lo;
6158 		}
6159 
6160 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6161 				       tp->rx_jmb_ring_mask;
6162 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6163 				       tp->rx_jmb_ring_mask;
6164 	}
6165 
6166 	return err;
6167 }
6168 
6169 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6170 {
6171 	struct tg3 *tp = tnapi->tp;
6172 
6173 	/* run TX completion thread */
6174 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6175 		tg3_tx(tnapi);
6176 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6177 			return work_done;
6178 	}
6179 
6180 	if (!tnapi->rx_rcb_prod_idx)
6181 		return work_done;
6182 
6183 	/* run RX thread, within the bounds set by NAPI.
6184 	 * All RX "locking" is done by ensuring outside
6185 	 * code synchronizes with tg3->napi.poll()
6186 	 */
6187 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6188 		work_done += tg3_rx(tnapi, budget - work_done);
6189 
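	/* Vector 1 collects the buffers recycled by all RSS vectors and
	 * posts them back to vector 0's hardware-visible rings.
	 */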
6190 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6191 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6192 		int i, err = 0;
6193 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6194 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6195 
6196 		tp->rx_refill = false;
6197 		for (i = 1; i < tp->irq_cnt; i++)
6198 			err |= tg3_rx_prodring_xfer(tp, dpr,
6199 						    &tp->napi[i].prodring);
6200 
6201 		wmb();
6202 
6203 		if (std_prod_idx != dpr->rx_std_prod_idx)
6204 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6205 				     dpr->rx_std_prod_idx);
6206 
6207 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6208 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6209 				     dpr->rx_jmb_prod_idx);
6210 
6211 		mmiowb();
6212 
6213 		if (err)
6214 			tw32_f(HOSTCC_MODE, tp->coal_now);
6215 	}
6216 
6217 	return work_done;
6218 }
6219 
6220 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6221 {
6222 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6223 		schedule_work(&tp->reset_task);
6224 }
6225 
6226 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6227 {
6228 	cancel_work_sync(&tp->reset_task);
6229 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6230 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6231 }
6232 
6233 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6234 {
6235 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6236 	struct tg3 *tp = tnapi->tp;
6237 	int work_done = 0;
6238 	struct tg3_hw_status *sblk = tnapi->hw_status;
6239 
6240 	while (1) {
6241 		work_done = tg3_poll_work(tnapi, work_done, budget);
6242 
6243 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6244 			goto tx_recovery;
6245 
6246 		if (unlikely(work_done >= budget))
6247 			break;
6248 
		/* tnapi->last_tag is used when re-enabling interrupts below
6250 		 * to tell the hw how much work has been processed,
6251 		 * so we must read it before checking for more work.
6252 		 */
6253 		tnapi->last_tag = sblk->status_tag;
6254 		tnapi->last_irq_tag = tnapi->last_tag;
6255 		rmb();
6256 
6257 		/* check for RX/TX work to do */
6258 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6259 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6260 
			/* This test is not race-free, but it reduces
6262 			 * the number of interrupts by looping again.
6263 			 */
6264 			if (tnapi == &tp->napi[1] && tp->rx_refill)
6265 				continue;
6266 
6267 			napi_complete(napi);
6268 			/* Reenable interrupts. */
6269 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6270 
6271 			/* This test here is synchronized by napi_schedule()
6272 			 * and napi_complete() to close the race condition.
6273 			 */
6274 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6275 				tw32(HOSTCC_MODE, tp->coalesce_mode |
6276 						  HOSTCC_MODE_ENABLE |
6277 						  tnapi->coal_now);
6278 			}
6279 			mmiowb();
6280 			break;
6281 		}
6282 	}
6283 
6284 	return work_done;
6285 
6286 tx_recovery:
6287 	/* work_done is guaranteed to be less than budget. */
6288 	napi_complete(napi);
6289 	tg3_reset_task_schedule(tp);
6290 	return work_done;
6291 }
6292 
6293 static void tg3_process_error(struct tg3 *tp)
6294 {
6295 	u32 val;
6296 	bool real_error = false;
6297 
6298 	if (tg3_flag(tp, ERROR_PROCESSED))
6299 		return;
6300 
6301 	/* Check Flow Attention register */
6302 	val = tr32(HOSTCC_FLOW_ATTN);
6303 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6304 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6305 		real_error = true;
6306 	}
6307 
6308 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6309 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6310 		real_error = true;
6311 	}
6312 
6313 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6314 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6315 		real_error = true;
6316 	}
6317 
6318 	if (!real_error)
6319 		return;
6320 
6321 	tg3_dump_state(tp);
6322 
6323 	tg3_flag_set(tp, ERROR_PROCESSED);
6324 	tg3_reset_task_schedule(tp);
6325 }
6326 
6327 static int tg3_poll(struct napi_struct *napi, int budget)
6328 {
6329 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6330 	struct tg3 *tp = tnapi->tp;
6331 	int work_done = 0;
6332 	struct tg3_hw_status *sblk = tnapi->hw_status;
6333 
6334 	while (1) {
6335 		if (sblk->status & SD_STATUS_ERROR)
6336 			tg3_process_error(tp);
6337 
6338 		tg3_poll_link(tp);
6339 
6340 		work_done = tg3_poll_work(tnapi, work_done, budget);
6341 
6342 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6343 			goto tx_recovery;
6344 
6345 		if (unlikely(work_done >= budget))
6346 			break;
6347 
6348 		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable() below
6350 			 * to tell the hw how much work has been processed,
6351 			 * so we must read it before checking for more work.
6352 			 */
6353 			tnapi->last_tag = sblk->status_tag;
6354 			tnapi->last_irq_tag = tnapi->last_tag;
6355 			rmb();
6356 		} else
6357 			sblk->status &= ~SD_STATUS_UPDATED;
6358 
6359 		if (likely(!tg3_has_work(tnapi))) {
6360 			napi_complete(napi);
6361 			tg3_int_reenable(tnapi);
6362 			break;
6363 		}
6364 	}
6365 
6366 	return work_done;
6367 
6368 tx_recovery:
6369 	/* work_done is guaranteed to be less than budget. */
6370 	napi_complete(napi);
6371 	tg3_reset_task_schedule(tp);
6372 	return work_done;
6373 }
6374 
6375 static void tg3_napi_disable(struct tg3 *tp)
6376 {
6377 	int i;
6378 
6379 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6380 		napi_disable(&tp->napi[i].napi);
6381 }
6382 
6383 static void tg3_napi_enable(struct tg3 *tp)
6384 {
6385 	int i;
6386 
6387 	for (i = 0; i < tp->irq_cnt; i++)
6388 		napi_enable(&tp->napi[i].napi);
6389 }
6390 
6391 static void tg3_napi_init(struct tg3 *tp)
6392 {
6393 	int i;
6394 
6395 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6396 	for (i = 1; i < tp->irq_cnt; i++)
6397 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6398 }
6399 
6400 static void tg3_napi_fini(struct tg3 *tp)
6401 {
6402 	int i;
6403 
6404 	for (i = 0; i < tp->irq_cnt; i++)
6405 		netif_napi_del(&tp->napi[i].napi);
6406 }
6407 
6408 static inline void tg3_netif_stop(struct tg3 *tp)
6409 {
6410 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6411 	tg3_napi_disable(tp);
6412 	netif_tx_disable(tp->dev);
6413 }
6414 
6415 static inline void tg3_netif_start(struct tg3 *tp)
6416 {
6417 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6418 	 * appropriate so long as all callers are assured to
6419 	 * have free tx slots (such as after tg3_init_hw)
6420 	 */
6421 	netif_tx_wake_all_queues(tp->dev);
6422 
6423 	tg3_napi_enable(tp);
6424 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6425 	tg3_enable_ints(tp);
6426 }
6427 
6428 static void tg3_irq_quiesce(struct tg3 *tp)
6429 {
6430 	int i;
6431 
6432 	BUG_ON(tp->irq_sync);
6433 
6434 	tp->irq_sync = 1;
6435 	smp_mb();
6436 
6437 	for (i = 0; i < tp->irq_cnt; i++)
6438 		synchronize_irq(tp->napi[i].irq_vec);
6439 }
6440 
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
 * as well.  Most of the time this is not necessary, except when
6444  * shutting down the device.
6445  */
6446 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6447 {
6448 	spin_lock_bh(&tp->lock);
6449 	if (irq_sync)
6450 		tg3_irq_quiesce(tp);
6451 }
6452 
6453 static inline void tg3_full_unlock(struct tg3 *tp)
6454 {
6455 	spin_unlock_bh(&tp->lock);
6456 }
6457 
6458 /* One-shot MSI handler - Chip automatically disables interrupt
6459  * after sending MSI so driver doesn't have to do it.
6460  */
6461 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6462 {
6463 	struct tg3_napi *tnapi = dev_id;
6464 	struct tg3 *tp = tnapi->tp;
6465 
6466 	prefetch(tnapi->hw_status);
6467 	if (tnapi->rx_rcb)
6468 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6469 
6470 	if (likely(!tg3_irq_sync(tp)))
6471 		napi_schedule(&tnapi->napi);
6472 
6473 	return IRQ_HANDLED;
6474 }
6475 
6476 /* MSI ISR - No need to check for interrupt sharing and no need to
6477  * flush status block and interrupt mailbox. PCI ordering rules
6478  * guarantee that MSI will arrive after the status block.
6479  */
6480 static irqreturn_t tg3_msi(int irq, void *dev_id)
6481 {
6482 	struct tg3_napi *tnapi = dev_id;
6483 	struct tg3 *tp = tnapi->tp;
6484 
6485 	prefetch(tnapi->hw_status);
6486 	if (tnapi->rx_rcb)
6487 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6488 	/*
6489 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6490 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6492 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6493 	 * event coalescing.
6494 	 */
6495 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6496 	if (likely(!tg3_irq_sync(tp)))
6497 		napi_schedule(&tnapi->napi);
6498 
6499 	return IRQ_RETVAL(1);
6500 }
6501 
6502 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6503 {
6504 	struct tg3_napi *tnapi = dev_id;
6505 	struct tg3 *tp = tnapi->tp;
6506 	struct tg3_hw_status *sblk = tnapi->hw_status;
6507 	unsigned int handled = 1;
6508 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block that was posted prior to the
	 * interrupt.
6511 	 * Reading the PCI State register will confirm whether the
6512 	 * interrupt is ours and will flush the status block.
6513 	 */
6514 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6515 		if (tg3_flag(tp, CHIP_RESETTING) ||
6516 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6517 			handled = 0;
6518 			goto out;
6519 		}
6520 	}
6521 
6522 	/*
6523 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6524 	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6526 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6527 	 * event coalescing.
6528 	 *
6529 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6530 	 * spurious interrupts.  The flush impacts performance but
6531 	 * excessive spurious interrupts can be worse in some cases.
6532 	 */
6533 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6534 	if (tg3_irq_sync(tp))
6535 		goto out;
6536 	sblk->status &= ~SD_STATUS_UPDATED;
6537 	if (likely(tg3_has_work(tnapi))) {
6538 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6539 		napi_schedule(&tnapi->napi);
6540 	} else {
6541 		/* No work, shared interrupt perhaps?  re-enable
6542 		 * interrupts, and flush that PCI write
6543 		 */
6544 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6545 			       0x00000000);
6546 	}
6547 out:
6548 	return IRQ_RETVAL(handled);
6549 }
6550 
6551 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6552 {
6553 	struct tg3_napi *tnapi = dev_id;
6554 	struct tg3 *tp = tnapi->tp;
6555 	struct tg3_hw_status *sblk = tnapi->hw_status;
6556 	unsigned int handled = 1;
6557 
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block that was posted prior to the
	 * interrupt.
6560 	 * Reading the PCI State register will confirm whether the
6561 	 * interrupt is ours and will flush the status block.
6562 	 */
6563 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6564 		if (tg3_flag(tp, CHIP_RESETTING) ||
6565 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6566 			handled = 0;
6567 			goto out;
6568 		}
6569 	}
6570 
6571 	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
6575 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6576 	 * event coalescing.
6577 	 *
6578 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6579 	 * spurious interrupts.  The flush impacts performance but
6580 	 * excessive spurious interrupts can be worse in some cases.
6581 	 */
6582 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6583 
6584 	/*
6585 	 * In a shared interrupt configuration, sometimes other devices'
6586 	 * interrupts will scream.  We record the current status tag here
6587 	 * so that the above check can report that the screaming interrupts
6588 	 * are unhandled.  Eventually they will be silenced.
6589 	 */
6590 	tnapi->last_irq_tag = sblk->status_tag;
6591 
6592 	if (tg3_irq_sync(tp))
6593 		goto out;
6594 
6595 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6596 
6597 	napi_schedule(&tnapi->napi);
6598 
6599 out:
6600 	return IRQ_RETVAL(handled);
6601 }
6602 
6603 /* ISR for interrupt test */
6604 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6605 {
6606 	struct tg3_napi *tnapi = dev_id;
6607 	struct tg3 *tp = tnapi->tp;
6608 	struct tg3_hw_status *sblk = tnapi->hw_status;
6609 
6610 	if ((sblk->status & SD_STATUS_UPDATED) ||
6611 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6612 		tg3_disable_ints(tp);
6613 		return IRQ_RETVAL(1);
6614 	}
6615 	return IRQ_RETVAL(0);
6616 }
6617 
6618 #ifdef CONFIG_NET_POLL_CONTROLLER
6619 static void tg3_poll_controller(struct net_device *dev)
6620 {
6621 	int i;
6622 	struct tg3 *tp = netdev_priv(dev);
6623 
6624 	for (i = 0; i < tp->irq_cnt; i++)
6625 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6626 }
6627 #endif
6628 
6629 static void tg3_tx_timeout(struct net_device *dev)
6630 {
6631 	struct tg3 *tp = netdev_priv(dev);
6632 
6633 	if (netif_msg_tx_err(tp)) {
6634 		netdev_err(dev, "transmit timed out, resetting\n");
6635 		tg3_dump_state(tp);
6636 	}
6637 
6638 	tg3_reset_task_schedule(tp);
6639 }
6640 
6641 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6642 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6643 {
6644 	u32 base = (u32) mapping & 0xffffffff;
6645 
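	/* True when the low 32 bits of [base, base + len + 8) wrap, i.e.
	 * the buffer straddles a 4GB boundary; the base > 0xffffdcc0
	 * test is a cheap pre-filter for addresses near the boundary.
	 */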
6646 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6647 }
6648 
6649 /* Test for DMA addresses > 40-bit */
6650 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6651 					  int len)
6652 {
6653 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6654 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6655 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6656 	return 0;
6657 #else
6658 	return 0;
6659 #endif
6660 }
6661 
6662 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6663 				 dma_addr_t mapping, u32 len, u32 flags,
6664 				 u32 mss, u32 vlan)
6665 {
6666 	txbd->addr_hi = ((u64) mapping >> 32);
6667 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6668 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6669 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6670 }
6671 
6672 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6673 			    dma_addr_t map, u32 len, u32 flags,
6674 			    u32 mss, u32 vlan)
6675 {
6676 	struct tg3 *tp = tnapi->tp;
6677 	bool hwbug = false;
6678 
6679 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6680 		hwbug = true;
6681 
6682 	if (tg3_4g_overflow_test(map, len))
6683 		hwbug = true;
6684 
6685 	if (tg3_40bit_overflow_test(tp, map, len))
6686 		hwbug = true;
6687 
6688 	if (tp->dma_limit) {
6689 		u32 prvidx = *entry;
6690 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6691 		while (len > tp->dma_limit && *budget) {
6692 			u32 frag_len = tp->dma_limit;
6693 			len -= tp->dma_limit;
6694 
			/* Avoid the 8-byte DMA problem: halve this
			 * fragment so the remaining length stays
			 * above 8 bytes.
			 */
6696 			if (len <= 8) {
6697 				len += tp->dma_limit / 2;
6698 				frag_len = tp->dma_limit / 2;
6699 			}
6700 
6701 			tnapi->tx_buffers[*entry].fragmented = true;
6702 
6703 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6704 				      frag_len, tmp_flag, mss, vlan);
6705 			*budget -= 1;
6706 			prvidx = *entry;
6707 			*entry = NEXT_TX(*entry);
6708 
6709 			map += frag_len;
6710 		}
6711 
6712 		if (len) {
6713 			if (*budget) {
6714 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6715 					      len, flags, mss, vlan);
6716 				*budget -= 1;
6717 				*entry = NEXT_TX(*entry);
6718 			} else {
6719 				hwbug = true;
6720 				tnapi->tx_buffers[prvidx].fragmented = false;
6721 			}
6722 		}
6723 	} else {
6724 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6725 			      len, flags, mss, vlan);
6726 		*entry = NEXT_TX(*entry);
6727 	}
6728 
6729 	return hwbug;
6730 }
6731 
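/* Unmap a completed skb's DMA mappings: the linear head first, then
 * `last' + 1 page fragments, skipping the extra descriptor entries
 * that tg3_tx_frag_set() created when splitting a segment.
 */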
6732 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6733 {
6734 	int i;
6735 	struct sk_buff *skb;
6736 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6737 
6738 	skb = txb->skb;
6739 	txb->skb = NULL;
6740 
6741 	pci_unmap_single(tnapi->tp->pdev,
6742 			 dma_unmap_addr(txb, mapping),
6743 			 skb_headlen(skb),
6744 			 PCI_DMA_TODEVICE);
6745 
6746 	while (txb->fragmented) {
6747 		txb->fragmented = false;
6748 		entry = NEXT_TX(entry);
6749 		txb = &tnapi->tx_buffers[entry];
6750 	}
6751 
6752 	for (i = 0; i <= last; i++) {
6753 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6754 
6755 		entry = NEXT_TX(entry);
6756 		txb = &tnapi->tx_buffers[entry];
6757 
6758 		pci_unmap_page(tnapi->tp->pdev,
6759 			       dma_unmap_addr(txb, mapping),
6760 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6761 
6762 		while (txb->fragmented) {
6763 			txb->fragmented = false;
6764 			entry = NEXT_TX(entry);
6765 			txb = &tnapi->tx_buffers[entry];
6766 		}
6767 	}
6768 }
6769 
/* Work around the 4GB and 40-bit hardware DMA bugs. */
6771 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6772 				       struct sk_buff **pskb,
6773 				       u32 *entry, u32 *budget,
6774 				       u32 base_flags, u32 mss, u32 vlan)
6775 {
6776 	struct tg3 *tp = tnapi->tp;
6777 	struct sk_buff *new_skb, *skb = *pskb;
6778 	dma_addr_t new_addr = 0;
6779 	int ret = 0;
6780 
6781 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6782 		new_skb = skb_copy(skb, GFP_ATOMIC);
6783 	else {
6784 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6785 
6786 		new_skb = skb_copy_expand(skb,
6787 					  skb_headroom(skb) + more_headroom,
6788 					  skb_tailroom(skb), GFP_ATOMIC);
6789 	}
6790 
6791 	if (!new_skb) {
6792 		ret = -1;
6793 	} else {
6794 		/* New SKB is guaranteed to be linear. */
6795 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6796 					  PCI_DMA_TODEVICE);
6797 		/* Make sure the mapping succeeded */
6798 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6799 			dev_kfree_skb(new_skb);
6800 			ret = -1;
6801 		} else {
6802 			u32 save_entry = *entry;
6803 
6804 			base_flags |= TXD_FLAG_END;
6805 
6806 			tnapi->tx_buffers[*entry].skb = new_skb;
6807 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6808 					   mapping, new_addr);
6809 
6810 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6811 					    new_skb->len, base_flags,
6812 					    mss, vlan)) {
6813 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6814 				dev_kfree_skb(new_skb);
6815 				ret = -1;
6816 			}
6817 		}
6818 	}
6819 
6820 	dev_kfree_skb(skb);
6821 	*pskb = new_skb;
6822 	return ret;
6823 }
6824 
6825 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6826 
/* Use GSO to work around a rare TSO bug that may be triggered when the
6828  * TSO header is greater than 80 bytes.
6829  */
6830 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6831 {
6832 	struct sk_buff *segs, *nskb;
6833 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6834 
6835 	/* Estimate the number of fragments in the worst case */
6836 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6837 		netif_stop_queue(tp->dev);
6838 
6839 		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
6841 		 * tg3_tx(), we update tx index before checking for
6842 		 * netif_tx_queue_stopped().
6843 		 */
6844 		smp_mb();
6845 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6846 			return NETDEV_TX_BUSY;
6847 
6848 		netif_wake_queue(tp->dev);
6849 	}
6850 
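	/* Software-segment the skb and transmit each resulting segment
	 * as an ordinary non-TSO packet.
	 */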
6851 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6852 	if (IS_ERR(segs))
6853 		goto tg3_tso_bug_end;
6854 
6855 	do {
6856 		nskb = segs;
6857 		segs = segs->next;
6858 		nskb->next = NULL;
6859 		tg3_start_xmit(nskb, tp->dev);
6860 	} while (segs);
6861 
6862 tg3_tso_bug_end:
6863 	dev_kfree_skb(skb);
6864 
6865 	return NETDEV_TX_OK;
6866 }
6867 
6868 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6869  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6870  */
6871 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6872 {
6873 	struct tg3 *tp = netdev_priv(dev);
6874 	u32 len, entry, base_flags, mss, vlan = 0;
6875 	u32 budget;
6876 	int i = -1, would_hit_hwbug;
6877 	dma_addr_t mapping;
6878 	struct tg3_napi *tnapi;
6879 	struct netdev_queue *txq;
6880 	unsigned int last;
6881 
6882 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6883 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6884 	if (tg3_flag(tp, ENABLE_TSS))
6885 		tnapi++;
6886 
6887 	budget = tg3_tx_avail(tnapi);
6888 
6889 	/* We are running in BH disabled context with netif_tx_lock
6890 	 * and TX reclaim runs via tp->napi.poll inside of a software
6891 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6892 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6893 	 */
6894 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6895 		if (!netif_tx_queue_stopped(txq)) {
6896 			netif_tx_stop_queue(txq);
6897 
6898 			/* This is a hard error, log it. */
6899 			netdev_err(dev,
6900 				   "BUG! Tx Ring full when queue awake!\n");
6901 		}
6902 		return NETDEV_TX_BUSY;
6903 	}
6904 
6905 	entry = tnapi->tx_prod;
6906 	base_flags = 0;
6907 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6908 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6909 
6910 	mss = skb_shinfo(skb)->gso_size;
6911 	if (mss) {
6912 		struct iphdr *iph;
6913 		u32 tcp_opt_len, hdr_len;
6914 
6915 		if (skb_header_cloned(skb) &&
6916 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6917 			goto drop;
6918 
6919 		iph = ip_hdr(skb);
6920 		tcp_opt_len = tcp_optlen(skb);
6921 
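		/* hdr_len covers only the IP and TCP headers; ETH_HLEN is
		 * subtracted because skb_transport_offset() is measured
		 * from the start of the MAC header on transmit.
		 */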
6922 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6923 
6924 		if (!skb_is_gso_v6(skb)) {
6925 			iph->check = 0;
6926 			iph->tot_len = htons(mss + hdr_len);
6927 		}
6928 
6929 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6930 		    tg3_flag(tp, TSO_BUG))
6931 			return tg3_tso_bug(tp, skb);
6932 
6933 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6934 			       TXD_FLAG_CPU_POST_DMA);
6935 
6936 		if (tg3_flag(tp, HW_TSO_1) ||
6937 		    tg3_flag(tp, HW_TSO_2) ||
6938 		    tg3_flag(tp, HW_TSO_3)) {
6939 			tcp_hdr(skb)->check = 0;
6940 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6941 		} else
6942 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6943 								 iph->daddr, 0,
6944 								 IPPROTO_TCP,
6945 								 0);
6946 
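		/* TSO-capable chips need the header length encoded in the
		 * descriptor: HW_TSO_3 splits it between the mss field and
		 * base_flags, HW_TSO_2 packs it into the upper mss bits,
		 * and older parts encode the IP/TCP option lengths instead.
		 */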
6947 		if (tg3_flag(tp, HW_TSO_3)) {
6948 			mss |= (hdr_len & 0xc) << 12;
6949 			if (hdr_len & 0x10)
6950 				base_flags |= 0x00000010;
6951 			base_flags |= (hdr_len & 0x3e0) << 5;
6952 		} else if (tg3_flag(tp, HW_TSO_2))
6953 			mss |= hdr_len << 9;
6954 		else if (tg3_flag(tp, HW_TSO_1) ||
6955 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6956 			if (tcp_opt_len || iph->ihl > 5) {
6957 				int tsflags;
6958 
6959 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6960 				mss |= (tsflags << 11);
6961 			}
6962 		} else {
6963 			if (tcp_opt_len || iph->ihl > 5) {
6964 				int tsflags;
6965 
6966 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6967 				base_flags |= tsflags << 12;
6968 			}
6969 		}
6970 	}
6971 
6972 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6973 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6974 		base_flags |= TXD_FLAG_JMB_PKT;
6975 
6976 	if (vlan_tx_tag_present(skb)) {
6977 		base_flags |= TXD_FLAG_VLAN;
6978 		vlan = vlan_tx_tag_get(skb);
6979 	}
6980 
6981 	len = skb_headlen(skb);
6982 
6983 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6984 	if (pci_dma_mapping_error(tp->pdev, mapping))
6985 		goto drop;
6986 
6988 	tnapi->tx_buffers[entry].skb = skb;
6989 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6990 
6991 	would_hit_hwbug = 0;
6992 
6993 	if (tg3_flag(tp, 5701_DMA_BUG))
6994 		would_hit_hwbug = 1;
6995 
6996 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6997 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6998 			    mss, vlan)) {
6999 		would_hit_hwbug = 1;
7000 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7001 		u32 tmp_mss = mss;
7002 
7003 		if (!tg3_flag(tp, HW_TSO_1) &&
7004 		    !tg3_flag(tp, HW_TSO_2) &&
7005 		    !tg3_flag(tp, HW_TSO_3))
7006 			tmp_mss = 0;
7007 
7008 		/* Now loop through additional data
7009 		 * fragments, and queue them.
7010 		 */
7011 		last = skb_shinfo(skb)->nr_frags - 1;
7012 		for (i = 0; i <= last; i++) {
7013 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7014 
7015 			len = skb_frag_size(frag);
7016 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7017 						   len, DMA_TO_DEVICE);
7018 
7019 			tnapi->tx_buffers[entry].skb = NULL;
7020 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7021 					   mapping);
7022 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7023 				goto dma_error;
7024 
7025 			if (!budget ||
7026 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7027 					    len, base_flags |
7028 					    ((i == last) ? TXD_FLAG_END : 0),
7029 					    tmp_mss, vlan)) {
7030 				would_hit_hwbug = 1;
7031 				break;
7032 			}
7033 		}
7034 	}
7035 
7036 	if (would_hit_hwbug) {
7037 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7038 
7039 		/* If the workaround fails due to memory/mapping
7040 		 * failure, silently drop this packet.
7041 		 */
7042 		entry = tnapi->tx_prod;
7043 		budget = tg3_tx_avail(tnapi);
7044 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7045 						base_flags, mss, vlan))
7046 			goto drop_nofree;
7047 	}
7048 
7049 	skb_tx_timestamp(skb);
7050 	netdev_tx_sent_queue(txq, skb->len);
7051 
7052 	/* Sync BD data before updating mailbox */
7053 	wmb();
7054 
7055 	/* Packets are ready, update Tx producer idx local and on card. */
7056 	tw32_tx_mbox(tnapi->prodmbox, entry);
7057 
7058 	tnapi->tx_prod = entry;
7059 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7060 		netif_tx_stop_queue(txq);
7061 
7062 		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
7064 		 * tg3_tx(), we update tx index before checking for
7065 		 * netif_tx_queue_stopped().
7066 		 */
7067 		smp_mb();
7068 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7069 			netif_tx_wake_queue(txq);
7070 	}
7071 
7072 	mmiowb();
7073 	return NETDEV_TX_OK;
7074 
7075 dma_error:
7076 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7077 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7078 drop:
7079 	dev_kfree_skb(skb);
7080 drop_nofree:
7081 	tp->tx_dropped++;
7082 	return NETDEV_TX_OK;
7083 }
7084 
7085 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7086 {
7087 	if (enable) {
7088 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7089 				  MAC_MODE_PORT_MODE_MASK);
7090 
7091 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7092 
7093 		if (!tg3_flag(tp, 5705_PLUS))
7094 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7095 
7096 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7097 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7098 		else
7099 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7100 	} else {
7101 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7102 
7103 		if (tg3_flag(tp, 5705_PLUS) ||
7104 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7105 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7106 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7107 	}
7108 
7109 	tw32(MAC_MODE, tp->mac_mode);
7110 	udelay(40);
7111 }
7112 
7113 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7114 {
7115 	u32 val, bmcr, mac_mode, ptest = 0;
7116 
7117 	tg3_phy_toggle_apd(tp, false);
7118 	tg3_phy_toggle_automdix(tp, 0);
7119 
7120 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7121 		return -EIO;
7122 
7123 	bmcr = BMCR_FULLDPLX;
7124 	switch (speed) {
7125 	case SPEED_10:
7126 		break;
7127 	case SPEED_100:
7128 		bmcr |= BMCR_SPEED100;
7129 		break;
7130 	case SPEED_1000:
7131 	default:
7132 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7133 			speed = SPEED_100;
7134 			bmcr |= BMCR_SPEED100;
7135 		} else {
7136 			speed = SPEED_1000;
7137 			bmcr |= BMCR_SPEED1000;
7138 		}
7139 	}
7140 
7141 	if (extlpbk) {
7142 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7143 			tg3_readphy(tp, MII_CTRL1000, &val);
7144 			val |= CTL1000_AS_MASTER |
7145 			       CTL1000_ENABLE_MASTER;
7146 			tg3_writephy(tp, MII_CTRL1000, val);
7147 		} else {
7148 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7149 				MII_TG3_FET_PTEST_TRIM_2;
7150 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7151 		}
7152 	} else
7153 		bmcr |= BMCR_LOOPBACK;
7154 
7155 	tg3_writephy(tp, MII_BMCR, bmcr);
7156 
7157 	/* The write needs to be flushed for the FETs */
7158 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7159 		tg3_readphy(tp, MII_BMCR, &bmcr);
7160 
7161 	udelay(40);
7162 
7163 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7164 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7165 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7166 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7167 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7168 
7169 		/* The write needs to be flushed for the AC131 */
7170 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7171 	}
7172 
7173 	/* Reset to prevent losing 1st rx packet intermittently */
7174 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7175 	    tg3_flag(tp, 5780_CLASS)) {
7176 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7177 		udelay(10);
7178 		tw32_f(MAC_RX_MODE, tp->rx_mode);
7179 	}
7180 
7181 	mac_mode = tp->mac_mode &
7182 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7183 	if (speed == SPEED_1000)
7184 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7185 	else
7186 		mac_mode |= MAC_MODE_PORT_MODE_MII;
7187 
7188 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7189 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7190 
7191 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7192 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7193 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7194 			mac_mode |= MAC_MODE_LINK_POLARITY;
7195 
7196 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7197 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7198 	}
7199 
7200 	tw32(MAC_MODE, mac_mode);
7201 	udelay(40);
7202 
7203 	return 0;
7204 }
7205 
7206 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7207 {
7208 	struct tg3 *tp = netdev_priv(dev);
7209 
7210 	if (features & NETIF_F_LOOPBACK) {
7211 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7212 			return;
7213 
7214 		spin_lock_bh(&tp->lock);
7215 		tg3_mac_loopback(tp, true);
7216 		netif_carrier_on(tp->dev);
7217 		spin_unlock_bh(&tp->lock);
7218 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7219 	} else {
7220 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7221 			return;
7222 
7223 		spin_lock_bh(&tp->lock);
7224 		tg3_mac_loopback(tp, false);
7225 		/* Force link status check */
7226 		tg3_setup_phy(tp, 1);
7227 		spin_unlock_bh(&tp->lock);
7228 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7229 	}
7230 }
7231 
7232 static netdev_features_t tg3_fix_features(struct net_device *dev,
7233 	netdev_features_t features)
7234 {
7235 	struct tg3 *tp = netdev_priv(dev);
7236 
7237 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7238 		features &= ~NETIF_F_ALL_TSO;
7239 
7240 	return features;
7241 }
7242 
7243 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7244 {
7245 	netdev_features_t changed = dev->features ^ features;
7246 
7247 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7248 		tg3_set_loopback(dev, features);
7249 
7250 	return 0;
7251 }
7252 
7253 static void tg3_rx_prodring_free(struct tg3 *tp,
7254 				 struct tg3_rx_prodring_set *tpr)
7255 {
7256 	int i;
7257 
7258 	if (tpr != &tp->napi[0].prodring) {
7259 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7260 		     i = (i + 1) & tp->rx_std_ring_mask)
7261 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7262 					tp->rx_pkt_map_sz);
7263 
7264 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7265 			for (i = tpr->rx_jmb_cons_idx;
7266 			     i != tpr->rx_jmb_prod_idx;
7267 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7268 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7269 						TG3_RX_JMB_MAP_SZ);
7270 			}
7271 		}
7272 
7273 		return;
7274 	}
7275 
7276 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7277 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7278 				tp->rx_pkt_map_sz);
7279 
7280 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7281 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7282 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7283 					TG3_RX_JMB_MAP_SZ);
7284 	}
7285 }
7286 
7287 /* Initialize rx rings for packet processing.
7288  *
7289  * The chip has been shut down and the driver detached from
7290  * the networking, so no interrupts or new tx packets will
7291  * end up in the driver.  tp->{tx,}lock are held and thus
7292  * we may not sleep.
7293  */
7294 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7295 				 struct tg3_rx_prodring_set *tpr)
7296 {
7297 	u32 i, rx_pkt_dma_sz;
7298 
7299 	tpr->rx_std_cons_idx = 0;
7300 	tpr->rx_std_prod_idx = 0;
7301 	tpr->rx_jmb_cons_idx = 0;
7302 	tpr->rx_jmb_prod_idx = 0;
7303 
7304 	if (tpr != &tp->napi[0].prodring) {
7305 		memset(&tpr->rx_std_buffers[0], 0,
7306 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7307 		if (tpr->rx_jmb_buffers)
7308 			memset(&tpr->rx_jmb_buffers[0], 0,
7309 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7310 		goto done;
7311 	}
7312 
7313 	/* Zero out all descriptors. */
7314 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7315 
7316 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7317 	if (tg3_flag(tp, 5780_CLASS) &&
7318 	    tp->dev->mtu > ETH_DATA_LEN)
7319 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7320 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7321 
	/* Initialize invariants of the rings; we only set this
7323 	 * stuff once.  This works because the card does not
7324 	 * write into the rx buffer posting rings.
7325 	 */
7326 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7327 		struct tg3_rx_buffer_desc *rxd;
7328 
7329 		rxd = &tpr->rx_std[i];
7330 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7331 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7332 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7333 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7334 	}
7335 
7336 	/* Now allocate fresh SKBs for each rx ring. */
7337 	for (i = 0; i < tp->rx_pending; i++) {
7338 		unsigned int frag_size;
7339 
7340 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7341 				      &frag_size) < 0) {
7342 			netdev_warn(tp->dev,
7343 				    "Using a smaller RX standard ring. Only "
7344 				    "%d out of %d buffers were allocated "
7345 				    "successfully\n", i, tp->rx_pending);
7346 			if (i == 0)
7347 				goto initfail;
7348 			tp->rx_pending = i;
7349 			break;
7350 		}
7351 	}
7352 
7353 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7354 		goto done;
7355 
7356 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7357 
7358 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7359 		goto done;
7360 
7361 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7362 		struct tg3_rx_buffer_desc *rxd;
7363 
7364 		rxd = &tpr->rx_jmb[i].std;
7365 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7366 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7367 				  RXD_FLAG_JUMBO;
7368 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7369 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7370 	}
7371 
7372 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7373 		unsigned int frag_size;
7374 
7375 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7376 				      &frag_size) < 0) {
7377 			netdev_warn(tp->dev,
7378 				    "Using a smaller RX jumbo ring. Only %d "
7379 				    "out of %d buffers were allocated "
7380 				    "successfully\n", i, tp->rx_jumbo_pending);
7381 			if (i == 0)
7382 				goto initfail;
7383 			tp->rx_jumbo_pending = i;
7384 			break;
7385 		}
7386 	}
7387 
7388 done:
7389 	return 0;
7390 
7391 initfail:
7392 	tg3_rx_prodring_free(tp, tpr);
7393 	return -ENOMEM;
7394 }
7395 
7396 static void tg3_rx_prodring_fini(struct tg3 *tp,
7397 				 struct tg3_rx_prodring_set *tpr)
7398 {
7399 	kfree(tpr->rx_std_buffers);
7400 	tpr->rx_std_buffers = NULL;
7401 	kfree(tpr->rx_jmb_buffers);
7402 	tpr->rx_jmb_buffers = NULL;
7403 	if (tpr->rx_std) {
7404 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7405 				  tpr->rx_std, tpr->rx_std_mapping);
7406 		tpr->rx_std = NULL;
7407 	}
7408 	if (tpr->rx_jmb) {
7409 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7410 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7411 		tpr->rx_jmb = NULL;
7412 	}
7413 }
7414 
7415 static int tg3_rx_prodring_init(struct tg3 *tp,
7416 				struct tg3_rx_prodring_set *tpr)
7417 {
7418 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7419 				      GFP_KERNEL);
7420 	if (!tpr->rx_std_buffers)
7421 		return -ENOMEM;
7422 
7423 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7424 					 TG3_RX_STD_RING_BYTES(tp),
7425 					 &tpr->rx_std_mapping,
7426 					 GFP_KERNEL);
7427 	if (!tpr->rx_std)
7428 		goto err_out;
7429 
7430 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7431 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7432 					      GFP_KERNEL);
7433 		if (!tpr->rx_jmb_buffers)
7434 			goto err_out;
7435 
7436 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7437 						 TG3_RX_JMB_RING_BYTES(tp),
7438 						 &tpr->rx_jmb_mapping,
7439 						 GFP_KERNEL);
7440 		if (!tpr->rx_jmb)
7441 			goto err_out;
7442 	}
7443 
7444 	return 0;
7445 
7446 err_out:
7447 	tg3_rx_prodring_fini(tp, tpr);
7448 	return -ENOMEM;
7449 }
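
/* Note the split in tg3_rx_prodring_init() above: rx_std_buffers and
 * rx_jmb_buffers are host-only shadow arrays (plain kzalloc) tracking
 * each slot's data buffer and its DMA mapping, while rx_std and
 * rx_jmb are the descriptor rings themselves, allocated DMA-coherent
 * so the NIC can fetch descriptors from them directly.
 */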
7450 
7451 /* Free up pending packets in all rx/tx rings.
7452  *
7453  * The chip has been shut down and the driver detached from
7454  * the network stack, so no interrupts or new tx packets will
7455  * end up in the driver.  tp->{tx,}lock is not held and we are not
7456  * in an interrupt context and thus may sleep.
7457  */
7458 static void tg3_free_rings(struct tg3 *tp)
7459 {
7460 	int i, j;
7461 
7462 	for (j = 0; j < tp->irq_cnt; j++) {
7463 		struct tg3_napi *tnapi = &tp->napi[j];
7464 
7465 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7466 
7467 		if (!tnapi->tx_buffers)
7468 			continue;
7469 
7470 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7471 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7472 
7473 			if (!skb)
7474 				continue;
7475 
7476 			tg3_tx_skb_unmap(tnapi, i,
7477 					 skb_shinfo(skb)->nr_frags - 1);
7478 
7479 			dev_kfree_skb_any(skb);
7480 		}
7481 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7482 	}
7483 }
7484 
7485 /* Initialize tx/rx rings for packet processing.
7486  *
7487  * The chip has been shut down and the driver detached from
7488  * the network stack, so no interrupts or new tx packets will
7489  * end up in the driver.  tp->{tx,}lock are held and thus
7490  * we may not sleep.
7491  */
7492 static int tg3_init_rings(struct tg3 *tp)
7493 {
7494 	int i;
7495 
7496 	/* Free up all the SKBs and rx data buffers. */
7497 	tg3_free_rings(tp);
7498 
7499 	for (i = 0; i < tp->irq_cnt; i++) {
7500 		struct tg3_napi *tnapi = &tp->napi[i];
7501 
7502 		tnapi->last_tag = 0;
7503 		tnapi->last_irq_tag = 0;
7504 		tnapi->hw_status->status = 0;
7505 		tnapi->hw_status->status_tag = 0;
7506 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7507 
7508 		tnapi->tx_prod = 0;
7509 		tnapi->tx_cons = 0;
7510 		if (tnapi->tx_ring)
7511 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7512 
7513 		tnapi->rx_rcb_ptr = 0;
7514 		if (tnapi->rx_rcb)
7515 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7516 
7517 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7518 			tg3_free_rings(tp);
7519 			return -ENOMEM;
7520 		}
7521 	}
7522 
7523 	return 0;
7524 }
7525 
7526 /*
7527  * Must only be invoked while the chip is quiesced: interrupt
7528  * sources disabled and the hardware shut down.
7529  */
7530 static void tg3_free_consistent(struct tg3 *tp)
7531 {
7532 	int i;
7533 
7534 	for (i = 0; i < tp->irq_cnt; i++) {
7535 		struct tg3_napi *tnapi = &tp->napi[i];
7536 
7537 		if (tnapi->tx_ring) {
7538 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7539 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7540 			tnapi->tx_ring = NULL;
7541 		}
7542 
7543 		kfree(tnapi->tx_buffers);
7544 		tnapi->tx_buffers = NULL;
7545 
7546 		if (tnapi->rx_rcb) {
7547 			dma_free_coherent(&tp->pdev->dev,
7548 					  TG3_RX_RCB_RING_BYTES(tp),
7549 					  tnapi->rx_rcb,
7550 					  tnapi->rx_rcb_mapping);
7551 			tnapi->rx_rcb = NULL;
7552 		}
7553 
7554 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7555 
7556 		if (tnapi->hw_status) {
7557 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7558 					  tnapi->hw_status,
7559 					  tnapi->status_mapping);
7560 			tnapi->hw_status = NULL;
7561 		}
7562 	}
7563 
7564 	if (tp->hw_stats) {
7565 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7566 				  tp->hw_stats, tp->stats_mapping);
7567 		tp->hw_stats = NULL;
7568 	}
7569 }
7570 
7571 /*
7572  * Must only be invoked while the chip is quiesced: interrupt
7573  * sources disabled and the hardware shut down.  Can sleep.
7574  */
7575 static int tg3_alloc_consistent(struct tg3 *tp)
7576 {
7577 	int i;
7578 
7579 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7580 					  sizeof(struct tg3_hw_stats),
7581 					  &tp->stats_mapping,
7582 					  GFP_KERNEL);
7583 	if (!tp->hw_stats)
7584 		goto err_out;
7585 
7586 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7587 
7588 	for (i = 0; i < tp->irq_cnt; i++) {
7589 		struct tg3_napi *tnapi = &tp->napi[i];
7590 		struct tg3_hw_status *sblk;
7591 
7592 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7593 						      TG3_HW_STATUS_SIZE,
7594 						      &tnapi->status_mapping,
7595 						      GFP_KERNEL);
7596 		if (!tnapi->hw_status)
7597 			goto err_out;
7598 
7599 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7600 		sblk = tnapi->hw_status;
7601 
7602 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7603 			goto err_out;
7604 
7605 		/* If multivector TSS is enabled, vector 0 does not handle
7606 		 * tx interrupts.  Don't allocate any resources for it.
7607 		 */
7608 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7609 		    (i && tg3_flag(tp, ENABLE_TSS))) {
7610 			tnapi->tx_buffers = kzalloc(
7611 					       sizeof(struct tg3_tx_ring_info) *
7612 					       TG3_TX_RING_SIZE, GFP_KERNEL);
7613 			if (!tnapi->tx_buffers)
7614 				goto err_out;
7615 
7616 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7617 							    TG3_TX_RING_BYTES,
7618 							&tnapi->tx_desc_mapping,
7619 							    GFP_KERNEL);
7620 			if (!tnapi->tx_ring)
7621 				goto err_out;
7622 		}
7623 
7624 		/*
7625 		 * When RSS is enabled, the status block format changes
7626 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7627 		 * and "rx_mini_consumer" members get mapped to the
7628 		 * other three rx return ring producer indexes.
7629 		 */
7630 		switch (i) {
7631 		default:
7632 			if (tg3_flag(tp, ENABLE_RSS)) {
7633 				tnapi->rx_rcb_prod_idx = NULL;
7634 				break;
7635 			}
7636 			/* Fall through */
7637 		case 1:
7638 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7639 			break;
7640 		case 2:
7641 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7642 			break;
7643 		case 3:
7644 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7645 			break;
7646 		case 4:
7647 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7648 			break;
7649 		}
7650 
7651 		/*
7652 		 * If multivector RSS is enabled, vector 0 does not handle
7653 		 * rx or tx interrupts.  Don't allocate any resources for it.
7654 		 */
7655 		if (!i && tg3_flag(tp, ENABLE_RSS))
7656 			continue;
7657 
7658 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7659 						   TG3_RX_RCB_RING_BYTES(tp),
7660 						   &tnapi->rx_rcb_mapping,
7661 						   GFP_KERNEL);
7662 		if (!tnapi->rx_rcb)
7663 			goto err_out;
7664 
7665 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7666 	}
7667 
7668 	return 0;
7669 
7670 err_out:
7671 	tg3_free_consistent(tp);
7672 	return -ENOMEM;
7673 }
7674 
7675 #define MAX_WAIT_CNT 1000
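/* With udelay(100) between polls, MAX_WAIT_CNT bounds the busy-wait
 * loops below at roughly 1000 * 100us = 100ms per block.
 */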
7676 
7677 /* To stop a block, clear the enable bit and poll until it
7678  * clears.  tp->lock is held.
7679  */
7680 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7681 {
7682 	unsigned int i;
7683 	u32 val;
7684 
7685 	if (tg3_flag(tp, 5705_PLUS)) {
7686 		switch (ofs) {
7687 		case RCVLSC_MODE:
7688 		case DMAC_MODE:
7689 		case MBFREE_MODE:
7690 		case BUFMGR_MODE:
7691 		case MEMARB_MODE:
7692 			/* We can't enable/disable these bits of the
7693 			 * 5705/5750, so just report success.
7694 			 */
7695 			return 0;
7696 
7697 		default:
7698 			break;
7699 		}
7700 	}
7701 
7702 	val = tr32(ofs);
7703 	val &= ~enable_bit;
7704 	tw32_f(ofs, val);
7705 
7706 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7707 		udelay(100);
7708 		val = tr32(ofs);
7709 		if ((val & enable_bit) == 0)
7710 			break;
7711 	}
7712 
7713 	if (i == MAX_WAIT_CNT && !silent) {
7714 		dev_err(&tp->pdev->dev,
7715 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7716 			ofs, enable_bit);
7717 		return -ENODEV;
7718 	}
7719 
7720 	return 0;
7721 }
7722 
7723 /* tp->lock is held. */
7724 static int tg3_abort_hw(struct tg3 *tp, int silent)
7725 {
7726 	int i, err;
7727 
7728 	tg3_disable_ints(tp);
7729 
7730 	tp->rx_mode &= ~RX_MODE_ENABLE;
7731 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7732 	udelay(10);
7733 
7734 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7735 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7736 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7737 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7738 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7739 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7740 
7741 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7742 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7743 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7744 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7745 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7746 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7747 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7748 
7749 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7750 	tw32_f(MAC_MODE, tp->mac_mode);
7751 	udelay(40);
7752 
7753 	tp->tx_mode &= ~TX_MODE_ENABLE;
7754 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7755 
7756 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7757 		udelay(100);
7758 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7759 			break;
7760 	}
7761 	if (i >= MAX_WAIT_CNT) {
7762 		dev_err(&tp->pdev->dev,
7763 			"%s timed out, TX_MODE_ENABLE will not clear "
7764 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7765 		err |= -ENODEV;
7766 	}
7767 
7768 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7769 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7770 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7771 
7772 	tw32(FTQ_RESET, 0xffffffff);
7773 	tw32(FTQ_RESET, 0x00000000);
7774 
7775 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7776 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7777 
7778 	for (i = 0; i < tp->irq_cnt; i++) {
7779 		struct tg3_napi *tnapi = &tp->napi[i];
7780 		if (tnapi->hw_status)
7781 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7782 	}
7783 
7784 	return err;
7785 }
7786 
7787 /* Save PCI command register before chip reset */
7788 static void tg3_save_pci_state(struct tg3 *tp)
7789 {
7790 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7791 }
7792 
7793 /* Restore PCI state after chip reset */
7794 static void tg3_restore_pci_state(struct tg3 *tp)
7795 {
7796 	u32 val;
7797 
7798 	/* Re-enable indirect register accesses. */
7799 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7800 			       tp->misc_host_ctrl);
7801 
7802 	/* Set MAX PCI retry to zero. */
7803 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7804 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7805 	    tg3_flag(tp, PCIX_MODE))
7806 		val |= PCISTATE_RETRY_SAME_DMA;
7807 	/* Allow reads and writes to the APE register and memory space. */
7808 	if (tg3_flag(tp, ENABLE_APE))
7809 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7810 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7811 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7812 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7813 
7814 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7815 
7816 	if (!tg3_flag(tp, PCI_EXPRESS)) {
7817 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7818 				      tp->pci_cacheline_sz);
7819 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7820 				      tp->pci_lat_timer);
7821 	}
7822 
7823 	/* Make sure PCI-X relaxed ordering bit is clear. */
7824 	if (tg3_flag(tp, PCIX_MODE)) {
7825 		u16 pcix_cmd;
7826 
7827 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7828 				     &pcix_cmd);
7829 		pcix_cmd &= ~PCI_X_CMD_ERO;
7830 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7831 				      pcix_cmd);
7832 	}
7833 
7834 	if (tg3_flag(tp, 5780_CLASS)) {
7835 
7836 		/* Chip reset on the 5780 will clear the MSI enable bit,
7837 		 * so we need to restore it.
7838 		 */
7839 		if (tg3_flag(tp, USING_MSI)) {
7840 			u16 ctrl;
7841 
7842 			pci_read_config_word(tp->pdev,
7843 					     tp->msi_cap + PCI_MSI_FLAGS,
7844 					     &ctrl);
7845 			pci_write_config_word(tp->pdev,
7846 					      tp->msi_cap + PCI_MSI_FLAGS,
7847 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7848 			val = tr32(MSGINT_MODE);
7849 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7850 		}
7851 	}
7852 }
7853 
7854 /* tp->lock is held. */
7855 static int tg3_chip_reset(struct tg3 *tp)
7856 {
7857 	u32 val;
7858 	void (*write_op)(struct tg3 *, u32, u32);
7859 	int i, err;
7860 
7861 	tg3_nvram_lock(tp);
7862 
7863 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7864 
7865 	/* No matching tg3_nvram_unlock() after this because
7866 	 * chip reset below will undo the nvram lock.
7867 	 */
7868 	tp->nvram_lock_cnt = 0;
7869 
7870 	/* GRC_MISC_CFG core clock reset will clear the memory
7871 	 * enable bit in PCI register 4 and the MSI enable bit
7872 	 * on some chips, so we save relevant registers here.
7873 	 */
7874 	tg3_save_pci_state(tp);
7875 
7876 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7877 	    tg3_flag(tp, 5755_PLUS))
7878 		tw32(GRC_FASTBOOT_PC, 0);
7879 
7880 	/*
7881 	 * We must avoid the readl() that normally takes place.
7882 	 * It locks up machines, causes machine checks, and triggers
7883 	 * other fun things.  So, temporarily disable the 5701
7884 	 * hardware workaround while we do the reset.
7885 	 */
7886 	write_op = tp->write32;
7887 	if (write_op == tg3_write_flush_reg32)
7888 		tp->write32 = tg3_write32;
7889 
7890 	/* Prevent the irq handler from reading or writing PCI registers
7891 	 * during chip reset when the memory enable bit in the PCI command
7892 	 * register may be cleared.  The chip does not generate interrupts
7893 	 * at this time, but the irq handler may still be called due to irq
7894 	 * sharing or irqpoll.
7895 	 */
7896 	tg3_flag_set(tp, CHIP_RESETTING);
7897 	for (i = 0; i < tp->irq_cnt; i++) {
7898 		struct tg3_napi *tnapi = &tp->napi[i];
7899 		if (tnapi->hw_status) {
7900 			tnapi->hw_status->status = 0;
7901 			tnapi->hw_status->status_tag = 0;
7902 		}
7903 		tnapi->last_tag = 0;
7904 		tnapi->last_irq_tag = 0;
7905 	}
7906 	smp_mb();
7907 
7908 	for (i = 0; i < tp->irq_cnt; i++)
7909 		synchronize_irq(tp->napi[i].irq_vec);
7910 
7911 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7912 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7913 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7914 	}
7915 
7916 	/* do the reset */
7917 	val = GRC_MISC_CFG_CORECLK_RESET;
7918 
7919 	if (tg3_flag(tp, PCI_EXPRESS)) {
7920 		/* Force PCIe 1.0a mode */
7921 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7922 		    !tg3_flag(tp, 57765_PLUS) &&
7923 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7924 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7925 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7926 
7927 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7928 			tw32(GRC_MISC_CFG, (1 << 29));
7929 			val |= (1 << 29);
7930 		}
7931 	}
7932 
7933 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7934 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7935 		tw32(GRC_VCPU_EXT_CTRL,
7936 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7937 	}
7938 
7939 	/* Manage gphy power for all CPMU-absent PCIe devices. */
7940 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7941 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7942 
7943 	tw32(GRC_MISC_CFG, val);
7944 
7945 	/* restore 5701 hardware bug workaround write method */
7946 	tp->write32 = write_op;
7947 
7948 	/* Unfortunately, we have to delay before the PCI read back.
7949 	 * Some 575X chips will not even respond to a PCI cfg access
7950 	 * when the reset command is given to the chip.
7951 	 *
7952 	 * How do these hardware designers expect things to work
7953 	 * properly if the PCI write is posted for a long period
7954 	 * of time?  It is always necessary to have some method by
7955 	 * which a register read back can occur to push the write
7956 	 * out which does the reset.
7957 	 *
7958 	 * For most tg3 variants the trick below has worked.
7959 	 * Ho hum...
7960 	 */
7961 	udelay(120);
7962 
7963 	/* Flush PCI posted writes.  The normal MMIO registers
7964 	 * are inaccessible at this time, so this is the only
7965 	 * way to do this reliably (actually, this is no longer
7966 	 * the case, see above).  I tried to use indirect
7967 	 * register read/write but this upset some 5701 variants.
7968 	 */
7969 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7970 
7971 	udelay(120);
7972 
7973 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7974 		u16 val16;
7975 
7976 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7977 			int i;
7978 			u32 cfg_val;
7979 
7980 			/* Wait for link training to complete.  */
7981 			for (i = 0; i < 5000; i++)
7982 				udelay(100);
7983 
7984 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7985 			pci_write_config_dword(tp->pdev, 0xc4,
7986 					       cfg_val | (1 << 15));
7987 		}
7988 
7989 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7990 		pci_read_config_word(tp->pdev,
7991 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7992 				     &val16);
7993 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7994 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7995 		/*
7996 		 * Older PCIe devices only support the 128-byte
7997 		 * MPS setting.  Enforce the restriction.
7998 		 */
7999 		if (!tg3_flag(tp, CPMU_PRESENT))
8000 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8001 		pci_write_config_word(tp->pdev,
8002 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8003 				      val16);
8004 
8005 		/* Clear error status */
8006 		pci_write_config_word(tp->pdev,
8007 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8008 				      PCI_EXP_DEVSTA_CED |
8009 				      PCI_EXP_DEVSTA_NFED |
8010 				      PCI_EXP_DEVSTA_FED |
8011 				      PCI_EXP_DEVSTA_URD);
8012 	}
8013 
8014 	tg3_restore_pci_state(tp);
8015 
8016 	tg3_flag_clear(tp, CHIP_RESETTING);
8017 	tg3_flag_clear(tp, ERROR_PROCESSED);
8018 
8019 	val = 0;
8020 	if (tg3_flag(tp, 5780_CLASS))
8021 		val = tr32(MEMARB_MODE);
8022 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8023 
8024 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8025 		tg3_stop_fw(tp);
8026 		tw32(0x5000, 0x400);
8027 	}
8028 
8029 	tw32(GRC_MODE, tp->grc_mode);
8030 
8031 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8032 		val = tr32(0xc4);
8033 
8034 		tw32(0xc4, val | (1 << 15));
8035 	}
8036 
8037 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8038 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8039 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8040 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8041 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8042 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8043 	}
8044 
8045 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8046 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8047 		val = tp->mac_mode;
8048 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8049 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8050 		val = tp->mac_mode;
8051 	} else
8052 		val = 0;
8053 
8054 	tw32_f(MAC_MODE, val);
8055 	udelay(40);
8056 
8057 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8058 
8059 	err = tg3_poll_fw(tp);
8060 	if (err)
8061 		return err;
8062 
8063 	tg3_mdio_start(tp);
8064 
8065 	if (tg3_flag(tp, PCI_EXPRESS) &&
8066 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8067 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8068 	    !tg3_flag(tp, 57765_PLUS)) {
8069 		val = tr32(0x7c00);
8070 
8071 		tw32(0x7c00, val | (1 << 25));
8072 	}
8073 
8074 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8075 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8076 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8077 	}
8078 
8079 	/* Reprobe ASF enable state.  */
8080 	tg3_flag_clear(tp, ENABLE_ASF);
8081 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8082 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8083 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8084 		u32 nic_cfg;
8085 
8086 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8087 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8088 			tg3_flag_set(tp, ENABLE_ASF);
8089 			tp->last_event_jiffies = jiffies;
8090 			if (tg3_flag(tp, 5750_PLUS))
8091 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8092 		}
8093 	}
8094 
8095 	return 0;
8096 }
8097 
8098 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8099 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8100 
8101 /* tp->lock is held. */
8102 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8103 {
8104 	int err;
8105 
8106 	tg3_stop_fw(tp);
8107 
8108 	tg3_write_sig_pre_reset(tp, kind);
8109 
8110 	tg3_abort_hw(tp, silent);
8111 	err = tg3_chip_reset(tp);
8112 
8113 	__tg3_set_mac_addr(tp, 0);
8114 
8115 	tg3_write_sig_legacy(tp, kind);
8116 	tg3_write_sig_post_reset(tp, kind);
8117 
8118 	if (tp->hw_stats) {
8119 		/* Save the stats across chip resets... */
8120 		tg3_get_nstats(tp, &tp->net_stats_prev);
8121 		tg3_get_estats(tp, &tp->estats_prev);
8122 
8123 		/* And make sure the next sample is new data */
8124 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8125 	}
8126 
8127 	if (err)
8128 		return err;
8129 
8130 	return 0;
8131 }
8132 
8133 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8134 {
8135 	struct tg3 *tp = netdev_priv(dev);
8136 	struct sockaddr *addr = p;
8137 	int err = 0, skip_mac_1 = 0;
8138 
8139 	if (!is_valid_ether_addr(addr->sa_data))
8140 		return -EADDRNOTAVAIL;
8141 
8142 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8143 
8144 	if (!netif_running(dev))
8145 		return 0;
8146 
8147 	if (tg3_flag(tp, ENABLE_ASF)) {
8148 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8149 
8150 		addr0_high = tr32(MAC_ADDR_0_HIGH);
8151 		addr0_low = tr32(MAC_ADDR_0_LOW);
8152 		addr1_high = tr32(MAC_ADDR_1_HIGH);
8153 		addr1_low = tr32(MAC_ADDR_1_LOW);
8154 
8155 		/* Skip MAC addr 1 if ASF is using it. */
8156 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8157 		    !(addr1_high == 0 && addr1_low == 0))
8158 			skip_mac_1 = 1;
8159 	}
8160 	spin_lock_bh(&tp->lock);
8161 	__tg3_set_mac_addr(tp, skip_mac_1);
8162 	spin_unlock_bh(&tp->lock);
8163 
8164 	return err;
8165 }
8166 
8167 /* tp->lock is held. */
8168 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8169 			   dma_addr_t mapping, u32 maxlen_flags,
8170 			   u32 nic_addr)
8171 {
8172 	tg3_write_mem(tp,
8173 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8174 		      ((u64) mapping >> 32));
8175 	tg3_write_mem(tp,
8176 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8177 		      ((u64) mapping & 0xffffffff));
8178 	tg3_write_mem(tp,
8179 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8180 		       maxlen_flags);
8181 
8182 	if (!tg3_flag(tp, 5705_PLUS))
8183 		tg3_write_mem(tp,
8184 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8185 			      nic_addr);
8186 }
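
/* A TG3_BDINFO block in NIC SRAM is thus four 32-bit words: the
 * 64-bit host DMA address (high word first), a maxlen/flags word,
 * and the ring's NIC SRAM address, which is written only on
 * pre-5705 chips.  The larger comment in tg3_reset_hw() below
 * describes the per-field meaning.
 */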
8187 
8188 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8189 {
8190 	int i;
8191 
8192 	if (!tg3_flag(tp, ENABLE_TSS)) {
8193 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8194 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8195 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8196 	} else {
8197 		tw32(HOSTCC_TXCOL_TICKS, 0);
8198 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8199 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8200 	}
8201 
8202 	if (!tg3_flag(tp, ENABLE_RSS)) {
8203 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8204 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8205 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8206 	} else {
8207 		tw32(HOSTCC_RXCOL_TICKS, 0);
8208 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8209 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8210 	}
8211 
8212 	if (!tg3_flag(tp, 5705_PLUS)) {
8213 		u32 val = ec->stats_block_coalesce_usecs;
8214 
8215 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8216 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8217 
8218 		if (!netif_carrier_ok(tp->dev))
8219 			val = 0;
8220 
8221 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8222 	}
8223 
8224 	for (i = 0; i < tp->irq_cnt - 1; i++) {
8225 		u32 reg;
8226 
8227 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8228 		tw32(reg, ec->rx_coalesce_usecs);
8229 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8230 		tw32(reg, ec->rx_max_coalesced_frames);
8231 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8232 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8233 
8234 		if (tg3_flag(tp, ENABLE_TSS)) {
8235 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8236 			tw32(reg, ec->tx_coalesce_usecs);
8237 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8238 			tw32(reg, ec->tx_max_coalesced_frames);
8239 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8240 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8241 		}
8242 	}
8243 
8244 	for (; i < tp->irq_max - 1; i++) {
8245 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8246 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8247 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8248 
8249 		if (tg3_flag(tp, ENABLE_TSS)) {
8250 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8251 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8252 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8253 		}
8254 	}
8255 }
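
/* The per-vector coalescing registers written above sit at a fixed
 * 0x18-byte stride from the *_VEC1 base addresses; vectors beyond
 * tp->irq_cnt - 1 are explicitly zeroed so stale values cannot
 * affect currently unused MSI-X vectors.
 */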
8256 
8257 /* tp->lock is held. */
8258 static void tg3_rings_reset(struct tg3 *tp)
8259 {
8260 	int i;
8261 	u32 stblk, txrcb, rxrcb, limit;
8262 	struct tg3_napi *tnapi = &tp->napi[0];
8263 
8264 	/* Disable all transmit rings but the first. */
8265 	if (!tg3_flag(tp, 5705_PLUS))
8266 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8267 	else if (tg3_flag(tp, 5717_PLUS))
8268 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8269 	else if (tg3_flag(tp, 57765_CLASS))
8270 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8271 	else
8272 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8273 
8274 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8275 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8276 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8277 			      BDINFO_FLAGS_DISABLED);
8278 
8279 
8280 	/* Disable all receive return rings but the first. */
8281 	if (tg3_flag(tp, 5717_PLUS))
8282 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8283 	else if (!tg3_flag(tp, 5705_PLUS))
8284 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8285 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8286 		 tg3_flag(tp, 57765_CLASS))
8287 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8288 	else
8289 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8290 
8291 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8292 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8293 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8294 			      BDINFO_FLAGS_DISABLED);
8295 
8296 	/* Disable interrupts */
8297 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8298 	tp->napi[0].chk_msi_cnt = 0;
8299 	tp->napi[0].last_rx_cons = 0;
8300 	tp->napi[0].last_tx_cons = 0;
8301 
8302 	/* Zero mailbox registers. */
8303 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8304 		for (i = 1; i < tp->irq_max; i++) {
8305 			tp->napi[i].tx_prod = 0;
8306 			tp->napi[i].tx_cons = 0;
8307 			if (tg3_flag(tp, ENABLE_TSS))
8308 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8309 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8310 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8311 			tp->napi[i].chk_msi_cnt = 0;
8312 			tp->napi[i].last_rx_cons = 0;
8313 			tp->napi[i].last_tx_cons = 0;
8314 		}
8315 		if (!tg3_flag(tp, ENABLE_TSS))
8316 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8317 	} else {
8318 		tp->napi[0].tx_prod = 0;
8319 		tp->napi[0].tx_cons = 0;
8320 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8321 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8322 	}
8323 
8324 	/* Make sure the NIC-based send BD rings are disabled. */
8325 	if (!tg3_flag(tp, 5705_PLUS)) {
8326 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8327 		for (i = 0; i < 16; i++)
8328 			tw32_tx_mbox(mbox + i * 8, 0);
8329 	}
8330 
8331 	txrcb = NIC_SRAM_SEND_RCB;
8332 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8333 
8334 	/* Clear the status block in RAM. */
8335 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8336 
8337 	/* Set status block DMA address */
8338 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8339 	     ((u64) tnapi->status_mapping >> 32));
8340 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8341 	     ((u64) tnapi->status_mapping & 0xffffffff));
8342 
8343 	if (tnapi->tx_ring) {
8344 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8345 			       (TG3_TX_RING_SIZE <<
8346 				BDINFO_FLAGS_MAXLEN_SHIFT),
8347 			       NIC_SRAM_TX_BUFFER_DESC);
8348 		txrcb += TG3_BDINFO_SIZE;
8349 	}
8350 
8351 	if (tnapi->rx_rcb) {
8352 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8353 			       (tp->rx_ret_ring_mask + 1) <<
8354 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8355 		rxrcb += TG3_BDINFO_SIZE;
8356 	}
8357 
8358 	stblk = HOSTCC_STATBLCK_RING1;
8359 
8360 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8361 		u64 mapping = (u64)tnapi->status_mapping;
8362 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8363 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8364 
8365 		/* Clear the status block in RAM. */
8366 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8367 
8368 		if (tnapi->tx_ring) {
8369 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8370 				       (TG3_TX_RING_SIZE <<
8371 					BDINFO_FLAGS_MAXLEN_SHIFT),
8372 				       NIC_SRAM_TX_BUFFER_DESC);
8373 			txrcb += TG3_BDINFO_SIZE;
8374 		}
8375 
8376 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8377 			       ((tp->rx_ret_ring_mask + 1) <<
8378 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8379 
8380 		stblk += 8;
8381 		rxrcb += TG3_BDINFO_SIZE;
8382 	}
8383 }
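
/* Each additional vector's status block address pair above sits at
 * an 8-byte stride from HOSTCC_STATBLCK_RING1 (high word, then low
 * word of the 64-bit DMA address), hence the stblk += 8 per ring.
 */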
8384 
8385 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8386 {
8387 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8388 
8389 	if (!tg3_flag(tp, 5750_PLUS) ||
8390 	    tg3_flag(tp, 5780_CLASS) ||
8391 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8392 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8393 	    tg3_flag(tp, 57765_PLUS))
8394 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8395 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8396 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8397 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8398 	else
8399 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8400 
8401 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8402 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8403 
8404 	val = min(nic_rep_thresh, host_rep_thresh);
8405 	tw32(RCVBDI_STD_THRESH, val);
8406 
8407 	if (tg3_flag(tp, 57765_PLUS))
8408 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8409 
8410 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8411 		return;
8412 
8413 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8414 
8415 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8416 
8417 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8418 	tw32(RCVBDI_JUMBO_THRESH, val);
8419 
8420 	if (tg3_flag(tp, 57765_PLUS))
8421 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8422 }
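
/* The replenish thresholds programmed above take the stricter of a
 * NIC-side limit (half the on-chip BD cache, capped by
 * tp->rx_std_max_post) and a host-side limit (1/8 of the posted
 * ring, at least 1), presumably so the rings are refilled well
 * before the BD cache can run dry.
 */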
8423 
8424 static inline u32 calc_crc(unsigned char *buf, int len)
8425 {
8426 	u32 reg;
8427 	u32 tmp;
8428 	int j, k;
8429 
8430 	reg = 0xffffffff;
8431 
8432 	for (j = 0; j < len; j++) {
8433 		reg ^= buf[j];
8434 
8435 		for (k = 0; k < 8; k++) {
8436 			tmp = reg & 0x01;
8437 
8438 			reg >>= 1;
8439 
8440 			if (tmp)
8441 				reg ^= 0xedb88320;
8442 		}
8443 	}
8444 
8445 	return ~reg;
8446 }
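
/* calc_crc() above is the standard bit-reflected (LSB-first) CRC-32:
 * 0xedb88320 is the reversed form of the IEEE 802.3 polynomial
 * 0x04c11db7.  __tg3_set_rx_mode() below uses the low seven bits of
 * the complemented CRC to pick one of 128 multicast hash-filter bits
 * spread across the four MAC_HASH_REG_* registers.  For example
 * (arbitrary hash value), bit = 0x47 gives regidx = (0x47 & 0x60) >> 5
 * = 2 and bit 0x47 & 0x1f = 7, i.e. bit 7 of MAC_HASH_REG_2.
 */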
8447 
8448 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8449 {
8450 	/* accept or reject all multicast frames */
8451 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8452 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8453 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8454 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8455 }
8456 
8457 static void __tg3_set_rx_mode(struct net_device *dev)
8458 {
8459 	struct tg3 *tp = netdev_priv(dev);
8460 	u32 rx_mode;
8461 
8462 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8463 				  RX_MODE_KEEP_VLAN_TAG);
8464 
8465 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8466 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8467 	 * flag clear.
8468 	 */
8469 	if (!tg3_flag(tp, ENABLE_ASF))
8470 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8471 #endif
8472 
8473 	if (dev->flags & IFF_PROMISC) {
8474 		/* Promiscuous mode. */
8475 		rx_mode |= RX_MODE_PROMISC;
8476 	} else if (dev->flags & IFF_ALLMULTI) {
8477 		/* Accept all multicast. */
8478 		tg3_set_multi(tp, 1);
8479 	} else if (netdev_mc_empty(dev)) {
8480 		/* Reject all multicast. */
8481 		tg3_set_multi(tp, 0);
8482 	} else {
8483 		/* Accept one or more multicast(s). */
8484 		struct netdev_hw_addr *ha;
8485 		u32 mc_filter[4] = { 0, };
8486 		u32 regidx;
8487 		u32 bit;
8488 		u32 crc;
8489 
8490 		netdev_for_each_mc_addr(ha, dev) {
8491 			crc = calc_crc(ha->addr, ETH_ALEN);
8492 			bit = ~crc & 0x7f;
8493 			regidx = (bit & 0x60) >> 5;
8494 			bit &= 0x1f;
8495 			mc_filter[regidx] |= (1 << bit);
8496 		}
8497 
8498 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8499 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8500 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8501 		tw32(MAC_HASH_REG_3, mc_filter[3]);
8502 	}
8503 
8504 	if (rx_mode != tp->rx_mode) {
8505 		tp->rx_mode = rx_mode;
8506 		tw32_f(MAC_RX_MODE, rx_mode);
8507 		udelay(10);
8508 	}
8509 }
8510 
8511 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8512 {
8513 	int i;
8514 
8515 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8516 		tp->rss_ind_tbl[i] =
8517 			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8518 }
8519 
8520 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8521 {
8522 	int i;
8523 
8524 	if (!tg3_flag(tp, SUPPORT_MSIX))
8525 		return;
8526 
8527 	if (tp->irq_cnt <= 2) {
8528 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8529 		return;
8530 	}
8531 
8532 	/* Validate table against current IRQ count */
8533 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8534 		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8535 			break;
8536 	}
8537 
8538 	if (i != TG3_RSS_INDIR_TBL_SIZE)
8539 		tg3_rss_init_dflt_indir_tbl(tp);
8540 }
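
/* With two or fewer vectors there is only one RX return ring, so the
 * indirection table above is simply zeroed.  Otherwise a single entry
 * pointing at or beyond the tp->irq_cnt - 1 available RX rings
 * invalidates the whole table, and the evenly spread default from
 * ethtool_rxfh_indir_default() is reinstalled.
 */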
8541 
8542 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8543 {
8544 	int i = 0;
8545 	u32 reg = MAC_RSS_INDIR_TBL_0;
8546 
8547 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8548 		u32 val = tp->rss_ind_tbl[i];
8549 		i++;
8550 		for (; i % 8; i++) {
8551 			val <<= 4;
8552 			val |= tp->rss_ind_tbl[i];
8553 		}
8554 		tw32(reg, val);
8555 		reg += 4;
8556 	}
8557 }
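
/* Each 32-bit register written above packs eight 4-bit indirection
 * entries, earliest entry in the most significant nibble, so the
 * TG3_RSS_INDIR_TBL_SIZE entries occupy TG3_RSS_INDIR_TBL_SIZE / 8
 * consecutive registers starting at MAC_RSS_INDIR_TBL_0.
 */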
8558 
8559 /* tp->lock is held. */
8560 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8561 {
8562 	u32 val, rdmac_mode;
8563 	int i, err, limit;
8564 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8565 
8566 	tg3_disable_ints(tp);
8567 
8568 	tg3_stop_fw(tp);
8569 
8570 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8571 
8572 	if (tg3_flag(tp, INIT_COMPLETE))
8573 		tg3_abort_hw(tp, 1);
8574 
8575 	/* Enable MAC control of LPI */
8576 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8577 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8578 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8579 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8580 
8581 		tw32_f(TG3_CPMU_EEE_CTRL,
8582 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8583 
8584 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8585 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8586 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8587 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8588 
8589 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8590 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8591 
8592 		if (tg3_flag(tp, ENABLE_APE))
8593 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8594 
8595 		tw32_f(TG3_CPMU_EEE_MODE, val);
8596 
8597 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8598 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8599 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8600 
8601 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8602 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8603 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8604 	}
8605 
8606 	if (reset_phy)
8607 		tg3_phy_reset(tp);
8608 
8609 	err = tg3_chip_reset(tp);
8610 	if (err)
8611 		return err;
8612 
8613 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8614 
8615 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8616 		val = tr32(TG3_CPMU_CTRL);
8617 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8618 		tw32(TG3_CPMU_CTRL, val);
8619 
8620 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8621 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8622 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8623 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8624 
8625 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8626 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8627 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8628 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8629 
8630 		val = tr32(TG3_CPMU_HST_ACC);
8631 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8632 		val |= CPMU_HST_ACC_MACCLK_6_25;
8633 		tw32(TG3_CPMU_HST_ACC, val);
8634 	}
8635 
8636 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8637 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8638 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8639 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8640 		tw32(PCIE_PWR_MGMT_THRESH, val);
8641 
8642 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8643 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8644 
8645 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8646 
8647 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8648 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8649 	}
8650 
8651 	if (tg3_flag(tp, L1PLLPD_EN)) {
8652 		u32 grc_mode = tr32(GRC_MODE);
8653 
8654 		/* Access the lower 1K of PL PCIE block registers. */
8655 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8656 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8657 
8658 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8659 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8660 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8661 
8662 		tw32(GRC_MODE, grc_mode);
8663 	}
8664 
8665 	if (tg3_flag(tp, 57765_CLASS)) {
8666 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8667 			u32 grc_mode = tr32(GRC_MODE);
8668 
8669 			/* Access the lower 1K of PL PCIE block registers. */
8670 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8671 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8672 
8673 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8674 				   TG3_PCIE_PL_LO_PHYCTL5);
8675 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8676 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8677 
8678 			tw32(GRC_MODE, grc_mode);
8679 		}
8680 
8681 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8682 			u32 grc_mode = tr32(GRC_MODE);
8683 
8684 			/* Access the lower 1K of DL PCIE block registers. */
8685 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8686 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8687 
8688 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8689 				   TG3_PCIE_DL_LO_FTSMAX);
8690 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8691 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8692 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8693 
8694 			tw32(GRC_MODE, grc_mode);
8695 		}
8696 
8697 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8698 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8699 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8700 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8701 	}
8702 
8703 	/* This works around an issue with Athlon chipsets on
8704 	 * B3 tigon3 silicon.  This bit has no effect on any
8705 	 * other revision.  But do not set this on PCI Express
8706 	 * chips and don't even touch the clocks if the CPMU is present.
8707 	 */
8708 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8709 		if (!tg3_flag(tp, PCI_EXPRESS))
8710 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8711 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8712 	}
8713 
8714 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8715 	    tg3_flag(tp, PCIX_MODE)) {
8716 		val = tr32(TG3PCI_PCISTATE);
8717 		val |= PCISTATE_RETRY_SAME_DMA;
8718 		tw32(TG3PCI_PCISTATE, val);
8719 	}
8720 
8721 	if (tg3_flag(tp, ENABLE_APE)) {
8722 		/* Allow reads and writes to the
8723 		 * APE register and memory space.
8724 		 */
8725 		val = tr32(TG3PCI_PCISTATE);
8726 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8727 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8728 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8729 		tw32(TG3PCI_PCISTATE, val);
8730 	}
8731 
8732 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8733 		/* Enable some hw fixes.  */
8734 		val = tr32(TG3PCI_MSI_DATA);
8735 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8736 		tw32(TG3PCI_MSI_DATA, val);
8737 	}
8738 
8739 	/* Descriptor ring init may make accesses to the
8740 	 * NIC SRAM area to set up the TX descriptors, so we
8741 	 * can only do this after the hardware has been
8742 	 * successfully reset.
8743 	 */
8744 	err = tg3_init_rings(tp);
8745 	if (err)
8746 		return err;
8747 
8748 	if (tg3_flag(tp, 57765_PLUS)) {
8749 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8750 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8751 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8752 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8753 		if (!tg3_flag(tp, 57765_CLASS) &&
8754 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8755 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8756 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8757 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8758 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8759 		/* This value is determined during the probe-time DMA
8760 		 * engine test, tg3_test_dma.
8761 		 */
8762 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8763 	}
8764 
8765 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8766 			  GRC_MODE_4X_NIC_SEND_RINGS |
8767 			  GRC_MODE_NO_TX_PHDR_CSUM |
8768 			  GRC_MODE_NO_RX_PHDR_CSUM);
8769 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8770 
8771 	/* Pseudo-header checksum is done by hardware logic and not
8772 	 * the offload processors, so make the chip do the pseudo-
8773 	 * header checksums on receive.  For transmit it is more
8774 	 * convenient to do the pseudo-header checksum in software
8775 	 * as Linux does that on transmit for us in all cases.
8776 	 */
8777 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8778 
8779 	tw32(GRC_MODE,
8780 	     tp->grc_mode |
8781 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8782 
8783 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
8784 	val = tr32(GRC_MISC_CFG);
8785 	val &= ~0xff;
8786 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8787 	tw32(GRC_MISC_CFG, val);
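	/* The value 65 programmed above presumably means divide-by-66,
	 * i.e. 66 MHz / 66 = 1 MHz, giving the timers a 1us tick.
	 */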
8788 
8789 	/* Initialize MBUF/DESC pool. */
8790 	if (tg3_flag(tp, 5750_PLUS)) {
8791 		/* Do nothing.  */
8792 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8793 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8794 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8795 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8796 		else
8797 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8798 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8799 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8800 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8801 		int fw_len;
8802 
8803 		fw_len = tp->fw_len;
8804 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8805 		tw32(BUFMGR_MB_POOL_ADDR,
8806 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8807 		tw32(BUFMGR_MB_POOL_SIZE,
8808 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8809 	}
8810 
8811 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8812 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8813 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8814 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8815 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8816 		tw32(BUFMGR_MB_HIGH_WATER,
8817 		     tp->bufmgr_config.mbuf_high_water);
8818 	} else {
8819 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8820 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8821 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8822 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8823 		tw32(BUFMGR_MB_HIGH_WATER,
8824 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8825 	}
8826 	tw32(BUFMGR_DMA_LOW_WATER,
8827 	     tp->bufmgr_config.dma_low_water);
8828 	tw32(BUFMGR_DMA_HIGH_WATER,
8829 	     tp->bufmgr_config.dma_high_water);
8830 
8831 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8832 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8833 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8834 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8835 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8836 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8837 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8838 	tw32(BUFMGR_MODE, val);
8839 	for (i = 0; i < 2000; i++) {
8840 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8841 			break;
8842 		udelay(10);
8843 	}
8844 	if (i >= 2000) {
8845 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8846 		return -ENODEV;
8847 	}
8848 
8849 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8850 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8851 
8852 	tg3_setup_rxbd_thresholds(tp);
8853 
8854 	/* Initialize TG3_BDINFOs at:
8855 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8856 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8857 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8858 	 *
8859 	 * like so:
8860 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8861 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8862 	 *                              ring attribute flags
8863 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8864 	 *
8865 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8866 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8867 	 *
8868 	 * The size of each ring is fixed in the firmware, but the location is
8869 	 * configurable.
8870 	 */
8871 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8872 	     ((u64) tpr->rx_std_mapping >> 32));
8873 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8874 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8875 	if (!tg3_flag(tp, 5717_PLUS))
8876 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8877 		     NIC_SRAM_RX_BUFFER_DESC);
8878 
8879 	/* Disable the mini ring */
8880 	if (!tg3_flag(tp, 5705_PLUS))
8881 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8882 		     BDINFO_FLAGS_DISABLED);
8883 
8884 	/* Program the jumbo buffer descriptor ring control
8885 	 * blocks on those devices that have them.
8886 	 */
8887 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8888 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8889 
8890 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8891 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8892 			     ((u64) tpr->rx_jmb_mapping >> 32));
8893 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8894 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8895 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8896 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8897 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8898 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8899 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8900 			    tg3_flag(tp, 57765_CLASS))
8901 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8902 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8903 		} else {
8904 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8905 			     BDINFO_FLAGS_DISABLED);
8906 		}
8907 
8908 		if (tg3_flag(tp, 57765_PLUS)) {
8909 			val = TG3_RX_STD_RING_SIZE(tp);
8910 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8911 			val |= (TG3_RX_STD_DMA_SZ << 2);
8912 		} else
8913 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8914 	} else
8915 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8916 
8917 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8918 
8919 	tpr->rx_std_prod_idx = tp->rx_pending;
8920 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8921 
8922 	tpr->rx_jmb_prod_idx =
8923 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8924 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8925 
8926 	tg3_rings_reset(tp);
8927 
8928 	/* Initialize MAC address and backoff seed. */
8929 	__tg3_set_mac_addr(tp, 0);
8930 
8931 	/* MTU + Ethernet header + FCS + optional VLAN tag */
8932 	tw32(MAC_RX_MTU_SIZE,
8933 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
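	/* e.g. with the default 1500-byte MTU this programs
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 * bytes, the classic maximum VLAN-tagged frame size.
	 */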
8934 
8935 	/* The slot time is changed by tg3_setup_phy if we
8936 	 * run at gigabit with half duplex.
8937 	 */
8938 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8939 	      (6 << TX_LENGTHS_IPG_SHIFT) |
8940 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8941 
8942 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8943 		val |= tr32(MAC_TX_LENGTHS) &
8944 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8945 			TX_LENGTHS_CNT_DWN_VAL_MSK);
8946 
8947 	tw32(MAC_TX_LENGTHS, val);
8948 
8949 	/* Receive rules. */
8950 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8951 	tw32(RCVLPC_CONFIG, 0x0181);
8952 
8953 	/* Calculate the RDMAC_MODE setting early; we need it to determine
8954 	 * the RCVLPC_STATE_ENABLE mask.
8955 	 */
8956 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8957 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8958 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8959 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8960 		      RDMAC_MODE_LNGREAD_ENAB);
8961 
8962 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8963 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8964 
8965 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8966 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8967 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8968 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8969 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8970 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8971 
8972 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8973 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8974 		if (tg3_flag(tp, TSO_CAPABLE) &&
8975 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8976 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8977 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8978 			   !tg3_flag(tp, IS_5788)) {
8979 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8980 		}
8981 	}
8982 
8983 	if (tg3_flag(tp, PCI_EXPRESS))
8984 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8985 
8986 	if (tg3_flag(tp, HW_TSO_1) ||
8987 	    tg3_flag(tp, HW_TSO_2) ||
8988 	    tg3_flag(tp, HW_TSO_3))
8989 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8990 
8991 	if (tg3_flag(tp, 57765_PLUS) ||
8992 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8993 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8994 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8995 
8996 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8997 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8998 
8999 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9000 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9001 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9002 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9003 	    tg3_flag(tp, 57765_PLUS)) {
9004 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
9005 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9006 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9007 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9008 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9009 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9010 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9011 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9012 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9013 		}
9014 		tw32(TG3_RDMA_RSRVCTRL_REG,
9015 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9016 	}
9017 
9018 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9019 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9020 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9021 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9022 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9023 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9024 	}
9025 
9026 	/* Receive/send statistics. */
9027 	if (tg3_flag(tp, 5750_PLUS)) {
9028 		val = tr32(RCVLPC_STATS_ENABLE);
9029 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
9030 		tw32(RCVLPC_STATS_ENABLE, val);
9031 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9032 		   tg3_flag(tp, TSO_CAPABLE)) {
9033 		val = tr32(RCVLPC_STATS_ENABLE);
9034 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9035 		tw32(RCVLPC_STATS_ENABLE, val);
9036 	} else {
9037 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9038 	}
9039 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9040 	tw32(SNDDATAI_STATSENAB, 0xffffff);
9041 	tw32(SNDDATAI_STATSCTRL,
9042 	     (SNDDATAI_SCTRL_ENABLE |
9043 	      SNDDATAI_SCTRL_FASTUPD));
9044 
9045 	/* Set up the host coalescing engine. */
9046 	tw32(HOSTCC_MODE, 0);
9047 	for (i = 0; i < 2000; i++) {
9048 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9049 			break;
9050 		udelay(10);
9051 	}
9052 
9053 	__tg3_set_coalesce(tp, &tp->coal);
9054 
9055 	if (!tg3_flag(tp, 5705_PLUS)) {
9056 		/* Status/statistics block address.  See tg3_timer,
9057 		 * the tg3_periodic_fetch_stats call there, and
9058 		 * tg3_get_stats to see how this works for 5705/5750 chips.
9059 		 */
9060 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9061 		     ((u64) tp->stats_mapping >> 32));
9062 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9063 		     ((u64) tp->stats_mapping & 0xffffffff));
9064 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9065 
9066 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9067 
9068 		/* Clear statistics and status block memory areas */
9069 		for (i = NIC_SRAM_STATS_BLK;
9070 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9071 		     i += sizeof(u32)) {
9072 			tg3_write_mem(tp, i, 0);
9073 			udelay(40);
9074 		}
9075 	}
9076 
9077 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9078 
9079 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9080 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9081 	if (!tg3_flag(tp, 5705_PLUS))
9082 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9083 
9084 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9085 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9086 		/* reset to prevent intermittently losing the 1st rx packet */
9087 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9088 		udelay(10);
9089 	}
9090 
9091 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9092 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9093 			MAC_MODE_FHDE_ENABLE;
9094 	if (tg3_flag(tp, ENABLE_APE))
9095 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9096 	if (!tg3_flag(tp, 5705_PLUS) &&
9097 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9098 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9099 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9100 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9101 	udelay(40);
9102 
9103 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9104 	 * If TG3_FLAG_IS_NIC is zero, we should read the
9105 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9106 	 * whether used as inputs or outputs, are set by boot code after
9107 	 * reset.
9108 	 */
9109 	if (!tg3_flag(tp, IS_NIC)) {
9110 		u32 gpio_mask;
9111 
9112 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9113 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9114 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9115 
9116 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9117 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9118 				     GRC_LCLCTRL_GPIO_OUTPUT3;
9119 
9120 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9121 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9122 
9123 		tp->grc_local_ctrl &= ~gpio_mask;
9124 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9125 
9126 		/* GPIO1 must be driven high for eeprom write protect */
9127 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9128 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9129 					       GRC_LCLCTRL_GPIO_OUTPUT1);
9130 	}
9131 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9132 	udelay(100);
9133 
9134 	if (tg3_flag(tp, USING_MSIX)) {
9135 		val = tr32(MSGINT_MODE);
9136 		val |= MSGINT_MODE_ENABLE;
9137 		if (tp->irq_cnt > 1)
9138 			val |= MSGINT_MODE_MULTIVEC_EN;
9139 		if (!tg3_flag(tp, 1SHOT_MSI))
9140 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9141 		tw32(MSGINT_MODE, val);
9142 	}
9143 
9144 	if (!tg3_flag(tp, 5705_PLUS)) {
9145 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9146 		udelay(40);
9147 	}
9148 
9149 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9150 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9151 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9152 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9153 	       WDMAC_MODE_LNGREAD_ENAB);
9154 
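	/* On 5705 A1/A2 with TSO firmware, RX_ACCEL is left disabled;
	 * the remaining non-A0 revisions enable it as long as the bus
	 * is not running at high speed and the chip is not a 5788.
	 */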
9155 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9156 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9157 		if (tg3_flag(tp, TSO_CAPABLE) &&
9158 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9159 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9160 			/* nothing */
9161 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9162 			   !tg3_flag(tp, IS_5788)) {
9163 			val |= WDMAC_MODE_RX_ACCEL;
9164 		}
9165 	}
9166 
9167 	/* Enable host coalescing bug fix */
9168 	if (tg3_flag(tp, 5755_PLUS))
9169 		val |= WDMAC_MODE_STATUS_TAG_FIX;
9170 
9171 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9172 		val |= WDMAC_MODE_BURST_ALL_DATA;
9173 
9174 	tw32_f(WDMAC_MODE, val);
9175 	udelay(40);
9176 
9177 	if (tg3_flag(tp, PCIX_MODE)) {
9178 		u16 pcix_cmd;
9179 
9180 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9181 				     &pcix_cmd);
9182 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9183 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9184 			pcix_cmd |= PCI_X_CMD_READ_2K;
9185 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9186 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9187 			pcix_cmd |= PCI_X_CMD_READ_2K;
9188 		}
9189 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9190 				      pcix_cmd);
9191 	}
9192 
9193 	tw32_f(RDMAC_MODE, rdmac_mode);
9194 	udelay(40);
9195 
9196 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9197 	if (!tg3_flag(tp, 5705_PLUS))
9198 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9199 
9200 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9201 		tw32(SNDDATAC_MODE,
9202 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9203 	else
9204 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9205 
9206 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9207 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9208 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9209 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9210 		val |= RCVDBDI_MODE_LRG_RING_SZ;
9211 	tw32(RCVDBDI_MODE, val);
9212 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9213 	if (tg3_flag(tp, HW_TSO_1) ||
9214 	    tg3_flag(tp, HW_TSO_2) ||
9215 	    tg3_flag(tp, HW_TSO_3))
9216 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9217 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9218 	if (tg3_flag(tp, ENABLE_TSS))
9219 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9220 	tw32(SNDBDI_MODE, val);
9221 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9222 
9223 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9224 		err = tg3_load_5701_a0_firmware_fix(tp);
9225 		if (err)
9226 			return err;
9227 	}
9228 
9229 	if (tg3_flag(tp, TSO_CAPABLE)) {
9230 		err = tg3_load_tso_firmware(tp);
9231 		if (err)
9232 			return err;
9233 	}
9234 
9235 	tp->tx_mode = TX_MODE_ENABLE;
9236 
9237 	if (tg3_flag(tp, 5755_PLUS) ||
9238 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9239 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9240 
9241 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9242 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9243 		tp->tx_mode &= ~val;
9244 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9245 	}
9246 
9247 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9248 	udelay(100);
9249 
9250 	if (tg3_flag(tp, ENABLE_RSS)) {
9251 		tg3_rss_write_indir_tbl(tp);
9252 
9253 		/* Set up the 40-byte "secret" RSS hash key. */
9254 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9255 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9256 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9257 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9258 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9259 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9260 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9261 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9262 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9263 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9264 	}
9265 
9266 	tp->rx_mode = RX_MODE_ENABLE;
9267 	if (tg3_flag(tp, 5755_PLUS))
9268 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9269 
9270 	if (tg3_flag(tp, ENABLE_RSS))
9271 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9272 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9273 			       RX_MODE_RSS_IPV6_HASH_EN |
9274 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9275 			       RX_MODE_RSS_IPV4_HASH_EN |
9276 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9277 
9278 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9279 	udelay(10);
9280 
9281 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9282 
9283 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9284 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9285 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9286 		udelay(10);
9287 	}
9288 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9289 	udelay(10);
9290 
9291 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9292 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9293 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9294 			/* Set drive transmission level to 1.2V  */
9295 			/* only if the signal pre-emphasis bit is not set  */
9296 			val = tr32(MAC_SERDES_CFG);
9297 			val &= 0xfffff000;
9298 			val |= 0x880;
9299 			tw32(MAC_SERDES_CFG, val);
9300 		}
9301 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9302 			tw32(MAC_SERDES_CFG, 0x616000);
9303 	}
9304 
9305 	/* Prevent chip from dropping frames when flow control
9306 	 * is enabled.
9307 	 */
9308 	if (tg3_flag(tp, 57765_CLASS))
9309 		val = 1;
9310 	else
9311 		val = 2;
9312 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9313 
9314 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9315 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9316 		/* Use hardware link auto-negotiation */
9317 		tg3_flag_set(tp, HW_AUTONEG);
9318 	}
9319 
9320 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9321 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9322 		u32 tmp;
9323 
9324 		tmp = tr32(SERDES_RX_CTRL);
9325 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9326 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9327 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9328 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9329 	}
9330 
9331 	if (!tg3_flag(tp, USE_PHYLIB)) {
9332 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9333 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9334 
9335 		err = tg3_setup_phy(tp, 0);
9336 		if (err)
9337 			return err;
9338 
9339 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9340 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9341 			u32 tmp;
9342 
9343 			/* Clear CRC stats. */
9344 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9345 				tg3_writephy(tp, MII_TG3_TEST1,
9346 					     tmp | MII_TG3_TEST1_CRC_EN);
9347 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9348 			}
9349 		}
9350 	}
9351 
9352 	__tg3_set_rx_mode(tp->dev);
9353 
9354 	/* Initialize receive rules. */
9355 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9356 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9357 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9358 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9359 
9360 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9361 		limit = 8;
9362 	else
9363 		limit = 16;
9364 	if (tg3_flag(tp, ENABLE_ASF))
9365 		limit -= 4;
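	/* Each case deliberately falls through, clearing its own rule
	 * and value registers and then those of every lower-numbered
	 * rule.  Rules 3 and 2 are intentionally skipped below, and
	 * when ASF is enabled the top four rules are presumably left
	 * untouched for the firmware's use.
	 */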
9366 	switch (limit) {
9367 	case 16:
9368 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9369 	case 15:
9370 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9371 	case 14:
9372 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9373 	case 13:
9374 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9375 	case 12:
9376 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9377 	case 11:
9378 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9379 	case 10:
9380 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9381 	case 9:
9382 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9383 	case 8:
9384 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9385 	case 7:
9386 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9387 	case 6:
9388 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9389 	case 5:
9390 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9391 	case 4:
9392 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9393 	case 3:
9394 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9395 	case 2:
9396 	case 1:
9397 
9398 	default:
9399 		break;
9400 	}
9401 
9402 	if (tg3_flag(tp, ENABLE_APE))
9403 		/* Write our heartbeat update interval to APE. */
9404 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9405 				APE_HOST_HEARTBEAT_INT_DISABLE);
9406 
9407 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9408 
9409 	return 0;
9410 }
9411 
9412 /* Called at device open time to get the chip ready for
9413  * packet processing.  Invoked with tp->lock held.
9414  */
9415 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9416 {
9417 	tg3_switch_clocks(tp);
9418 
9419 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9420 
9421 	return tg3_reset_hw(tp, reset_phy);
9422 }
9423 
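/* Fold a 32-bit hardware counter into a 64-bit software counter.
 * If the low-word addition wrapped, the new low value is smaller
 * than the value just added, so carry one into the high word.
 */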
9424 #define TG3_STAT_ADD32(PSTAT, REG) \
9425 do {	u32 __val = tr32(REG); \
9426 	(PSTAT)->low += __val; \
9427 	if ((PSTAT)->low < __val) \
9428 		(PSTAT)->high += 1; \
9429 } while (0)
9430 
9431 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9432 {
9433 	struct tg3_hw_stats *sp = tp->hw_stats;
9434 
9435 	if (!netif_carrier_ok(tp->dev))
9436 		return;
9437 
9438 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9439 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9440 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9441 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9442 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9443 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9444 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9445 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9446 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9447 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9448 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9449 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9450 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9451 
9452 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9453 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9454 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9455 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9456 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9457 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9458 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9459 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9460 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9461 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9462 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9463 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9464 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9465 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9466 
9467 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9468 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9469 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9470 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9471 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9472 	} else {
9473 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9474 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9475 		if (val) {
9476 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9477 			sp->rx_discards.low += val;
9478 			if (sp->rx_discards.low < val)
9479 				sp->rx_discards.high += 1;
9480 		}
9481 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9482 	}
9483 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9484 }
9485 
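/* Some chips can apparently drop an MSI under load.  If a vector
 * still has work pending but its consumer indices have not moved
 * since the previous timer tick, allow one grace tick and then call
 * the MSI handler directly to recover the lost interrupt.
 */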
9486 static void tg3_chk_missed_msi(struct tg3 *tp)
9487 {
9488 	u32 i;
9489 
9490 	for (i = 0; i < tp->irq_cnt; i++) {
9491 		struct tg3_napi *tnapi = &tp->napi[i];
9492 
9493 		if (tg3_has_work(tnapi)) {
9494 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9495 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9496 				if (tnapi->chk_msi_cnt < 1) {
9497 					tnapi->chk_msi_cnt++;
9498 					return;
9499 				}
9500 				tg3_msi(0, tnapi);
9501 			}
9502 		}
9503 		tnapi->chk_msi_cnt = 0;
9504 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9505 		tnapi->last_tx_cons = tnapi->tx_cons;
9506 	}
9507 }
9508 
9509 static void tg3_timer(unsigned long __opaque)
9510 {
9511 	struct tg3 *tp = (struct tg3 *) __opaque;
9512 
9513 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9514 		goto restart_timer;
9515 
9516 	spin_lock(&tp->lock);
9517 
9518 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9519 	    tg3_flag(tp, 57765_CLASS))
9520 		tg3_chk_missed_msi(tp);
9521 
9522 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9523 		/* All of this garbage is because, when using non-tagged
9524 		 * IRQ status, the mailbox/status_block protocol the chip
9525 		 * uses with the CPU is race prone.
9526 		 */
9527 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9528 			tw32(GRC_LOCAL_CTRL,
9529 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9530 		} else {
9531 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9532 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9533 		}
9534 
9535 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9536 			spin_unlock(&tp->lock);
9537 			tg3_reset_task_schedule(tp);
9538 			goto restart_timer;
9539 		}
9540 	}
9541 
9542 	/* This part only runs once per second. */
9543 	if (!--tp->timer_counter) {
9544 		if (tg3_flag(tp, 5705_PLUS))
9545 			tg3_periodic_fetch_stats(tp);
9546 
9547 		if (tp->setlpicnt && !--tp->setlpicnt)
9548 			tg3_phy_eee_enable(tp);
9549 
9550 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9551 			u32 mac_stat;
9552 			int phy_event;
9553 
9554 			mac_stat = tr32(MAC_STATUS);
9555 
9556 			phy_event = 0;
9557 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9558 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9559 					phy_event = 1;
9560 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9561 				phy_event = 1;
9562 
9563 			if (phy_event)
9564 				tg3_setup_phy(tp, 0);
9565 		} else if (tg3_flag(tp, POLL_SERDES)) {
9566 			u32 mac_stat = tr32(MAC_STATUS);
9567 			int need_setup = 0;
9568 
9569 			if (netif_carrier_ok(tp->dev) &&
9570 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9571 				need_setup = 1;
9572 			}
9573 			if (!netif_carrier_ok(tp->dev) &&
9574 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9575 					 MAC_STATUS_SIGNAL_DET))) {
9576 				need_setup = 1;
9577 			}
9578 			if (need_setup) {
9579 				if (!tp->serdes_counter) {
9580 					tw32_f(MAC_MODE,
9581 					     (tp->mac_mode &
9582 					      ~MAC_MODE_PORT_MODE_MASK));
9583 					udelay(40);
9584 					tw32_f(MAC_MODE, tp->mac_mode);
9585 					udelay(40);
9586 				}
9587 				tg3_setup_phy(tp, 0);
9588 			}
9589 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9590 			   tg3_flag(tp, 5780_CLASS)) {
9591 			tg3_serdes_parallel_detect(tp);
9592 		}
9593 
9594 		tp->timer_counter = tp->timer_multiplier;
9595 	}
9596 
9597 	/* Heartbeat is only sent once every 2 seconds.
9598 	 *
9599 	 * The heartbeat is to tell the ASF firmware that the host
9600 	 * driver is still alive.  In the event that the OS crashes,
9601 	 * ASF needs to reset the hardware to free up the FIFO space
9602 	 * that may be filled with rx packets destined for the host.
9603 	 * If the FIFO is full, ASF will no longer function properly.
9604 	 *
9605 	 * Unintended resets have been reported on real-time kernels,
9606 	 * where the timer doesn't always run on time.  Netpoll has the
9607 	 * same problem.
9608 	 *
9609 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9610 	 * to check the ring condition when the heartbeat is expiring
9611 	 * before doing the reset.  This will prevent most unintended
9612 	 * resets.
9613 	 */
9614 	if (!--tp->asf_counter) {
9615 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9616 			tg3_wait_for_event_ack(tp);
9617 
9618 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9619 				      FWCMD_NICDRV_ALIVE3);
9620 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9621 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9622 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9623 
9624 			tg3_generate_fw_event(tp);
9625 		}
9626 		tp->asf_counter = tp->asf_multiplier;
9627 	}
9628 
9629 	spin_unlock(&tp->lock);
9630 
9631 restart_timer:
9632 	tp->timer.expires = jiffies + tp->timer_offset;
9633 	add_timer(&tp->timer);
9634 }
9635 
9636 static void __devinit tg3_timer_init(struct tg3 *tp)
9637 {
9638 	if (tg3_flag(tp, TAGGED_STATUS) &&
9639 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9640 	    !tg3_flag(tp, 57765_CLASS))
9641 		tp->timer_offset = HZ;
9642 	else
9643 		tp->timer_offset = HZ / 10;
9644 
9645 	BUG_ON(tp->timer_offset > HZ);
9646 
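	/* timer_offset is the tick period in jiffies, so HZ divided by
	 * it gives the ticks per second.  timer_multiplier therefore
	 * makes the once-per-second work in tg3_timer() run on schedule,
	 * and asf_multiplier stretches the ASF heartbeat out to
	 * TG3_FW_UPDATE_FREQ_SEC seconds.
	 */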
9647 	tp->timer_multiplier = (HZ / tp->timer_offset);
9648 	tp->asf_multiplier = (HZ / tp->timer_offset) *
9649 			     TG3_FW_UPDATE_FREQ_SEC;
9650 
9651 	init_timer(&tp->timer);
9652 	tp->timer.data = (unsigned long) tp;
9653 	tp->timer.function = tg3_timer;
9654 }
9655 
9656 static void tg3_timer_start(struct tg3 *tp)
9657 {
9658 	tp->asf_counter   = tp->asf_multiplier;
9659 	tp->timer_counter = tp->timer_multiplier;
9660 
9661 	tp->timer.expires = jiffies + tp->timer_offset;
9662 	add_timer(&tp->timer);
9663 }
9664 
9665 static void tg3_timer_stop(struct tg3 *tp)
9666 {
9667 	del_timer_sync(&tp->timer);
9668 }
9669 
9670 /* Restart hardware after configuration changes, self-test, etc.
9671  * Invoked with tp->lock held.
9672  */
9673 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9674 	__releases(tp->lock)
9675 	__acquires(tp->lock)
9676 {
9677 	int err;
9678 
9679 	err = tg3_init_hw(tp, reset_phy);
9680 	if (err) {
9681 		netdev_err(tp->dev,
9682 			   "Failed to re-initialize device, aborting\n");
9683 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9684 		tg3_full_unlock(tp);
9685 		tg3_timer_stop(tp);
9686 		tp->irq_sync = 0;
9687 		tg3_napi_enable(tp);
9688 		dev_close(tp->dev);
9689 		tg3_full_lock(tp, 0);
9690 	}
9691 	return err;
9692 }
9693 
9694 static void tg3_reset_task(struct work_struct *work)
9695 {
9696 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
9697 	int err;
9698 
9699 	tg3_full_lock(tp, 0);
9700 
9701 	if (!netif_running(tp->dev)) {
9702 		tg3_flag_clear(tp, RESET_TASK_PENDING);
9703 		tg3_full_unlock(tp);
9704 		return;
9705 	}
9706 
9707 	tg3_full_unlock(tp);
9708 
9709 	tg3_phy_stop(tp);
9710 
9711 	tg3_netif_stop(tp);
9712 
9713 	tg3_full_lock(tp, 1);
9714 
9715 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9716 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
9717 		tp->write32_rx_mbox = tg3_write_flush_reg32;
9718 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
9719 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9720 	}
9721 
9722 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9723 	err = tg3_init_hw(tp, 1);
9724 	if (err)
9725 		goto out;
9726 
9727 	tg3_netif_start(tp);
9728 
9729 out:
9730 	tg3_full_unlock(tp);
9731 
9732 	if (!err)
9733 		tg3_phy_start(tp);
9734 
9735 	tg3_flag_clear(tp, RESET_TASK_PENDING);
9736 }
9737 
9738 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9739 {
9740 	irq_handler_t fn;
9741 	unsigned long flags;
9742 	char *name;
9743 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9744 
9745 	if (tp->irq_cnt == 1)
9746 		name = tp->dev->name;
9747 	else {
9748 		name = &tnapi->irq_lbl[0];
9749 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9750 		name[IFNAMSIZ-1] = 0;
9751 	}
9752 
9753 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9754 		fn = tg3_msi;
9755 		if (tg3_flag(tp, 1SHOT_MSI))
9756 			fn = tg3_msi_1shot;
9757 		flags = 0;
9758 	} else {
9759 		fn = tg3_interrupt;
9760 		if (tg3_flag(tp, TAGGED_STATUS))
9761 			fn = tg3_interrupt_tagged;
9762 		flags = IRQF_SHARED;
9763 	}
9764 
9765 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9766 }
9767 
9768 static int tg3_test_interrupt(struct tg3 *tp)
9769 {
9770 	struct tg3_napi *tnapi = &tp->napi[0];
9771 	struct net_device *dev = tp->dev;
9772 	int err, i, intr_ok = 0;
9773 	u32 val;
9774 
9775 	if (!netif_running(dev))
9776 		return -ENODEV;
9777 
9778 	tg3_disable_ints(tp);
9779 
9780 	free_irq(tnapi->irq_vec, tnapi);
9781 
9782 	/*
9783 	 * Turn off MSI one shot mode.  Otherwise this test has no
9784 	 * observable way to know whether the interrupt was delivered.
9785 	 */
9786 	if (tg3_flag(tp, 57765_PLUS)) {
9787 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9788 		tw32(MSGINT_MODE, val);
9789 	}
9790 
9791 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9792 			  IRQF_SHARED, dev->name, tnapi);
9793 	if (err)
9794 		return err;
9795 
9796 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9797 	tg3_enable_ints(tp);
9798 
9799 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9800 	       tnapi->coal_now);
9801 
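	/* Poll for up to ~50ms for evidence that the test interrupt
	 * fired: either the interrupt mailbox was written or the ISR
	 * masked PCI interrupts via MISC_HOST_CTRL.
	 */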
9802 	for (i = 0; i < 5; i++) {
9803 		u32 int_mbox, misc_host_ctrl;
9804 
9805 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9806 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9807 
9808 		if ((int_mbox != 0) ||
9809 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9810 			intr_ok = 1;
9811 			break;
9812 		}
9813 
9814 		if (tg3_flag(tp, 57765_PLUS) &&
9815 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9816 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9817 
9818 		msleep(10);
9819 	}
9820 
9821 	tg3_disable_ints(tp);
9822 
9823 	free_irq(tnapi->irq_vec, tnapi);
9824 
9825 	err = tg3_request_irq(tp, 0);
9826 
9827 	if (err)
9828 		return err;
9829 
9830 	if (intr_ok) {
9831 		/* Reenable MSI one shot mode. */
9832 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9833 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9834 			tw32(MSGINT_MODE, val);
9835 		}
9836 		return 0;
9837 	}
9838 
9839 	return -EIO;
9840 }
9841 
9842 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9843  * is successfully restored.
9844  */
9845 static int tg3_test_msi(struct tg3 *tp)
9846 {
9847 	int err;
9848 	u16 pci_cmd;
9849 
9850 	if (!tg3_flag(tp, USING_MSI))
9851 		return 0;
9852 
9853 	/* Turn off SERR reporting in case MSI terminates with Master
9854 	 * Abort.
9855 	 */
9856 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9857 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9858 			      pci_cmd & ~PCI_COMMAND_SERR);
9859 
9860 	err = tg3_test_interrupt(tp);
9861 
9862 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9863 
9864 	if (!err)
9865 		return 0;
9866 
9867 	/* other failures */
9868 	if (err != -EIO)
9869 		return err;
9870 
9871 	/* MSI test failed, go back to INTx mode */
9872 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9873 		    "to INTx mode. Please report this failure to the PCI "
9874 		    "maintainer and include system chipset information\n");
9875 
9876 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9877 
9878 	pci_disable_msi(tp->pdev);
9879 
9880 	tg3_flag_clear(tp, USING_MSI);
9881 	tp->napi[0].irq_vec = tp->pdev->irq;
9882 
9883 	err = tg3_request_irq(tp, 0);
9884 	if (err)
9885 		return err;
9886 
9887 	/* Need to reset the chip because the MSI cycle may have terminated
9888 	 * with Master Abort.
9889 	 */
9890 	tg3_full_lock(tp, 1);
9891 
9892 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9893 	err = tg3_init_hw(tp, 1);
9894 
9895 	tg3_full_unlock(tp);
9896 
9897 	if (err)
9898 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9899 
9900 	return err;
9901 }
9902 
9903 static int tg3_request_firmware(struct tg3 *tp)
9904 {
9905 	const __be32 *fw_data;
9906 
9907 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9908 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9909 			   tp->fw_needed);
9910 		return -ENOENT;
9911 	}
9912 
9913 	fw_data = (void *)tp->fw->data;
9914 
9915 	/* Firmware blob starts with version numbers, followed by
9916 	 * start address and _full_ length including BSS sections
9917 	 * (which must be longer than the actual data, of course).
9918 	 */
9919 
9920 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9921 	if (tp->fw_len < (tp->fw->size - 12)) {
9922 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9923 			   tp->fw_len, tp->fw_needed);
9924 		release_firmware(tp->fw);
9925 		tp->fw = NULL;
9926 		return -EINVAL;
9927 	}
9928 
9929 	/* We no longer need firmware; we have it. */
9930 	tp->fw_needed = NULL;
9931 	return 0;
9932 }
9933 
9934 static bool tg3_enable_msix(struct tg3 *tp)
9935 {
9936 	int i, rc;
9937 	struct msix_entry msix_ent[tp->irq_max];
9938 
9939 	tp->irq_cnt = num_online_cpus();
9940 	if (tp->irq_cnt > 1) {
9941 		/* We want as many rx rings enabled as there are cpus.
9942 		 * In multiqueue MSI-X mode, the first MSI-X vector
9943 		 * only deals with link interrupts, etc, so we add
9944 		 * one to the number of vectors we are requesting.
9945 		 */
9946 		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9947 	}
9948 
9949 	for (i = 0; i < tp->irq_max; i++) {
9950 		msix_ent[i].entry  = i;
9951 		msix_ent[i].vector = 0;
9952 	}
9953 
9954 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
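	/* A negative return means MSI-X is unavailable; a positive one
	 * is the number of vectors actually available, so retry the
	 * allocation with that count.
	 */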
9955 	if (rc < 0) {
9956 		return false;
9957 	} else if (rc != 0) {
9958 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9959 			return false;
9960 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9961 			      tp->irq_cnt, rc);
9962 		tp->irq_cnt = rc;
9963 	}
9964 
9965 	for (i = 0; i < tp->irq_max; i++)
9966 		tp->napi[i].irq_vec = msix_ent[i].vector;
9967 
9968 	netif_set_real_num_tx_queues(tp->dev, 1);
9969 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9970 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9971 		pci_disable_msix(tp->pdev);
9972 		return false;
9973 	}
9974 
9975 	if (tp->irq_cnt > 1) {
9976 		tg3_flag_set(tp, ENABLE_RSS);
9977 
9978 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9979 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9980 			tg3_flag_set(tp, ENABLE_TSS);
9981 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9982 		}
9983 	}
9984 
9985 	return true;
9986 }
9987 
9988 static void tg3_ints_init(struct tg3 *tp)
9989 {
9990 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9991 	    !tg3_flag(tp, TAGGED_STATUS)) {
9992 		/* All MSI supporting chips should support tagged
9993 		 * status.  Warn and fall back to INTx if not.
9994 		 */
9995 		netdev_warn(tp->dev,
9996 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9997 		goto defcfg;
9998 	}
9999 
10000 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10001 		tg3_flag_set(tp, USING_MSIX);
10002 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10003 		tg3_flag_set(tp, USING_MSI);
10004 
10005 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10006 		u32 msi_mode = tr32(MSGINT_MODE);
10007 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10008 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10009 		if (!tg3_flag(tp, 1SHOT_MSI))
10010 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10011 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10012 	}
10013 defcfg:
10014 	if (!tg3_flag(tp, USING_MSIX)) {
10015 		tp->irq_cnt = 1;
10016 		tp->napi[0].irq_vec = tp->pdev->irq;
10017 		netif_set_real_num_tx_queues(tp->dev, 1);
10018 		netif_set_real_num_rx_queues(tp->dev, 1);
10019 	}
10020 }
10021 
10022 static void tg3_ints_fini(struct tg3 *tp)
10023 {
10024 	if (tg3_flag(tp, USING_MSIX))
10025 		pci_disable_msix(tp->pdev);
10026 	else if (tg3_flag(tp, USING_MSI))
10027 		pci_disable_msi(tp->pdev);
10028 	tg3_flag_clear(tp, USING_MSI);
10029 	tg3_flag_clear(tp, USING_MSIX);
10030 	tg3_flag_clear(tp, ENABLE_RSS);
10031 	tg3_flag_clear(tp, ENABLE_TSS);
10032 }
10033 
10034 static int tg3_open(struct net_device *dev)
10035 {
10036 	struct tg3 *tp = netdev_priv(dev);
10037 	int i, err;
10038 
10039 	if (tp->fw_needed) {
10040 		err = tg3_request_firmware(tp);
10041 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10042 			if (err)
10043 				return err;
10044 		} else if (err) {
10045 			netdev_warn(tp->dev, "TSO capability disabled\n");
10046 			tg3_flag_clear(tp, TSO_CAPABLE);
10047 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
10048 			netdev_notice(tp->dev, "TSO capability restored\n");
10049 			tg3_flag_set(tp, TSO_CAPABLE);
10050 		}
10051 	}
10052 
10053 	netif_carrier_off(tp->dev);
10054 
10055 	err = tg3_power_up(tp);
10056 	if (err)
10057 		return err;
10058 
10059 	tg3_full_lock(tp, 0);
10060 
10061 	tg3_disable_ints(tp);
10062 	tg3_flag_clear(tp, INIT_COMPLETE);
10063 
10064 	tg3_full_unlock(tp);
10065 
10066 	/*
10067 	 * Setup interrupts first so we know how
10068 	 * many NAPI resources to allocate
10069 	 */
10070 	tg3_ints_init(tp);
10071 
10072 	tg3_rss_check_indir_tbl(tp);
10073 
10074 	/* The placement of this call is tied
10075 	 * to the setup and use of Host TX descriptors.
10076 	 */
10077 	err = tg3_alloc_consistent(tp);
10078 	if (err)
10079 		goto err_out1;
10080 
10081 	tg3_napi_init(tp);
10082 
10083 	tg3_napi_enable(tp);
10084 
10085 	for (i = 0; i < tp->irq_cnt; i++) {
10086 		struct tg3_napi *tnapi = &tp->napi[i];
10087 		err = tg3_request_irq(tp, i);
10088 		if (err) {
10089 			for (i--; i >= 0; i--) {
10090 				tnapi = &tp->napi[i];
10091 				free_irq(tnapi->irq_vec, tnapi);
10092 			}
10093 			goto err_out2;
10094 		}
10095 	}
10096 
10097 	tg3_full_lock(tp, 0);
10098 
10099 	err = tg3_init_hw(tp, 1);
10100 	if (err) {
10101 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10102 		tg3_free_rings(tp);
10103 	}
10104 
10105 	tg3_full_unlock(tp);
10106 
10107 	if (err)
10108 		goto err_out3;
10109 
10110 	if (tg3_flag(tp, USING_MSI)) {
10111 		err = tg3_test_msi(tp);
10112 
10113 		if (err) {
10114 			tg3_full_lock(tp, 0);
10115 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10116 			tg3_free_rings(tp);
10117 			tg3_full_unlock(tp);
10118 
10119 			goto err_out2;
10120 		}
10121 
10122 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10123 			u32 val = tr32(PCIE_TRANSACTION_CFG);
10124 
10125 			tw32(PCIE_TRANSACTION_CFG,
10126 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10127 		}
10128 	}
10129 
10130 	tg3_phy_start(tp);
10131 
10132 	tg3_full_lock(tp, 0);
10133 
10134 	tg3_timer_start(tp);
10135 	tg3_flag_set(tp, INIT_COMPLETE);
10136 	tg3_enable_ints(tp);
10137 
10138 	tg3_full_unlock(tp);
10139 
10140 	netif_tx_start_all_queues(dev);
10141 
10142 	/*
10143 	 * Reset the loopback feature if it was turned on while the device
10144 	 * was down, to make sure that it's installed properly now.
10145 	 */
10146 	if (dev->features & NETIF_F_LOOPBACK)
10147 		tg3_set_loopback(dev, dev->features);
10148 
10149 	return 0;
10150 
10151 err_out3:
10152 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10153 		struct tg3_napi *tnapi = &tp->napi[i];
10154 		free_irq(tnapi->irq_vec, tnapi);
10155 	}
10156 
10157 err_out2:
10158 	tg3_napi_disable(tp);
10159 	tg3_napi_fini(tp);
10160 	tg3_free_consistent(tp);
10161 
10162 err_out1:
10163 	tg3_ints_fini(tp);
10164 	tg3_frob_aux_power(tp, false);
10165 	pci_set_power_state(tp->pdev, PCI_D3hot);
10166 	return err;
10167 }
10168 
10169 static int tg3_close(struct net_device *dev)
10170 {
10171 	int i;
10172 	struct tg3 *tp = netdev_priv(dev);
10173 
10174 	tg3_napi_disable(tp);
10175 	tg3_reset_task_cancel(tp);
10176 
10177 	netif_tx_stop_all_queues(dev);
10178 
10179 	tg3_timer_stop(tp);
10180 
10181 	tg3_phy_stop(tp);
10182 
10183 	tg3_full_lock(tp, 1);
10184 
10185 	tg3_disable_ints(tp);
10186 
10187 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10188 	tg3_free_rings(tp);
10189 	tg3_flag_clear(tp, INIT_COMPLETE);
10190 
10191 	tg3_full_unlock(tp);
10192 
10193 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10194 		struct tg3_napi *tnapi = &tp->napi[i];
10195 		free_irq(tnapi->irq_vec, tnapi);
10196 	}
10197 
10198 	tg3_ints_fini(tp);
10199 
10200 	/* Clear stats across close / open calls */
10201 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10202 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10203 
10204 	tg3_napi_fini(tp);
10205 
10206 	tg3_free_consistent(tp);
10207 
10208 	tg3_power_down(tp);
10209 
10210 	netif_carrier_off(tp->dev);
10211 
10212 	return 0;
10213 }
10214 
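/* Hardware statistics are kept as split high/low 32-bit words;
 * combine them into a single 64-bit value.
 */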
10215 static inline u64 get_stat64(tg3_stat64_t *val)
10216 {
10217 	return ((u64)val->high << 32) | ((u64)val->low);
10218 }
10219 
10220 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10221 {
10222 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10223 
10224 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10225 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10226 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10227 		u32 val;
10228 
10229 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10230 			tg3_writephy(tp, MII_TG3_TEST1,
10231 				     val | MII_TG3_TEST1_CRC_EN);
10232 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10233 		} else
10234 			val = 0;
10235 
10236 		tp->phy_crc_errors += val;
10237 
10238 		return tp->phy_crc_errors;
10239 	}
10240 
10241 	return get_stat64(&hw_stats->rx_fcs_errors);
10242 }
10243 
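/* Each ethtool statistic is the total saved in estats_prev plus the
 * counter currently in the hardware statistics block; the saved
 * totals carry the counts across chip resets.
 */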
10244 #define ESTAT_ADD(member) \
10245 	estats->member =	old_estats->member + \
10246 				get_stat64(&hw_stats->member)
10247 
10248 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10249 {
10250 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10251 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10252 
10253 	ESTAT_ADD(rx_octets);
10254 	ESTAT_ADD(rx_fragments);
10255 	ESTAT_ADD(rx_ucast_packets);
10256 	ESTAT_ADD(rx_mcast_packets);
10257 	ESTAT_ADD(rx_bcast_packets);
10258 	ESTAT_ADD(rx_fcs_errors);
10259 	ESTAT_ADD(rx_align_errors);
10260 	ESTAT_ADD(rx_xon_pause_rcvd);
10261 	ESTAT_ADD(rx_xoff_pause_rcvd);
10262 	ESTAT_ADD(rx_mac_ctrl_rcvd);
10263 	ESTAT_ADD(rx_xoff_entered);
10264 	ESTAT_ADD(rx_frame_too_long_errors);
10265 	ESTAT_ADD(rx_jabbers);
10266 	ESTAT_ADD(rx_undersize_packets);
10267 	ESTAT_ADD(rx_in_length_errors);
10268 	ESTAT_ADD(rx_out_length_errors);
10269 	ESTAT_ADD(rx_64_or_less_octet_packets);
10270 	ESTAT_ADD(rx_65_to_127_octet_packets);
10271 	ESTAT_ADD(rx_128_to_255_octet_packets);
10272 	ESTAT_ADD(rx_256_to_511_octet_packets);
10273 	ESTAT_ADD(rx_512_to_1023_octet_packets);
10274 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
10275 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
10276 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
10277 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
10278 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
10279 
10280 	ESTAT_ADD(tx_octets);
10281 	ESTAT_ADD(tx_collisions);
10282 	ESTAT_ADD(tx_xon_sent);
10283 	ESTAT_ADD(tx_xoff_sent);
10284 	ESTAT_ADD(tx_flow_control);
10285 	ESTAT_ADD(tx_mac_errors);
10286 	ESTAT_ADD(tx_single_collisions);
10287 	ESTAT_ADD(tx_mult_collisions);
10288 	ESTAT_ADD(tx_deferred);
10289 	ESTAT_ADD(tx_excessive_collisions);
10290 	ESTAT_ADD(tx_late_collisions);
10291 	ESTAT_ADD(tx_collide_2times);
10292 	ESTAT_ADD(tx_collide_3times);
10293 	ESTAT_ADD(tx_collide_4times);
10294 	ESTAT_ADD(tx_collide_5times);
10295 	ESTAT_ADD(tx_collide_6times);
10296 	ESTAT_ADD(tx_collide_7times);
10297 	ESTAT_ADD(tx_collide_8times);
10298 	ESTAT_ADD(tx_collide_9times);
10299 	ESTAT_ADD(tx_collide_10times);
10300 	ESTAT_ADD(tx_collide_11times);
10301 	ESTAT_ADD(tx_collide_12times);
10302 	ESTAT_ADD(tx_collide_13times);
10303 	ESTAT_ADD(tx_collide_14times);
10304 	ESTAT_ADD(tx_collide_15times);
10305 	ESTAT_ADD(tx_ucast_packets);
10306 	ESTAT_ADD(tx_mcast_packets);
10307 	ESTAT_ADD(tx_bcast_packets);
10308 	ESTAT_ADD(tx_carrier_sense_errors);
10309 	ESTAT_ADD(tx_discards);
10310 	ESTAT_ADD(tx_errors);
10311 
10312 	ESTAT_ADD(dma_writeq_full);
10313 	ESTAT_ADD(dma_write_prioq_full);
10314 	ESTAT_ADD(rxbds_empty);
10315 	ESTAT_ADD(rx_discards);
10316 	ESTAT_ADD(rx_errors);
10317 	ESTAT_ADD(rx_threshold_hit);
10318 
10319 	ESTAT_ADD(dma_readq_full);
10320 	ESTAT_ADD(dma_read_prioq_full);
10321 	ESTAT_ADD(tx_comp_queue_full);
10322 
10323 	ESTAT_ADD(ring_set_send_prod_index);
10324 	ESTAT_ADD(ring_status_update);
10325 	ESTAT_ADD(nic_irqs);
10326 	ESTAT_ADD(nic_avoided_irqs);
10327 	ESTAT_ADD(nic_tx_threshold_hit);
10328 
10329 	ESTAT_ADD(mbuf_lwm_thresh_hit);
10330 }
10331 
10332 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10333 {
10334 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10335 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10336 
10337 	stats->rx_packets = old_stats->rx_packets +
10338 		get_stat64(&hw_stats->rx_ucast_packets) +
10339 		get_stat64(&hw_stats->rx_mcast_packets) +
10340 		get_stat64(&hw_stats->rx_bcast_packets);
10341 
10342 	stats->tx_packets = old_stats->tx_packets +
10343 		get_stat64(&hw_stats->tx_ucast_packets) +
10344 		get_stat64(&hw_stats->tx_mcast_packets) +
10345 		get_stat64(&hw_stats->tx_bcast_packets);
10346 
10347 	stats->rx_bytes = old_stats->rx_bytes +
10348 		get_stat64(&hw_stats->rx_octets);
10349 	stats->tx_bytes = old_stats->tx_bytes +
10350 		get_stat64(&hw_stats->tx_octets);
10351 
10352 	stats->rx_errors = old_stats->rx_errors +
10353 		get_stat64(&hw_stats->rx_errors);
10354 	stats->tx_errors = old_stats->tx_errors +
10355 		get_stat64(&hw_stats->tx_errors) +
10356 		get_stat64(&hw_stats->tx_mac_errors) +
10357 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10358 		get_stat64(&hw_stats->tx_discards);
10359 
10360 	stats->multicast = old_stats->multicast +
10361 		get_stat64(&hw_stats->rx_mcast_packets);
10362 	stats->collisions = old_stats->collisions +
10363 		get_stat64(&hw_stats->tx_collisions);
10364 
10365 	stats->rx_length_errors = old_stats->rx_length_errors +
10366 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10367 		get_stat64(&hw_stats->rx_undersize_packets);
10368 
10369 	stats->rx_over_errors = old_stats->rx_over_errors +
10370 		get_stat64(&hw_stats->rxbds_empty);
10371 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10372 		get_stat64(&hw_stats->rx_align_errors);
10373 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10374 		get_stat64(&hw_stats->tx_discards);
10375 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10376 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10377 
10378 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10379 		tg3_calc_crc_errors(tp);
10380 
10381 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10382 		get_stat64(&hw_stats->rx_discards);
10383 
10384 	stats->rx_dropped = tp->rx_dropped;
10385 	stats->tx_dropped = tp->tx_dropped;
10386 }
10387 
10388 static int tg3_get_regs_len(struct net_device *dev)
10389 {
10390 	return TG3_REG_BLK_SIZE;
10391 }
10392 
10393 static void tg3_get_regs(struct net_device *dev,
10394 		struct ethtool_regs *regs, void *_p)
10395 {
10396 	struct tg3 *tp = netdev_priv(dev);
10397 
10398 	regs->version = 0;
10399 
10400 	memset(_p, 0, TG3_REG_BLK_SIZE);
10401 
10402 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10403 		return;
10404 
10405 	tg3_full_lock(tp, 0);
10406 
10407 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10408 
10409 	tg3_full_unlock(tp);
10410 }
10411 
10412 static int tg3_get_eeprom_len(struct net_device *dev)
10413 {
10414 	struct tg3 *tp = netdev_priv(dev);
10415 
10416 	return tp->nvram_size;
10417 }
10418 
10419 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10420 {
10421 	struct tg3 *tp = netdev_priv(dev);
10422 	int ret;
10423 	u8  *pd;
10424 	u32 i, offset, len, b_offset, b_count;
10425 	__be32 val;
10426 
10427 	if (tg3_flag(tp, NO_NVRAM))
10428 		return -EINVAL;
10429 
10430 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10431 		return -EAGAIN;
10432 
10433 	offset = eeprom->offset;
10434 	len = eeprom->len;
10435 	eeprom->len = 0;
10436 
10437 	eeprom->magic = TG3_EEPROM_MAGIC;
10438 
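	/* NVRAM is read 32 bits at a time on 4-byte boundaries: handle
	 * a misaligned head first, then whole words, then any partial
	 * tail word.
	 */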
10439 	if (offset & 3) {
10440 		/* adjustments to start on required 4 byte boundary */
10441 		b_offset = offset & 3;
10442 		b_count = 4 - b_offset;
10443 		if (b_count > len) {
10444 			/* i.e. offset=1 len=2 */
10445 			b_count = len;
10446 		}
10447 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10448 		if (ret)
10449 			return ret;
10450 		memcpy(data, ((char *)&val) + b_offset, b_count);
10451 		len -= b_count;
10452 		offset += b_count;
10453 		eeprom->len += b_count;
10454 	}
10455 
10456 	/* read bytes up to the last 4 byte boundary */
10457 	pd = &data[eeprom->len];
10458 	for (i = 0; i < (len - (len & 3)); i += 4) {
10459 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10460 		if (ret) {
10461 			eeprom->len += i;
10462 			return ret;
10463 		}
10464 		memcpy(pd + i, &val, 4);
10465 	}
10466 	eeprom->len += i;
10467 
10468 	if (len & 3) {
10469 		/* read last bytes not ending on 4 byte boundary */
10470 		pd = &data[eeprom->len];
10471 		b_count = len & 3;
10472 		b_offset = offset + len - b_count;
10473 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10474 		if (ret)
10475 			return ret;
10476 		memcpy(pd, &val, b_count);
10477 		eeprom->len += b_count;
10478 	}
10479 	return 0;
10480 }
10481 
10482 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10483 {
10484 	struct tg3 *tp = netdev_priv(dev);
10485 	int ret;
10486 	u32 offset, len, b_offset, odd_len;
10487 	u8 *buf;
10488 	__be32 start, end;
10489 
10490 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10491 		return -EAGAIN;
10492 
10493 	if (tg3_flag(tp, NO_NVRAM) ||
10494 	    eeprom->magic != TG3_EEPROM_MAGIC)
10495 		return -EINVAL;
10496 
10497 	offset = eeprom->offset;
10498 	len = eeprom->len;
10499 
10500 	if ((b_offset = (offset & 3))) {
10501 		/* adjustments to start on required 4 byte boundary */
10502 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10503 		if (ret)
10504 			return ret;
10505 		len += b_offset;
10506 		offset &= ~3;
10507 		if (len < 4)
10508 			len = 4;
10509 	}
10510 
10511 	odd_len = 0;
10512 	if (len & 3) {
10513 		/* adjustments to end on required 4 byte boundary */
10514 		odd_len = 1;
10515 		len = (len + 3) & ~3;
10516 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10517 		if (ret)
10518 			return ret;
10519 	}
10520 
10521 	buf = data;
10522 	if (b_offset || odd_len) {
10523 		buf = kmalloc(len, GFP_KERNEL);
10524 		if (!buf)
10525 			return -ENOMEM;
10526 		if (b_offset)
10527 			memcpy(buf, &start, 4);
10528 		if (odd_len)
10529 			memcpy(buf+len-4, &end, 4);
10530 		memcpy(buf + b_offset, data, eeprom->len);
10531 	}
10532 
10533 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10534 
10535 	if (buf != data)
10536 		kfree(buf);
10537 
10538 	return ret;
10539 }
10540 
10541 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10542 {
10543 	struct tg3 *tp = netdev_priv(dev);
10544 
10545 	if (tg3_flag(tp, USE_PHYLIB)) {
10546 		struct phy_device *phydev;
10547 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10548 			return -EAGAIN;
10549 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10550 		return phy_ethtool_gset(phydev, cmd);
10551 	}
10552 
10553 	cmd->supported = (SUPPORTED_Autoneg);
10554 
10555 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10556 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10557 				   SUPPORTED_1000baseT_Full);
10558 
10559 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10560 		cmd->supported |= (SUPPORTED_100baseT_Half |
10561 				  SUPPORTED_100baseT_Full |
10562 				  SUPPORTED_10baseT_Half |
10563 				  SUPPORTED_10baseT_Full |
10564 				  SUPPORTED_TP);
10565 		cmd->port = PORT_TP;
10566 	} else {
10567 		cmd->supported |= SUPPORTED_FIBRE;
10568 		cmd->port = PORT_FIBRE;
10569 	}
10570 
10571 	cmd->advertising = tp->link_config.advertising;
10572 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10573 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10574 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10575 				cmd->advertising |= ADVERTISED_Pause;
10576 			} else {
10577 				cmd->advertising |= ADVERTISED_Pause |
10578 						    ADVERTISED_Asym_Pause;
10579 			}
10580 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10581 			cmd->advertising |= ADVERTISED_Asym_Pause;
10582 		}
10583 	}
10584 	if (netif_running(dev) && netif_carrier_ok(dev)) {
10585 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10586 		cmd->duplex = tp->link_config.active_duplex;
10587 		cmd->lp_advertising = tp->link_config.rmt_adv;
10588 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10589 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10590 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10591 			else
10592 				cmd->eth_tp_mdix = ETH_TP_MDI;
10593 		}
10594 	} else {
10595 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10596 		cmd->duplex = DUPLEX_UNKNOWN;
10597 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10598 	}
10599 	cmd->phy_address = tp->phy_addr;
10600 	cmd->transceiver = XCVR_INTERNAL;
10601 	cmd->autoneg = tp->link_config.autoneg;
10602 	cmd->maxtxpkt = 0;
10603 	cmd->maxrxpkt = 0;
10604 	return 0;
10605 }
10606 
10607 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10608 {
10609 	struct tg3 *tp = netdev_priv(dev);
10610 	u32 speed = ethtool_cmd_speed(cmd);
10611 
10612 	if (tg3_flag(tp, USE_PHYLIB)) {
10613 		struct phy_device *phydev;
10614 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10615 			return -EAGAIN;
10616 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10617 		return phy_ethtool_sset(phydev, cmd);
10618 	}
10619 
10620 	if (cmd->autoneg != AUTONEG_ENABLE &&
10621 	    cmd->autoneg != AUTONEG_DISABLE)
10622 		return -EINVAL;
10623 
10624 	if (cmd->autoneg == AUTONEG_DISABLE &&
10625 	    cmd->duplex != DUPLEX_FULL &&
10626 	    cmd->duplex != DUPLEX_HALF)
10627 		return -EINVAL;
10628 
10629 	if (cmd->autoneg == AUTONEG_ENABLE) {
10630 		u32 mask = ADVERTISED_Autoneg |
10631 			   ADVERTISED_Pause |
10632 			   ADVERTISED_Asym_Pause;
10633 
10634 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10635 			mask |= ADVERTISED_1000baseT_Half |
10636 				ADVERTISED_1000baseT_Full;
10637 
10638 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10639 			mask |= ADVERTISED_100baseT_Half |
10640 				ADVERTISED_100baseT_Full |
10641 				ADVERTISED_10baseT_Half |
10642 				ADVERTISED_10baseT_Full |
10643 				ADVERTISED_TP;
10644 		else
10645 			mask |= ADVERTISED_FIBRE;
10646 
10647 		if (cmd->advertising & ~mask)
10648 			return -EINVAL;
10649 
10650 		mask &= (ADVERTISED_1000baseT_Half |
10651 			 ADVERTISED_1000baseT_Full |
10652 			 ADVERTISED_100baseT_Half |
10653 			 ADVERTISED_100baseT_Full |
10654 			 ADVERTISED_10baseT_Half |
10655 			 ADVERTISED_10baseT_Full);
10656 
10657 		cmd->advertising &= mask;
10658 	} else {
10659 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10660 			if (speed != SPEED_1000)
10661 				return -EINVAL;
10662 
10663 			if (cmd->duplex != DUPLEX_FULL)
10664 				return -EINVAL;
10665 		} else {
10666 			if (speed != SPEED_100 &&
10667 			    speed != SPEED_10)
10668 				return -EINVAL;
10669 		}
10670 	}
10671 
10672 	tg3_full_lock(tp, 0);
10673 
10674 	tp->link_config.autoneg = cmd->autoneg;
10675 	if (cmd->autoneg == AUTONEG_ENABLE) {
10676 		tp->link_config.advertising = (cmd->advertising |
10677 					      ADVERTISED_Autoneg);
10678 		tp->link_config.speed = SPEED_UNKNOWN;
10679 		tp->link_config.duplex = DUPLEX_UNKNOWN;
10680 	} else {
10681 		tp->link_config.advertising = 0;
10682 		tp->link_config.speed = speed;
10683 		tp->link_config.duplex = cmd->duplex;
10684 	}
10685 
10686 	if (netif_running(dev))
10687 		tg3_setup_phy(tp, 1);
10688 
10689 	tg3_full_unlock(tp);
10690 
10691 	return 0;
10692 }
10693 
10694 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10695 {
10696 	struct tg3 *tp = netdev_priv(dev);
10697 
10698 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10699 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10700 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10701 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10702 }
10703 
10704 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10705 {
10706 	struct tg3 *tp = netdev_priv(dev);
10707 
10708 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10709 		wol->supported = WAKE_MAGIC;
10710 	else
10711 		wol->supported = 0;
10712 	wol->wolopts = 0;
10713 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10714 		wol->wolopts = WAKE_MAGIC;
10715 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10716 }
10717 
10718 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10719 {
10720 	struct tg3 *tp = netdev_priv(dev);
10721 	struct device *dp = &tp->pdev->dev;
10722 
10723 	if (wol->wolopts & ~WAKE_MAGIC)
10724 		return -EINVAL;
10725 	if ((wol->wolopts & WAKE_MAGIC) &&
10726 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10727 		return -EINVAL;
10728 
10729 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10730 
10731 	spin_lock_bh(&tp->lock);
10732 	if (device_may_wakeup(dp))
10733 		tg3_flag_set(tp, WOL_ENABLE);
10734 	else
10735 		tg3_flag_clear(tp, WOL_ENABLE);
10736 	spin_unlock_bh(&tp->lock);
10737 
10738 	return 0;
10739 }
10740 
10741 static u32 tg3_get_msglevel(struct net_device *dev)
10742 {
10743 	struct tg3 *tp = netdev_priv(dev);
10744 	return tp->msg_enable;
10745 }
10746 
10747 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10748 {
10749 	struct tg3 *tp = netdev_priv(dev);
10750 	tp->msg_enable = value;
10751 }
10752 
10753 static int tg3_nway_reset(struct net_device *dev)
10754 {
10755 	struct tg3 *tp = netdev_priv(dev);
10756 	int r;
10757 
10758 	if (!netif_running(dev))
10759 		return -EAGAIN;
10760 
10761 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10762 		return -EINVAL;
10763 
10764 	if (tg3_flag(tp, USE_PHYLIB)) {
10765 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10766 			return -EAGAIN;
10767 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10768 	} else {
10769 		u32 bmcr;
10770 
10771 		spin_lock_bh(&tp->lock);
10772 		r = -EINVAL;
10773 		tg3_readphy(tp, MII_BMCR, &bmcr);
10774 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10775 		    ((bmcr & BMCR_ANENABLE) ||
10776 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10777 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10778 						   BMCR_ANENABLE);
10779 			r = 0;
10780 		}
10781 		spin_unlock_bh(&tp->lock);
10782 	}
10783 
10784 	return r;
10785 }
10786 
10787 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10788 {
10789 	struct tg3 *tp = netdev_priv(dev);
10790 
10791 	ering->rx_max_pending = tp->rx_std_ring_mask;
10792 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10793 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10794 	else
10795 		ering->rx_jumbo_max_pending = 0;
10796 
10797 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10798 
10799 	ering->rx_pending = tp->rx_pending;
10800 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10801 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10802 	else
10803 		ering->rx_jumbo_pending = 0;
10804 
10805 	ering->tx_pending = tp->napi[0].tx_pending;
10806 }
10807 
10808 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10809 {
10810 	struct tg3 *tp = netdev_priv(dev);
10811 	int i, irq_sync = 0, err = 0;
10812 
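	/* The TX ring must hold at least one maximally fragmented skb;
	 * chips with the TSO bug need extra headroom because oversized
	 * TSO packets are segmented by the driver itself.
	 */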
10813 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10814 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10815 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10816 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10817 	    (tg3_flag(tp, TSO_BUG) &&
10818 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10819 		return -EINVAL;
10820 
10821 	if (netif_running(dev)) {
10822 		tg3_phy_stop(tp);
10823 		tg3_netif_stop(tp);
10824 		irq_sync = 1;
10825 	}
10826 
10827 	tg3_full_lock(tp, irq_sync);
10828 
10829 	tp->rx_pending = ering->rx_pending;
10830 
10831 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10832 	    tp->rx_pending > 63)
10833 		tp->rx_pending = 63;
10834 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10835 
10836 	for (i = 0; i < tp->irq_max; i++)
10837 		tp->napi[i].tx_pending = ering->tx_pending;
10838 
10839 	if (netif_running(dev)) {
10840 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10841 		err = tg3_restart_hw(tp, 1);
10842 		if (!err)
10843 			tg3_netif_start(tp);
10844 	}
10845 
10846 	tg3_full_unlock(tp);
10847 
10848 	if (irq_sync && !err)
10849 		tg3_phy_start(tp);
10850 
10851 	return err;
10852 }
10853 
10854 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10855 {
10856 	struct tg3 *tp = netdev_priv(dev);
10857 
10858 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10859 
10860 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10861 		epause->rx_pause = 1;
10862 	else
10863 		epause->rx_pause = 0;
10864 
10865 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10866 		epause->tx_pause = 1;
10867 	else
10868 		epause->tx_pause = 0;
10869 }
10870 
10871 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10872 {
10873 	struct tg3 *tp = netdev_priv(dev);
10874 	int err = 0;
10875 
10876 	if (tg3_flag(tp, USE_PHYLIB)) {
10877 		u32 newadv;
10878 		struct phy_device *phydev;
10879 
10880 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10881 
10882 		if (!(phydev->supported & SUPPORTED_Pause) ||
10883 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10884 		     (epause->rx_pause != epause->tx_pause)))
10885 			return -EINVAL;
10886 
10887 		tp->link_config.flowctrl = 0;
10888 		if (epause->rx_pause) {
10889 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10890 
10891 			if (epause->tx_pause) {
10892 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10893 				newadv = ADVERTISED_Pause;
10894 			} else
10895 				newadv = ADVERTISED_Pause |
10896 					 ADVERTISED_Asym_Pause;
10897 		} else if (epause->tx_pause) {
10898 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10899 			newadv = ADVERTISED_Asym_Pause;
10900 		} else
10901 			newadv = 0;
10902 
10903 		if (epause->autoneg)
10904 			tg3_flag_set(tp, PAUSE_AUTONEG);
10905 		else
10906 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10907 
10908 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10909 			u32 oldadv = phydev->advertising &
10910 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10911 			if (oldadv != newadv) {
10912 				phydev->advertising &=
10913 					~(ADVERTISED_Pause |
10914 					  ADVERTISED_Asym_Pause);
10915 				phydev->advertising |= newadv;
10916 				if (phydev->autoneg) {
10917 					/*
10918 					 * Always renegotiate the link to
10919 					 * inform our link partner of our
10920 					 * flow control settings, even if the
10921 					 * flow control is forced.  Let
10922 					 * tg3_adjust_link() do the final
10923 					 * flow control setup.
10924 					 */
10925 					return phy_start_aneg(phydev);
10926 				}
10927 			}
10928 
10929 			if (!epause->autoneg)
10930 				tg3_setup_flow_control(tp, 0, 0);
10931 		} else {
10932 			tp->link_config.advertising &=
10933 					~(ADVERTISED_Pause |
10934 					  ADVERTISED_Asym_Pause);
10935 			tp->link_config.advertising |= newadv;
10936 		}
10937 	} else {
10938 		int irq_sync = 0;
10939 
10940 		if (netif_running(dev)) {
10941 			tg3_netif_stop(tp);
10942 			irq_sync = 1;
10943 		}
10944 
10945 		tg3_full_lock(tp, irq_sync);
10946 
10947 		if (epause->autoneg)
10948 			tg3_flag_set(tp, PAUSE_AUTONEG);
10949 		else
10950 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10951 		if (epause->rx_pause)
10952 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10953 		else
10954 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10955 		if (epause->tx_pause)
10956 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10957 		else
10958 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10959 
10960 		if (netif_running(dev)) {
10961 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10962 			err = tg3_restart_hw(tp, 1);
10963 			if (!err)
10964 				tg3_netif_start(tp);
10965 		}
10966 
10967 		tg3_full_unlock(tp);
10968 	}
10969 
10970 	return err;
10971 }
10972 
10973 static int tg3_get_sset_count(struct net_device *dev, int sset)
10974 {
10975 	switch (sset) {
10976 	case ETH_SS_TEST:
10977 		return TG3_NUM_TEST;
10978 	case ETH_SS_STATS:
10979 		return TG3_NUM_STATS;
10980 	default:
10981 		return -EOPNOTSUPP;
10982 	}
10983 }
10984 
10985 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10986 			 u32 *rules __always_unused)
10987 {
10988 	struct tg3 *tp = netdev_priv(dev);
10989 
10990 	if (!tg3_flag(tp, SUPPORT_MSIX))
10991 		return -EOPNOTSUPP;
10992 
10993 	switch (info->cmd) {
10994 	case ETHTOOL_GRXRINGS:
10995 		if (netif_running(tp->dev))
10996 			info->data = tp->irq_cnt;
10997 		else {
10998 			info->data = num_online_cpus();
10999 			if (info->data > TG3_IRQ_MAX_VECS_RSS)
11000 				info->data = TG3_IRQ_MAX_VECS_RSS;
11001 		}
11002 
11003 		/* The first interrupt vector only
11004 		 * handles link interrupts.
11005 		 */
11006 		info->data -= 1;
11007 		return 0;
11008 
11009 	default:
11010 		return -EOPNOTSUPP;
11011 	}
11012 }
11013 
11014 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11015 {
11016 	u32 size = 0;
11017 	struct tg3 *tp = netdev_priv(dev);
11018 
11019 	if (tg3_flag(tp, SUPPORT_MSIX))
11020 		size = TG3_RSS_INDIR_TBL_SIZE;
11021 
11022 	return size;
11023 }
11024 
11025 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11026 {
11027 	struct tg3 *tp = netdev_priv(dev);
11028 	int i;
11029 
11030 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11031 		indir[i] = tp->rss_ind_tbl[i];
11032 
11033 	return 0;
11034 }
11035 
11036 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11037 {
11038 	struct tg3 *tp = netdev_priv(dev);
11039 	size_t i;
11040 
11041 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11042 		tp->rss_ind_tbl[i] = indir[i];
11043 
11044 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11045 		return 0;
11046 
11047 	/* It is legal to write the indirection
11048 	 * table while the device is running.
11049 	 */
11050 	tg3_full_lock(tp, 0);
11051 	tg3_rss_write_indir_tbl(tp);
11052 	tg3_full_unlock(tp);
11053 
11054 	return 0;
11055 }
11056 
11057 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11058 {
11059 	switch (stringset) {
11060 	case ETH_SS_STATS:
11061 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11062 		break;
11063 	case ETH_SS_TEST:
11064 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11065 		break;
11066 	default:
11067 		WARN_ON(1);	/* unknown stringset - should not happen */
11068 		break;
11069 	}
11070 }
11071 
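/* ethtool LED identify callback: force the link/traffic LEDs on or
 * off via MAC_LED_CTRL overrides and restore tp->led_ctrl when the
 * blink cycle ends.  Returning 1 for ETHTOOL_ID_ACTIVE asks the
 * core to toggle once per second.
 */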
11072 static int tg3_set_phys_id(struct net_device *dev,
11073 			    enum ethtool_phys_id_state state)
11074 {
11075 	struct tg3 *tp = netdev_priv(dev);
11076 
11077 	if (!netif_running(tp->dev))
11078 		return -EAGAIN;
11079 
11080 	switch (state) {
11081 	case ETHTOOL_ID_ACTIVE:
11082 		return 1;	/* cycle on/off once per second */
11083 
11084 	case ETHTOOL_ID_ON:
11085 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11086 		     LED_CTRL_1000MBPS_ON |
11087 		     LED_CTRL_100MBPS_ON |
11088 		     LED_CTRL_10MBPS_ON |
11089 		     LED_CTRL_TRAFFIC_OVERRIDE |
11090 		     LED_CTRL_TRAFFIC_BLINK |
11091 		     LED_CTRL_TRAFFIC_LED);
11092 		break;
11093 
11094 	case ETHTOOL_ID_OFF:
11095 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11096 		     LED_CTRL_TRAFFIC_OVERRIDE);
11097 		break;
11098 
11099 	case ETHTOOL_ID_INACTIVE:
11100 		tw32(MAC_LED_CTRL, tp->led_ctrl);
11101 		break;
11102 	}
11103 
11104 	return 0;
11105 }
11106 
11107 static void tg3_get_ethtool_stats(struct net_device *dev,
11108 				   struct ethtool_stats *estats, u64 *tmp_stats)
11109 {
11110 	struct tg3 *tp = netdev_priv(dev);
11111 
11112 	if (tp->hw_stats)
11113 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11114 	else
11115 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11116 }
11117 
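/* Read the VPD block into a freshly allocated buffer.  For images
 * with the standard EEPROM magic, the NVM directory is scanned for
 * an extended-VPD entry, falling back to the default VPD offset and
 * length, and the data is read through the NVRAM interface.  Other
 * images are read through the PCI VPD capability.  Returns NULL on
 * failure; on success *vpdlen is set and the caller must kfree()
 * the buffer.
 */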
11118 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11119 {
11120 	int i;
11121 	__be32 *buf;
11122 	u32 offset = 0, len = 0;
11123 	u32 magic, val;
11124 
11125 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11126 		return NULL;
11127 
11128 	if (magic == TG3_EEPROM_MAGIC) {
11129 		for (offset = TG3_NVM_DIR_START;
11130 		     offset < TG3_NVM_DIR_END;
11131 		     offset += TG3_NVM_DIRENT_SIZE) {
11132 			if (tg3_nvram_read(tp, offset, &val))
11133 				return NULL;
11134 
11135 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11136 			    TG3_NVM_DIRTYPE_EXTVPD)
11137 				break;
11138 		}
11139 
11140 		if (offset != TG3_NVM_DIR_END) {
11141 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11142 			if (tg3_nvram_read(tp, offset + 4, &offset))
11143 				return NULL;
11144 
11145 			offset = tg3_nvram_logical_addr(tp, offset);
11146 		}
11147 	}
11148 
11149 	if (!offset || !len) {
11150 		offset = TG3_NVM_VPD_OFF;
11151 		len = TG3_NVM_VPD_LEN;
11152 	}
11153 
11154 	buf = kmalloc(len, GFP_KERNEL);
11155 	if (buf == NULL)
11156 		return NULL;
11157 
11158 	if (magic == TG3_EEPROM_MAGIC) {
11159 		for (i = 0; i < len; i += 4) {
11160 			/* The data is in little-endian format in NVRAM.
11161 			 * Use the big-endian read routines to preserve
11162 			 * the byte order as it exists in NVRAM.
11163 			 */
11164 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11165 				goto error;
11166 		}
11167 	} else {
11168 		u8 *ptr;
11169 		ssize_t cnt;
11170 		unsigned int pos = 0;
11171 
11172 		ptr = (u8 *)&buf[0];
11173 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11174 			cnt = pci_read_vpd(tp->pdev, pos,
11175 					   len - pos, ptr);
11176 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11177 				cnt = 0;
11178 			else if (cnt < 0)
11179 				goto error;
11180 		}
11181 		if (pos != len)
11182 			goto error;
11183 	}
11184 
11185 	*vpdlen = len;
11186 
11187 	return buf;
11188 
11189 error:
11190 	kfree(buf);
11191 	return NULL;
11192 }
11193 
11194 #define NVRAM_TEST_SIZE 0x100
11195 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11196 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11197 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11198 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11199 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11200 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11201 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11202 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11203 
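/* NVRAM self-test.  The image type is identified from its magic
 * number, the matching number of bytes is read, and that format's
 * own integrity check is applied: a simple additive byte checksum
 * for selfboot firmware images, odd parity for hardware selfboot
 * images, and a CRC over the bootstrap and manufacturing blocks
 * (plus the VPD RO checksum, if present) for full legacy images.
 */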
11204 static int tg3_test_nvram(struct tg3 *tp)
11205 {
11206 	u32 csum, magic, len;
11207 	__be32 *buf;
11208 	int i, j, k, err = 0, size;
11209 
11210 	if (tg3_flag(tp, NO_NVRAM))
11211 		return 0;
11212 
11213 	if (tg3_nvram_read(tp, 0, &magic) != 0)
11214 		return -EIO;
11215 
11216 	if (magic == TG3_EEPROM_MAGIC)
11217 		size = NVRAM_TEST_SIZE;
11218 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11219 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11220 		    TG3_EEPROM_SB_FORMAT_1) {
11221 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11222 			case TG3_EEPROM_SB_REVISION_0:
11223 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11224 				break;
11225 			case TG3_EEPROM_SB_REVISION_2:
11226 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11227 				break;
11228 			case TG3_EEPROM_SB_REVISION_3:
11229 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11230 				break;
11231 			case TG3_EEPROM_SB_REVISION_4:
11232 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11233 				break;
11234 			case TG3_EEPROM_SB_REVISION_5:
11235 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11236 				break;
11237 			case TG3_EEPROM_SB_REVISION_6:
11238 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11239 				break;
11240 			default:
11241 				return -EIO;
11242 			}
11243 		} else
11244 			return 0;
11245 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11246 		size = NVRAM_SELFBOOT_HW_SIZE;
11247 	else
11248 		return -EIO;
11249 
11250 	buf = kmalloc(size, GFP_KERNEL);
11251 	if (buf == NULL)
11252 		return -ENOMEM;
11253 
11254 	err = -EIO;
11255 	for (i = 0, j = 0; i < size; i += 4, j++) {
11256 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11257 		if (err)
11258 			break;
11259 	}
11260 	if (i < size)
11261 		goto out;
11262 
11263 	/* Selfboot format */
11264 	magic = be32_to_cpu(buf[0]);
11265 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11266 	    TG3_EEPROM_MAGIC_FW) {
11267 		u8 *buf8 = (u8 *) buf, csum8 = 0;
11268 
11269 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11270 		    TG3_EEPROM_SB_REVISION_2) {
11271 			/* For rev 2, the csum doesn't include the MBA. */
11272 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11273 				csum8 += buf8[i];
11274 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11275 				csum8 += buf8[i];
11276 		} else {
11277 			for (i = 0; i < size; i++)
11278 				csum8 += buf8[i];
11279 		}
11280 
11281 		if (csum8 == 0) {
11282 			err = 0;
11283 			goto out;
11284 		}
11285 
11286 		err = -EIO;
11287 		goto out;
11288 	}
11289 
11290 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11291 	    TG3_EEPROM_MAGIC_HW) {
11292 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11293 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11294 		u8 *buf8 = (u8 *) buf;
11295 
11296 		/* Separate the parity bits and the data bytes.  */
11297 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
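		/* Bytes 0 and 8 each hold seven parity bits covering
		 * the seven data bytes that follow them; byte 16 holds
		 * six and byte 17 eight for the remaining data.  A data
		 * byte and its parity bit together must come out odd.
		 */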
11298 			if ((i == 0) || (i == 8)) {
11299 				int l;
11300 				u8 msk;
11301 
11302 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11303 					parity[k++] = buf8[i] & msk;
11304 				i++;
11305 			} else if (i == 16) {
11306 				int l;
11307 				u8 msk;
11308 
11309 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11310 					parity[k++] = buf8[i] & msk;
11311 				i++;
11312 
11313 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11314 					parity[k++] = buf8[i] & msk;
11315 				i++;
11316 			}
11317 			data[j++] = buf8[i];
11318 		}
11319 
11320 		err = -EIO;
11321 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11322 			u8 hw8 = hweight8(data[i]);
11323 
11324 			if ((hw8 & 0x1) && parity[i])
11325 				goto out;
11326 			else if (!(hw8 & 0x1) && !parity[i])
11327 				goto out;
11328 		}
11329 		err = 0;
11330 		goto out;
11331 	}
11332 
11333 	err = -EIO;
11334 
11335 	/* Bootstrap checksum at offset 0x10 */
11336 	csum = calc_crc((unsigned char *) buf, 0x10);
11337 	if (csum != le32_to_cpu(buf[0x10/4]))
11338 		goto out;
11339 
11340 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11341 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11342 	if (csum != le32_to_cpu(buf[0xfc/4]))
11343 		goto out;
11344 
11345 	kfree(buf);
11346 
11347 	buf = tg3_vpd_readblock(tp, &len);
11348 	if (!buf)
11349 		return -ENOMEM;
11350 
11351 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11352 	if (i > 0) {
11353 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11354 		if (j < 0)
11355 			goto out;
11356 
11357 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11358 			goto out;
11359 
11360 		i += PCI_VPD_LRDT_TAG_SIZE;
11361 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11362 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11363 		if (j > 0) {
11364 			u8 csum8 = 0;
11365 
11366 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11367 
11368 			for (i = 0; i <= j; i++)
11369 				csum8 += ((u8 *)buf)[i];
11370 
11371 			if (csum8)
11372 				goto out;
11373 		}
11374 	}
11375 
11376 	err = 0;
11377 
11378 out:
11379 	kfree(buf);
11380 	return err;
11381 }
11382 
11383 #define TG3_SERDES_TIMEOUT_SEC	2
11384 #define TG3_COPPER_TIMEOUT_SEC	6
11385 
11386 static int tg3_test_link(struct tg3 *tp)
11387 {
11388 	int i, max;
11389 
11390 	if (!netif_running(tp->dev))
11391 		return -ENODEV;
11392 
11393 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11394 		max = TG3_SERDES_TIMEOUT_SEC;
11395 	else
11396 		max = TG3_COPPER_TIMEOUT_SEC;
11397 
11398 	for (i = 0; i < max; i++) {
11399 		if (netif_carrier_ok(tp->dev))
11400 			return 0;
11401 
11402 		if (msleep_interruptible(1000))
11403 			break;
11404 	}
11405 
11406 	return -EIO;
11407 }
11408 
11409 /* Only test the commonly used registers */
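/* For each table entry, read_mask selects bits treated as read-only
 * (their saved value must survive writes of all-zeros and all-ones)
 * and write_mask selects read/write bits that must latch both
 * patterns.  Entries are filtered by ASIC class via the TG3_FL_*
 * flags, and the original register value is restored afterwards.
 */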
11410 static int tg3_test_registers(struct tg3 *tp)
11411 {
11412 	int i, is_5705, is_5750;
11413 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11414 	static struct {
11415 		u16 offset;
11416 		u16 flags;
11417 #define TG3_FL_5705	0x1
11418 #define TG3_FL_NOT_5705	0x2
11419 #define TG3_FL_NOT_5788	0x4
11420 #define TG3_FL_NOT_5750	0x8
11421 		u32 read_mask;
11422 		u32 write_mask;
11423 	} reg_tbl[] = {
11424 		/* MAC Control Registers */
11425 		{ MAC_MODE, TG3_FL_NOT_5705,
11426 			0x00000000, 0x00ef6f8c },
11427 		{ MAC_MODE, TG3_FL_5705,
11428 			0x00000000, 0x01ef6b8c },
11429 		{ MAC_STATUS, TG3_FL_NOT_5705,
11430 			0x03800107, 0x00000000 },
11431 		{ MAC_STATUS, TG3_FL_5705,
11432 			0x03800100, 0x00000000 },
11433 		{ MAC_ADDR_0_HIGH, 0x0000,
11434 			0x00000000, 0x0000ffff },
11435 		{ MAC_ADDR_0_LOW, 0x0000,
11436 			0x00000000, 0xffffffff },
11437 		{ MAC_RX_MTU_SIZE, 0x0000,
11438 			0x00000000, 0x0000ffff },
11439 		{ MAC_TX_MODE, 0x0000,
11440 			0x00000000, 0x00000070 },
11441 		{ MAC_TX_LENGTHS, 0x0000,
11442 			0x00000000, 0x00003fff },
11443 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11444 			0x00000000, 0x000007fc },
11445 		{ MAC_RX_MODE, TG3_FL_5705,
11446 			0x00000000, 0x000007dc },
11447 		{ MAC_HASH_REG_0, 0x0000,
11448 			0x00000000, 0xffffffff },
11449 		{ MAC_HASH_REG_1, 0x0000,
11450 			0x00000000, 0xffffffff },
11451 		{ MAC_HASH_REG_2, 0x0000,
11452 			0x00000000, 0xffffffff },
11453 		{ MAC_HASH_REG_3, 0x0000,
11454 			0x00000000, 0xffffffff },
11455 
11456 		/* Receive Data and Receive BD Initiator Control Registers. */
11457 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11458 			0x00000000, 0xffffffff },
11459 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11460 			0x00000000, 0xffffffff },
11461 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11462 			0x00000000, 0x00000003 },
11463 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11464 			0x00000000, 0xffffffff },
11465 		{ RCVDBDI_STD_BD+0, 0x0000,
11466 			0x00000000, 0xffffffff },
11467 		{ RCVDBDI_STD_BD+4, 0x0000,
11468 			0x00000000, 0xffffffff },
11469 		{ RCVDBDI_STD_BD+8, 0x0000,
11470 			0x00000000, 0xffff0002 },
11471 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11472 			0x00000000, 0xffffffff },
11473 
11474 		/* Receive BD Initiator Control Registers. */
11475 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11476 			0x00000000, 0xffffffff },
11477 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11478 			0x00000000, 0x000003ff },
11479 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11480 			0x00000000, 0xffffffff },
11481 
11482 		/* Host Coalescing Control Registers. */
11483 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11484 			0x00000000, 0x00000004 },
11485 		{ HOSTCC_MODE, TG3_FL_5705,
11486 			0x00000000, 0x000000f6 },
11487 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11488 			0x00000000, 0xffffffff },
11489 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11490 			0x00000000, 0x000003ff },
11491 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11492 			0x00000000, 0xffffffff },
11493 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11494 			0x00000000, 0x000003ff },
11495 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11496 			0x00000000, 0xffffffff },
11497 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11498 			0x00000000, 0x000000ff },
11499 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11500 			0x00000000, 0xffffffff },
11501 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11502 			0x00000000, 0x000000ff },
11503 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11504 			0x00000000, 0xffffffff },
11505 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11506 			0x00000000, 0xffffffff },
11507 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11508 			0x00000000, 0xffffffff },
11509 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11510 			0x00000000, 0x000000ff },
11511 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11512 			0x00000000, 0xffffffff },
11513 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11514 			0x00000000, 0x000000ff },
11515 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11516 			0x00000000, 0xffffffff },
11517 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11518 			0x00000000, 0xffffffff },
11519 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11520 			0x00000000, 0xffffffff },
11521 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11522 			0x00000000, 0xffffffff },
11523 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11524 			0x00000000, 0xffffffff },
11525 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11526 			0xffffffff, 0x00000000 },
11527 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11528 			0xffffffff, 0x00000000 },
11529 
11530 		/* Buffer Manager Control Registers. */
11531 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11532 			0x00000000, 0x007fff80 },
11533 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11534 			0x00000000, 0x007fffff },
11535 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11536 			0x00000000, 0x0000003f },
11537 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11538 			0x00000000, 0x000001ff },
11539 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11540 			0x00000000, 0x000001ff },
11541 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11542 			0xffffffff, 0x00000000 },
11543 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11544 			0xffffffff, 0x00000000 },
11545 
11546 		/* Mailbox Registers */
11547 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11548 			0x00000000, 0x000001ff },
11549 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11550 			0x00000000, 0x000001ff },
11551 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11552 			0x00000000, 0x000007ff },
11553 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11554 			0x00000000, 0x000001ff },
11555 
11556 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11557 	};
11558 
11559 	is_5705 = is_5750 = 0;
11560 	if (tg3_flag(tp, 5705_PLUS)) {
11561 		is_5705 = 1;
11562 		if (tg3_flag(tp, 5750_PLUS))
11563 			is_5750 = 1;
11564 	}
11565 
11566 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11567 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11568 			continue;
11569 
11570 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11571 			continue;
11572 
11573 		if (tg3_flag(tp, IS_5788) &&
11574 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11575 			continue;
11576 
11577 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11578 			continue;
11579 
11580 		offset = (u32) reg_tbl[i].offset;
11581 		read_mask = reg_tbl[i].read_mask;
11582 		write_mask = reg_tbl[i].write_mask;
11583 
11584 		/* Save the original register content */
11585 		save_val = tr32(offset);
11586 
11587 		/* Determine the read-only value. */
11588 		read_val = save_val & read_mask;
11589 
11590 		/* Write zero to the register, then make sure the read-only bits
11591 		 * are not changed and the read/write bits are all zeros.
11592 		 */
11593 		tw32(offset, 0);
11594 
11595 		val = tr32(offset);
11596 
11597 		/* Test the read-only and read/write bits. */
11598 		if (((val & read_mask) != read_val) || (val & write_mask))
11599 			goto out;
11600 
11601 		/* Write ones to all the bits defined by RdMask and WrMask, then
11602 		 * make sure the read-only bits are not changed and the
11603 		 * read/write bits are all ones.
11604 		 */
11605 		tw32(offset, read_mask | write_mask);
11606 
11607 		val = tr32(offset);
11608 
11609 		/* Test the read-only bits. */
11610 		if ((val & read_mask) != read_val)
11611 			goto out;
11612 
11613 		/* Test the read/write bits. */
11614 		if ((val & write_mask) != write_mask)
11615 			goto out;
11616 
11617 		tw32(offset, save_val);
11618 	}
11619 
11620 	return 0;
11621 
11622 out:
11623 	if (netif_msg_hw(tp))
11624 		netdev_err(tp->dev,
11625 			   "Register test failed at offset %x\n", offset);
11626 	tw32(offset, save_val);
11627 	return -EIO;
11628 }
11629 
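/* Write each test pattern to every 32-bit word in the given window
 * of NIC-local memory and read it back; the first mismatch fails
 * the test with -EIO.
 */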
11630 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11631 {
11632 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11633 	int i;
11634 	u32 j;
11635 
11636 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11637 		for (j = 0; j < len; j += 4) {
11638 			u32 val;
11639 
11640 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11641 			tg3_read_mem(tp, offset + j, &val);
11642 			if (val != test_pattern[i])
11643 				return -EIO;
11644 		}
11645 	}
11646 	return 0;
11647 }
11648 
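/* Table-driven memory test: each ASIC family gets its own list of
 * { offset, len } windows, terminated by an offset of 0xffffffff,
 * and every window is run through tg3_do_mem_test().
 */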
11649 static int tg3_test_memory(struct tg3 *tp)
11650 {
11651 	static struct mem_entry {
11652 		u32 offset;
11653 		u32 len;
11654 	} mem_tbl_570x[] = {
11655 		{ 0x00000000, 0x00b50},
11656 		{ 0x00002000, 0x1c000},
11657 		{ 0xffffffff, 0x00000}
11658 	}, mem_tbl_5705[] = {
11659 		{ 0x00000100, 0x0000c},
11660 		{ 0x00000200, 0x00008},
11661 		{ 0x00004000, 0x00800},
11662 		{ 0x00006000, 0x01000},
11663 		{ 0x00008000, 0x02000},
11664 		{ 0x00010000, 0x0e000},
11665 		{ 0xffffffff, 0x00000}
11666 	}, mem_tbl_5755[] = {
11667 		{ 0x00000200, 0x00008},
11668 		{ 0x00004000, 0x00800},
11669 		{ 0x00006000, 0x00800},
11670 		{ 0x00008000, 0x02000},
11671 		{ 0x00010000, 0x0c000},
11672 		{ 0xffffffff, 0x00000}
11673 	}, mem_tbl_5906[] = {
11674 		{ 0x00000200, 0x00008},
11675 		{ 0x00004000, 0x00400},
11676 		{ 0x00006000, 0x00400},
11677 		{ 0x00008000, 0x01000},
11678 		{ 0x00010000, 0x01000},
11679 		{ 0xffffffff, 0x00000}
11680 	}, mem_tbl_5717[] = {
11681 		{ 0x00000200, 0x00008},
11682 		{ 0x00010000, 0x0a000},
11683 		{ 0x00020000, 0x13c00},
11684 		{ 0xffffffff, 0x00000}
11685 	}, mem_tbl_57765[] = {
11686 		{ 0x00000200, 0x00008},
11687 		{ 0x00004000, 0x00800},
11688 		{ 0x00006000, 0x09800},
11689 		{ 0x00010000, 0x0a000},
11690 		{ 0xffffffff, 0x00000}
11691 	};
11692 	struct mem_entry *mem_tbl;
11693 	int err = 0;
11694 	int i;
11695 
11696 	if (tg3_flag(tp, 5717_PLUS))
11697 		mem_tbl = mem_tbl_5717;
11698 	else if (tg3_flag(tp, 57765_CLASS))
11699 		mem_tbl = mem_tbl_57765;
11700 	else if (tg3_flag(tp, 5755_PLUS))
11701 		mem_tbl = mem_tbl_5755;
11702 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11703 		mem_tbl = mem_tbl_5906;
11704 	else if (tg3_flag(tp, 5705_PLUS))
11705 		mem_tbl = mem_tbl_5705;
11706 	else
11707 		mem_tbl = mem_tbl_570x;
11708 
11709 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11710 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11711 		if (err)
11712 			break;
11713 	}
11714 
11715 	return err;
11716 }
11717 
11718 #define TG3_TSO_MSS		500
11719 
11720 #define TG3_TSO_IP_HDR_LEN	20
11721 #define TG3_TSO_TCP_HDR_LEN	20
11722 #define TG3_TSO_TCP_OPT_LEN	12
11723 
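/* Canned Ethernet type + IPv4 + TCP header used as the template for
 * TSO loopback packets: ethertype 0x0800, a 20-byte IP header with
 * DF set from 10.0.0.1 to 10.0.0.2, and a 32-byte TCP header (ACK
 * set) carrying NOP/NOP/timestamp options.
 */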
11724 static const u8 tg3_tso_header[] = {
11725 0x08, 0x00,
11726 0x45, 0x00, 0x00, 0x00,
11727 0x00, 0x00, 0x40, 0x00,
11728 0x40, 0x06, 0x00, 0x00,
11729 0x0a, 0x00, 0x00, 0x01,
11730 0x0a, 0x00, 0x00, 0x02,
11731 0x0d, 0x00, 0xe0, 0x00,
11732 0x00, 0x00, 0x01, 0x00,
11733 0x00, 0x00, 0x02, 0x00,
11734 0x80, 0x10, 0x10, 0x00,
11735 0x14, 0x09, 0x00, 0x00,
11736 0x01, 0x01, 0x08, 0x0a,
11737 0x11, 0x11, 0x11, 0x11,
11738 0x11, 0x11, 0x11, 0x11,
11739 };
11740 
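/* Transmit a single test frame (or a TSO burst) and verify that it
 * comes back on the receive side.  The frame is built with a
 * recognizable payload, DMA-mapped, and posted to the tx ring; the
 * host-coalescing block is then kicked and the tx consumer and rx
 * producer indices are polled for up to ~350 usec.  Every returned
 * descriptor is checked for errors, ring placement, length, and a
 * byte-for-byte payload match.
 */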
11741 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11742 {
11743 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11744 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11745 	u32 budget;
11746 	struct sk_buff *skb;
11747 	u8 *tx_data, *rx_data;
11748 	dma_addr_t map;
11749 	int num_pkts, tx_len, rx_len, i, err;
11750 	struct tg3_rx_buffer_desc *desc;
11751 	struct tg3_napi *tnapi, *rnapi;
11752 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11753 
11754 	tnapi = &tp->napi[0];
11755 	rnapi = &tp->napi[0];
11756 	if (tp->irq_cnt > 1) {
11757 		if (tg3_flag(tp, ENABLE_RSS))
11758 			rnapi = &tp->napi[1];
11759 		if (tg3_flag(tp, ENABLE_TSS))
11760 			tnapi = &tp->napi[1];
11761 	}
11762 	coal_now = tnapi->coal_now | rnapi->coal_now;
11763 
11764 	err = -EIO;
11765 
11766 	tx_len = pktsz;
11767 	skb = netdev_alloc_skb(tp->dev, tx_len);
11768 	if (!skb)
11769 		return -ENOMEM;
11770 
11771 	tx_data = skb_put(skb, tx_len);
11772 	memcpy(tx_data, tp->dev->dev_addr, 6);
11773 	memset(tx_data + 6, 0x0, 8);
11774 
11775 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11776 
11777 	if (tso_loopback) {
11778 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11779 
11780 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11781 			      TG3_TSO_TCP_OPT_LEN;
11782 
11783 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11784 		       sizeof(tg3_tso_header));
11785 		mss = TG3_TSO_MSS;
11786 
11787 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11788 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11789 
11790 		/* Set the total length field in the IP header */
11791 		iph->tot_len = htons((u16)(mss + hdr_len));
11792 
11793 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11794 			      TXD_FLAG_CPU_POST_DMA);
11795 
11796 		if (tg3_flag(tp, HW_TSO_1) ||
11797 		    tg3_flag(tp, HW_TSO_2) ||
11798 		    tg3_flag(tp, HW_TSO_3)) {
11799 			struct tcphdr *th;
11800 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11801 			th = (struct tcphdr *)&tx_data[val];
11802 			th->check = 0;
11803 		} else
11804 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11805 
11806 		if (tg3_flag(tp, HW_TSO_3)) {
11807 			mss |= (hdr_len & 0xc) << 12;
11808 			if (hdr_len & 0x10)
11809 				base_flags |= 0x00000010;
11810 			base_flags |= (hdr_len & 0x3e0) << 5;
11811 		} else if (tg3_flag(tp, HW_TSO_2))
11812 			mss |= hdr_len << 9;
11813 		else if (tg3_flag(tp, HW_TSO_1) ||
11814 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11815 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11816 		} else {
11817 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11818 		}
11819 
11820 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11821 	} else {
11822 		num_pkts = 1;
11823 		data_off = ETH_HLEN;
11824 
11825 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11826 		    tx_len > VLAN_ETH_FRAME_LEN)
11827 			base_flags |= TXD_FLAG_JMB_PKT;
11828 	}
11829 
11830 	for (i = data_off; i < tx_len; i++)
11831 		tx_data[i] = (u8) (i & 0xff);
11832 
11833 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11834 	if (pci_dma_mapping_error(tp->pdev, map)) {
11835 		dev_kfree_skb(skb);
11836 		return -EIO;
11837 	}
11838 
11839 	val = tnapi->tx_prod;
11840 	tnapi->tx_buffers[val].skb = skb;
11841 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11842 
11843 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11844 	       rnapi->coal_now);
11845 
11846 	udelay(10);
11847 
11848 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11849 
11850 	budget = tg3_tx_avail(tnapi);
11851 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11852 			    base_flags | TXD_FLAG_END, mss, 0)) {
11853 		tnapi->tx_buffers[val].skb = NULL;
11854 		dev_kfree_skb(skb);
11855 		return -EIO;
11856 	}
11857 
11858 	tnapi->tx_prod++;
11859 
11860 	/* Sync BD data before updating mailbox */
11861 	wmb();
11862 
11863 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11864 	tr32_mailbox(tnapi->prodmbox);
11865 
11866 	udelay(10);
11867 
11868 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11869 	for (i = 0; i < 35; i++) {
11870 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11871 		       coal_now);
11872 
11873 		udelay(10);
11874 
11875 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11876 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11877 		if ((tx_idx == tnapi->tx_prod) &&
11878 		    (rx_idx == (rx_start_idx + num_pkts)))
11879 			break;
11880 	}
11881 
11882 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11883 	dev_kfree_skb(skb);
11884 
11885 	if (tx_idx != tnapi->tx_prod)
11886 		goto out;
11887 
11888 	if (rx_idx != rx_start_idx + num_pkts)
11889 		goto out;
11890 
11891 	val = data_off;
11892 	while (rx_idx != rx_start_idx) {
11893 		desc = &rnapi->rx_rcb[rx_start_idx++];
11894 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11895 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11896 
11897 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11898 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11899 			goto out;
11900 
11901 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11902 			 - ETH_FCS_LEN;
11903 
11904 		if (!tso_loopback) {
11905 			if (rx_len != tx_len)
11906 				goto out;
11907 
11908 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11909 				if (opaque_key != RXD_OPAQUE_RING_STD)
11910 					goto out;
11911 			} else {
11912 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11913 					goto out;
11914 			}
11915 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11916 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11917 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11918 			goto out;
11919 		}
11920 
11921 		if (opaque_key == RXD_OPAQUE_RING_STD) {
11922 			rx_data = tpr->rx_std_buffers[desc_idx].data;
11923 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11924 					     mapping);
11925 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11926 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11927 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11928 					     mapping);
11929 		} else
11930 			goto out;
11931 
11932 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11933 					    PCI_DMA_FROMDEVICE);
11934 
11935 		rx_data += TG3_RX_OFFSET(tp);
11936 		for (i = data_off; i < rx_len; i++, val++) {
11937 			if (*(rx_data + i) != (u8) (val & 0xff))
11938 				goto out;
11939 		}
11940 	}
11941 
11942 	err = 0;
11943 
11944 	/* tg3_free_rings will unmap and free the rx_data */
11945 out:
11946 	return err;
11947 }
11948 
11949 #define TG3_STD_LOOPBACK_FAILED		1
11950 #define TG3_JMB_LOOPBACK_FAILED		2
11951 #define TG3_TSO_LOOPBACK_FAILED		4
11952 #define TG3_LOOPBACK_FAILED \
11953 	(TG3_STD_LOOPBACK_FAILED | \
11954 	 TG3_JMB_LOOPBACK_FAILED | \
11955 	 TG3_TSO_LOOPBACK_FAILED)
11956 
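/* Run the loopback suite.  data[0] accumulates MAC loopback
 * failures, data[1] internal PHY loopback, and data[2] external
 * loopback (only when requested), each as a mask of the
 * TG3_*_LOOPBACK_FAILED bits.  EEE is temporarily masked off and,
 * with RSS, all rx traffic is steered to the first queue for the
 * duration.
 */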
11957 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11958 {
11959 	int err = -EIO;
11960 	u32 eee_cap;
11961 	u32 jmb_pkt_sz = 9000;
11962 
11963 	if (tp->dma_limit)
11964 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11965 
11966 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11967 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11968 
11969 	if (!netif_running(tp->dev)) {
11970 		data[0] = TG3_LOOPBACK_FAILED;
11971 		data[1] = TG3_LOOPBACK_FAILED;
11972 		if (do_extlpbk)
11973 			data[2] = TG3_LOOPBACK_FAILED;
11974 		goto done;
11975 	}
11976 
11977 	err = tg3_reset_hw(tp, 1);
11978 	if (err) {
11979 		data[0] = TG3_LOOPBACK_FAILED;
11980 		data[1] = TG3_LOOPBACK_FAILED;
11981 		if (do_extlpbk)
11982 			data[2] = TG3_LOOPBACK_FAILED;
11983 		goto done;
11984 	}
11985 
11986 	if (tg3_flag(tp, ENABLE_RSS)) {
11987 		int i;
11988 
11989 		/* Reroute all rx packets to the 1st queue */
11990 		for (i = MAC_RSS_INDIR_TBL_0;
11991 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11992 			tw32(i, 0x0);
11993 	}
11994 
11995 	/* HW errata - MAC loopback fails in some cases on 5780.
11996 	 * Normal traffic and PHY loopback are not affected by the
11997 	 * errata.  Also, the MAC loopback test is deprecated for
11998 	 * all newer ASIC revisions.
11999 	 */
12000 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12001 	    !tg3_flag(tp, CPMU_PRESENT)) {
12002 		tg3_mac_loopback(tp, true);
12003 
12004 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12005 			data[0] |= TG3_STD_LOOPBACK_FAILED;
12006 
12007 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12008 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12009 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
12010 
12011 		tg3_mac_loopback(tp, false);
12012 	}
12013 
12014 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12015 	    !tg3_flag(tp, USE_PHYLIB)) {
12016 		int i;
12017 
12018 		tg3_phy_lpbk_set(tp, 0, false);
12019 
12020 		/* Wait for link */
12021 		for (i = 0; i < 100; i++) {
12022 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12023 				break;
12024 			mdelay(1);
12025 		}
12026 
12027 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12028 			data[1] |= TG3_STD_LOOPBACK_FAILED;
12029 		if (tg3_flag(tp, TSO_CAPABLE) &&
12030 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12031 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
12032 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12033 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12034 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
12035 
12036 		if (do_extlpbk) {
12037 			tg3_phy_lpbk_set(tp, 0, true);
12038 
12039 			/* All link indications report up, but the hardware
12040 			 * isn't really ready for about 20 msec.  Double it
12041 			 * to be sure.
12042 			 */
12043 			mdelay(40);
12044 
12045 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12046 				data[2] |= TG3_STD_LOOPBACK_FAILED;
12047 			if (tg3_flag(tp, TSO_CAPABLE) &&
12048 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12049 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
12050 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12051 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12052 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
12053 		}
12054 
12055 		/* Re-enable gphy autopowerdown. */
12056 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12057 			tg3_phy_toggle_apd(tp, true);
12058 	}
12059 
12060 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12061 
12062 done:
12063 	tp->phy_flags |= eee_cap;
12064 
12065 	return err;
12066 }
12067 
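/* ethtool self-test entry point.  Offline tests halt the chip and
 * the on-board CPUs before poking at registers and memory, then
 * restart the hardware.  Result slots: data[0] NVRAM, data[1] link,
 * data[2] registers, data[3] memory, data[4]-data[6] loopback,
 * data[7] interrupt.
 */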
12068 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12069 			  u64 *data)
12070 {
12071 	struct tg3 *tp = netdev_priv(dev);
12072 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12073 
12074 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12075 	    tg3_power_up(tp)) {
12076 		etest->flags |= ETH_TEST_FL_FAILED;
12077 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12078 		return;
12079 	}
12080 
12081 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12082 
12083 	if (tg3_test_nvram(tp) != 0) {
12084 		etest->flags |= ETH_TEST_FL_FAILED;
12085 		data[0] = 1;
12086 	}
12087 	if (!doextlpbk && tg3_test_link(tp)) {
12088 		etest->flags |= ETH_TEST_FL_FAILED;
12089 		data[1] = 1;
12090 	}
12091 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12092 		int err, err2 = 0, irq_sync = 0;
12093 
12094 		if (netif_running(dev)) {
12095 			tg3_phy_stop(tp);
12096 			tg3_netif_stop(tp);
12097 			irq_sync = 1;
12098 		}
12099 
12100 		tg3_full_lock(tp, irq_sync);
12101 
12102 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12103 		err = tg3_nvram_lock(tp);
12104 		tg3_halt_cpu(tp, RX_CPU_BASE);
12105 		if (!tg3_flag(tp, 5705_PLUS))
12106 			tg3_halt_cpu(tp, TX_CPU_BASE);
12107 		if (!err)
12108 			tg3_nvram_unlock(tp);
12109 
12110 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12111 			tg3_phy_reset(tp);
12112 
12113 		if (tg3_test_registers(tp) != 0) {
12114 			etest->flags |= ETH_TEST_FL_FAILED;
12115 			data[2] = 1;
12116 		}
12117 
12118 		if (tg3_test_memory(tp) != 0) {
12119 			etest->flags |= ETH_TEST_FL_FAILED;
12120 			data[3] = 1;
12121 		}
12122 
12123 		if (doextlpbk)
12124 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12125 
12126 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
12127 			etest->flags |= ETH_TEST_FL_FAILED;
12128 
12129 		tg3_full_unlock(tp);
12130 
12131 		if (tg3_test_interrupt(tp) != 0) {
12132 			etest->flags |= ETH_TEST_FL_FAILED;
12133 			data[7] = 1;
12134 		}
12135 
12136 		tg3_full_lock(tp, 0);
12137 
12138 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12139 		if (netif_running(dev)) {
12140 			tg3_flag_set(tp, INIT_COMPLETE);
12141 			err2 = tg3_restart_hw(tp, 1);
12142 			if (!err2)
12143 				tg3_netif_start(tp);
12144 		}
12145 
12146 		tg3_full_unlock(tp);
12147 
12148 		if (irq_sync && !err2)
12149 			tg3_phy_start(tp);
12150 	}
12151 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12152 		tg3_power_down(tp);
12153 
12154 }
12155 
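/* MII ioctl handler.  With phylib the request is forwarded to the
 * attached PHY driver; otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * are serviced directly via tg3_readphy()/tg3_writephy() under
 * tp->lock.  Serdes devices have no MII PHY to talk to and fall
 * through to -EOPNOTSUPP.
 */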
12156 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12157 {
12158 	struct mii_ioctl_data *data = if_mii(ifr);
12159 	struct tg3 *tp = netdev_priv(dev);
12160 	int err;
12161 
12162 	if (tg3_flag(tp, USE_PHYLIB)) {
12163 		struct phy_device *phydev;
12164 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12165 			return -EAGAIN;
12166 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12167 		return phy_mii_ioctl(phydev, ifr, cmd);
12168 	}
12169 
12170 	switch (cmd) {
12171 	case SIOCGMIIPHY:
12172 		data->phy_id = tp->phy_addr;
12173 
12174 		/* fall through */
12175 	case SIOCGMIIREG: {
12176 		u32 mii_regval;
12177 
12178 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12179 			break;			/* We have no PHY */
12180 
12181 		if (!netif_running(dev))
12182 			return -EAGAIN;
12183 
12184 		spin_lock_bh(&tp->lock);
12185 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12186 		spin_unlock_bh(&tp->lock);
12187 
12188 		data->val_out = mii_regval;
12189 
12190 		return err;
12191 	}
12192 
12193 	case SIOCSMIIREG:
12194 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12195 			break;			/* We have no PHY */
12196 
12197 		if (!netif_running(dev))
12198 			return -EAGAIN;
12199 
12200 		spin_lock_bh(&tp->lock);
12201 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12202 		spin_unlock_bh(&tp->lock);
12203 
12204 		return err;
12205 
12206 	default:
12207 		/* do nothing */
12208 		break;
12209 	}
12210 	return -EOPNOTSUPP;
12211 }
12212 
12213 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12214 {
12215 	struct tg3 *tp = netdev_priv(dev);
12216 
12217 	memcpy(ec, &tp->coal, sizeof(*ec));
12218 	return 0;
12219 }
12220 
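/* Validate and apply interrupt-coalescing parameters.  Pre-5705
 * parts also support the "during interrupt" and statistics-block
 * coalescing knobs; on 5705 and newer those limits stay zero, so
 * nonzero requests for them are rejected.  At least one of the
 * usecs/max-frames pair must be nonzero in each direction, or no
 * interrupts would ever be generated.
 */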
12221 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12222 {
12223 	struct tg3 *tp = netdev_priv(dev);
12224 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12225 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12226 
12227 	if (!tg3_flag(tp, 5705_PLUS)) {
12228 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12229 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12230 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12231 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12232 	}
12233 
12234 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12235 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12236 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12237 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12238 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12239 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12240 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12241 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12242 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12243 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12244 		return -EINVAL;
12245 
12246 	/* No rx interrupts will be generated if both are zero */
12247 	if ((ec->rx_coalesce_usecs == 0) &&
12248 	    (ec->rx_max_coalesced_frames == 0))
12249 		return -EINVAL;
12250 
12251 	/* No tx interrupts will be generated if both are zero */
12252 	if ((ec->tx_coalesce_usecs == 0) &&
12253 	    (ec->tx_max_coalesced_frames == 0))
12254 		return -EINVAL;
12255 
12256 	/* Only copy relevant parameters, ignore all others. */
12257 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12258 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12259 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12260 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12261 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12262 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12263 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12264 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12265 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12266 
12267 	if (netif_running(dev)) {
12268 		tg3_full_lock(tp, 0);
12269 		__tg3_set_coalesce(tp, &tp->coal);
12270 		tg3_full_unlock(tp);
12271 	}
12272 	return 0;
12273 }
12274 
12275 static const struct ethtool_ops tg3_ethtool_ops = {
12276 	.get_settings		= tg3_get_settings,
12277 	.set_settings		= tg3_set_settings,
12278 	.get_drvinfo		= tg3_get_drvinfo,
12279 	.get_regs_len		= tg3_get_regs_len,
12280 	.get_regs		= tg3_get_regs,
12281 	.get_wol		= tg3_get_wol,
12282 	.set_wol		= tg3_set_wol,
12283 	.get_msglevel		= tg3_get_msglevel,
12284 	.set_msglevel		= tg3_set_msglevel,
12285 	.nway_reset		= tg3_nway_reset,
12286 	.get_link		= ethtool_op_get_link,
12287 	.get_eeprom_len		= tg3_get_eeprom_len,
12288 	.get_eeprom		= tg3_get_eeprom,
12289 	.set_eeprom		= tg3_set_eeprom,
12290 	.get_ringparam		= tg3_get_ringparam,
12291 	.set_ringparam		= tg3_set_ringparam,
12292 	.get_pauseparam		= tg3_get_pauseparam,
12293 	.set_pauseparam		= tg3_set_pauseparam,
12294 	.self_test		= tg3_self_test,
12295 	.get_strings		= tg3_get_strings,
12296 	.set_phys_id		= tg3_set_phys_id,
12297 	.get_ethtool_stats	= tg3_get_ethtool_stats,
12298 	.get_coalesce		= tg3_get_coalesce,
12299 	.set_coalesce		= tg3_set_coalesce,
12300 	.get_sset_count		= tg3_get_sset_count,
12301 	.get_rxnfc		= tg3_get_rxnfc,
12302 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12303 	.get_rxfh_indir		= tg3_get_rxfh_indir,
12304 	.set_rxfh_indir		= tg3_set_rxfh_indir,
12305 	.get_ts_info		= ethtool_op_get_ts_info,
12306 };
12307 
12308 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12309 						struct rtnl_link_stats64 *stats)
12310 {
12311 	struct tg3 *tp = netdev_priv(dev);
12312 
12313 	if (!tp->hw_stats)
12314 		return &tp->net_stats_prev;
12315 
12316 	spin_lock_bh(&tp->lock);
12317 	tg3_get_nstats(tp, stats);
12318 	spin_unlock_bh(&tp->lock);
12319 
12320 	return stats;
12321 }
12322 
12323 static void tg3_set_rx_mode(struct net_device *dev)
12324 {
12325 	struct tg3 *tp = netdev_priv(dev);
12326 
12327 	if (!netif_running(dev))
12328 		return;
12329 
12330 	tg3_full_lock(tp, 0);
12331 	__tg3_set_rx_mode(dev);
12332 	tg3_full_unlock(tp);
12333 }
12334 
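/* Record a new MTU and flip the related mode flags.  5780-class
 * devices do not support TSO together with jumbo frames, so
 * TSO_CAPABLE is toggled (with netdev_update_features()
 * re-evaluating the feature set); all other chips simply enable or
 * disable the jumbo ring.
 */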
12335 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12336 			       int new_mtu)
12337 {
12338 	dev->mtu = new_mtu;
12339 
12340 	if (new_mtu > ETH_DATA_LEN) {
12341 		if (tg3_flag(tp, 5780_CLASS)) {
12342 			netdev_update_features(dev);
12343 			tg3_flag_clear(tp, TSO_CAPABLE);
12344 		} else {
12345 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
12346 		}
12347 	} else {
12348 		if (tg3_flag(tp, 5780_CLASS)) {
12349 			tg3_flag_set(tp, TSO_CAPABLE);
12350 			netdev_update_features(dev);
12351 		}
12352 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12353 	}
12354 }
12355 
12356 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12357 {
12358 	struct tg3 *tp = netdev_priv(dev);
12359 	int err, reset_phy = 0;
12360 
12361 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12362 		return -EINVAL;
12363 
12364 	if (!netif_running(dev)) {
12365 		/* We'll just catch it later when the
12366 		 * device is brought up.
12367 		 */
12368 		tg3_set_mtu(dev, tp, new_mtu);
12369 		return 0;
12370 	}
12371 
12372 	tg3_phy_stop(tp);
12373 
12374 	tg3_netif_stop(tp);
12375 
12376 	tg3_full_lock(tp, 1);
12377 
12378 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12379 
12380 	tg3_set_mtu(dev, tp, new_mtu);
12381 
12382 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
12383 	 * breaks all requests to 256 bytes.
12384 	 */
12385 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12386 		reset_phy = 1;
12387 
12388 	err = tg3_restart_hw(tp, reset_phy);
12389 
12390 	if (!err)
12391 		tg3_netif_start(tp);
12392 
12393 	tg3_full_unlock(tp);
12394 
12395 	if (!err)
12396 		tg3_phy_start(tp);
12397 
12398 	return err;
12399 }
12400 
12401 static const struct net_device_ops tg3_netdev_ops = {
12402 	.ndo_open		= tg3_open,
12403 	.ndo_stop		= tg3_close,
12404 	.ndo_start_xmit		= tg3_start_xmit,
12405 	.ndo_get_stats64	= tg3_get_stats64,
12406 	.ndo_validate_addr	= eth_validate_addr,
12407 	.ndo_set_rx_mode	= tg3_set_rx_mode,
12408 	.ndo_set_mac_address	= tg3_set_mac_addr,
12409 	.ndo_do_ioctl		= tg3_ioctl,
12410 	.ndo_tx_timeout		= tg3_tx_timeout,
12411 	.ndo_change_mtu		= tg3_change_mtu,
12412 	.ndo_fix_features	= tg3_fix_features,
12413 	.ndo_set_features	= tg3_set_features,
12414 #ifdef CONFIG_NET_POLL_CONTROLLER
12415 	.ndo_poll_controller	= tg3_poll_controller,
12416 #endif
12417 };
12418 
12419 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12420 {
12421 	u32 cursize, val, magic;
12422 
12423 	tp->nvram_size = EEPROM_CHIP_SIZE;
12424 
12425 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12426 		return;
12427 
12428 	if ((magic != TG3_EEPROM_MAGIC) &&
12429 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12430 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12431 		return;
12432 
12433 	/*
12434 	 * Size the chip by reading offsets at increasing powers of two.
12435 	 * When we encounter our validation signature, we know the addressing
12436 	 * has wrapped around, and thus have our chip size.
12437 	 */
12438 	cursize = 0x10;
12439 
12440 	while (cursize < tp->nvram_size) {
12441 		if (tg3_nvram_read(tp, cursize, &val) != 0)
12442 			return;
12443 
12444 		if (val == magic)
12445 			break;
12446 
12447 		cursize <<= 1;
12448 	}
12449 
12450 	tp->nvram_size = cursize;
12451 }
12452 
12453 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12454 {
12455 	u32 val;
12456 
12457 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12458 		return;
12459 
12460 	/* Selfboot format */
12461 	if (val != TG3_EEPROM_MAGIC) {
12462 		tg3_get_eeprom_size(tp);
12463 		return;
12464 	}
12465 
12466 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12467 		if (val != 0) {
12468 			/* This is confusing.  We want to operate on the
12469 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12470 			 * call will read from NVRAM and byteswap the data
12471 			 * according to the byteswapping settings for all
12472 			 * other register accesses.  This ensures the data we
12473 			 * want will always reside in the lower 16-bits.
12474 			 * However, the data in NVRAM is in LE format, which
12475 			 * means the data from the NVRAM read will always be
12476 			 * opposite the endianness of the CPU.  The 16-bit
12477 			 * byteswap then brings the data to CPU endianness.
12478 			 */
12479 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12480 			return;
12481 		}
12482 	}
12483 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12484 }
12485 
12486 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12487 {
12488 	u32 nvcfg1;
12489 
12490 	nvcfg1 = tr32(NVRAM_CFG1);
12491 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12492 		tg3_flag_set(tp, FLASH);
12493 	} else {
12494 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12495 		tw32(NVRAM_CFG1, nvcfg1);
12496 	}
12497 
12498 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12499 	    tg3_flag(tp, 5780_CLASS)) {
12500 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12501 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12502 			tp->nvram_jedecnum = JEDEC_ATMEL;
12503 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12504 			tg3_flag_set(tp, NVRAM_BUFFERED);
12505 			break;
12506 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12507 			tp->nvram_jedecnum = JEDEC_ATMEL;
12508 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12509 			break;
12510 		case FLASH_VENDOR_ATMEL_EEPROM:
12511 			tp->nvram_jedecnum = JEDEC_ATMEL;
12512 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12513 			tg3_flag_set(tp, NVRAM_BUFFERED);
12514 			break;
12515 		case FLASH_VENDOR_ST:
12516 			tp->nvram_jedecnum = JEDEC_ST;
12517 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12518 			tg3_flag_set(tp, NVRAM_BUFFERED);
12519 			break;
12520 		case FLASH_VENDOR_SAIFUN:
12521 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12522 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12523 			break;
12524 		case FLASH_VENDOR_SST_SMALL:
12525 		case FLASH_VENDOR_SST_LARGE:
12526 			tp->nvram_jedecnum = JEDEC_SST;
12527 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12528 			break;
12529 		}
12530 	} else {
12531 		tp->nvram_jedecnum = JEDEC_ATMEL;
12532 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12533 		tg3_flag_set(tp, NVRAM_BUFFERED);
12534 	}
12535 }
12536 
12537 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12538 {
12539 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12540 	case FLASH_5752PAGE_SIZE_256:
12541 		tp->nvram_pagesize = 256;
12542 		break;
12543 	case FLASH_5752PAGE_SIZE_512:
12544 		tp->nvram_pagesize = 512;
12545 		break;
12546 	case FLASH_5752PAGE_SIZE_1K:
12547 		tp->nvram_pagesize = 1024;
12548 		break;
12549 	case FLASH_5752PAGE_SIZE_2K:
12550 		tp->nvram_pagesize = 2048;
12551 		break;
12552 	case FLASH_5752PAGE_SIZE_4K:
12553 		tp->nvram_pagesize = 4096;
12554 		break;
12555 	case FLASH_5752PAGE_SIZE_264:
12556 		tp->nvram_pagesize = 264;
12557 		break;
12558 	case FLASH_5752PAGE_SIZE_528:
12559 		tp->nvram_pagesize = 528;
12560 		break;
12561 	}
12562 }
12563 
12564 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12565 {
12566 	u32 nvcfg1;
12567 
12568 	nvcfg1 = tr32(NVRAM_CFG1);
12569 
12570 	/* NVRAM protection for TPM */
12571 	if (nvcfg1 & (1 << 27))
12572 		tg3_flag_set(tp, PROTECTED_NVRAM);
12573 
12574 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12575 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12576 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12577 		tp->nvram_jedecnum = JEDEC_ATMEL;
12578 		tg3_flag_set(tp, NVRAM_BUFFERED);
12579 		break;
12580 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12581 		tp->nvram_jedecnum = JEDEC_ATMEL;
12582 		tg3_flag_set(tp, NVRAM_BUFFERED);
12583 		tg3_flag_set(tp, FLASH);
12584 		break;
12585 	case FLASH_5752VENDOR_ST_M45PE10:
12586 	case FLASH_5752VENDOR_ST_M45PE20:
12587 	case FLASH_5752VENDOR_ST_M45PE40:
12588 		tp->nvram_jedecnum = JEDEC_ST;
12589 		tg3_flag_set(tp, NVRAM_BUFFERED);
12590 		tg3_flag_set(tp, FLASH);
12591 		break;
12592 	}
12593 
12594 	if (tg3_flag(tp, FLASH)) {
12595 		tg3_nvram_get_pagesize(tp, nvcfg1);
12596 	} else {
12597 		/* For EEPROMs, set the pagesize to the maximum EEPROM size */
12598 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12599 
12600 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12601 		tw32(NVRAM_CFG1, nvcfg1);
12602 	}
12603 }
12604 
12605 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12606 {
12607 	u32 nvcfg1, protect = 0;
12608 
12609 	nvcfg1 = tr32(NVRAM_CFG1);
12610 
12611 	/* NVRAM protection for TPM */
12612 	if (nvcfg1 & (1 << 27)) {
12613 		tg3_flag_set(tp, PROTECTED_NVRAM);
12614 		protect = 1;
12615 	}
12616 
12617 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12618 	switch (nvcfg1) {
12619 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12620 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12621 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12622 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12623 		tp->nvram_jedecnum = JEDEC_ATMEL;
12624 		tg3_flag_set(tp, NVRAM_BUFFERED);
12625 		tg3_flag_set(tp, FLASH);
12626 		tp->nvram_pagesize = 264;
12627 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12628 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12629 			tp->nvram_size = (protect ? 0x3e200 :
12630 					  TG3_NVRAM_SIZE_512KB);
12631 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12632 			tp->nvram_size = (protect ? 0x1f200 :
12633 					  TG3_NVRAM_SIZE_256KB);
12634 		else
12635 			tp->nvram_size = (protect ? 0x1f200 :
12636 					  TG3_NVRAM_SIZE_128KB);
12637 		break;
12638 	case FLASH_5752VENDOR_ST_M45PE10:
12639 	case FLASH_5752VENDOR_ST_M45PE20:
12640 	case FLASH_5752VENDOR_ST_M45PE40:
12641 		tp->nvram_jedecnum = JEDEC_ST;
12642 		tg3_flag_set(tp, NVRAM_BUFFERED);
12643 		tg3_flag_set(tp, FLASH);
12644 		tp->nvram_pagesize = 256;
12645 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12646 			tp->nvram_size = (protect ?
12647 					  TG3_NVRAM_SIZE_64KB :
12648 					  TG3_NVRAM_SIZE_128KB);
12649 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12650 			tp->nvram_size = (protect ?
12651 					  TG3_NVRAM_SIZE_64KB :
12652 					  TG3_NVRAM_SIZE_256KB);
12653 		else
12654 			tp->nvram_size = (protect ?
12655 					  TG3_NVRAM_SIZE_128KB :
12656 					  TG3_NVRAM_SIZE_512KB);
12657 		break;
12658 	}
12659 }
12660 
12661 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12662 {
12663 	u32 nvcfg1;
12664 
12665 	nvcfg1 = tr32(NVRAM_CFG1);
12666 
12667 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12668 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12669 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12670 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12671 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12672 		tp->nvram_jedecnum = JEDEC_ATMEL;
12673 		tg3_flag_set(tp, NVRAM_BUFFERED);
12674 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12675 
12676 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12677 		tw32(NVRAM_CFG1, nvcfg1);
12678 		break;
12679 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12680 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12681 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12682 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12683 		tp->nvram_jedecnum = JEDEC_ATMEL;
12684 		tg3_flag_set(tp, NVRAM_BUFFERED);
12685 		tg3_flag_set(tp, FLASH);
12686 		tp->nvram_pagesize = 264;
12687 		break;
12688 	case FLASH_5752VENDOR_ST_M45PE10:
12689 	case FLASH_5752VENDOR_ST_M45PE20:
12690 	case FLASH_5752VENDOR_ST_M45PE40:
12691 		tp->nvram_jedecnum = JEDEC_ST;
12692 		tg3_flag_set(tp, NVRAM_BUFFERED);
12693 		tg3_flag_set(tp, FLASH);
12694 		tp->nvram_pagesize = 256;
12695 		break;
12696 	}
12697 }
12698 
12699 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12700 {
12701 	u32 nvcfg1, protect = 0;
12702 
12703 	nvcfg1 = tr32(NVRAM_CFG1);
12704 
12705 	/* NVRAM protection for TPM */
12706 	if (nvcfg1 & (1 << 27)) {
12707 		tg3_flag_set(tp, PROTECTED_NVRAM);
12708 		protect = 1;
12709 	}
12710 
12711 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12712 	switch (nvcfg1) {
12713 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12714 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12715 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12716 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12717 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12718 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12719 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12720 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12721 		tp->nvram_jedecnum = JEDEC_ATMEL;
12722 		tg3_flag_set(tp, NVRAM_BUFFERED);
12723 		tg3_flag_set(tp, FLASH);
12724 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12725 		tp->nvram_pagesize = 256;
12726 		break;
12727 	case FLASH_5761VENDOR_ST_A_M45PE20:
12728 	case FLASH_5761VENDOR_ST_A_M45PE40:
12729 	case FLASH_5761VENDOR_ST_A_M45PE80:
12730 	case FLASH_5761VENDOR_ST_A_M45PE16:
12731 	case FLASH_5761VENDOR_ST_M_M45PE20:
12732 	case FLASH_5761VENDOR_ST_M_M45PE40:
12733 	case FLASH_5761VENDOR_ST_M_M45PE80:
12734 	case FLASH_5761VENDOR_ST_M_M45PE16:
12735 		tp->nvram_jedecnum = JEDEC_ST;
12736 		tg3_flag_set(tp, NVRAM_BUFFERED);
12737 		tg3_flag_set(tp, FLASH);
12738 		tp->nvram_pagesize = 256;
12739 		break;
12740 	}
12741 
12742 	if (protect) {
12743 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12744 	} else {
12745 		switch (nvcfg1) {
12746 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12747 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12748 		case FLASH_5761VENDOR_ST_A_M45PE16:
12749 		case FLASH_5761VENDOR_ST_M_M45PE16:
12750 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12751 			break;
12752 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12753 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12754 		case FLASH_5761VENDOR_ST_A_M45PE80:
12755 		case FLASH_5761VENDOR_ST_M_M45PE80:
12756 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12757 			break;
12758 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12759 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12760 		case FLASH_5761VENDOR_ST_A_M45PE40:
12761 		case FLASH_5761VENDOR_ST_M_M45PE40:
12762 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12763 			break;
12764 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12765 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12766 		case FLASH_5761VENDOR_ST_A_M45PE20:
12767 		case FLASH_5761VENDOR_ST_M_M45PE20:
12768 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12769 			break;
12770 		}
12771 	}
12772 }
12773 
12774 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12775 {
12776 	tp->nvram_jedecnum = JEDEC_ATMEL;
12777 	tg3_flag_set(tp, NVRAM_BUFFERED);
12778 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12779 }
12780 
12781 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12782 {
12783 	u32 nvcfg1;
12784 
12785 	nvcfg1 = tr32(NVRAM_CFG1);
12786 
12787 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12788 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12789 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12790 		tp->nvram_jedecnum = JEDEC_ATMEL;
12791 		tg3_flag_set(tp, NVRAM_BUFFERED);
12792 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12793 
12794 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12795 		tw32(NVRAM_CFG1, nvcfg1);
12796 		return;
12797 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12798 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12799 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12800 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12801 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12802 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12803 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12804 		tp->nvram_jedecnum = JEDEC_ATMEL;
12805 		tg3_flag_set(tp, NVRAM_BUFFERED);
12806 		tg3_flag_set(tp, FLASH);
12807 
12808 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12809 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12810 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12811 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12812 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12813 			break;
12814 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12815 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12816 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12817 			break;
12818 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12819 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12820 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12821 			break;
12822 		}
12823 		break;
12824 	case FLASH_5752VENDOR_ST_M45PE10:
12825 	case FLASH_5752VENDOR_ST_M45PE20:
12826 	case FLASH_5752VENDOR_ST_M45PE40:
12827 		tp->nvram_jedecnum = JEDEC_ST;
12828 		tg3_flag_set(tp, NVRAM_BUFFERED);
12829 		tg3_flag_set(tp, FLASH);
12830 
12831 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12832 		case FLASH_5752VENDOR_ST_M45PE10:
12833 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12834 			break;
12835 		case FLASH_5752VENDOR_ST_M45PE20:
12836 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12837 			break;
12838 		case FLASH_5752VENDOR_ST_M45PE40:
12839 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12840 			break;
12841 		}
12842 		break;
12843 	default:
12844 		tg3_flag_set(tp, NO_NVRAM);
12845 		return;
12846 	}
12847 
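	/* Only the 264- and 528-byte DataFlash page geometries use the
	 * NVRAM address-translation scheme; any other page size is
	 * addressed linearly.
	 */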
12848 	tg3_nvram_get_pagesize(tp, nvcfg1);
12849 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12850 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12851 }
12852 
12854 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12855 {
12856 	u32 nvcfg1;
12857 
12858 	nvcfg1 = tr32(NVRAM_CFG1);
12859 
12860 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12861 	case FLASH_5717VENDOR_ATMEL_EEPROM:
12862 	case FLASH_5717VENDOR_MICRO_EEPROM:
12863 		tp->nvram_jedecnum = JEDEC_ATMEL;
12864 		tg3_flag_set(tp, NVRAM_BUFFERED);
12865 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12866 
12867 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12868 		tw32(NVRAM_CFG1, nvcfg1);
12869 		return;
12870 	case FLASH_5717VENDOR_ATMEL_MDB011D:
12871 	case FLASH_5717VENDOR_ATMEL_ADB011B:
12872 	case FLASH_5717VENDOR_ATMEL_ADB011D:
12873 	case FLASH_5717VENDOR_ATMEL_MDB021D:
12874 	case FLASH_5717VENDOR_ATMEL_ADB021B:
12875 	case FLASH_5717VENDOR_ATMEL_ADB021D:
12876 	case FLASH_5717VENDOR_ATMEL_45USPT:
12877 		tp->nvram_jedecnum = JEDEC_ATMEL;
12878 		tg3_flag_set(tp, NVRAM_BUFFERED);
12879 		tg3_flag_set(tp, FLASH);
12880 
12881 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12882 		case FLASH_5717VENDOR_ATMEL_MDB021D:
12883 			/* Detect size with tg3_nvram_get_size() */
12884 			break;
12885 		case FLASH_5717VENDOR_ATMEL_ADB021B:
12886 		case FLASH_5717VENDOR_ATMEL_ADB021D:
12887 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12888 			break;
12889 		default:
12890 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12891 			break;
12892 		}
12893 		break;
12894 	case FLASH_5717VENDOR_ST_M_M25PE10:
12895 	case FLASH_5717VENDOR_ST_A_M25PE10:
12896 	case FLASH_5717VENDOR_ST_M_M45PE10:
12897 	case FLASH_5717VENDOR_ST_A_M45PE10:
12898 	case FLASH_5717VENDOR_ST_M_M25PE20:
12899 	case FLASH_5717VENDOR_ST_A_M25PE20:
12900 	case FLASH_5717VENDOR_ST_M_M45PE20:
12901 	case FLASH_5717VENDOR_ST_A_M45PE20:
12902 	case FLASH_5717VENDOR_ST_25USPT:
12903 	case FLASH_5717VENDOR_ST_45USPT:
12904 		tp->nvram_jedecnum = JEDEC_ST;
12905 		tg3_flag_set(tp, NVRAM_BUFFERED);
12906 		tg3_flag_set(tp, FLASH);
12907 
12908 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12909 		case FLASH_5717VENDOR_ST_M_M25PE20:
12910 		case FLASH_5717VENDOR_ST_M_M45PE20:
12911 			/* Detect size with tg3_nvram_get_size() */
12912 			break;
12913 		case FLASH_5717VENDOR_ST_A_M25PE20:
12914 		case FLASH_5717VENDOR_ST_A_M45PE20:
12915 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12916 			break;
12917 		default:
12918 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12919 			break;
12920 		}
12921 		break;
12922 	default:
12923 		tg3_flag_set(tp, NO_NVRAM);
12924 		return;
12925 	}
12926 
12927 	tg3_nvram_get_pagesize(tp, nvcfg1);
12928 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12929 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12930 }
12931 
12932 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12933 {
12934 	u32 nvcfg1, nvmpinstrp;
12935 
12936 	nvcfg1 = tr32(NVRAM_CFG1);
12937 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12938 
12939 	switch (nvmpinstrp) {
12940 	case FLASH_5720_EEPROM_HD:
12941 	case FLASH_5720_EEPROM_LD:
12942 		tp->nvram_jedecnum = JEDEC_ATMEL;
12943 		tg3_flag_set(tp, NVRAM_BUFFERED);
12944 
12945 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12946 		tw32(NVRAM_CFG1, nvcfg1);
12947 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12948 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12949 		else
12950 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12951 		return;
12952 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12953 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12954 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12955 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12956 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12957 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12958 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12959 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12960 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12961 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12962 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12963 	case FLASH_5720VENDOR_ATMEL_45USPT:
12964 		tp->nvram_jedecnum = JEDEC_ATMEL;
12965 		tg3_flag_set(tp, NVRAM_BUFFERED);
12966 		tg3_flag_set(tp, FLASH);
12967 
12968 		switch (nvmpinstrp) {
12969 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12970 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12971 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12972 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12973 			break;
12974 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12975 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12976 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12977 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12978 			break;
12979 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12980 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12981 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12982 			break;
12983 		default:
12984 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12985 			break;
12986 		}
12987 		break;
12988 	case FLASH_5720VENDOR_M_ST_M25PE10:
12989 	case FLASH_5720VENDOR_M_ST_M45PE10:
12990 	case FLASH_5720VENDOR_A_ST_M25PE10:
12991 	case FLASH_5720VENDOR_A_ST_M45PE10:
12992 	case FLASH_5720VENDOR_M_ST_M25PE20:
12993 	case FLASH_5720VENDOR_M_ST_M45PE20:
12994 	case FLASH_5720VENDOR_A_ST_M25PE20:
12995 	case FLASH_5720VENDOR_A_ST_M45PE20:
12996 	case FLASH_5720VENDOR_M_ST_M25PE40:
12997 	case FLASH_5720VENDOR_M_ST_M45PE40:
12998 	case FLASH_5720VENDOR_A_ST_M25PE40:
12999 	case FLASH_5720VENDOR_A_ST_M45PE40:
13000 	case FLASH_5720VENDOR_M_ST_M25PE80:
13001 	case FLASH_5720VENDOR_M_ST_M45PE80:
13002 	case FLASH_5720VENDOR_A_ST_M25PE80:
13003 	case FLASH_5720VENDOR_A_ST_M45PE80:
13004 	case FLASH_5720VENDOR_ST_25USPT:
13005 	case FLASH_5720VENDOR_ST_45USPT:
13006 		tp->nvram_jedecnum = JEDEC_ST;
13007 		tg3_flag_set(tp, NVRAM_BUFFERED);
13008 		tg3_flag_set(tp, FLASH);
13009 
13010 		switch (nvmpinstrp) {
13011 		case FLASH_5720VENDOR_M_ST_M25PE20:
13012 		case FLASH_5720VENDOR_M_ST_M45PE20:
13013 		case FLASH_5720VENDOR_A_ST_M25PE20:
13014 		case FLASH_5720VENDOR_A_ST_M45PE20:
13015 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13016 			break;
13017 		case FLASH_5720VENDOR_M_ST_M25PE40:
13018 		case FLASH_5720VENDOR_M_ST_M45PE40:
13019 		case FLASH_5720VENDOR_A_ST_M25PE40:
13020 		case FLASH_5720VENDOR_A_ST_M45PE40:
13021 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13022 			break;
13023 		case FLASH_5720VENDOR_M_ST_M25PE80:
13024 		case FLASH_5720VENDOR_M_ST_M45PE80:
13025 		case FLASH_5720VENDOR_A_ST_M25PE80:
13026 		case FLASH_5720VENDOR_A_ST_M45PE80:
13027 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13028 			break;
13029 		default:
13030 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13031 			break;
13032 		}
13033 		break;
13034 	default:
13035 		tg3_flag_set(tp, NO_NVRAM);
13036 		return;
13037 	}
13038 
13039 	tg3_nvram_get_pagesize(tp, nvcfg1);
13040 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13041 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13042 }
13043 
13044 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13045 static void __devinit tg3_nvram_init(struct tg3 *tp)
13046 {
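	/* Reset the EEPROM state machine and program the default
	 * clock period before touching the interface.
	 */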
13047 	tw32_f(GRC_EEPROM_ADDR,
13048 	     (EEPROM_ADDR_FSM_RESET |
13049 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
13050 	       EEPROM_ADDR_CLKPERD_SHIFT)));
13051 
13052 	msleep(1);
13053 
13054 	/* Enable seeprom accesses. */
13055 	tw32_f(GRC_LOCAL_CTRL,
13056 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13057 	udelay(100);
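	/* Only the 5700 and 5701 lack the NVRAM interface; they are
	 * limited to the serial EEPROM handled in the else branch below.
	 */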
13058 
13059 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13060 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13061 		tg3_flag_set(tp, NVRAM);
13062 
13063 		if (tg3_nvram_lock(tp)) {
13064 			netdev_warn(tp->dev,
13065 				    "Cannot get nvram lock, %s failed\n",
13066 				    __func__);
13067 			return;
13068 		}
13069 		tg3_enable_nvram_access(tp);
13070 
13071 		tp->nvram_size = 0;
13072 
13073 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13074 			tg3_get_5752_nvram_info(tp);
13075 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13076 			tg3_get_5755_nvram_info(tp);
13077 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13078 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13079 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13080 			tg3_get_5787_nvram_info(tp);
13081 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13082 			tg3_get_5761_nvram_info(tp);
13083 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13084 			tg3_get_5906_nvram_info(tp);
13085 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13086 			 tg3_flag(tp, 57765_CLASS))
13087 			tg3_get_57780_nvram_info(tp);
13088 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13089 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13090 			tg3_get_5717_nvram_info(tp);
13091 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13092 			tg3_get_5720_nvram_info(tp);
13093 		else
13094 			tg3_get_nvram_info(tp);
13095 
13096 		if (tp->nvram_size == 0)
13097 			tg3_get_nvram_size(tp);
13098 
13099 		tg3_disable_nvram_access(tp);
13100 		tg3_nvram_unlock(tp);
13101 
13102 	} else {
13103 		tg3_flag_clear(tp, NVRAM);
13104 		tg3_flag_clear(tp, NVRAM_BUFFERED);
13105 
13106 		tg3_get_eeprom_size(tp);
13107 	}
13108 }
13109 
13110 struct subsys_tbl_ent {
13111 	u16 subsys_vendor, subsys_devid;
13112 	u32 phy_id;
13113 };
13114 
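/* Boards with no usable NVRAM signature fall back to this table, keyed
 * by PCI subsystem IDs.  A phy_id of 0 marks a serdes (fiber) board.
 */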
13115 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13116 	/* Broadcom boards. */
13117 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13118 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13119 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13120 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13121 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13122 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13123 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13124 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13125 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13126 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13127 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13128 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13129 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13130 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13131 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13132 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13133 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13134 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13135 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13136 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13137 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13138 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13139 
13140 	/* 3com boards. */
13141 	{ TG3PCI_SUBVENDOR_ID_3COM,
13142 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13143 	{ TG3PCI_SUBVENDOR_ID_3COM,
13144 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13145 	{ TG3PCI_SUBVENDOR_ID_3COM,
13146 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13147 	{ TG3PCI_SUBVENDOR_ID_3COM,
13148 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13149 	{ TG3PCI_SUBVENDOR_ID_3COM,
13150 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13151 
13152 	/* DELL boards. */
13153 	{ TG3PCI_SUBVENDOR_ID_DELL,
13154 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13155 	{ TG3PCI_SUBVENDOR_ID_DELL,
13156 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13157 	{ TG3PCI_SUBVENDOR_ID_DELL,
13158 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13159 	{ TG3PCI_SUBVENDOR_ID_DELL,
13160 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13161 
13162 	/* Compaq boards. */
13163 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13164 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13165 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13166 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13167 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13168 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13169 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13170 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13171 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13172 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13173 
13174 	/* IBM boards. */
13175 	{ TG3PCI_SUBVENDOR_ID_IBM,
13176 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13177 };
13178 
13179 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13180 {
13181 	int i;
13182 
13183 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13184 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13185 		     tp->pdev->subsystem_vendor) &&
13186 		    (subsys_id_to_phy_id[i].subsys_devid ==
13187 		     tp->pdev->subsystem_device))
13188 			return &subsys_id_to_phy_id[i];
13189 	}
13190 	return NULL;
13191 }
13192 
13193 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13194 {
13195 	u32 val;
13196 
13197 	tp->phy_id = TG3_PHY_ID_INVALID;
13198 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13199 
13200 	/* Assume an onboard device that is WOL-capable by default.  */
13201 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13202 	tg3_flag_set(tp, WOL_CAP);
13203 
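	/* The 5906 keeps its configuration in the VCPU shadow register
	 * rather than in the NIC SRAM block parsed below.
	 */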
13204 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13205 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13206 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13207 			tg3_flag_set(tp, IS_NIC);
13208 		}
13209 		val = tr32(VCPU_CFGSHDW);
13210 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13211 			tg3_flag_set(tp, ASPM_WORKAROUND);
13212 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13213 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13214 			tg3_flag_set(tp, WOL_ENABLE);
13215 			device_set_wakeup_enable(&tp->pdev->dev, true);
13216 		}
13217 		goto done;
13218 	}
13219 
13220 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13221 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13222 		u32 nic_cfg, led_cfg;
13223 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13224 		int eeprom_phy_serdes = 0;
13225 
13226 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13227 		tp->nic_sram_data_cfg = nic_cfg;
13228 
13229 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13230 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13231 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13232 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13233 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13234 		    (ver > 0) && (ver < 0x100))
13235 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13236 
13237 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13238 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13239 
13240 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13241 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13242 			eeprom_phy_serdes = 1;
13243 
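		/* NIC_SRAM_DATA_PHY_ID packs MII PHYSID1 into its upper
		 * 16 bits and PHYSID2 into its lower 16.  The shifts
		 * below rebuild the driver's internal phy_id layout,
		 * matching what tg3_phy_probe() assembles from direct
		 * MII reads:  bits 31:26 = PHYSID2[15:10],
		 * bits 25:10 = PHYSID1, bits 9:0 = PHYSID2[9:0].
		 */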
13244 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13245 		if (nic_phy_id != 0) {
13246 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13247 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13248 
13249 			eeprom_phy_id  = (id1 >> 16) << 10;
13250 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13251 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13252 		} else
13253 			eeprom_phy_id = 0;
13254 
13255 		tp->phy_id = eeprom_phy_id;
13256 		if (eeprom_phy_serdes) {
13257 			if (!tg3_flag(tp, 5705_PLUS))
13258 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13259 			else
13260 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13261 		}
13262 
13263 		if (tg3_flag(tp, 5750_PLUS))
13264 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13265 				    SHASTA_EXT_LED_MODE_MASK);
13266 		else
13267 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13268 
13269 		switch (led_cfg) {
13270 		default:
13271 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13272 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13273 			break;
13274 
13275 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13276 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13277 			break;
13278 
13279 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13280 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13281 
13282 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13283 			 * read, as some older 5700/5701 bootcode reports it.
13284 			 */
13285 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13286 			    ASIC_REV_5700 ||
13287 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13288 			    ASIC_REV_5701)
13289 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13290 
13291 			break;
13292 
13293 		case SHASTA_EXT_LED_SHARED:
13294 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13295 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13296 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13297 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13298 						 LED_CTRL_MODE_PHY_2);
13299 			break;
13300 
13301 		case SHASTA_EXT_LED_MAC:
13302 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13303 			break;
13304 
13305 		case SHASTA_EXT_LED_COMBO:
13306 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13307 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13308 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13309 						 LED_CTRL_MODE_PHY_2);
13310 			break;
13311 
13312 		}
13313 
13314 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13315 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13316 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13317 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13318 
13319 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13320 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13321 
13322 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13323 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13324 			if ((tp->pdev->subsystem_vendor ==
13325 			     PCI_VENDOR_ID_ARIMA) &&
13326 			    (tp->pdev->subsystem_device == 0x205a ||
13327 			     tp->pdev->subsystem_device == 0x2063))
13328 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13329 		} else {
13330 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13331 			tg3_flag_set(tp, IS_NIC);
13332 		}
13333 
13334 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13335 			tg3_flag_set(tp, ENABLE_ASF);
13336 			if (tg3_flag(tp, 5750_PLUS))
13337 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13338 		}
13339 
13340 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13341 		    tg3_flag(tp, 5750_PLUS))
13342 			tg3_flag_set(tp, ENABLE_APE);
13343 
13344 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13345 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13346 			tg3_flag_clear(tp, WOL_CAP);
13347 
13348 		if (tg3_flag(tp, WOL_CAP) &&
13349 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13350 			tg3_flag_set(tp, WOL_ENABLE);
13351 			device_set_wakeup_enable(&tp->pdev->dev, true);
13352 		}
13353 
13354 		if (cfg2 & (1 << 17))
13355 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13356 
13357 		/* serdes signal pre-emphasis in register 0x590 is set by
13358 		 * the bootcode if bit 18 is set */
13359 		if (cfg2 & (1 << 18))
13360 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13361 
13362 		if ((tg3_flag(tp, 57765_PLUS) ||
13363 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13364 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13365 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13366 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13367 
13368 		if (tg3_flag(tp, PCI_EXPRESS) &&
13369 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13370 		    !tg3_flag(tp, 57765_PLUS)) {
13371 			u32 cfg3;
13372 
13373 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13374 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13375 				tg3_flag_set(tp, ASPM_WORKAROUND);
13376 		}
13377 
13378 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13379 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13380 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13381 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13382 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13383 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13384 	}
13385 done:
13386 	if (tg3_flag(tp, WOL_CAP))
13387 		device_set_wakeup_enable(&tp->pdev->dev,
13388 					 tg3_flag(tp, WOL_ENABLE));
13389 	else
13390 		device_set_wakeup_capable(&tp->pdev->dev, false);
13391 }
13392 
13393 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13394 {
13395 	int i;
13396 	u32 val;
13397 
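	/* Pulse the command strobe: issue the command with the START
	 * bit set, then rewrite it with the bit clear.
	 */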
13398 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13399 	tw32(OTP_CTRL, cmd);
13400 
13401 	/* Wait for up to 1 ms for command to execute. */
13402 	for (i = 0; i < 100; i++) {
13403 		val = tr32(OTP_STATUS);
13404 		if (val & OTP_STATUS_CMD_DONE)
13405 			break;
13406 		udelay(10);
13407 	}
13408 
13409 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13410 }
13411 
13412 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13413  * configuration is a 32-bit value that straddles the alignment boundary.
13414  * We do two 32-bit reads and then shift and merge the results.
13415  */
13416 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13417 {
13418 	u32 bhalf_otp, thalf_otp;
13419 
13420 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13421 
13422 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13423 		return 0;
13424 
13425 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13426 
13427 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13428 		return 0;
13429 
13430 	thalf_otp = tr32(OTP_READ_DATA);
13431 
13432 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13433 
13434 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13435 		return 0;
13436 
13437 	bhalf_otp = tr32(OTP_READ_DATA);
13438 
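	/* The config word straddles the two reads: its upper half sits
	 * in the low 16 bits of the first word, its lower half in the
	 * high 16 bits of the second.
	 */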
13439 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13440 }
13441 
13442 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13443 {
13444 	u32 adv = ADVERTISED_Autoneg;
13445 
13446 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13447 		adv |= ADVERTISED_1000baseT_Half |
13448 		       ADVERTISED_1000baseT_Full;
13449 
13450 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13451 		adv |= ADVERTISED_100baseT_Half |
13452 		       ADVERTISED_100baseT_Full |
13453 		       ADVERTISED_10baseT_Half |
13454 		       ADVERTISED_10baseT_Full |
13455 		       ADVERTISED_TP;
13456 	else
13457 		adv |= ADVERTISED_FIBRE;
13458 
13459 	tp->link_config.advertising = adv;
13460 	tp->link_config.speed = SPEED_UNKNOWN;
13461 	tp->link_config.duplex = DUPLEX_UNKNOWN;
13462 	tp->link_config.autoneg = AUTONEG_ENABLE;
13463 	tp->link_config.active_speed = SPEED_UNKNOWN;
13464 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13465 
13466 	tp->old_link = -1;
13467 }
13468 
13469 static int __devinit tg3_phy_probe(struct tg3 *tp)
13470 {
13471 	u32 hw_phy_id_1, hw_phy_id_2;
13472 	u32 hw_phy_id, hw_phy_id_masked;
13473 	int err;
13474 
13475 	/* flow control autonegotiation is default behavior */
13476 	tg3_flag_set(tp, PAUSE_AUTONEG);
13477 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13478 
13479 	if (tg3_flag(tp, USE_PHYLIB))
13480 		return tg3_phy_init(tp);
13481 
13482 	/* Reading the PHY ID register can conflict with ASF
13483 	 * firmware access to the PHY hardware.
13484 	 */
13485 	err = 0;
13486 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13487 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13488 	} else {
13489 		/* Now read the physical PHY_ID from the chip and verify
13490 		 * that it is sane.  If it doesn't look good, we fall back
13491 		 * to the PHY_ID found in the eeprom area or, failing
13492 		 * that, to the hard-coded subsystem-ID table.
13493 		 */
13494 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13495 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13496 
13497 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13498 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13499 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13500 
13501 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13502 	}
13503 
13504 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13505 		tp->phy_id = hw_phy_id;
13506 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13507 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13508 		else
13509 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13510 	} else {
13511 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13512 			/* Do nothing, phy ID already set up in
13513 			 * tg3_get_eeprom_hw_cfg().
13514 			 */
13515 		} else {
13516 			struct subsys_tbl_ent *p;
13517 
13518 			/* No eeprom signature?  Try the hardcoded
13519 			 * subsys device table.
13520 			 */
13521 			p = tg3_lookup_by_subsys(tp);
13522 			if (!p)
13523 				return -ENODEV;
13524 
13525 			tp->phy_id = p->phy_id;
13526 			if (!tp->phy_id ||
13527 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13528 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13529 		}
13530 	}
13531 
13532 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13533 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13534 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13535 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13536 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13537 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13538 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13539 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13540 
13541 	tg3_phy_init_link_config(tp);
13542 
13543 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13544 	    !tg3_flag(tp, ENABLE_APE) &&
13545 	    !tg3_flag(tp, ENABLE_ASF)) {
13546 		u32 bmsr, dummy;
13547 
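		/* The BMSR link-status bit is latched-low, so read it
		 * twice: the first read clears any stale latched state,
		 * the second returns the current link status.
		 */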
13548 		tg3_readphy(tp, MII_BMSR, &bmsr);
13549 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13550 		    (bmsr & BMSR_LSTATUS))
13551 			goto skip_phy_reset;
13552 
13553 		err = tg3_phy_reset(tp);
13554 		if (err)
13555 			return err;
13556 
13557 		tg3_phy_set_wirespeed(tp);
13558 
13559 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13560 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13561 					    tp->link_config.flowctrl);
13562 
13563 			tg3_writephy(tp, MII_BMCR,
13564 				     BMCR_ANENABLE | BMCR_ANRESTART);
13565 		}
13566 	}
13567 
13568 skip_phy_reset:
13569 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13570 		err = tg3_init_5401phy_dsp(tp);
13571 		if (err)
13572 			return err;
13573 
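		/* The DSP init is run a second time after a successful
		 * first pass; the duplicated call appears deliberate.
		 */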
13574 		err = tg3_init_5401phy_dsp(tp);
13575 	}
13576 
13577 	return err;
13578 }
13579 
13580 static void __devinit tg3_read_vpd(struct tg3 *tp)
13581 {
13582 	u8 *vpd_data;
13583 	unsigned int block_end, rosize, len;
13584 	u32 vpdlen;
13585 	int j, i = 0;
13586 
13587 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13588 	if (!vpd_data)
13589 		goto out_no_vpd;
13590 
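	/* Walk the read-only VPD section: an MFR_ID of "1028" (Dell)
	 * means the VENDOR0 keyword carries a bootcode version string,
	 * and PARTNO supplies the board part number.
	 */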
13591 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13592 	if (i < 0)
13593 		goto out_not_found;
13594 
13595 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13596 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13597 	i += PCI_VPD_LRDT_TAG_SIZE;
13598 
13599 	if (block_end > vpdlen)
13600 		goto out_not_found;
13601 
13602 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13603 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13604 	if (j > 0) {
13605 		len = pci_vpd_info_field_size(&vpd_data[j]);
13606 
13607 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13608 		if (j + len > block_end || len != 4 ||
13609 		    memcmp(&vpd_data[j], "1028", 4))
13610 			goto partno;
13611 
13612 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13613 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13614 		if (j < 0)
13615 			goto partno;
13616 
13617 		len = pci_vpd_info_field_size(&vpd_data[j]);
13618 
13619 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13620 		if (j + len > block_end)
13621 			goto partno;
13622 
		/* Bound len by the destination buffer rather than the VPD
		 * length so a malformed VPD block cannot overflow fw_ver.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
13625 	}
13626 
13627 partno:
13628 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13629 				      PCI_VPD_RO_KEYWORD_PARTNO);
13630 	if (i < 0)
13631 		goto out_not_found;
13632 
13633 	len = pci_vpd_info_field_size(&vpd_data[i]);
13634 
13635 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13636 	if (len > TG3_BPN_SIZE ||
13637 	    (len + i) > vpdlen)
13638 		goto out_not_found;
13639 
13640 	memcpy(tp->board_part_number, &vpd_data[i], len);
13641 
13642 out_not_found:
13643 	kfree(vpd_data);
13644 	if (tp->board_part_number[0])
13645 		return;
13646 
13647 out_no_vpd:
13648 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13649 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13650 			strcpy(tp->board_part_number, "BCM5717");
13651 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13652 			strcpy(tp->board_part_number, "BCM5718");
13653 		else
13654 			goto nomatch;
13655 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13656 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13657 			strcpy(tp->board_part_number, "BCM57780");
13658 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13659 			strcpy(tp->board_part_number, "BCM57760");
13660 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13661 			strcpy(tp->board_part_number, "BCM57790");
13662 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13663 			strcpy(tp->board_part_number, "BCM57788");
13664 		else
13665 			goto nomatch;
13666 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13667 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13668 			strcpy(tp->board_part_number, "BCM57761");
13669 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13670 			strcpy(tp->board_part_number, "BCM57765");
13671 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13672 			strcpy(tp->board_part_number, "BCM57781");
13673 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13674 			strcpy(tp->board_part_number, "BCM57785");
13675 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13676 			strcpy(tp->board_part_number, "BCM57791");
13677 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13678 			strcpy(tp->board_part_number, "BCM57795");
13679 		else
13680 			goto nomatch;
13681 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13682 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13683 			strcpy(tp->board_part_number, "BCM57762");
13684 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13685 			strcpy(tp->board_part_number, "BCM57766");
13686 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13687 			strcpy(tp->board_part_number, "BCM57782");
13688 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13689 			strcpy(tp->board_part_number, "BCM57786");
13690 		else
13691 			goto nomatch;
13692 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13693 		strcpy(tp->board_part_number, "BCM95906");
13694 	} else {
13695 nomatch:
13696 		strcpy(tp->board_part_number, "none");
13697 	}
13698 }
13699 
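/* A firmware image in NVRAM is treated as valid when its first word
 * carries the 0x0c000000 signature in its top six bits and the
 * following word reads back as zero.
 */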
13700 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13701 {
13702 	u32 val;
13703 
13704 	if (tg3_nvram_read(tp, offset, &val) ||
13705 	    (val & 0xfc000000) != 0x0c000000 ||
13706 	    tg3_nvram_read(tp, offset + 4, &val) ||
13707 	    val != 0)
13708 		return 0;
13709 
13710 	return 1;
13711 }
13712 
13713 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13714 {
13715 	u32 val, offset, start, ver_offset;
13716 	int i, dst_off;
13717 	bool newver = false;
13718 
13719 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13720 	    tg3_nvram_read(tp, 0x4, &start))
13721 		return;
13722 
13723 	offset = tg3_nvram_logical_addr(tp, offset);
13724 
13725 	if (tg3_nvram_read(tp, offset, &val))
13726 		return;
13727 
13728 	if ((val & 0xfc000000) == 0x0c000000) {
13729 		if (tg3_nvram_read(tp, offset + 4, &val))
13730 			return;
13731 
13732 		if (val == 0)
13733 			newver = true;
13734 	}
13735 
13736 	dst_off = strlen(tp->fw_ver);
13737 
13738 	if (newver) {
13739 		if (TG3_VER_SIZE - dst_off < 16 ||
13740 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13741 			return;
13742 
13743 		offset = offset + ver_offset - start;
13744 		for (i = 0; i < 16; i += 4) {
13745 			__be32 v;
13746 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13747 				return;
13748 
13749 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13750 		}
13751 	} else {
13752 		u32 major, minor;
13753 
13754 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13755 			return;
13756 
13757 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13758 			TG3_NVM_BCVER_MAJSFT;
13759 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13760 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13761 			 "v%d.%02d", major, minor);
13762 	}
13763 }
13764 
13765 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13766 {
13767 	u32 val, major, minor;
13768 
13769 	/* Use native endian representation */
13770 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13771 		return;
13772 
13773 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13774 		TG3_NVM_HWSB_CFG1_MAJSFT;
13775 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13776 		TG3_NVM_HWSB_CFG1_MINSFT;
13777 
13778 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13779 }
13780 
13781 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13782 {
13783 	u32 offset, major, minor, build;
13784 
13785 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13786 
13787 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13788 		return;
13789 
13790 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13791 	case TG3_EEPROM_SB_REVISION_0:
13792 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13793 		break;
13794 	case TG3_EEPROM_SB_REVISION_2:
13795 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13796 		break;
13797 	case TG3_EEPROM_SB_REVISION_3:
13798 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13799 		break;
13800 	case TG3_EEPROM_SB_REVISION_4:
13801 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13802 		break;
13803 	case TG3_EEPROM_SB_REVISION_5:
13804 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13805 		break;
13806 	case TG3_EEPROM_SB_REVISION_6:
13807 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13808 		break;
13809 	default:
13810 		return;
13811 	}
13812 
13813 	if (tg3_nvram_read(tp, offset, &val))
13814 		return;
13815 
13816 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13817 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13818 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13819 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13820 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13821 
13822 	if (minor > 99 || build > 26)
13823 		return;
13824 
13825 	offset = strlen(tp->fw_ver);
13826 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13827 		 " v%d.%02d", major, minor);
13828 
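	/* A nonzero build number becomes a single suffix letter, 'a'
	 * for build 1 through 'z' for build 26 (the check above rejects
	 * anything larger).
	 */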
13829 	if (build > 0) {
13830 		offset = strlen(tp->fw_ver);
13831 		if (offset < TG3_VER_SIZE - 1)
13832 			tp->fw_ver[offset] = 'a' + build - 1;
13833 	}
13834 }
13835 
13836 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13837 {
13838 	u32 val, offset, start;
13839 	int i, vlen;
13840 
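
	/* Scan the NVRAM directory for the ASF initialization entry;
	 * each directory entry encodes its type in its high bits.
	 */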
13841 	for (offset = TG3_NVM_DIR_START;
13842 	     offset < TG3_NVM_DIR_END;
13843 	     offset += TG3_NVM_DIRENT_SIZE) {
13844 		if (tg3_nvram_read(tp, offset, &val))
13845 			return;
13846 
13847 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13848 			break;
13849 	}
13850 
13851 	if (offset == TG3_NVM_DIR_END)
13852 		return;
13853 
13854 	if (!tg3_flag(tp, 5705_PLUS))
13855 		start = 0x08000000;
13856 	else if (tg3_nvram_read(tp, offset - 4, &start))
13857 		return;
13858 
13859 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13860 	    !tg3_fw_img_is_valid(tp, offset) ||
13861 	    tg3_nvram_read(tp, offset + 8, &val))
13862 		return;
13863 
13864 	offset += val - start;
13865 
13866 	vlen = strlen(tp->fw_ver);
13867 
13868 	tp->fw_ver[vlen++] = ',';
13869 	tp->fw_ver[vlen++] = ' ';
13870 
13871 	for (i = 0; i < 4; i++) {
13872 		__be32 v;
13873 		if (tg3_nvram_read_be32(tp, offset, &v))
13874 			return;
13875 
13876 		offset += sizeof(v);
13877 
13878 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13879 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13880 			break;
13881 		}
13882 
13883 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13884 		vlen += sizeof(v);
13885 	}
13886 }
13887 
13888 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13889 {
13890 	int vlen;
13891 	u32 apedata;
13892 	char *fwtype;
13893 
13894 	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13895 		return;
13896 
13897 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13898 	if (apedata != APE_SEG_SIG_MAGIC)
13899 		return;
13900 
13901 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13902 	if (!(apedata & APE_FW_STATUS_READY))
13903 		return;
13904 
13905 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13906 
13907 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13908 		tg3_flag_set(tp, APE_HAS_NCSI);
13909 		fwtype = "NCSI";
13910 	} else {
13911 		fwtype = "DASH";
13912 	}
13913 
13914 	vlen = strlen(tp->fw_ver);
13915 
13916 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13917 		 fwtype,
13918 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13919 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13920 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13921 		 (apedata & APE_FW_VERSION_BLDMSK));
13922 }
13923 
13924 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13925 {
13926 	u32 val;
13927 	bool vpd_vers = false;
13928 
13929 	if (tp->fw_ver[0] != 0)
13930 		vpd_vers = true;
13931 
13932 	if (tg3_flag(tp, NO_NVRAM)) {
13933 		strcat(tp->fw_ver, "sb");
13934 		return;
13935 	}
13936 
13937 	if (tg3_nvram_read(tp, 0, &val))
13938 		return;
13939 
13940 	if (val == TG3_EEPROM_MAGIC)
13941 		tg3_read_bc_ver(tp);
13942 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13943 		tg3_read_sb_ver(tp, val);
13944 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13945 		tg3_read_hwsb_ver(tp);
13946 	else
13947 		return;
13948 
13949 	if (vpd_vers)
13950 		goto done;
13951 
13952 	if (tg3_flag(tp, ENABLE_APE)) {
13953 		if (tg3_flag(tp, ENABLE_ASF))
13954 			tg3_read_dash_ver(tp);
13955 	} else if (tg3_flag(tp, ENABLE_ASF)) {
13956 		tg3_read_mgmtfw_ver(tp);
13957 	}
13958 
13959 done:
13960 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13961 }
13962 
13963 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13964 {
13965 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13966 		return TG3_RX_RET_MAX_SIZE_5717;
13967 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13968 		return TG3_RX_RET_MAX_SIZE_5700;
13969 	else
13970 		return TG3_RX_RET_MAX_SIZE_5705;
13971 }
13972 
13973 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13974 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13975 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13976 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13977 	{ },
13978 };
13979 
13980 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13981 {
13982 	struct pci_dev *peer;
13983 	unsigned int func, devnr = tp->pdev->devfn & ~7;
13984 
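	/* The peer is the other PCI function in the same device slot;
	 * masking off the low three devfn bits strips the function
	 * number.
	 */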
13985 	for (func = 0; func < 8; func++) {
13986 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
13987 		if (peer && peer != tp->pdev)
13988 			break;
13989 		pci_dev_put(peer);
13990 	}
13991 	/* The 5704 can be configured in single-port mode; in that case
13992 	 * there is no other function, so point peer at tp->pdev itself.
13993 	 */
13994 	if (!peer) {
13995 		peer = tp->pdev;
13996 		return peer;
13997 	}
13998 
13999 	/*
14000 	 * We don't need to keep the refcount elevated; there's no way
14001 	 * to remove one half of this device without removing the other
14002 	 */
14003 	pci_dev_put(peer);
14004 
14005 	return peer;
14006 }
14007 
14008 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14009 {
14010 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
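	/* Older chips report their ID in the upper bits of
	 * MISC_HOST_CTRL; newer ones put a placeholder there and expose
	 * the real ID through a product-ID config register, read below.
	 */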
14011 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14012 		u32 reg;
14013 
14014 		/* All devices that use the alternate
14015 		 * ASIC REV location have a CPMU.
14016 		 */
14017 		tg3_flag_set(tp, CPMU_PRESENT);
14018 
14019 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14020 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14021 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14022 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14023 			reg = TG3PCI_GEN2_PRODID_ASICREV;
14024 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14025 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14026 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14027 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14028 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14029 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14030 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14031 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14032 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14033 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14034 			reg = TG3PCI_GEN15_PRODID_ASICREV;
14035 		else
14036 			reg = TG3PCI_PRODID_ASICREV;
14037 
14038 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14039 	}
14040 
14041 	/* Wrong chip ID in 5752 A0. This code can be removed later
14042 	 * as A0 is not in production.
14043 	 */
14044 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14045 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14046 
14047 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14048 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14049 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14050 		tg3_flag_set(tp, 5717_PLUS);
14051 
14052 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14053 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14054 		tg3_flag_set(tp, 57765_CLASS);
14055 
14056 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14057 		tg3_flag_set(tp, 57765_PLUS);
14058 
14059 	/* Intentionally exclude ASIC_REV_5906 */
14060 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14061 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14062 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14063 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14064 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14065 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14066 	    tg3_flag(tp, 57765_PLUS))
14067 		tg3_flag_set(tp, 5755_PLUS);
14068 
14069 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14070 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14071 		tg3_flag_set(tp, 5780_CLASS);
14072 
14073 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14074 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14075 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14076 	    tg3_flag(tp, 5755_PLUS) ||
14077 	    tg3_flag(tp, 5780_CLASS))
14078 		tg3_flag_set(tp, 5750_PLUS);
14079 
14080 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14081 	    tg3_flag(tp, 5750_PLUS))
14082 		tg3_flag_set(tp, 5705_PLUS);
14083 }
14084 
14085 static int __devinit tg3_get_invariants(struct tg3 *tp)
14086 {
14087 	u32 misc_ctrl_reg;
14088 	u32 pci_state_reg, grc_misc_cfg;
14089 	u32 val;
14090 	u16 pci_cmd;
14091 	int err;
14092 
14093 	/* Force memory write invalidate off.  If we leave it on,
14094 	 * then on 5700_BX chips we have to enable a workaround.
14095 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14096 	 * to match the cacheline size.  The Broadcom driver has this
14097 	 * workaround but turns MWI off at all times and so never uses
14098 	 * it.  This seems to suggest that the workaround is insufficient.
14099 	 */
14100 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14101 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14102 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14103 
14104 	/* Important! -- Make sure register accesses are byteswapped
14105 	 * correctly.  Also, for those chips that require it, make
14106 	 * sure that indirect register accesses are enabled before
14107 	 * the first operation.
14108 	 */
14109 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14110 			      &misc_ctrl_reg);
14111 	tp->misc_host_ctrl |= (misc_ctrl_reg &
14112 			       MISC_HOST_CTRL_CHIPREV);
14113 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14114 			       tp->misc_host_ctrl);
14115 
14116 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
14117 
14118 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14119 	 * we need to disable memory and use config. cycles
14120 	 * only to access all registers. The 5702/03 chips
14121 	 * can mistakenly decode the special cycles from the
14122 	 * ICH chipsets as memory write cycles, causing corruption
14123 	 * of register and memory space. Only certain ICH bridges
14124 	 * will drive special cycles with non-zero data during the
14125 	 * address phase which can fall within the 5703's address
14126 	 * range. This is not an ICH bug as the PCI spec allows
14127 	 * non-zero address during special cycles. However, only
14128 	 * these ICH bridges are known to drive non-zero addresses
14129 	 * during special cycles.
14130 	 *
14131 	 * Since special cycles do not cross PCI bridges, we only
14132 	 * enable this workaround if the 5703 is on the secondary
14133 	 * bus of these ICH bridges.
14134 	 */
14135 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14136 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14137 		static struct tg3_dev_id {
14138 			u32	vendor;
14139 			u32	device;
14140 			u32	rev;
14141 		} ich_chipsets[] = {
14142 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14143 			  PCI_ANY_ID },
14144 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14145 			  PCI_ANY_ID },
14146 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14147 			  0xa },
14148 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14149 			  PCI_ANY_ID },
14150 			{ },
14151 		};
14152 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
14153 		struct pci_dev *bridge = NULL;
14154 
14155 		while (pci_id->vendor != 0) {
14156 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
14157 						bridge);
14158 			if (!bridge) {
14159 				pci_id++;
14160 				continue;
14161 			}
14162 			if (pci_id->rev != PCI_ANY_ID) {
14163 				if (bridge->revision > pci_id->rev)
14164 					continue;
14165 			}
14166 			if (bridge->subordinate &&
14167 			    (bridge->subordinate->number ==
14168 			     tp->pdev->bus->number)) {
14169 				tg3_flag_set(tp, ICH_WORKAROUND);
14170 				pci_dev_put(bridge);
14171 				break;
14172 			}
14173 		}
14174 	}
14175 
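	/* 5701s sitting below certain Intel PXH bridges are subject to
	 * a DMA erratum; flag the workaround when our bus number falls
	 * within such a bridge's secondary bus range.
	 */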
14176 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14177 		static struct tg3_dev_id {
14178 			u32	vendor;
14179 			u32	device;
14180 		} bridge_chipsets[] = {
14181 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14182 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14183 			{ },
14184 		};
14185 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14186 		struct pci_dev *bridge = NULL;
14187 
14188 		while (pci_id->vendor != 0) {
14189 			bridge = pci_get_device(pci_id->vendor,
14190 						pci_id->device,
14191 						bridge);
14192 			if (!bridge) {
14193 				pci_id++;
14194 				continue;
14195 			}
14196 			if (bridge->subordinate &&
14197 			    (bridge->subordinate->number <=
14198 			     tp->pdev->bus->number) &&
14199 			    (bridge->subordinate->subordinate >=
14200 			     tp->pdev->bus->number)) {
14201 				tg3_flag_set(tp, 5701_DMA_BUG);
14202 				pci_dev_put(bridge);
14203 				break;
14204 			}
14205 		}
14206 	}
14207 
14208 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14209 	 * DMA addresses > 40-bit. This bridge may have other additional
14210 	 * 57xx devices behind it in some 4-port NIC designs for example.
14211 	 * Any tg3 device found behind the bridge will also need the 40-bit
14212 	 * DMA workaround.
14213 	 */
14214 	if (tg3_flag(tp, 5780_CLASS)) {
14215 		tg3_flag_set(tp, 40BIT_DMA_BUG);
14216 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14217 	} else {
14218 		struct pci_dev *bridge = NULL;
14219 
14220 		do {
14221 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14222 						PCI_DEVICE_ID_SERVERWORKS_EPB,
14223 						bridge);
14224 			if (bridge && bridge->subordinate &&
14225 			    (bridge->subordinate->number <=
14226 			     tp->pdev->bus->number) &&
14227 			    (bridge->subordinate->subordinate >=
14228 			     tp->pdev->bus->number)) {
14229 				tg3_flag_set(tp, 40BIT_DMA_BUG);
14230 				pci_dev_put(bridge);
14231 				break;
14232 			}
14233 		} while (bridge);
14234 	}
14235 
14236 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14237 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14238 		tp->pdev_peer = tg3_find_peer(tp);
14239 
14240 	/* Determine TSO capabilities */
14241 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14242 		; /* Do nothing. HW bug. */
14243 	else if (tg3_flag(tp, 57765_PLUS))
14244 		tg3_flag_set(tp, HW_TSO_3);
14245 	else if (tg3_flag(tp, 5755_PLUS) ||
14246 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14247 		tg3_flag_set(tp, HW_TSO_2);
14248 	else if (tg3_flag(tp, 5750_PLUS)) {
14249 		tg3_flag_set(tp, HW_TSO_1);
14250 		tg3_flag_set(tp, TSO_BUG);
14251 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14252 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14253 			tg3_flag_clear(tp, TSO_BUG);
14254 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14255 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14256 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14257 		tg3_flag_set(tp, TSO_BUG);
14258 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14259 			tp->fw_needed = FIRMWARE_TG3TSO5;
14260 		else
14261 			tp->fw_needed = FIRMWARE_TG3TSO;
14262 	}
14263 
14264 	/* Selectively allow TSO based on operating conditions */
14265 	if (tg3_flag(tp, HW_TSO_1) ||
14266 	    tg3_flag(tp, HW_TSO_2) ||
14267 	    tg3_flag(tp, HW_TSO_3) ||
14268 	    tp->fw_needed) {
14269 		/* For firmware TSO, assume ASF is disabled.
14270 		 * We'll disable TSO later if we discover ASF
14271 		 * is enabled in tg3_get_eeprom_hw_cfg().
14272 		 */
14273 		tg3_flag_set(tp, TSO_CAPABLE);
14274 	} else {
14275 		tg3_flag_clear(tp, TSO_CAPABLE);
14276 		tg3_flag_clear(tp, TSO_BUG);
14277 		tp->fw_needed = NULL;
14278 	}
14279 
14280 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14281 		tp->fw_needed = FIRMWARE_TG3;
14282 
14283 	tp->irq_max = 1;
14284 
14285 	if (tg3_flag(tp, 5750_PLUS)) {
14286 		tg3_flag_set(tp, SUPPORT_MSI);
14287 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14288 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14289 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14290 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14291 		     tp->pdev_peer == tp->pdev))
14292 			tg3_flag_clear(tp, SUPPORT_MSI);
14293 
14294 		if (tg3_flag(tp, 5755_PLUS) ||
14295 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14296 			tg3_flag_set(tp, 1SHOT_MSI);
14297 		}
14298 
14299 		if (tg3_flag(tp, 57765_PLUS)) {
14300 			tg3_flag_set(tp, SUPPORT_MSIX);
14301 			tp->irq_max = TG3_IRQ_MAX_VECS;
14302 			tg3_rss_init_dflt_indir_tbl(tp);
14303 		}
14304 	}
14305 
14306 	if (tg3_flag(tp, 5755_PLUS))
14307 		tg3_flag_set(tp, SHORT_DMA_BUG);
14308 
14309 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14310 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14311 
14312 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14313 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14314 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14315 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14316 
14317 	if (tg3_flag(tp, 57765_PLUS) &&
14318 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14319 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14320 
14321 	if (!tg3_flag(tp, 5705_PLUS) ||
14322 	    tg3_flag(tp, 5780_CLASS) ||
14323 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14324 		tg3_flag_set(tp, JUMBO_CAPABLE);
14325 
14326 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14327 			      &pci_state_reg);
14328 
14329 	if (pci_is_pcie(tp->pdev)) {
14330 		u16 lnkctl;
14331 
14332 		tg3_flag_set(tp, PCI_EXPRESS);
14333 
14334 		pci_read_config_word(tp->pdev,
14335 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14336 				     &lnkctl);
14337 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14338 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14339 			    ASIC_REV_5906) {
14340 				tg3_flag_clear(tp, HW_TSO_2);
14341 				tg3_flag_clear(tp, TSO_CAPABLE);
14342 			}
14343 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14344 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14345 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14346 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14347 				tg3_flag_set(tp, CLKREQ_BUG);
14348 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14349 			tg3_flag_set(tp, L1PLLPD_EN);
14350 		}
14351 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14352 		/* BCM5785 devices are effectively PCIe devices, and should
14353 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14354 		 * section.
14355 		 */
14356 		tg3_flag_set(tp, PCI_EXPRESS);
14357 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14358 		   tg3_flag(tp, 5780_CLASS)) {
14359 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14360 		if (!tp->pcix_cap) {
14361 			dev_err(&tp->pdev->dev,
14362 				"Cannot find PCI-X capability, aborting\n");
14363 			return -EIO;
14364 		}
14365 
14366 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14367 			tg3_flag_set(tp, PCIX_MODE);
14368 	}
14369 
14370 	/* If we have an AMD 762 or VIA K8T800 chipset, write
14371 	 * reordering of mailbox register writes by the host
14372 	 * controller can cause serious problems.  We read back after
14373 	 * every mailbox register write to force the writes to be
14374 	 * posted to the chip in order.
14375 	 */
14376 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14377 	    !tg3_flag(tp, PCI_EXPRESS))
14378 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14379 
14380 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14381 			     &tp->pci_cacheline_sz);
14382 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14383 			     &tp->pci_lat_timer);
14384 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14385 	    tp->pci_lat_timer < 64) {
14386 		tp->pci_lat_timer = 64;
14387 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14388 				      tp->pci_lat_timer);
14389 	}
14390 
14391 	/* Important! -- It is critical that the PCI-X hw workaround
14392 	 * situation is decided before the first MMIO register access.
14393 	 */
14394 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14395 		/* 5700 BX chips need to have their TX producer index
14396 		 * mailboxes written twice to work around a bug.
14397 		 */
14398 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14399 
14400 		/* If we are in PCI-X mode, enable register write workaround.
14401 		 *
14402 		 * The workaround is to use indirect register accesses
14403 		 * for all chip writes not to mailbox registers.
14404 		 */
14405 		if (tg3_flag(tp, PCIX_MODE)) {
14406 			u32 pm_reg;
14407 
14408 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14409 
14410 			/* The chip can have its power management PCI config
14411 			 * space registers clobbered due to this bug.
14412 			 * So explicitly force the chip into D0 here.
14413 			 */
14414 			pci_read_config_dword(tp->pdev,
14415 					      tp->pm_cap + PCI_PM_CTRL,
14416 					      &pm_reg);
14417 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14418 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14419 			pci_write_config_dword(tp->pdev,
14420 					       tp->pm_cap + PCI_PM_CTRL,
14421 					       pm_reg);
14422 
14423 			/* Also, force SERR#/PERR# in PCI command. */
14424 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14425 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14426 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14427 		}
14428 	}
14429 
14430 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14431 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14432 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14433 		tg3_flag_set(tp, PCI_32BIT);
14434 
14435 	/* Chip-specific fixup from Broadcom driver */
14436 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14437 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14438 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14439 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14440 	}
14441 
14442 	/* Default fast path register access methods */
14443 	tp->read32 = tg3_read32;
14444 	tp->write32 = tg3_write32;
14445 	tp->read32_mbox = tg3_read32;
14446 	tp->write32_mbox = tg3_write32;
14447 	tp->write32_tx_mbox = tg3_write32;
14448 	tp->write32_rx_mbox = tg3_write32;
14449 
14450 	/* Various workaround register access methods */
14451 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14452 		tp->write32 = tg3_write_indirect_reg32;
14453 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14454 		 (tg3_flag(tp, PCI_EXPRESS) &&
14455 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14456 		/*
14457 		 * Back to back register writes can cause problems on these
14458 		 * chips, the workaround is to read back all reg writes
14459 		 * except those to mailbox regs.
14460 		 *
14461 		 * See tg3_write_flush_reg32().
14462 		 */
14463 		tp->write32 = tg3_write_flush_reg32;
14464 	}
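	/* For reference, the indirect helpers tunnel register accesses
	 * through PCI config space (under tp->indirect_lock), roughly:
	 *
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 */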
14465 
14466 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14467 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14468 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14469 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14470 	}
14471 
14472 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14473 		tp->read32 = tg3_read_indirect_reg32;
14474 		tp->write32 = tg3_write_indirect_reg32;
14475 		tp->read32_mbox = tg3_read_indirect_mbox;
14476 		tp->write32_mbox = tg3_write_indirect_mbox;
14477 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14478 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14479 
14480 		iounmap(tp->regs);
14481 		tp->regs = NULL;
14482 
14483 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14484 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14485 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14486 	}
14487 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14488 		tp->read32_mbox = tg3_read32_mbox_5906;
14489 		tp->write32_mbox = tg3_write32_mbox_5906;
14490 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14491 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14492 	}
14493 
14494 	if (tp->write32 == tg3_write_indirect_reg32 ||
14495 	    (tg3_flag(tp, PCIX_MODE) &&
14496 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14497 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14498 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14499 
14500 	/* The memory arbiter has to be enabled in order for SRAM accesses
14501 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14502 	 * sure it is enabled, but other entities such as system netboot
14503 	 * code might disable it.
14504 	 */
14505 	val = tr32(MEMARB_MODE);
14506 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14507 
14508 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14510 	    tg3_flag(tp, 5780_CLASS)) {
14511 		if (tg3_flag(tp, PCIX_MODE)) {
14512 			pci_read_config_dword(tp->pdev,
14513 					      tp->pcix_cap + PCI_X_STATUS,
14514 					      &val);
14515 			tp->pci_fn = val & 0x7;
14516 		}
14517 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14518 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14519 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14520 		    NIC_SRAM_CPMUSTAT_SIG) {
14521 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14522 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14523 		}
14524 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14525 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14526 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14527 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14528 		    NIC_SRAM_CPMUSTAT_SIG) {
14529 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14530 				     TG3_CPMU_STATUS_FSHFT_5719;
14531 		}
14532 	}
14533 
14534 	/* Get eeprom hw config before calling tg3_set_power_state().
14535 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14536 	 * determined before calling tg3_set_power_state() so that
14537 	 * we know whether or not to switch out of Vaux power.
14538 	 * When the flag is set, it means that GPIO1 is used for eeprom
14539 	 * write protect and also implies that it is a LOM where GPIOs
14540 	 * are not used to switch power.
14541 	 */
14542 	tg3_get_eeprom_hw_cfg(tp);
14543 
14544 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14545 		tg3_flag_clear(tp, TSO_CAPABLE);
14546 		tg3_flag_clear(tp, TSO_BUG);
14547 		tp->fw_needed = NULL;
14548 	}
14549 
14550 	if (tg3_flag(tp, ENABLE_APE)) {
14551 		/* Allow reads and writes to the
14552 		 * APE register and memory space.
14553 		 */
14554 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14555 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14556 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14557 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14558 				       pci_state_reg);
14559 
14560 		tg3_ape_lock_init(tp);
14561 	}
14562 
14563 	/* Set up tp->grc_local_ctrl before calling
14564 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14565 	 * will bring 5700's external PHY out of reset.
14566 	 * It is also used as eeprom write protect on LOMs.
14567 	 */
14568 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14569 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14570 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14571 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14572 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14573 	/* Unused GPIO3 must be driven as output on 5752 because there
14574 	 * are no pull-up resistors on unused GPIO pins.
14575 	 */
14576 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14577 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14578 
14579 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14580 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14581 	    tg3_flag(tp, 57765_CLASS))
14582 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14583 
14584 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14585 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14586 		/* Turn off the debug UART. */
14587 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14588 		if (tg3_flag(tp, IS_NIC))
14589 			/* Keep VMain power. */
14590 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14591 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14592 	}
14593 
14594 	/* Switch out of Vaux if it is a NIC */
14595 	tg3_pwrsrc_switch_to_vmain(tp);
14596 
14597 	/* Derive initial jumbo mode from MTU assigned in
14598 	 * ether_setup() via the alloc_etherdev() call
14599 	 */
14600 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14601 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14602 
14603 	/* Determine WakeOnLan speed to use. */
14604 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14605 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14606 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14607 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14608 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14609 	} else {
14610 		tg3_flag_set(tp, WOL_SPEED_100MB);
14611 	}
14612 
14613 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14614 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14615 
14616 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
14617 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14618 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14619 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14620 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14621 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14622 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14623 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14624 
14625 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14626 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14627 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14628 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14629 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14630 
14631 	if (tg3_flag(tp, 5705_PLUS) &&
14632 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14633 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14634 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14635 	    !tg3_flag(tp, 57765_PLUS)) {
14636 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14637 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14638 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14639 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14640 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14641 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14642 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14643 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14644 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14645 		} else
14646 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14647 	}
14648 
14649 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14650 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14651 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14652 		if (tp->phy_otp == 0)
14653 			tp->phy_otp = TG3_OTP_DEFAULT;
14654 	}
14655 
14656 	if (tg3_flag(tp, CPMU_PRESENT))
14657 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14658 	else
14659 		tp->mi_mode = MAC_MI_MODE_BASE;
14660 
14661 	tp->coalesce_mode = 0;
14662 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14663 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14664 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14665 
14666 	/* Set these bits to enable statistics workaround. */
14667 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14668 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14669 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14670 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14671 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14672 	}
14673 
14674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14675 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14676 		tg3_flag_set(tp, USE_PHYLIB);
14677 
14678 	err = tg3_mdio_init(tp);
14679 	if (err)
14680 		return err;
14681 
14682 	/* Initialize data/descriptor byte/word swapping. */
14683 	val = tr32(GRC_MODE);
14684 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14685 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14686 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14687 			GRC_MODE_B2HRX_ENABLE |
14688 			GRC_MODE_HTX2B_ENABLE |
14689 			GRC_MODE_HOST_STACKUP);
14690 	else
14691 		val &= GRC_MODE_HOST_STACKUP;
14692 
14693 	tw32(GRC_MODE, val | tp->grc_mode);
14694 
14695 	tg3_switch_clocks(tp);
14696 
14697 	/* Clear this out for sanity. */
14698 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14699 
14700 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14701 			      &pci_state_reg);
14702 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14703 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14704 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14705 
14706 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14707 		    chiprevid == CHIPREV_ID_5701_B0 ||
14708 		    chiprevid == CHIPREV_ID_5701_B2 ||
14709 		    chiprevid == CHIPREV_ID_5701_B5) {
14710 			void __iomem *sram_base;
14711 
14712 			/* Write some dummy words into the SRAM status block
14713 			 * area and see if they read back correctly.  If the return
14714 			 * value is bad, force enable the PCIX workaround.
14715 			 */
14716 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14717 
14718 			writel(0x00000000, sram_base);
14719 			writel(0x00000000, sram_base + 4);
14720 			writel(0xffffffff, sram_base + 4);
14721 			if (readl(sram_base) != 0x00000000)
14722 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14723 		}
14724 	}
14725 
14726 	udelay(50);
14727 	tg3_nvram_init(tp);
14728 
14729 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14730 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14731 
14732 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14733 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14734 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14735 		tg3_flag_set(tp, IS_5788);
14736 
14737 	if (!tg3_flag(tp, IS_5788) &&
14738 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14739 		tg3_flag_set(tp, TAGGED_STATUS);
14740 	if (tg3_flag(tp, TAGGED_STATUS)) {
14741 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14742 				      HOSTCC_MODE_CLRTICK_TXBD);
14743 
14744 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14745 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14746 				       tp->misc_host_ctrl);
14747 	}
14748 
14749 	/* Preserve the APE MAC_MODE bits */
14750 	if (tg3_flag(tp, ENABLE_APE))
14751 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14752 	else
14753 		tp->mac_mode = 0;
14754 
14755 	/* these are limited to 10/100 only */
14756 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14757 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14758 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14759 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14760 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14761 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14762 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14763 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14764 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14765 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14766 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14767 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14768 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14769 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14770 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14771 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14772 
14773 	err = tg3_phy_probe(tp);
14774 	if (err) {
14775 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14776 		/* ... but do not return immediately ... */
14777 		tg3_mdio_fini(tp);
14778 	}
14779 
14780 	tg3_read_vpd(tp);
14781 	tg3_read_fw_ver(tp);
14782 
14783 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14784 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14785 	} else {
14786 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14787 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14788 		else
14789 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14790 	}
14791 
14792 	/* 5700 {AX,BX} chips have a broken status block link
14793 	 * change bit implementation, so we must use the
14794 	 * status register in those cases.
14795 	 */
14796 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14797 		tg3_flag_set(tp, USE_LINKCHG_REG);
14798 	else
14799 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14800 
14801 	/* The led_ctrl is set during tg3_phy_probe; here we might
14802 	 * have to force the link status polling mechanism based
14803 	 * upon subsystem IDs.
14804 	 */
14805 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14806 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14807 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14808 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14809 		tg3_flag_set(tp, USE_LINKCHG_REG);
14810 	}
14811 
14812 	/* For all SERDES we poll the MAC status register. */
14813 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14814 		tg3_flag_set(tp, POLL_SERDES);
14815 	else
14816 		tg3_flag_clear(tp, POLL_SERDES);
14817 
14818 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14819 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
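	/* The 5701 in PCI-X mode cannot handle the 2-byte receive DMA
	 * offset, so drop the NET_IP_ALIGN adjustment there and, on
	 * architectures without efficient unaligned access, copy every
	 * received packet instead.
	 */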
14820 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14821 	    tg3_flag(tp, PCIX_MODE)) {
14822 		tp->rx_offset = NET_SKB_PAD;
14823 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14824 		tp->rx_copy_thresh = ~(u16)0;
14825 #endif
14826 	}
14827 
14828 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14829 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14830 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14831 
14832 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14833 
14834 	/* Increment the rx prod index on the rx std ring by at most
14835 	 * 8 for these chips to work around hw errata.
14836 	 */
14837 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14838 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14839 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14840 		tp->rx_std_max_post = 8;
14841 
14842 	if (tg3_flag(tp, ASPM_WORKAROUND))
14843 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14844 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14845 
14846 	return err;
14847 }
14848 
14849 #ifdef CONFIG_SPARC
14850 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14851 {
14852 	struct net_device *dev = tp->dev;
14853 	struct pci_dev *pdev = tp->pdev;
14854 	struct device_node *dp = pci_device_to_OF_node(pdev);
14855 	const unsigned char *addr;
14856 	int len;
14857 
14858 	addr = of_get_property(dp, "local-mac-address", &len);
14859 	if (addr && len == 6) {
14860 		memcpy(dev->dev_addr, addr, 6);
14861 		memcpy(dev->perm_addr, dev->dev_addr, 6);
14862 		return 0;
14863 	}
14864 	return -ENODEV;
14865 }
14866 
14867 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14868 {
14869 	struct net_device *dev = tp->dev;
14870 
14871 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14872 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14873 	return 0;
14874 }
14875 #endif
14876 
14877 static int __devinit tg3_get_device_address(struct tg3 *tp)
14878 {
14879 	struct net_device *dev = tp->dev;
14880 	u32 hi, lo, mac_offset;
14881 	int addr_ok = 0;
14882 
14883 #ifdef CONFIG_SPARC
14884 	if (!tg3_get_macaddr_sparc(tp))
14885 		return 0;
14886 #endif
14887 
14888 	mac_offset = 0x7c;
14889 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14890 	    tg3_flag(tp, 5780_CLASS)) {
14891 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14892 			mac_offset = 0xcc;
14893 		if (tg3_nvram_lock(tp))
14894 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14895 		else
14896 			tg3_nvram_unlock(tp);
14897 	} else if (tg3_flag(tp, 5717_PLUS)) {
14898 		if (tp->pci_fn & 1)
14899 			mac_offset = 0xcc;
14900 		if (tp->pci_fn > 1)
14901 			mac_offset += 0x18c;
14902 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14903 		mac_offset = 0x10;
14904 
14905 	/* First try to get it from MAC address mailbox. */
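	/* Bootcode stores the address behind a 0x484b (ASCII "HK")
	 * signature in the high word.
	 */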
14906 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14907 	if ((hi >> 16) == 0x484b) {
14908 		dev->dev_addr[0] = (hi >>  8) & 0xff;
14909 		dev->dev_addr[1] = (hi >>  0) & 0xff;
14910 
14911 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14912 		dev->dev_addr[2] = (lo >> 24) & 0xff;
14913 		dev->dev_addr[3] = (lo >> 16) & 0xff;
14914 		dev->dev_addr[4] = (lo >>  8) & 0xff;
14915 		dev->dev_addr[5] = (lo >>  0) & 0xff;
14916 
14917 		/* Some old bootcode may report a 0 MAC address in SRAM */
14918 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14919 	}
14920 	if (!addr_ok) {
14921 		/* Next, try NVRAM. */
14922 		if (!tg3_flag(tp, NO_NVRAM) &&
14923 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14924 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14925 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14926 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14927 		}
14928 		/* Finally just fetch it out of the MAC control regs. */
14929 		else {
14930 			hi = tr32(MAC_ADDR_0_HIGH);
14931 			lo = tr32(MAC_ADDR_0_LOW);
14932 
14933 			dev->dev_addr[5] = lo & 0xff;
14934 			dev->dev_addr[4] = (lo >> 8) & 0xff;
14935 			dev->dev_addr[3] = (lo >> 16) & 0xff;
14936 			dev->dev_addr[2] = (lo >> 24) & 0xff;
14937 			dev->dev_addr[1] = hi & 0xff;
14938 			dev->dev_addr[0] = (hi >> 8) & 0xff;
14939 		}
14940 	}
14941 
14942 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14943 #ifdef CONFIG_SPARC
14944 		if (!tg3_get_default_macaddr_sparc(tp))
14945 			return 0;
14946 #endif
14947 		return -EINVAL;
14948 	}
14949 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14950 	return 0;
14951 }
14952 
14953 #define BOUNDARY_SINGLE_CACHELINE	1
14954 #define BOUNDARY_MULTI_CACHELINE	2
14955 
14956 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14957 {
14958 	int cacheline_size;
14959 	u8 byte;
14960 	int goal;
14961 
14962 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14963 	if (byte == 0)
14964 		cacheline_size = 1024;
14965 	else
14966 		cacheline_size = (int) byte * 4;
14967 
14968 	/* On 5703 and later chips, the boundary bits have no
14969 	 * effect.
14970 	 */
14971 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14972 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14973 	    !tg3_flag(tp, PCI_EXPRESS))
14974 		goto out;
14975 
14976 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14977 	goal = BOUNDARY_MULTI_CACHELINE;
14978 #else
14979 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14980 	goal = BOUNDARY_SINGLE_CACHELINE;
14981 #else
14982 	goal = 0;
14983 #endif
14984 #endif
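	/* Summary of the above: PPC64/IA64/PARISC hosts request
	 * multi-cacheline boundaries, SPARC64/Alpha single-cacheline
	 * ones, and all other hosts leave goal at 0.
	 */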
14985 
14986 	if (tg3_flag(tp, 57765_PLUS)) {
14987 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14988 		goto out;
14989 	}
14990 
14991 	if (!goal)
14992 		goto out;
14993 
14994 	/* PCI controllers on most RISC systems tend to disconnect
14995 	 * when a device tries to burst across a cache-line boundary.
14996 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14997 	 *
14998 	 * Unfortunately, for PCI-E there are only limited
14999 	 * write-side controls for this, and thus for reads
15000 	 * we will still get the disconnects.  We'll also waste
15001 	 * these PCI cycles for both read and write for chips
15002 	 * other than 5700 and 5701 which do not implement the
15003 	 * boundary bits.
15004 	 */
15005 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15006 		switch (cacheline_size) {
15007 		case 16:
15008 		case 32:
15009 		case 64:
15010 		case 128:
15011 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15012 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15013 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15014 			} else {
15015 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15016 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15017 			}
15018 			break;
15019 
15020 		case 256:
15021 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15022 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15023 			break;
15024 
15025 		default:
15026 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15027 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15028 			break;
15029 		}
15030 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
15031 		switch (cacheline_size) {
15032 		case 16:
15033 		case 32:
15034 		case 64:
15035 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15036 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15037 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15038 				break;
15039 			}
15040 			/* fallthrough */
15041 		case 128:
15042 		default:
15043 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15044 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15045 			break;
15046 		}
15047 	} else {
15048 		switch (cacheline_size) {
15049 		case 16:
15050 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15051 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
15052 					DMA_RWCTRL_WRITE_BNDRY_16);
15053 				break;
15054 			}
15055 			/* fallthrough */
15056 		case 32:
15057 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15058 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
15059 					DMA_RWCTRL_WRITE_BNDRY_32);
15060 				break;
15061 			}
15062 			/* fallthrough */
15063 		case 64:
15064 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15065 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
15066 					DMA_RWCTRL_WRITE_BNDRY_64);
15067 				break;
15068 			}
15069 			/* fallthrough */
15070 		case 128:
15071 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15072 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
15073 					DMA_RWCTRL_WRITE_BNDRY_128);
15074 				break;
15075 			}
15076 			/* fallthrough */
15077 		case 256:
15078 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
15079 				DMA_RWCTRL_WRITE_BNDRY_256);
15080 			break;
15081 		case 512:
15082 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
15083 				DMA_RWCTRL_WRITE_BNDRY_512);
15084 			break;
15085 		case 1024:
15086 		default:
15087 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15088 				DMA_RWCTRL_WRITE_BNDRY_1024);
15089 			break;
15090 		}
15091 	}
15092 
15093 out:
15094 	return val;
15095 }
15096 
15097 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15098 {
15099 	struct tg3_internal_buffer_desc test_desc;
15100 	u32 sram_dma_descs;
15101 	int i, ret;
15102 
15103 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15104 
15105 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15106 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15107 	tw32(RDMAC_STATUS, 0);
15108 	tw32(WDMAC_STATUS, 0);
15109 
15110 	tw32(BUFMGR_MODE, 0);
15111 	tw32(FTQ_RESET, 0);
15112 
15113 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
15114 	test_desc.addr_lo = buf_dma & 0xffffffff;
15115 	test_desc.nic_mbuf = 0x00002100;
15116 	test_desc.len = size;
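	/* 0x00002100 above is the NIC-local address where the test
	 * buffer lands; the disabled validation block in tg3_test_dma()
	 * reads it back from 0x2100.
	 */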
15117 
15118 	/*
15119 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15120 	 * the *second* time the tg3 driver was getting loaded after an
15121 	 * initial scan.
15122 	 *
15123 	 * Broadcom tells me:
15124 	 *   ...the DMA engine is connected to the GRC block and a DMA
15125 	 *   reset may affect the GRC block in some unpredictable way...
15126 	 *   The behavior of resets to individual blocks has not been tested.
15127 	 *
15128 	 * Broadcom noted the GRC reset will also reset all sub-components.
15129 	 */
15130 	if (to_device) {
15131 		test_desc.cqid_sqid = (13 << 8) | 2;
15132 
15133 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15134 		udelay(40);
15135 	} else {
15136 		test_desc.cqid_sqid = (16 << 8) | 7;
15137 
15138 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15139 		udelay(40);
15140 	}
15141 	test_desc.flags = 0x00000005;
15142 
15143 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15144 		u32 val;
15145 
15146 		val = *(((u32 *)&test_desc) + i);
15147 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15148 				       sram_dma_descs + (i * sizeof(u32)));
15149 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15150 	}
15151 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15152 
15153 	if (to_device)
15154 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15155 	else
15156 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15157 
15158 	ret = -ENODEV;
15159 	for (i = 0; i < 40; i++) {
15160 		u32 val;
15161 
15162 		if (to_device)
15163 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15164 		else
15165 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15166 		if ((val & 0xffff) == sram_dma_descs) {
15167 			ret = 0;
15168 			break;
15169 		}
15170 
15171 		udelay(100);
15172 	}
15173 
15174 	return ret;
15175 }
15176 
15177 #define TEST_BUFFER_SIZE	0x2000
15178 
15179 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15180 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15181 	{ },
15182 };
15183 
15184 static int __devinit tg3_test_dma(struct tg3 *tp)
15185 {
15186 	dma_addr_t buf_dma;
15187 	u32 *buf, saved_dma_rwctrl;
15188 	int ret = 0;
15189 
15190 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15191 				 &buf_dma, GFP_KERNEL);
15192 	if (!buf) {
15193 		ret = -ENOMEM;
15194 		goto out_nofree;
15195 	}
15196 
15197 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15198 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15199 
15200 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15201 
15202 	if (tg3_flag(tp, 57765_PLUS))
15203 		goto out;
15204 
15205 	if (tg3_flag(tp, PCI_EXPRESS)) {
15206 		/* DMA read watermark not used on PCIE */
15207 		tp->dma_rwctrl |= 0x00180000;
15208 	} else if (!tg3_flag(tp, PCIX_MODE)) {
15209 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15210 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15211 			tp->dma_rwctrl |= 0x003f0000;
15212 		else
15213 			tp->dma_rwctrl |= 0x003f000f;
15214 	} else {
15215 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15216 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15217 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15218 			u32 read_water = 0x7;
15219 
15220 			/* If the 5704 is behind the EPB bridge, we can
15221 			 * do the less restrictive ONE_DMA workaround for
15222 			 * better performance.
15223 			 */
15224 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15225 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15226 				tp->dma_rwctrl |= 0x8000;
15227 			else if (ccval == 0x6 || ccval == 0x7)
15228 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15229 
15230 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15231 				read_water = 4;
15232 			/* Set bit 23 to enable PCIX hw bug fix */
15233 			tp->dma_rwctrl |=
15234 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15235 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15236 				(1 << 23);
15237 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15238 			/* 5780 always in PCIX mode */
15239 			tp->dma_rwctrl |= 0x00144000;
15240 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15241 			/* 5714 always in PCIX mode */
15242 			tp->dma_rwctrl |= 0x00148000;
15243 		} else {
15244 			tp->dma_rwctrl |= 0x001b000f;
15245 		}
15246 	}
15247 
15248 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15249 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15250 		tp->dma_rwctrl &= 0xfffffff0;
15251 
15252 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15253 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15254 		/* Remove this if it causes problems for some boards. */
15255 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15256 
15257 		/* On 5700/5701 chips, we need to set this bit.
15258 		 * Otherwise the chip will issue cacheline transactions
15259 		 * to streamable DMA memory without all of the byte
15260 		 * enables turned on.  This is an error on several
15261 		 * RISC PCI controllers, in particular sparc64.
15262 		 *
15263 		 * On 5703/5704 chips, this bit has been reassigned
15264 		 * a different meaning.  In particular, it is used
15265 		 * on those chips to enable a PCI-X workaround.
15266 		 */
15267 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15268 	}
15269 
15270 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15271 
15272 #if 0
15273 	/* Unneeded, already done by tg3_get_invariants.  */
15274 	tg3_switch_clocks(tp);
15275 #endif
15276 
15277 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15278 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15279 		goto out;
15280 
15281 	/* It is best to perform the DMA test with the maximum write burst
15282 	 * size to expose the 5700/5701 write DMA bug.
15283 	 */
15284 	saved_dma_rwctrl = tp->dma_rwctrl;
15285 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15286 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15287 
15288 	while (1) {
15289 		u32 *p = buf, i;
15290 
15291 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15292 			p[i] = i;
15293 
15294 		/* Send the buffer to the chip. */
15295 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15296 		if (ret) {
15297 			dev_err(&tp->pdev->dev,
15298 				"%s: Buffer write failed. err = %d\n",
15299 				__func__, ret);
15300 			break;
15301 		}
15302 
15303 #if 0
15304 		/* validate data reached card RAM correctly. */
15305 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15306 			u32 val;
15307 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15308 			if (le32_to_cpu(val) != p[i]) {
15309 				dev_err(&tp->pdev->dev,
15310 					"%s: Buffer corrupted on device! "
15311 					"(%d != %d)\n", __func__, val, i);
15312 				/* ret = -ENODEV here? */
15313 			}
15314 			p[i] = 0;
15315 		}
15316 #endif
15317 		/* Now read it back. */
15318 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15319 		if (ret) {
15320 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15321 				"err = %d\n", __func__, ret);
15322 			break;
15323 		}
15324 
15325 		/* Verify it. */
15326 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15327 			if (p[i] == i)
15328 				continue;
15329 
15330 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15331 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15332 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15333 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15334 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15335 				break;
15336 			} else {
15337 				dev_err(&tp->pdev->dev,
15338 					"%s: Buffer corrupted on read back! "
15339 					"(%d != %d)\n", __func__, p[i], i);
15340 				ret = -ENODEV;
15341 				goto out;
15342 			}
15343 		}
15344 
15345 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15346 			/* Success. */
15347 			ret = 0;
15348 			break;
15349 		}
15350 	}
15351 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15352 	    DMA_RWCTRL_WRITE_BNDRY_16) {
15353 		/* DMA test passed without adjusting the DMA boundary;
15354 		 * now look for chipsets that are known to expose the
15355 		 * DMA bug without failing the test.
15356 		 */
15357 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15358 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15359 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15360 		} else {
15361 			/* Safe to use the calculated DMA boundary. */
15362 			tp->dma_rwctrl = saved_dma_rwctrl;
15363 		}
15364 
15365 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15366 	}
15367 
15368 out:
15369 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15370 out_nofree:
15371 	return ret;
15372 }
15373 
15374 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15375 {
15376 	if (tg3_flag(tp, 57765_PLUS)) {
15377 		tp->bufmgr_config.mbuf_read_dma_low_water =
15378 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15379 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15380 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15381 		tp->bufmgr_config.mbuf_high_water =
15382 			DEFAULT_MB_HIGH_WATER_57765;
15383 
15384 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15385 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15386 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15387 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15388 		tp->bufmgr_config.mbuf_high_water_jumbo =
15389 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15390 	} else if (tg3_flag(tp, 5705_PLUS)) {
15391 		tp->bufmgr_config.mbuf_read_dma_low_water =
15392 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15393 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15394 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15395 		tp->bufmgr_config.mbuf_high_water =
15396 			DEFAULT_MB_HIGH_WATER_5705;
15397 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15398 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15399 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15400 			tp->bufmgr_config.mbuf_high_water =
15401 				DEFAULT_MB_HIGH_WATER_5906;
15402 		}
15403 
15404 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15405 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15406 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15407 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15408 		tp->bufmgr_config.mbuf_high_water_jumbo =
15409 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15410 	} else {
15411 		tp->bufmgr_config.mbuf_read_dma_low_water =
15412 			DEFAULT_MB_RDMA_LOW_WATER;
15413 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15414 			DEFAULT_MB_MACRX_LOW_WATER;
15415 		tp->bufmgr_config.mbuf_high_water =
15416 			DEFAULT_MB_HIGH_WATER;
15417 
15418 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15419 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15420 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15421 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15422 		tp->bufmgr_config.mbuf_high_water_jumbo =
15423 			DEFAULT_MB_HIGH_WATER_JUMBO;
15424 	}
15425 
15426 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15427 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15428 }
15429 
15430 static char * __devinit tg3_phy_string(struct tg3 *tp)
15431 {
15432 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15433 	case TG3_PHY_ID_BCM5400:	return "5400";
15434 	case TG3_PHY_ID_BCM5401:	return "5401";
15435 	case TG3_PHY_ID_BCM5411:	return "5411";
15436 	case TG3_PHY_ID_BCM5701:	return "5701";
15437 	case TG3_PHY_ID_BCM5703:	return "5703";
15438 	case TG3_PHY_ID_BCM5704:	return "5704";
15439 	case TG3_PHY_ID_BCM5705:	return "5705";
15440 	case TG3_PHY_ID_BCM5750:	return "5750";
15441 	case TG3_PHY_ID_BCM5752:	return "5752";
15442 	case TG3_PHY_ID_BCM5714:	return "5714";
15443 	case TG3_PHY_ID_BCM5780:	return "5780";
15444 	case TG3_PHY_ID_BCM5755:	return "5755";
15445 	case TG3_PHY_ID_BCM5787:	return "5787";
15446 	case TG3_PHY_ID_BCM5784:	return "5784";
15447 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15448 	case TG3_PHY_ID_BCM5906:	return "5906";
15449 	case TG3_PHY_ID_BCM5761:	return "5761";
15450 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15451 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15452 	case TG3_PHY_ID_BCM57765:	return "57765";
15453 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15454 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15455 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15456 	case 0:			return "serdes";
15457 	default:		return "unknown";
15458 	}
15459 }
15460 
15461 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15462 {
15463 	if (tg3_flag(tp, PCI_EXPRESS)) {
15464 		strcpy(str, "PCI Express");
15465 		return str;
15466 	} else if (tg3_flag(tp, PCIX_MODE)) {
15467 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15468 
15469 		strcpy(str, "PCIX:");
15470 
15471 		if ((clock_ctrl == 7) ||
15472 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15473 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15474 			strcat(str, "133MHz");
15475 		else if (clock_ctrl == 0)
15476 			strcat(str, "33MHz");
15477 		else if (clock_ctrl == 2)
15478 			strcat(str, "50MHz");
15479 		else if (clock_ctrl == 4)
15480 			strcat(str, "66MHz");
15481 		else if (clock_ctrl == 6)
15482 			strcat(str, "100MHz");
15483 	} else {
15484 		strcpy(str, "PCI:");
15485 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15486 			strcat(str, "66MHz");
15487 		else
15488 			strcat(str, "33MHz");
15489 	}
15490 	if (tg3_flag(tp, PCI_32BIT))
15491 		strcat(str, ":32-bit");
15492 	else
15493 		strcat(str, ":64-bit");
15494 	return str;
15495 }
15496 
15497 static void __devinit tg3_init_coal(struct tg3 *tp)
15498 {
15499 	struct ethtool_coalesce *ec = &tp->coal;
15500 
15501 	memset(ec, 0, sizeof(*ec));
15502 	ec->cmd = ETHTOOL_GCOALESCE;
15503 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15504 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15505 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15506 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15507 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15508 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15509 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15510 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15511 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15512 
15513 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15514 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15515 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15516 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15517 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15518 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15519 	}
15520 
15521 	if (tg3_flag(tp, 5705_PLUS)) {
15522 		ec->rx_coalesce_usecs_irq = 0;
15523 		ec->tx_coalesce_usecs_irq = 0;
15524 		ec->stats_block_coalesce_usecs = 0;
15525 	}
15526 }
15527 
15528 static int __devinit tg3_init_one(struct pci_dev *pdev,
15529 				  const struct pci_device_id *ent)
15530 {
15531 	struct net_device *dev;
15532 	struct tg3 *tp;
15533 	int i, err, pm_cap;
15534 	u32 sndmbx, rcvmbx, intmbx;
15535 	char str[40];
15536 	u64 dma_mask, persist_dma_mask;
15537 	netdev_features_t features = 0;
15538 
15539 	printk_once(KERN_INFO "%s\n", version);
15540 
15541 	err = pci_enable_device(pdev);
15542 	if (err) {
15543 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15544 		return err;
15545 	}
15546 
15547 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15548 	if (err) {
15549 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15550 		goto err_out_disable_pdev;
15551 	}
15552 
15553 	pci_set_master(pdev);
15554 
15555 	/* Find power-management capability. */
15556 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15557 	if (pm_cap == 0) {
15558 		dev_err(&pdev->dev,
15559 			"Cannot find Power Management capability, aborting\n");
15560 		err = -EIO;
15561 		goto err_out_free_res;
15562 	}
15563 
15564 	err = pci_set_power_state(pdev, PCI_D0);
15565 	if (err) {
15566 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15567 		goto err_out_free_res;
15568 	}
15569 
15570 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15571 	if (!dev) {
15572 		err = -ENOMEM;
15573 		goto err_out_power_down;
15574 	}
15575 
15576 	SET_NETDEV_DEV(dev, &pdev->dev);
15577 
15578 	tp = netdev_priv(dev);
15579 	tp->pdev = pdev;
15580 	tp->dev = dev;
15581 	tp->pm_cap = pm_cap;
15582 	tp->rx_mode = TG3_DEF_RX_MODE;
15583 	tp->tx_mode = TG3_DEF_TX_MODE;
15584 
15585 	if (tg3_debug > 0)
15586 		tp->msg_enable = tg3_debug;
15587 	else
15588 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15589 
15590 	/* The word/byte swap controls here govern register access byte
15591 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15592 	 * setting below.
15593 	 */
15594 	tp->misc_host_ctrl =
15595 		MISC_HOST_CTRL_MASK_PCI_INT |
15596 		MISC_HOST_CTRL_WORD_SWAP |
15597 		MISC_HOST_CTRL_INDIR_ACCESS |
15598 		MISC_HOST_CTRL_PCISTATE_RW;
15599 
15600 	/* The NONFRM (non-frame) byte/word swap controls take effect
15601 	 * on descriptor entries, anything which isn't packet data.
15602 	 *
15603 	 * The StrongARM chips on the board (one for tx, one for rx)
15604 	 * are running in big-endian mode.
15605 	 */
15606 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15607 			GRC_MODE_WSWAP_NONFRM_DATA);
15608 #ifdef __BIG_ENDIAN
15609 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15610 #endif
15611 	spin_lock_init(&tp->lock);
15612 	spin_lock_init(&tp->indirect_lock);
15613 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15614 
15615 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15616 	if (!tp->regs) {
15617 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15618 		err = -ENOMEM;
15619 		goto err_out_free_dev;
15620 	}
15621 
15622 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15623 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15624 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15625 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15626 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15627 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15628 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15629 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15630 		tg3_flag_set(tp, ENABLE_APE);
15631 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15632 		if (!tp->aperegs) {
15633 			dev_err(&pdev->dev,
15634 				"Cannot map APE registers, aborting\n");
15635 			err = -ENOMEM;
15636 			goto err_out_iounmap;
15637 		}
15638 	}
15639 
15640 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15641 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15642 
15643 	dev->ethtool_ops = &tg3_ethtool_ops;
15644 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15645 	dev->netdev_ops = &tg3_netdev_ops;
15646 	dev->irq = pdev->irq;
15647 
15648 	err = tg3_get_invariants(tp);
15649 	if (err) {
15650 		dev_err(&pdev->dev,
15651 			"Problem fetching invariants of chip, aborting\n");
15652 		goto err_out_apeunmap;
15653 	}
15654 
15655 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15656 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15657 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15658 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15659 	 * do DMA address check in tg3_start_xmit().
15660 	 */
15661 	if (tg3_flag(tp, IS_5788))
15662 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15663 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15664 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15665 #ifdef CONFIG_HIGHMEM
15666 		dma_mask = DMA_BIT_MASK(64);
15667 #endif
15668 	} else
15669 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15670 
15671 	/* Configure DMA attributes. */
15672 	if (dma_mask > DMA_BIT_MASK(32)) {
15673 		err = pci_set_dma_mask(pdev, dma_mask);
15674 		if (!err) {
15675 			features |= NETIF_F_HIGHDMA;
15676 			err = pci_set_consistent_dma_mask(pdev,
15677 							  persist_dma_mask);
15678 			if (err < 0) {
15679 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15680 					"DMA for consistent allocations\n");
15681 				goto err_out_apeunmap;
15682 			}
15683 		}
15684 	}
15685 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15686 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15687 		if (err) {
15688 			dev_err(&pdev->dev,
15689 				"No usable DMA configuration, aborting\n");
15690 			goto err_out_apeunmap;
15691 		}
15692 	}
15693 
15694 	tg3_init_bufmgr_config(tp);
15695 
15696 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15697 
15698 	/* 5700 B0 chips do not support checksumming correctly due
15699 	 * to hardware bugs.
15700 	 */
15701 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15702 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15703 
15704 		if (tg3_flag(tp, 5755_PLUS))
15705 			features |= NETIF_F_IPV6_CSUM;
15706 	}
15707 
15708 	/* TSO is on by default on chips that support hardware TSO.
15709 	 * Firmware TSO on older chips gives lower performance, so it
15710 	 * is off by default, but can be enabled using ethtool.
15711 	 */
15712 	if ((tg3_flag(tp, HW_TSO_1) ||
15713 	     tg3_flag(tp, HW_TSO_2) ||
15714 	     tg3_flag(tp, HW_TSO_3)) &&
15715 	    (features & NETIF_F_IP_CSUM))
15716 		features |= NETIF_F_TSO;
15717 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15718 		if (features & NETIF_F_IPV6_CSUM)
15719 			features |= NETIF_F_TSO6;
15720 		if (tg3_flag(tp, HW_TSO_3) ||
15721 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15722 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15723 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15724 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15725 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15726 			features |= NETIF_F_TSO_ECN;
15727 	}
15728 
15729 	dev->features |= features;
15730 	dev->vlan_features |= features;
15731 
15732 	/*
15733 	 * Add loopback capability only for a subset of devices that support
15734 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15735 	 * loopback for the remaining devices.
15736 	 */
15737 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15738 	    !tg3_flag(tp, CPMU_PRESENT))
15739 		/* Add the loopback capability */
15740 		features |= NETIF_F_LOOPBACK;
15741 
15742 	dev->hw_features |= features;
15743 
15744 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15745 	    !tg3_flag(tp, TSO_CAPABLE) &&
15746 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15747 		tg3_flag_set(tp, MAX_RXPEND_64);
15748 		tp->rx_pending = 63;
15749 	}
15750 
15751 	err = tg3_get_device_address(tp);
15752 	if (err) {
15753 		dev_err(&pdev->dev,
15754 			"Could not obtain valid ethernet address, aborting\n");
15755 		goto err_out_apeunmap;
15756 	}
15757 
15758 	/*
15759 	 * Reset the chip in case the UNDI or EFI driver did not shut it
15760 	 * down; otherwise the DMA self test will enable the WDMAC and
15761 	 * we'll see (spurious) pending DMA on the PCI bus at that point.
15762 	 */
15763 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15764 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15765 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15766 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15767 	}
15768 
15769 	err = tg3_test_dma(tp);
15770 	if (err) {
15771 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15772 		goto err_out_apeunmap;
15773 	}
15774 
15775 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15776 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15777 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15778 	for (i = 0; i < tp->irq_max; i++) {
15779 		struct tg3_napi *tnapi = &tp->napi[i];
15780 
15781 		tnapi->tp = tp;
15782 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15783 
15784 		tnapi->int_mbox = intmbx;
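		/* The first five interrupt mailboxes are spaced 8 bytes
		 * apart; any further vectors use 4-byte mailboxes.
		 */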
15785 		if (i <= 4)
15786 			intmbx += 0x8;
15787 		else
15788 			intmbx += 0x4;
15789 
15790 		tnapi->consmbox = rcvmbx;
15791 		tnapi->prodmbox = sndmbx;
15792 
15793 		if (i)
15794 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15795 		else
15796 			tnapi->coal_now = HOSTCC_MODE_NOW;
15797 
15798 		if (!tg3_flag(tp, SUPPORT_MSIX))
15799 			break;
15800 
15801 		/*
15802 		 * If we support MSIX, we'll be using RSS.  If we're using
15803 		 * RSS, the first vector only handles link interrupts and the
15804 		 * remaining vectors handle rx and tx interrupts.  Reuse the
15805 		 * mailbox values for the next iteration.  The values we set up
15806 		 * above are still useful for the single vectored mode.
15807 		 */
15808 		if (!i)
15809 			continue;
15810 
15811 		rcvmbx += 0x8;
15812 
15813 		if (sndmbx & 0x4)
15814 			sndmbx -= 0x4;
15815 		else
15816 			sndmbx += 0xc;
15817 	}
15818 
15819 	tg3_init_coal(tp);
15820 
15821 	pci_set_drvdata(pdev, dev);
15822 
15823 	if (tg3_flag(tp, 5717_PLUS)) {
15824 		/* Resume a low-power mode */
15825 		tg3_frob_aux_power(tp, false);
15826 	}
15827 
15828 	tg3_timer_init(tp);
15829 
15830 	err = register_netdev(dev);
15831 	if (err) {
15832 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15833 		goto err_out_apeunmap;
15834 	}
15835 
15836 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15837 		    tp->board_part_number,
15838 		    tp->pci_chip_rev_id,
15839 		    tg3_bus_string(tp, str),
15840 		    dev->dev_addr);
15841 
15842 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15843 		struct phy_device *phydev;
15844 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15845 		netdev_info(dev,
15846 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15847 			    phydev->drv->name, dev_name(&phydev->dev));
15848 	} else {
15849 		char *ethtype;
15850 
15851 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15852 			ethtype = "10/100Base-TX";
15853 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15854 			ethtype = "1000Base-SX";
15855 		else
15856 			ethtype = "10/100/1000Base-T";
15857 
15858 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15859 			    "(WireSpeed[%d], EEE[%d])\n",
15860 			    tg3_phy_string(tp), ethtype,
15861 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15862 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15863 	}
15864 
15865 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15866 		    (dev->features & NETIF_F_RXCSUM) != 0,
15867 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15868 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15869 		    tg3_flag(tp, ENABLE_ASF) != 0,
15870 		    tg3_flag(tp, TSO_CAPABLE) != 0);
15871 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15872 		    tp->dma_rwctrl,
15873 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15874 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15875 
15876 	pci_save_state(pdev);
15877 
15878 	return 0;
15879 
15880 err_out_apeunmap:
15881 	if (tp->aperegs) {
15882 		iounmap(tp->aperegs);
15883 		tp->aperegs = NULL;
15884 	}
15885 
15886 err_out_iounmap:
15887 	if (tp->regs) {
15888 		iounmap(tp->regs);
15889 		tp->regs = NULL;
15890 	}
15891 
15892 err_out_free_dev:
15893 	free_netdev(dev);
15894 
15895 err_out_power_down:
15896 	pci_set_power_state(pdev, PCI_D3hot);
15897 
15898 err_out_free_res:
15899 	pci_release_regions(pdev);
15900 
15901 err_out_disable_pdev:
15902 	pci_disable_device(pdev);
15903 	pci_set_drvdata(pdev, NULL);
15904 	return err;
15905 }
15906 
15907 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15908 {
15909 	struct net_device *dev = pci_get_drvdata(pdev);
15910 
15911 	if (dev) {
15912 		struct tg3 *tp = netdev_priv(dev);
15913 
15914 		if (tp->fw)
15915 			release_firmware(tp->fw);
15916 
15917 		tg3_reset_task_cancel(tp);
15918 
15919 		if (tg3_flag(tp, USE_PHYLIB)) {
15920 			tg3_phy_fini(tp);
15921 			tg3_mdio_fini(tp);
15922 		}
15923 
15924 		unregister_netdev(dev);
15925 		if (tp->aperegs) {
15926 			iounmap(tp->aperegs);
15927 			tp->aperegs = NULL;
15928 		}
15929 		if (tp->regs) {
15930 			iounmap(tp->regs);
15931 			tp->regs = NULL;
15932 		}
15933 		free_netdev(dev);
15934 		pci_release_regions(pdev);
15935 		pci_disable_device(pdev);
15936 		pci_set_drvdata(pdev, NULL);
15937 	}
15938 }
15939 
15940 #ifdef CONFIG_PM_SLEEP
15941 static int tg3_suspend(struct device *device)
15942 {
15943 	struct pci_dev *pdev = to_pci_dev(device);
15944 	struct net_device *dev = pci_get_drvdata(pdev);
15945 	struct tg3 *tp = netdev_priv(dev);
15946 	int err;
15947 
15948 	if (!netif_running(dev))
15949 		return 0;
15950 
15951 	tg3_reset_task_cancel(tp);
15952 	tg3_phy_stop(tp);
15953 	tg3_netif_stop(tp);
15954 
15955 	tg3_timer_stop(tp);
15956 
15957 	tg3_full_lock(tp, 1);
15958 	tg3_disable_ints(tp);
15959 	tg3_full_unlock(tp);
15960 
15961 	netif_device_detach(dev);
15962 
15963 	tg3_full_lock(tp, 0);
15964 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15965 	tg3_flag_clear(tp, INIT_COMPLETE);
15966 	tg3_full_unlock(tp);
15967 
15968 	err = tg3_power_down_prepare(tp);
15969 	if (err) {
15970 		int err2;
15971 
15972 		tg3_full_lock(tp, 0);
15973 
15974 		tg3_flag_set(tp, INIT_COMPLETE);
15975 		err2 = tg3_restart_hw(tp, 1);
15976 		if (err2)
15977 			goto out;
15978 
15979 		tg3_timer_start(tp);
15980 
15981 		netif_device_attach(dev);
15982 		tg3_netif_start(tp);
15983 
15984 out:
15985 		tg3_full_unlock(tp);
15986 
15987 		if (!err2)
15988 			tg3_phy_start(tp);
15989 	}
15990 
15991 	return err;
15992 }
15993 
15994 static int tg3_resume(struct device *device)
15995 {
15996 	struct pci_dev *pdev = to_pci_dev(device);
15997 	struct net_device *dev = pci_get_drvdata(pdev);
15998 	struct tg3 *tp = netdev_priv(dev);
15999 	int err;
16000 
16001 	if (!netif_running(dev))
16002 		return 0;
16003 
16004 	netif_device_attach(dev);
16005 
16006 	tg3_full_lock(tp, 0);
16007 
16008 	tg3_flag_set(tp, INIT_COMPLETE);
16009 	err = tg3_restart_hw(tp, 1);
16010 	if (err)
16011 		goto out;
16012 
16013 	tg3_timer_start(tp);
16014 
16015 	tg3_netif_start(tp);
16016 
16017 out:
16018 	tg3_full_unlock(tp);
16019 
16020 	if (!err)
16021 		tg3_phy_start(tp);
16022 
16023 	return err;
16024 }
16025 
16026 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16027 #define TG3_PM_OPS (&tg3_pm_ops)
16028 
16029 #else
16030 
16031 #define TG3_PM_OPS NULL
16032 
16033 #endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
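
/*
 * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI error-recovery core
 * to reset the slot and then invoke tg3_io_slot_reset() below;
 * PCI_ERS_RESULT_DISCONNECT (on pci_channel_io_perm_failure) tells it
 * the device is gone and recovery should not be attempted.
 */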

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
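
/*
 * The pci_restore_state()/pci_save_state() pair above first rewrites
 * the config space from the copy captured earlier, then re-saves it so
 * that a later recovery pass still has a valid saved copy to restore
 * from.
 */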

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
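
/*
 * On an uncorrectable PCI error the core walks these handlers in
 * order: error_detected() to quiesce the driver, slot_reset() after
 * the link/slot has been reset, and finally resume() to restart
 * traffic once recovery has succeeded.
 */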

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
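
/*
 * On kernels that provide the module_pci_driver() helper, the
 * tg3_init()/tg3_cleanup() boilerplate above could be collapsed to a
 * single line (a sketch, assuming the helper is available):
 *
 *	module_pci_driver(tg3_driver);
 */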