/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
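
/* Illustrative usage: tg3_flag(tp, ENABLE_APE) tests the
 * TG3_FLAG_ENABLE_APE bit in tp->tg3_flags, while tg3_flag_set() and
 * tg3_flag_clear() update it through the atomic bitop helpers above.
 */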

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
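
/* NEXT_TX is the '& (foo - 1)' form mentioned above: because
 * TG3_TX_RING_SIZE is a power of two, the mask is equivalent to
 * '% TG3_TX_RING_SIZE'.
 */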

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
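/* i.e. restart the queue once a quarter of the configured ring is free */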
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

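/* Counter names reported through the ethtool statistics interface;
 * TG3_NUM_STATS below is sized from this table via ARRAY_SIZE().
 */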
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
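
/* Example (cf. tg3_switch_clocks() below):
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 * writes the register, waits 40 usec, reads the value back to flush the
 * posted write, then waits another 40 usec, as implemented by
 * _tw32_flush() above.
 */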

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
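		/* fallthru */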
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
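		/* fallthru */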
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
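/* worst case ~50 ms: each poll pass in the loops below waits 10 usec */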

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

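/* PHY resets are handled separately (via tg3_bmcr_reset() in
 * tg3_mdio_init() below), so the mii_bus reset hook is a no-op.
 */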
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
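	/* Each polling pass below waits ~8 usec, so convert the remaining
	 * time into a pass count (rounding up).
	 */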
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

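/* Map FLOW_CTRL_TX/RX settings onto the 1000BASE-X pause advertisement
 * bits: TX+RX -> PAUSE, TX only -> ASYM, RX only -> PAUSE|ASYM,
 * neither -> 0.
 */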
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675 	u16 miireg;
1676 
1677 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678 		miireg = ADVERTISE_1000XPAUSE;
1679 	else if (flow_ctrl & FLOW_CTRL_TX)
1680 		miireg = ADVERTISE_1000XPSE_ASYM;
1681 	else if (flow_ctrl & FLOW_CTRL_RX)
1682 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683 	else
1684 		miireg = 0;
1685 
1686 	return miireg;
1687 }
1688 
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691 	u8 cap = 0;
1692 
1693 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696 		if (lcladv & ADVERTISE_1000XPAUSE)
1697 			cap = FLOW_CTRL_RX;
1698 		if (rmtadv & ADVERTISE_1000XPAUSE)
1699 			cap = FLOW_CTRL_TX;
1700 	}
1701 
1702 	return cap;
1703 }
1704 
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707 	u8 autoneg;
1708 	u8 flowctrl = 0;
1709 	u32 old_rx_mode = tp->rx_mode;
1710 	u32 old_tx_mode = tp->tx_mode;
1711 
1712 	if (tg3_flag(tp, USE_PHYLIB))
1713 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714 	else
1715 		autoneg = tp->link_config.autoneg;
1716 
1717 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720 		else
1721 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722 	} else
1723 		flowctrl = tp->link_config.flowctrl;
1724 
1725 	tp->link_config.active_flowctrl = flowctrl;
1726 
1727 	if (flowctrl & FLOW_CTRL_RX)
1728 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729 	else
1730 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731 
1732 	if (old_rx_mode != tp->rx_mode)
1733 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1734 
1735 	if (flowctrl & FLOW_CTRL_TX)
1736 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737 	else
1738 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739 
1740 	if (old_tx_mode != tp->tx_mode)
1741 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743 
1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746 	u8 oldflowctrl, linkmesg = 0;
1747 	u32 mac_mode, lcl_adv, rmt_adv;
1748 	struct tg3 *tp = netdev_priv(dev);
1749 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750 
1751 	spin_lock_bh(&tp->lock);
1752 
1753 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754 				    MAC_MODE_HALF_DUPLEX);
1755 
1756 	oldflowctrl = tp->link_config.active_flowctrl;
1757 
1758 	if (phydev->link) {
1759 		lcl_adv = 0;
1760 		rmt_adv = 0;
1761 
1762 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1764 		else if (phydev->speed == SPEED_1000 ||
1765 			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767 		else
1768 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1769 
1770 		if (phydev->duplex == DUPLEX_HALF)
1771 			mac_mode |= MAC_MODE_HALF_DUPLEX;
1772 		else {
1773 			lcl_adv = mii_advertise_flowctrl(
1774 				  tp->link_config.flowctrl);
1775 
1776 			if (phydev->pause)
1777 				rmt_adv = LPA_PAUSE_CAP;
1778 			if (phydev->asym_pause)
1779 				rmt_adv |= LPA_PAUSE_ASYM;
1780 		}
1781 
1782 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783 	} else
1784 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785 
1786 	if (mac_mode != tp->mac_mode) {
1787 		tp->mac_mode = mac_mode;
1788 		tw32_f(MAC_MODE, tp->mac_mode);
1789 		udelay(40);
1790 	}
1791 
1792 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793 		if (phydev->speed == SPEED_10)
1794 			tw32(MAC_MI_STAT,
1795 			     MAC_MI_STAT_10MBPS_MODE |
1796 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797 		else
1798 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799 	}
1800 
1801 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802 		tw32(MAC_TX_LENGTHS,
1803 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1805 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806 	else
1807 		tw32(MAC_TX_LENGTHS,
1808 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1810 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811 
1812 	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813 	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814 	    phydev->speed != tp->link_config.active_speed ||
1815 	    phydev->duplex != tp->link_config.active_duplex ||
1816 	    oldflowctrl != tp->link_config.active_flowctrl)
1817 		linkmesg = 1;
1818 
1819 	tp->link_config.active_speed = phydev->speed;
1820 	tp->link_config.active_duplex = phydev->duplex;
1821 
1822 	spin_unlock_bh(&tp->lock);
1823 
1824 	if (linkmesg)
1825 		tg3_link_report(tp);
1826 }
1827 
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830 	struct phy_device *phydev;
1831 
1832 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833 		return 0;
1834 
1835 	/* Bring the PHY back to a known state. */
1836 	tg3_bmcr_reset(tp);
1837 
1838 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839 
1840 	/* Attach the MAC to the PHY. */
1841 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842 			     phydev->dev_flags, phydev->interface);
1843 	if (IS_ERR(phydev)) {
1844 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845 		return PTR_ERR(phydev);
1846 	}
1847 
1848 	/* Mask with MAC supported features. */
1849 	switch (phydev->interface) {
1850 	case PHY_INTERFACE_MODE_GMII:
1851 	case PHY_INTERFACE_MODE_RGMII:
1852 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853 			phydev->supported &= (PHY_GBIT_FEATURES |
1854 					      SUPPORTED_Pause |
1855 					      SUPPORTED_Asym_Pause);
1856 			break;
1857 		}
1858 		/* fallthru */
1859 	case PHY_INTERFACE_MODE_MII:
1860 		phydev->supported &= (PHY_BASIC_FEATURES |
1861 				      SUPPORTED_Pause |
1862 				      SUPPORTED_Asym_Pause);
1863 		break;
1864 	default:
1865 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866 		return -EINVAL;
1867 	}
1868 
1869 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870 
1871 	phydev->advertising = phydev->supported;
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878 	struct phy_device *phydev;
1879 
1880 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881 		return;
1882 
1883 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884 
1885 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887 		phydev->speed = tp->link_config.orig_speed;
1888 		phydev->duplex = tp->link_config.orig_duplex;
1889 		phydev->autoneg = tp->link_config.orig_autoneg;
1890 		phydev->advertising = tp->link_config.orig_advertising;
1891 	}
1892 
1893 	phy_start(phydev);
1894 
1895 	phy_start_aneg(phydev);
1896 }
1897 
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901 		return;
1902 
1903 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905 
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911 	}
1912 }
1913 
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916 	int err;
1917 	u32 val;
1918 
1919 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920 		return 0;
1921 
1922 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923 		/* Cannot do read-modify-write on 5401 */
1924 		err = tg3_phy_auxctl_write(tp,
1925 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927 					   0x4c20);
1928 		goto done;
1929 	}
1930 
1931 	err = tg3_phy_auxctl_read(tp,
1932 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933 	if (err)
1934 		return err;
1935 
1936 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937 	err = tg3_phy_auxctl_write(tp,
1938 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939 
1940 done:
1941 	return err;
1942 }
1943 
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946 	u32 phytest;
1947 
1948 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949 		u32 phy;
1950 
1951 		tg3_writephy(tp, MII_TG3_FET_TEST,
1952 			     phytest | MII_TG3_FET_SHADOW_EN);
1953 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954 			if (enable)
1955 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956 			else
1957 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959 		}
1960 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961 	}
1962 }
1963 
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966 	u32 reg;
1967 
1968 	if (!tg3_flag(tp, 5705_PLUS) ||
1969 	    (tg3_flag(tp, 5717_PLUS) &&
1970 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971 		return;
1972 
1973 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974 		tg3_phy_fet_toggle_apd(tp, enable);
1975 		return;
1976 	}
1977 
1978 	reg = MII_TG3_MISC_SHDW_WREN |
1979 	      MII_TG3_MISC_SHDW_SCR5_SEL |
1980 	      MII_TG3_MISC_SHDW_SCR5_LPED |
1981 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
1983 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
1984 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986 
1987 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988 
1989 
1990 	reg = MII_TG3_MISC_SHDW_WREN |
1991 	      MII_TG3_MISC_SHDW_APD_SEL |
1992 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993 	if (enable)
1994 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995 
1996 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998 
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001 	u32 phy;
2002 
2003 	if (!tg3_flag(tp, 5705_PLUS) ||
2004 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005 		return;
2006 
2007 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008 		u32 ephy;
2009 
2010 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012 
2013 			tg3_writephy(tp, MII_TG3_FET_TEST,
2014 				     ephy | MII_TG3_FET_SHADOW_EN);
2015 			if (!tg3_readphy(tp, reg, &phy)) {
2016 				if (enable)
2017 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018 				else
2019 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020 				tg3_writephy(tp, reg, phy);
2021 			}
2022 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023 		}
2024 	} else {
2025 		int ret;
2026 
2027 		ret = tg3_phy_auxctl_read(tp,
2028 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029 		if (!ret) {
2030 			if (enable)
2031 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032 			else
2033 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034 			tg3_phy_auxctl_write(tp,
2035 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036 		}
2037 	}
2038 }
2039 
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042 	int ret;
2043 	u32 val;
2044 
2045 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046 		return;
2047 
2048 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049 	if (!ret)
2050 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053 
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056 	u32 otp, phy;
2057 
2058 	if (!tp->phy_otp)
2059 		return;
2060 
2061 	otp = tp->phy_otp;
2062 
2063 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064 		return;
2065 
2066 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069 
2070 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073 
2074 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077 
2078 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080 
2081 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083 
2084 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087 
2088 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090 
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093 	u32 val;
2094 
2095 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096 		return;
2097 
2098 	tp->setlpicnt = 0;
2099 
2100 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101 	    current_link_up == 1 &&
2102 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2103 	    (tp->link_config.active_speed == SPEED_100 ||
2104 	     tp->link_config.active_speed == SPEED_1000)) {
2105 		u32 eeectl;
2106 
2107 		if (tp->link_config.active_speed == SPEED_1000)
2108 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109 		else
2110 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111 
2112 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113 
2114 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115 				  TG3_CL45_D7_EEERES_STAT, &val);
2116 
2117 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119 			tp->setlpicnt = 2;
2120 	}
2121 
2122 	if (!tp->setlpicnt) {
2123 		if (current_link_up == 1 &&
2124 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127 		}
2128 
2129 		val = tr32(TG3_CPMU_EEE_MODE);
2130 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131 	}
2132 }
2133 
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136 	u32 val;
2137 
2138 	if (tp->link_config.active_speed == SPEED_1000 &&
2139 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141 	     tg3_flag(tp, 57765_CLASS)) &&
2142 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143 		val = MII_TG3_DSP_TAP26_ALNOKO |
2144 		      MII_TG3_DSP_TAP26_RMRXSTO;
2145 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147 	}
2148 
2149 	val = tr32(TG3_CPMU_EEE_MODE);
2150 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152 
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155 	int limit = 100;
2156 
2157 	while (limit--) {
2158 		u32 tmp32;
2159 
2160 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161 			if ((tmp32 & 0x1000) == 0)
2162 				break;
2163 		}
2164 	}
2165 	if (limit < 0)
2166 		return -EBUSY;
2167 
2168 	return 0;
2169 }
2170 
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173 	static const u32 test_pat[4][6] = {
2174 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178 	};
2179 	int chan;
2180 
2181 	for (chan = 0; chan < 4; chan++) {
2182 		int i;
2183 
2184 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185 			     (chan * 0x2000) | 0x0200);
2186 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187 
2188 		for (i = 0; i < 6; i++)
2189 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190 				     test_pat[chan][i]);
2191 
2192 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193 		if (tg3_wait_macro_done(tp)) {
2194 			*resetp = 1;
2195 			return -EBUSY;
2196 		}
2197 
2198 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199 			     (chan * 0x2000) | 0x0200);
2200 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201 		if (tg3_wait_macro_done(tp)) {
2202 			*resetp = 1;
2203 			return -EBUSY;
2204 		}
2205 
2206 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207 		if (tg3_wait_macro_done(tp)) {
2208 			*resetp = 1;
2209 			return -EBUSY;
2210 		}
2211 
2212 		for (i = 0; i < 6; i += 2) {
2213 			u32 low, high;
2214 
2215 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217 			    tg3_wait_macro_done(tp)) {
2218 				*resetp = 1;
2219 				return -EBUSY;
2220 			}
2221 			low &= 0x7fff;
2222 			high &= 0x000f;
2223 			if (low != test_pat[chan][i] ||
2224 			    high != test_pat[chan][i+1]) {
2225 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228 
2229 				return -EBUSY;
2230 			}
2231 		}
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239 	int chan;
2240 
2241 	for (chan = 0; chan < 4; chan++) {
2242 		int i;
2243 
2244 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245 			     (chan * 0x2000) | 0x0200);
2246 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247 		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2249 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250 		if (tg3_wait_macro_done(tp))
2251 			return -EBUSY;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259 	u32 reg32, phy9_orig;
2260 	int retries, do_phy_reset, err;
2261 
2262 	retries = 10;
2263 	do_phy_reset = 1;
2264 	do {
2265 		if (do_phy_reset) {
2266 			err = tg3_bmcr_reset(tp);
2267 			if (err)
2268 				return err;
2269 			do_phy_reset = 0;
2270 		}
2271 
2272 		/* Disable transmitter and interrupt.  */
2273 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274 			continue;
2275 
2276 		reg32 |= 0x3000;
2277 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278 
		/* Set full-duplex, 1000 Mbps.  */
2280 		tg3_writephy(tp, MII_BMCR,
2281 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2282 
2283 		/* Set to master mode.  */
2284 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285 			continue;
2286 
2287 		tg3_writephy(tp, MII_CTRL1000,
2288 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289 
2290 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291 		if (err)
2292 			return err;
2293 
2294 		/* Block the PHY control access.  */
2295 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2296 
2297 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298 		if (!err)
2299 			break;
2300 	} while (--retries);
2301 
2302 	err = tg3_phy_reset_chanpat(tp);
2303 	if (err)
2304 		return err;
2305 
2306 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2307 
2308 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310 
2311 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312 
2313 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314 
2315 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316 		reg32 &= ~0x3000;
2317 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318 	} else if (!err)
2319 		err = -EBUSY;
2320 
2321 	return err;
2322 }
2323 
/* This will reset the tigon3 PHY and then apply the chip-specific
 * workarounds needed to bring it back to a fully operational state.
 */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329 	u32 val, cpmuctrl;
2330 	int err;
2331 
2332 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333 		val = tr32(GRC_MISC_CFG);
2334 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335 		udelay(40);
2336 	}
2337 	err  = tg3_readphy(tp, MII_BMSR, &val);
2338 	err |= tg3_readphy(tp, MII_BMSR, &val);
2339 	if (err != 0)
2340 		return -EBUSY;
2341 
2342 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343 		netif_carrier_off(tp->dev);
2344 		tg3_link_report(tp);
2345 	}
2346 
2347 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350 		err = tg3_phy_reset_5703_4_5(tp);
2351 		if (err)
2352 			return err;
2353 		goto out;
2354 	}
2355 
2356 	cpmuctrl = 0;
2357 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2360 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361 			tw32(TG3_CPMU_CTRL,
2362 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363 	}
2364 
2365 	err = tg3_bmcr_reset(tp);
2366 	if (err)
2367 		return err;
2368 
2369 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372 
2373 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2374 	}
2375 
2376 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2381 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382 			udelay(40);
2383 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384 		}
2385 	}
2386 
2387 	if (tg3_flag(tp, 5717_PLUS) &&
2388 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389 		return 0;
2390 
2391 	tg3_phy_apply_otp(tp);
2392 
2393 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394 		tg3_phy_toggle_apd(tp, true);
2395 	else
2396 		tg3_phy_toggle_apd(tp, false);
2397 
2398 out:
2399 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2403 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404 	}
2405 
2406 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409 	}
2410 
2411 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2414 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2415 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417 		}
2418 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423 				tg3_writephy(tp, MII_TG3_TEST1,
2424 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2425 			} else
2426 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427 
2428 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429 		}
2430 	}
2431 
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2434 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435 		/* Cannot do read-modify-write on 5401 */
2436 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438 		/* Set bit 14 with read-modify-write to preserve other bits */
2439 		err = tg3_phy_auxctl_read(tp,
2440 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441 		if (!err)
2442 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444 	}
2445 
	/* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
	 * jumbo frame transmission.
	 */
2449 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453 	}
2454 
2455 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456 		/* adjust output voltage */
2457 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458 	}
2459 
2460 	tg3_phy_toggle_automdix(tp, 1);
2461 	tg3_phy_set_wirespeed(tp);
2462 	return 0;
2463 }
2464 
2465 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2467 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2468 					  TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2474 
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2480 
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483 	u32 status, shift;
2484 
2485 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488 	else
2489 		status = tr32(TG3_CPMU_DRV_STATUS);
2490 
2491 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2493 	status |= (newstat << shift);
2494 
2495 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498 	else
2499 		tw32(TG3_CPMU_DRV_STATUS, status);
2500 
2501 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503 
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506 	if (!tg3_flag(tp, IS_NIC))
2507 		return 0;
2508 
2509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513 			return -EIO;
2514 
2515 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516 
2517 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2519 
2520 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521 	} else {
2522 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2524 	}
2525 
2526 	return 0;
2527 }
2528 
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531 	u32 grc_local_ctrl;
2532 
2533 	if (!tg3_flag(tp, IS_NIC) ||
2534 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536 		return;
2537 
2538 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539 
2540 	tw32_wait_f(GRC_LOCAL_CTRL,
2541 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2543 
2544 	tw32_wait_f(GRC_LOCAL_CTRL,
2545 		    grc_local_ctrl,
2546 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2547 
2548 	tw32_wait_f(GRC_LOCAL_CTRL,
2549 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552 
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555 	if (!tg3_flag(tp, IS_NIC))
2556 		return;
2557 
2558 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561 			    (GRC_LCLCTRL_GPIO_OE0 |
2562 			     GRC_LCLCTRL_GPIO_OE1 |
2563 			     GRC_LCLCTRL_GPIO_OE2 |
2564 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2565 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2566 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2570 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571 				     GRC_LCLCTRL_GPIO_OE1 |
2572 				     GRC_LCLCTRL_GPIO_OE2 |
2573 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2574 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2575 				     tp->grc_local_ctrl;
2576 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2578 
2579 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2582 
2583 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2586 	} else {
2587 		u32 no_gpio2;
2588 		u32 grc_local_ctrl = 0;
2589 
2590 		/* Workaround to prevent overdrawing Amps. */
2591 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594 				    grc_local_ctrl,
2595 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 		}
2597 
2598 		/* On 5753 and variants, GPIO2 cannot be used. */
2599 		no_gpio2 = tp->nic_sram_data_cfg &
2600 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2601 
2602 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603 				  GRC_LCLCTRL_GPIO_OE1 |
2604 				  GRC_LCLCTRL_GPIO_OE2 |
2605 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2606 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2607 		if (no_gpio2) {
2608 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2610 		}
2611 		tw32_wait_f(GRC_LOCAL_CTRL,
2612 			    tp->grc_local_ctrl | grc_local_ctrl,
2613 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2614 
2615 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616 
2617 		tw32_wait_f(GRC_LOCAL_CTRL,
2618 			    tp->grc_local_ctrl | grc_local_ctrl,
2619 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2620 
2621 		if (!no_gpio2) {
2622 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623 			tw32_wait_f(GRC_LOCAL_CTRL,
2624 				    tp->grc_local_ctrl | grc_local_ctrl,
2625 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 		}
2627 	}
2628 }
2629 
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632 	u32 msg = 0;
2633 
2634 	/* Serialize power state transitions */
2635 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636 		return;
2637 
2638 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639 		msg = TG3_GPIO_MSG_NEED_VAUX;
2640 
2641 	msg = tg3_set_function_status(tp, msg);
2642 
2643 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644 		goto done;
2645 
2646 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647 		tg3_pwrsrc_switch_to_vaux(tp);
2648 	else
2649 		tg3_pwrsrc_die_with_vmain(tp);
2650 
2651 done:
2652 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654 
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657 	bool need_vaux = false;
2658 
2659 	/* The GPIOs do something completely different on 57765. */
2660 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661 		return;
2662 
2663 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666 		tg3_frob_aux_power_5717(tp, include_wol ?
2667 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668 		return;
2669 	}
2670 
2671 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672 		struct net_device *dev_peer;
2673 
2674 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2675 
2676 		/* remove_one() may have been run on the peer. */
2677 		if (dev_peer) {
2678 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2679 
2680 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2681 				return;
2682 
2683 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684 			    tg3_flag(tp_peer, ENABLE_ASF))
2685 				need_vaux = true;
2686 		}
2687 	}
2688 
2689 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690 	    tg3_flag(tp, ENABLE_ASF))
2691 		need_vaux = true;
2692 
2693 	if (need_vaux)
2694 		tg3_pwrsrc_switch_to_vaux(tp);
2695 	else
2696 		tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698 
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702 		return 1;
2703 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704 		if (speed != SPEED_10)
2705 			return 1;
2706 	} else if (speed == SPEED_10)
2707 		return 1;
2708 
2709 	return 0;
2710 }
2711 
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714 
2715 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2716 {
2717 	u32 val;
2718 
2719 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2720 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2721 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2722 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2723 
2724 			sg_dig_ctrl |=
2725 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2726 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2727 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2728 		}
2729 		return;
2730 	}
2731 
2732 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2733 		tg3_bmcr_reset(tp);
2734 		val = tr32(GRC_MISC_CFG);
2735 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2736 		udelay(40);
2737 		return;
2738 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2739 		u32 phytest;
2740 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2741 			u32 phy;
2742 
2743 			tg3_writephy(tp, MII_ADVERTISE, 0);
2744 			tg3_writephy(tp, MII_BMCR,
2745 				     BMCR_ANENABLE | BMCR_ANRESTART);
2746 
2747 			tg3_writephy(tp, MII_TG3_FET_TEST,
2748 				     phytest | MII_TG3_FET_SHADOW_EN);
2749 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2750 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2751 				tg3_writephy(tp,
2752 					     MII_TG3_FET_SHDW_AUXMODE4,
2753 					     phy);
2754 			}
2755 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2756 		}
2757 		return;
2758 	} else if (do_low_power) {
2759 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2761 
2762 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2763 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2764 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2765 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2766 	}
2767 
2768 	/* The PHY should not be powered down on some chips because
2769 	 * of bugs.
2770 	 */
2771 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2772 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2773 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2774 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2775 		return;
2776 
2777 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2778 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2779 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2780 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2781 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2782 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2783 	}
2784 
2785 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2786 }
2787 
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791 	if (tg3_flag(tp, NVRAM)) {
2792 		int i;
2793 
2794 		if (tp->nvram_lock_cnt == 0) {
2795 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796 			for (i = 0; i < 8000; i++) {
2797 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798 					break;
2799 				udelay(20);
2800 			}
2801 			if (i == 8000) {
2802 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803 				return -ENODEV;
2804 			}
2805 		}
2806 		tp->nvram_lock_cnt++;
2807 	}
2808 	return 0;
2809 }
2810 
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814 	if (tg3_flag(tp, NVRAM)) {
2815 		if (tp->nvram_lock_cnt > 0)
2816 			tp->nvram_lock_cnt--;
2817 		if (tp->nvram_lock_cnt == 0)
2818 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819 	}
2820 }
2821 
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826 		u32 nvaccess = tr32(NVRAM_ACCESS);
2827 
2828 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829 	}
2830 }
2831 
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836 		u32 nvaccess = tr32(NVRAM_ACCESS);
2837 
2838 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839 	}
2840 }
2841 
2842 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2843 					u32 offset, u32 *val)
2844 {
2845 	u32 tmp;
2846 	int i;
2847 
2848 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2849 		return -EINVAL;
2850 
2851 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2852 					EEPROM_ADDR_DEVID_MASK |
2853 					EEPROM_ADDR_READ);
2854 	tw32(GRC_EEPROM_ADDR,
2855 	     tmp |
2856 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2857 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2858 	      EEPROM_ADDR_ADDR_MASK) |
2859 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2860 
2861 	for (i = 0; i < 1000; i++) {
2862 		tmp = tr32(GRC_EEPROM_ADDR);
2863 
2864 		if (tmp & EEPROM_ADDR_COMPLETE)
2865 			break;
2866 		msleep(1);
2867 	}
2868 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2869 		return -EBUSY;
2870 
2871 	tmp = tr32(GRC_EEPROM_DATA);
2872 
2873 	/*
2874 	 * The data will always be opposite the native endian
2875 	 * format.  Perform a blind byteswap to compensate.
2876 	 */
2877 	*val = swab32(tmp);
2878 
2879 	return 0;
2880 }
2881 
2882 #define NVRAM_CMD_TIMEOUT 10000
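/* Worst case, the poll loop in tg3_nvram_exec_cmd() below spins
 * NVRAM_CMD_TIMEOUT times with a 10 us delay per iteration, i.e.
 * roughly 100 ms.
 */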
2883 
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886 	int i;
2887 
2888 	tw32(NVRAM_CMD, nvram_cmd);
2889 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890 		udelay(10);
2891 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892 			udelay(10);
2893 			break;
2894 		}
2895 	}
2896 
2897 	if (i == NVRAM_CMD_TIMEOUT)
2898 		return -EBUSY;
2899 
2900 	return 0;
2901 }
2902 
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905 	if (tg3_flag(tp, NVRAM) &&
2906 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2907 	    tg3_flag(tp, FLASH) &&
2908 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2910 
2911 		addr = ((addr / tp->nvram_pagesize) <<
2912 			ATMEL_AT45DB0X1B_PAGE_POS) +
2913 		       (addr % tp->nvram_pagesize);
2914 
2915 	return addr;
2916 }
2917 
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920 	if (tg3_flag(tp, NVRAM) &&
2921 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2922 	    tg3_flag(tp, FLASH) &&
2923 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2925 
2926 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927 			tp->nvram_pagesize) +
2928 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929 
2930 	return addr;
2931 }
2932 
2933 /* NOTE: Data read in from NVRAM is byteswapped according to
2934  * the byteswapping settings for all other register accesses.
2935  * tg3 devices are BE devices, so on a BE machine, the data
2936  * returned will be exactly as it is seen in NVRAM.  On a LE
2937  * machine, the 32-bit value will be byteswapped.
2938  */
2939 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2940 {
2941 	int ret;
2942 
2943 	if (!tg3_flag(tp, NVRAM))
2944 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2945 
2946 	offset = tg3_nvram_phys_addr(tp, offset);
2947 
2948 	if (offset > NVRAM_ADDR_MSK)
2949 		return -EINVAL;
2950 
2951 	ret = tg3_nvram_lock(tp);
2952 	if (ret)
2953 		return ret;
2954 
2955 	tg3_enable_nvram_access(tp);
2956 
2957 	tw32(NVRAM_ADDR, offset);
2958 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2959 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2960 
2961 	if (ret == 0)
2962 		*val = tr32(NVRAM_RDDATA);
2963 
2964 	tg3_disable_nvram_access(tp);
2965 
2966 	tg3_nvram_unlock(tp);
2967 
2968 	return ret;
2969 }
2970 
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974 	u32 v;
2975 	int res = tg3_nvram_read(tp, offset, &v);
2976 	if (!res)
2977 		*val = cpu_to_be32(v);
2978 	return res;
2979 }
2980 
2981 #define RX_CPU_SCRATCH_BASE	0x30000
2982 #define RX_CPU_SCRATCH_SIZE	0x04000
2983 #define TX_CPU_SCRATCH_BASE	0x34000
2984 #define TX_CPU_SCRATCH_SIZE	0x04000
2985 
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989 	int i;
2990 
2991 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992 
2993 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995 
2996 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997 		return 0;
2998 	}
2999 	if (offset == RX_CPU_BASE) {
3000 		for (i = 0; i < 10000; i++) {
3001 			tw32(offset + CPU_STATE, 0xffffffff);
3002 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3003 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004 				break;
3005 		}
3006 
3007 		tw32(offset + CPU_STATE, 0xffffffff);
3008 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3009 		udelay(10);
3010 	} else {
3011 		for (i = 0; i < 10000; i++) {
3012 			tw32(offset + CPU_STATE, 0xffffffff);
3013 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3014 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015 				break;
3016 		}
3017 	}
3018 
3019 	if (i >= 10000) {
3020 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022 		return -ENODEV;
3023 	}
3024 
3025 	/* Clear firmware's nvram arbitration. */
3026 	if (tg3_flag(tp, NVRAM))
3027 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028 	return 0;
3029 }
3030 
3031 struct fw_info {
3032 	unsigned int fw_base;
3033 	unsigned int fw_len;
3034 	const __be32 *fw_data;
3035 };
3036 
3037 /* tp->lock is held. */
3038 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3039 				 u32 cpu_scratch_base, int cpu_scratch_size,
3040 				 struct fw_info *info)
3041 {
3042 	int err, lock_err, i;
3043 	void (*write_op)(struct tg3 *, u32, u32);
3044 
3045 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3046 		netdev_err(tp->dev,
3047 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3048 			   __func__);
3049 		return -EINVAL;
3050 	}
3051 
3052 	if (tg3_flag(tp, 5705_PLUS))
3053 		write_op = tg3_write_mem;
3054 	else
3055 		write_op = tg3_write_indirect_reg32;
3056 
	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock before halting the cpu.
	 */
3060 	lock_err = tg3_nvram_lock(tp);
3061 	err = tg3_halt_cpu(tp, cpu_base);
3062 	if (!lock_err)
3063 		tg3_nvram_unlock(tp);
3064 	if (err)
3065 		goto out;
3066 
3067 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3068 		write_op(tp, cpu_scratch_base + i, 0);
3069 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3070 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3071 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3072 		write_op(tp, (cpu_scratch_base +
3073 			      (info->fw_base & 0xffff) +
3074 			      (i * sizeof(u32))),
3075 			      be32_to_cpu(info->fw_data[i]));
3076 
3077 	err = 0;
3078 
3079 out:
3080 	return err;
3081 }
3082 
3083 /* tp->lock is held. */
3084 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3085 {
3086 	struct fw_info info;
3087 	const __be32 *fw_data;
3088 	int err, i;
3089 
3090 	fw_data = (void *)tp->fw->data;
3091 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
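
	/* Header layout as consumed below: three __be32 words (12 bytes)
	 * precede the blob proper --
	 *   fw_data[0]: firmware version (per the comment above)
	 *   fw_data[1]: start (load) address
	 *   fw_data[2]: recorded length, ignored here in favor of the
	 *               complete length computed from tp->fw->size
	 *   fw_data[3]: first word of the firmware blob
	 */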
3097 
3098 	info.fw_base = be32_to_cpu(fw_data[1]);
3099 	info.fw_len = tp->fw->size - 12;
3100 	info.fw_data = &fw_data[3];
3101 
3102 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3103 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3104 				    &info);
3105 	if (err)
3106 		return err;
3107 
3108 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3109 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3110 				    &info);
3111 	if (err)
3112 		return err;
3113 
	/* Now start up only the RX cpu. */
3115 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3116 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3117 
3118 	for (i = 0; i < 5; i++) {
3119 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3120 			break;
3121 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3122 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3123 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3124 		udelay(1000);
3125 	}
3126 	if (i >= 5) {
3127 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3128 			   "should be %08x\n", __func__,
3129 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3130 		return -ENODEV;
3131 	}
3132 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3134 
3135 	return 0;
3136 }
3137 
3138 /* tp->lock is held. */
3139 static int tg3_load_tso_firmware(struct tg3 *tp)
3140 {
3141 	struct fw_info info;
3142 	const __be32 *fw_data;
3143 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3144 	int err, i;
3145 
3146 	if (tg3_flag(tp, HW_TSO_1) ||
3147 	    tg3_flag(tp, HW_TSO_2) ||
3148 	    tg3_flag(tp, HW_TSO_3))
3149 		return 0;
3150 
3151 	fw_data = (void *)tp->fw->data;
3152 
	/* Firmware blob starts with version numbers, followed by
	 * start address and length. We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
3158 
3159 	info.fw_base = be32_to_cpu(fw_data[1]);
3160 	cpu_scratch_size = tp->fw_len;
3161 	info.fw_len = tp->fw->size - 12;
3162 	info.fw_data = &fw_data[3];
3163 
3164 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3165 		cpu_base = RX_CPU_BASE;
3166 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3167 	} else {
3168 		cpu_base = TX_CPU_BASE;
3169 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3170 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3171 	}
3172 
3173 	err = tg3_load_firmware_cpu(tp, cpu_base,
3174 				    cpu_scratch_base, cpu_scratch_size,
3175 				    &info);
3176 	if (err)
3177 		return err;
3178 
	/* Now start up the cpu. */
3180 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3181 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3182 
3183 	for (i = 0; i < 5; i++) {
3184 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3185 			break;
3186 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3187 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3188 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3189 		udelay(1000);
3190 	}
3191 	if (i >= 5) {
3192 		netdev_err(tp->dev,
3193 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3194 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3195 		return -ENODEV;
3196 	}
3197 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3198 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3199 	return 0;
3200 }
3201 
3202 
3203 /* tp->lock is held. */
3204 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3205 {
3206 	u32 addr_high, addr_low;
3207 	int i;
3208 
3209 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3210 		     tp->dev->dev_addr[1]);
3211 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3212 		    (tp->dev->dev_addr[3] << 16) |
3213 		    (tp->dev->dev_addr[4] <<  8) |
3214 		    (tp->dev->dev_addr[5] <<  0));
3215 	for (i = 0; i < 4; i++) {
3216 		if (i == 1 && skip_mac_1)
3217 			continue;
3218 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3219 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3220 	}
3221 
3222 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3223 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3224 		for (i = 0; i < 12; i++) {
3225 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3226 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3227 		}
3228 	}
3229 
3230 	addr_high = (tp->dev->dev_addr[0] +
3231 		     tp->dev->dev_addr[1] +
3232 		     tp->dev->dev_addr[2] +
3233 		     tp->dev->dev_addr[3] +
3234 		     tp->dev->dev_addr[4] +
3235 		     tp->dev->dev_addr[5]) &
3236 		TX_BACKOFF_SEED_MASK;
3237 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3238 }
3239 
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242 	/*
3243 	 * Make sure register accesses (indirect or otherwise) will function
3244 	 * correctly.
3245 	 */
3246 	pci_write_config_dword(tp->pdev,
3247 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249 
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252 	int err;
3253 
3254 	tg3_enable_register_access(tp);
3255 
3256 	err = pci_set_power_state(tp->pdev, PCI_D0);
3257 	if (!err) {
3258 		/* Switch out of Vaux if it is a NIC */
3259 		tg3_pwrsrc_switch_to_vmain(tp);
3260 	} else {
3261 		netdev_err(tp->dev, "Transition to D0 failed\n");
3262 	}
3263 
3264 	return err;
3265 }
3266 
3267 static int tg3_power_down_prepare(struct tg3 *tp)
3268 {
3269 	u32 misc_host_ctrl;
3270 	bool device_should_wake, do_low_power;
3271 
3272 	tg3_enable_register_access(tp);
3273 
3274 	/* Restore the CLKREQ setting. */
3275 	if (tg3_flag(tp, CLKREQ_BUG)) {
3276 		u16 lnkctl;
3277 
3278 		pci_read_config_word(tp->pdev,
3279 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3280 				     &lnkctl);
3281 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3282 		pci_write_config_word(tp->pdev,
3283 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3284 				      lnkctl);
3285 	}
3286 
3287 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3288 	tw32(TG3PCI_MISC_HOST_CTRL,
3289 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3290 
3291 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3292 			     tg3_flag(tp, WOL_ENABLE);
3293 
3294 	if (tg3_flag(tp, USE_PHYLIB)) {
3295 		do_low_power = false;
3296 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3297 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3298 			struct phy_device *phydev;
3299 			u32 phyid, advertising;
3300 
3301 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3302 
3303 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3304 
3305 			tp->link_config.orig_speed = phydev->speed;
3306 			tp->link_config.orig_duplex = phydev->duplex;
3307 			tp->link_config.orig_autoneg = phydev->autoneg;
3308 			tp->link_config.orig_advertising = phydev->advertising;
3309 
3310 			advertising = ADVERTISED_TP |
3311 				      ADVERTISED_Pause |
3312 				      ADVERTISED_Autoneg |
3313 				      ADVERTISED_10baseT_Half;
3314 
3315 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3316 				if (tg3_flag(tp, WOL_SPEED_100MB))
3317 					advertising |=
3318 						ADVERTISED_100baseT_Half |
3319 						ADVERTISED_100baseT_Full |
3320 						ADVERTISED_10baseT_Full;
3321 				else
3322 					advertising |= ADVERTISED_10baseT_Full;
3323 			}
3324 
3325 			phydev->advertising = advertising;
3326 
3327 			phy_start_aneg(phydev);
3328 
3329 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3330 			if (phyid != PHY_ID_BCMAC131) {
3331 				phyid &= PHY_BCM_OUI_MASK;
3332 				if (phyid == PHY_BCM_OUI_1 ||
3333 				    phyid == PHY_BCM_OUI_2 ||
3334 				    phyid == PHY_BCM_OUI_3)
3335 					do_low_power = true;
3336 			}
3337 		}
3338 	} else {
3339 		do_low_power = true;
3340 
3341 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3342 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3343 			tp->link_config.orig_speed = tp->link_config.speed;
3344 			tp->link_config.orig_duplex = tp->link_config.duplex;
3345 			tp->link_config.orig_autoneg = tp->link_config.autoneg;
3346 		}
3347 
3348 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3349 			tp->link_config.speed = SPEED_10;
3350 			tp->link_config.duplex = DUPLEX_HALF;
3351 			tp->link_config.autoneg = AUTONEG_ENABLE;
3352 			tg3_setup_phy(tp, 0);
3353 		}
3354 	}
3355 
3356 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3357 		u32 val;
3358 
3359 		val = tr32(GRC_VCPU_EXT_CTRL);
3360 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3361 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3362 		int i;
3363 		u32 val;
3364 
3365 		for (i = 0; i < 200; i++) {
3366 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3367 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3368 				break;
3369 			msleep(1);
3370 		}
3371 	}
3372 	if (tg3_flag(tp, WOL_CAP))
3373 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3374 						     WOL_DRV_STATE_SHUTDOWN |
3375 						     WOL_DRV_WOL |
3376 						     WOL_SET_MAGIC_PKT);
3377 
3378 	if (device_should_wake) {
3379 		u32 mac_mode;
3380 
3381 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3382 			if (do_low_power &&
3383 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3384 				tg3_phy_auxctl_write(tp,
3385 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3386 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3387 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3388 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3389 				udelay(40);
3390 			}
3391 
3392 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3393 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3394 			else
3395 				mac_mode = MAC_MODE_PORT_MODE_MII;
3396 
3397 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3398 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3399 			    ASIC_REV_5700) {
3400 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3401 					     SPEED_100 : SPEED_10;
3402 				if (tg3_5700_link_polarity(tp, speed))
3403 					mac_mode |= MAC_MODE_LINK_POLARITY;
3404 				else
3405 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3406 			}
3407 		} else {
3408 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3409 		}
3410 
3411 		if (!tg3_flag(tp, 5750_PLUS))
3412 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3413 
3414 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3415 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3416 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3417 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3418 
3419 		if (tg3_flag(tp, ENABLE_APE))
3420 			mac_mode |= MAC_MODE_APE_TX_EN |
3421 				    MAC_MODE_APE_RX_EN |
3422 				    MAC_MODE_TDE_ENABLE;
3423 
3424 		tw32_f(MAC_MODE, mac_mode);
3425 		udelay(100);
3426 
3427 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3428 		udelay(10);
3429 	}
3430 
3431 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3432 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3433 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3434 		u32 base_val;
3435 
3436 		base_val = tp->pci_clock_ctrl;
3437 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3438 			     CLOCK_CTRL_TXCLK_DISABLE);
3439 
3440 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3441 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3442 	} else if (tg3_flag(tp, 5780_CLASS) ||
3443 		   tg3_flag(tp, CPMU_PRESENT) ||
3444 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3445 		/* do nothing */
3446 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3447 		u32 newbits1, newbits2;
3448 
3449 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3452 				    CLOCK_CTRL_TXCLK_DISABLE |
3453 				    CLOCK_CTRL_ALTCLK);
3454 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3455 		} else if (tg3_flag(tp, 5705_PLUS)) {
3456 			newbits1 = CLOCK_CTRL_625_CORE;
3457 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3458 		} else {
3459 			newbits1 = CLOCK_CTRL_ALTCLK;
3460 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3461 		}
3462 
3463 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3464 			    40);
3465 
3466 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3467 			    40);
3468 
3469 		if (!tg3_flag(tp, 5705_PLUS)) {
3470 			u32 newbits3;
3471 
3472 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3475 					    CLOCK_CTRL_TXCLK_DISABLE |
3476 					    CLOCK_CTRL_44MHZ_CORE);
3477 			} else {
3478 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3479 			}
3480 
3481 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3482 				    tp->pci_clock_ctrl | newbits3, 40);
3483 		}
3484 	}
3485 
3486 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3487 		tg3_power_down_phy(tp, do_low_power);
3488 
3489 	tg3_frob_aux_power(tp, true);
3490 
3491 	/* Workaround for unstable PLL clock */
3492 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3493 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3494 		u32 val = tr32(0x7d00);
3495 
3496 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3497 		tw32(0x7d00, val);
3498 		if (!tg3_flag(tp, ENABLE_ASF)) {
3499 			int err;
3500 
3501 			err = tg3_nvram_lock(tp);
3502 			tg3_halt_cpu(tp, RX_CPU_BASE);
3503 			if (!err)
3504 				tg3_nvram_unlock(tp);
3505 		}
3506 	}
3507 
3508 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3509 
3510 	return 0;
3511 }
3512 
3513 static void tg3_power_down(struct tg3 *tp)
3514 {
3515 	tg3_power_down_prepare(tp);
3516 
3517 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3518 	pci_set_power_state(tp->pdev, PCI_D3hot);
3519 }
3520 
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3522 {
3523 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524 	case MII_TG3_AUX_STAT_10HALF:
3525 		*speed = SPEED_10;
3526 		*duplex = DUPLEX_HALF;
3527 		break;
3528 
3529 	case MII_TG3_AUX_STAT_10FULL:
3530 		*speed = SPEED_10;
3531 		*duplex = DUPLEX_FULL;
3532 		break;
3533 
3534 	case MII_TG3_AUX_STAT_100HALF:
3535 		*speed = SPEED_100;
3536 		*duplex = DUPLEX_HALF;
3537 		break;
3538 
3539 	case MII_TG3_AUX_STAT_100FULL:
3540 		*speed = SPEED_100;
3541 		*duplex = DUPLEX_FULL;
3542 		break;
3543 
3544 	case MII_TG3_AUX_STAT_1000HALF:
3545 		*speed = SPEED_1000;
3546 		*duplex = DUPLEX_HALF;
3547 		break;
3548 
3549 	case MII_TG3_AUX_STAT_1000FULL:
3550 		*speed = SPEED_1000;
3551 		*duplex = DUPLEX_FULL;
3552 		break;
3553 
3554 	default:
3555 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557 				 SPEED_10;
3558 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559 				  DUPLEX_HALF;
3560 			break;
3561 		}
3562 		*speed = SPEED_INVALID;
3563 		*duplex = DUPLEX_INVALID;
3564 		break;
3565 	}
3566 }
3567 
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570 	int err = 0;
3571 	u32 val, new_adv;
3572 
3573 	new_adv = ADVERTISE_CSMA;
3574 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575 	new_adv |= mii_advertise_flowctrl(flowctrl);
3576 
3577 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578 	if (err)
3579 		goto done;
3580 
3581 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583 
3584 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587 
3588 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589 		if (err)
3590 			goto done;
3591 	}
3592 
3593 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594 		goto done;
3595 
3596 	tw32(TG3_CPMU_EEE_MODE,
3597 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598 
3599 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600 	if (!err) {
3601 		u32 err2;
3602 
3603 		val = 0;
		/* Advertise 100BASE-TX EEE ability */
3605 		if (advertise & ADVERTISED_100baseT_Full)
3606 			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
3608 		if (advertise & ADVERTISED_1000baseT_Full)
3609 			val |= MDIO_AN_EEE_ADV_1000T;
3610 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611 		if (err)
3612 			val = 0;
3613 
3614 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615 		case ASIC_REV_5717:
3616 		case ASIC_REV_57765:
3617 		case ASIC_REV_57766:
3618 		case ASIC_REV_5719:
			/* If we advertised any EEE modes above... */
3620 			if (val)
3621 				val = MII_TG3_DSP_TAP26_ALNOKO |
3622 				      MII_TG3_DSP_TAP26_RMRXSTO |
3623 				      MII_TG3_DSP_TAP26_OPCSINPT;
3624 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625 			/* Fall through */
3626 		case ASIC_REV_5720:
3627 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629 						 MII_TG3_DSP_CH34TP2_HIBW01);
3630 		}
3631 
3632 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633 		if (!err)
3634 			err = err2;
3635 	}
3636 
3637 done:
3638 	return err;
3639 }
3640 
3641 static void tg3_phy_copper_begin(struct tg3 *tp)
3642 {
3643 	u32 new_adv;
3644 	int i;
3645 
3646 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3647 		new_adv = ADVERTISED_10baseT_Half |
3648 			  ADVERTISED_10baseT_Full;
3649 		if (tg3_flag(tp, WOL_SPEED_100MB))
3650 			new_adv |= ADVERTISED_100baseT_Half |
3651 				   ADVERTISED_100baseT_Full;
3652 
3653 		tg3_phy_autoneg_cfg(tp, new_adv,
3654 				    FLOW_CTRL_TX | FLOW_CTRL_RX);
3655 	} else if (tp->link_config.speed == SPEED_INVALID) {
3656 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3657 			tp->link_config.advertising &=
3658 				~(ADVERTISED_1000baseT_Half |
3659 				  ADVERTISED_1000baseT_Full);
3660 
3661 		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3662 				    tp->link_config.flowctrl);
3663 	} else {
3664 		/* Asking for a specific link mode. */
3665 		if (tp->link_config.speed == SPEED_1000) {
3666 			if (tp->link_config.duplex == DUPLEX_FULL)
3667 				new_adv = ADVERTISED_1000baseT_Full;
3668 			else
3669 				new_adv = ADVERTISED_1000baseT_Half;
3670 		} else if (tp->link_config.speed == SPEED_100) {
3671 			if (tp->link_config.duplex == DUPLEX_FULL)
3672 				new_adv = ADVERTISED_100baseT_Full;
3673 			else
3674 				new_adv = ADVERTISED_100baseT_Half;
3675 		} else {
3676 			if (tp->link_config.duplex == DUPLEX_FULL)
3677 				new_adv = ADVERTISED_10baseT_Full;
3678 			else
3679 				new_adv = ADVERTISED_10baseT_Half;
3680 		}
3681 
3682 		tg3_phy_autoneg_cfg(tp, new_adv,
3683 				    tp->link_config.flowctrl);
3684 	}
3685 
3686 	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3687 	    tp->link_config.speed != SPEED_INVALID) {
3688 		u32 bmcr, orig_bmcr;
3689 
3690 		tp->link_config.active_speed = tp->link_config.speed;
3691 		tp->link_config.active_duplex = tp->link_config.duplex;
3692 
3693 		bmcr = 0;
3694 		switch (tp->link_config.speed) {
3695 		default:
3696 		case SPEED_10:
3697 			break;
3698 
3699 		case SPEED_100:
3700 			bmcr |= BMCR_SPEED100;
3701 			break;
3702 
3703 		case SPEED_1000:
3704 			bmcr |= BMCR_SPEED1000;
3705 			break;
3706 		}
3707 
3708 		if (tp->link_config.duplex == DUPLEX_FULL)
3709 			bmcr |= BMCR_FULLDPLX;
3710 
3711 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3712 		    (bmcr != orig_bmcr)) {
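			/* Force the link down by briefly placing the PHY
			 * in loopback, wait for the link to drop, then
			 * program the requested forced mode.
			 */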
3713 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3714 			for (i = 0; i < 1500; i++) {
3715 				u32 tmp;
3716 
3717 				udelay(10);
3718 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3719 				    tg3_readphy(tp, MII_BMSR, &tmp))
3720 					continue;
3721 				if (!(tmp & BMSR_LSTATUS)) {
3722 					udelay(40);
3723 					break;
3724 				}
3725 			}
3726 			tg3_writephy(tp, MII_BMCR, bmcr);
3727 			udelay(40);
3728 		}
3729 	} else {
3730 		tg3_writephy(tp, MII_BMCR,
3731 			     BMCR_ANENABLE | BMCR_ANRESTART);
3732 	}
3733 }
3734 
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737 	int err;
3738 
3739 	/* Turn off tap power management and set the
3740 	 * extended packet length bit.  */
3741 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742 
3743 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748 
3749 	udelay(40);
3750 
3751 	return err;
3752 }
3753 
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756 	u32 advmsk, tgtadv, advertising;
3757 
3758 	advertising = tp->link_config.advertising;
3759 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760 
3761 	advmsk = ADVERTISE_ALL;
3762 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765 	}
3766 
3767 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768 		return false;
3769 
3770 	if ((*lcladv & advmsk) != tgtadv)
3771 		return false;
3772 
3773 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774 		u32 tg3_ctrl;
3775 
3776 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777 
3778 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779 			return false;
3780 
3781 		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782 		if (tg3_ctrl != tgtadv)
3783 			return false;
3784 	}
3785 
3786 	return true;
3787 }
3788 
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791 	u32 lpeth = 0;
3792 
3793 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794 		u32 val;
3795 
3796 		if (tg3_readphy(tp, MII_STAT1000, &val))
3797 			return false;
3798 
3799 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800 	}
3801 
3802 	if (tg3_readphy(tp, MII_LPA, rmtadv))
3803 		return false;
3804 
3805 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806 	tp->link_config.rmt_adv = lpeth;
3807 
3808 	return true;
3809 }
3810 
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813 	int current_link_up;
3814 	u32 bmsr, val;
3815 	u32 lcl_adv, rmt_adv;
3816 	u16 current_speed;
3817 	u8 current_duplex;
3818 	int i, err;
3819 
3820 	tw32(MAC_EVENT, 0);
3821 
3822 	tw32_f(MAC_STATUS,
3823 	     (MAC_STATUS_SYNC_CHANGED |
3824 	      MAC_STATUS_CFG_CHANGED |
3825 	      MAC_STATUS_MI_COMPLETION |
3826 	      MAC_STATUS_LNKSTATE_CHANGED));
3827 	udelay(40);
3828 
3829 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830 		tw32_f(MAC_MI_MODE,
3831 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832 		udelay(80);
3833 	}
3834 
3835 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836 
3837 	/* Some third-party PHYs need to be reset on link going
3838 	 * down.
3839 	 */
3840 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843 	    netif_carrier_ok(tp->dev)) {
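		/* The BMSR link status bit latches low: the first read
		 * returns (and clears) the latched value, the second
		 * read returns the current link state.
		 */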
3844 		tg3_readphy(tp, MII_BMSR, &bmsr);
3845 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846 		    !(bmsr & BMSR_LSTATUS))
3847 			force_reset = 1;
3848 	}
3849 	if (force_reset)
3850 		tg3_phy_reset(tp);
3851 
3852 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853 		tg3_readphy(tp, MII_BMSR, &bmsr);
3854 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855 		    !tg3_flag(tp, INIT_COMPLETE))
3856 			bmsr = 0;
3857 
3858 		if (!(bmsr & BMSR_LSTATUS)) {
3859 			err = tg3_init_5401phy_dsp(tp);
3860 			if (err)
3861 				return err;
3862 
3863 			tg3_readphy(tp, MII_BMSR, &bmsr);
3864 			for (i = 0; i < 1000; i++) {
3865 				udelay(10);
3866 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867 				    (bmsr & BMSR_LSTATUS)) {
3868 					udelay(40);
3869 					break;
3870 				}
3871 			}
3872 
3873 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874 			    TG3_PHY_REV_BCM5401_B0 &&
3875 			    !(bmsr & BMSR_LSTATUS) &&
3876 			    tp->link_config.active_speed == SPEED_1000) {
3877 				err = tg3_phy_reset(tp);
3878 				if (!err)
3879 					err = tg3_init_5401phy_dsp(tp);
3880 				if (err)
3881 					return err;
3882 			}
3883 		}
3884 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886 		/* 5701 {A0,B0} CRC bug workaround */
3887 		tg3_writephy(tp, 0x15, 0x0a75);
3888 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891 	}
3892 
3893 	/* Clear pending interrupts... */
3894 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896 
3897 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901 
3902 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907 		else
3908 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909 	}
3910 
3911 	current_link_up = 0;
3912 	current_speed = SPEED_INVALID;
3913 	current_duplex = DUPLEX_INVALID;
3914 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915 	tp->link_config.rmt_adv = 0;
3916 
3917 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918 		err = tg3_phy_auxctl_read(tp,
3919 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920 					  &val);
3921 		if (!err && !(val & (1 << 10))) {
3922 			tg3_phy_auxctl_write(tp,
3923 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924 					     val | (1 << 10));
3925 			goto relink;
3926 		}
3927 	}
3928 
3929 	bmsr = 0;
3930 	for (i = 0; i < 100; i++) {
3931 		tg3_readphy(tp, MII_BMSR, &bmsr);
3932 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933 		    (bmsr & BMSR_LSTATUS))
3934 			break;
3935 		udelay(40);
3936 	}
3937 
3938 	if (bmsr & BMSR_LSTATUS) {
3939 		u32 aux_stat, bmcr;
3940 
3941 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942 		for (i = 0; i < 2000; i++) {
3943 			udelay(10);
3944 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945 			    aux_stat)
3946 				break;
3947 		}
3948 
3949 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950 					     &current_speed,
3951 					     &current_duplex);
3952 
3953 		bmcr = 0;
3954 		for (i = 0; i < 200; i++) {
3955 			tg3_readphy(tp, MII_BMCR, &bmcr);
3956 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957 				continue;
3958 			if (bmcr && bmcr != 0x7fff)
3959 				break;
3960 			udelay(10);
3961 		}
3962 
3963 		lcl_adv = 0;
3964 		rmt_adv = 0;
3965 
3966 		tp->link_config.active_speed = current_speed;
3967 		tp->link_config.active_duplex = current_duplex;
3968 
3969 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970 			if ((bmcr & BMCR_ANENABLE) &&
3971 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973 				current_link_up = 1;
3974 		} else {
3975 			if (!(bmcr & BMCR_ANENABLE) &&
3976 			    tp->link_config.speed == current_speed &&
3977 			    tp->link_config.duplex == current_duplex &&
3978 			    tp->link_config.flowctrl ==
3979 			    tp->link_config.active_flowctrl) {
3980 				current_link_up = 1;
3981 			}
3982 		}
3983 
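		/* 802.3 pause-based flow control is only defined for
		 * full-duplex links, hence the duplex check below.
		 */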
3984 		if (current_link_up == 1 &&
3985 		    tp->link_config.active_duplex == DUPLEX_FULL) {
3986 			u32 reg, bit;
3987 
3988 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989 				reg = MII_TG3_FET_GEN_STAT;
3990 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991 			} else {
3992 				reg = MII_TG3_EXT_STAT;
3993 				bit = MII_TG3_EXT_STAT_MDIX;
3994 			}
3995 
3996 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998 
3999 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000 		}
4001 	}
4002 
4003 relink:
4004 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005 		tg3_phy_copper_begin(tp);
4006 
4007 		tg3_readphy(tp, MII_BMSR, &bmsr);
4008 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010 			current_link_up = 1;
4011 	}
4012 
4013 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014 	if (current_link_up == 1) {
4015 		if (tp->link_config.active_speed == SPEED_100 ||
4016 		    tp->link_config.active_speed == SPEED_10)
4017 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018 		else
4019 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022 	else
4023 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024 
4025 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4027 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028 
4029 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030 		if (current_link_up == 1 &&
4031 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033 		else
4034 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035 	}
4036 
4037 	/* For reasons that are not understood, the Netgear GA302T
4038 	 * PHY does not send/receive packets without this setting.
4039 	 */
4040 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4044 		udelay(80);
4045 	}
4046 
4047 	tw32_f(MAC_MODE, tp->mac_mode);
4048 	udelay(40);
4049 
4050 	tg3_phy_eee_adjust(tp, current_link_up);
4051 
4052 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053 		/* Polled via timer. */
4054 		tw32_f(MAC_EVENT, 0);
4055 	} else {
4056 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057 	}
4058 	udelay(40);
4059 
4060 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061 	    current_link_up == 1 &&
4062 	    tp->link_config.active_speed == SPEED_1000 &&
4063 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064 		udelay(120);
4065 		tw32_f(MAC_STATUS,
4066 		     (MAC_STATUS_SYNC_CHANGED |
4067 		      MAC_STATUS_CFG_CHANGED));
4068 		udelay(40);
4069 		tg3_write_mem(tp,
4070 			      NIC_SRAM_FIRMWARE_MBOX,
4071 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072 	}
4073 
4074 	/* Prevent send BD corruption. */
4075 	if (tg3_flag(tp, CLKREQ_BUG)) {
4076 		u16 oldlnkctl, newlnkctl;
4077 
4078 		pci_read_config_word(tp->pdev,
4079 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080 				     &oldlnkctl);
4081 		if (tp->link_config.active_speed == SPEED_100 ||
4082 		    tp->link_config.active_speed == SPEED_10)
4083 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084 		else
4085 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086 		if (newlnkctl != oldlnkctl)
4087 			pci_write_config_word(tp->pdev,
4088 					      pci_pcie_cap(tp->pdev) +
4089 					      PCI_EXP_LNKCTL, newlnkctl);
4090 	}
4091 
4092 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4093 		if (current_link_up)
4094 			netif_carrier_on(tp->dev);
4095 		else
4096 			netif_carrier_off(tp->dev);
4097 		tg3_link_report(tp);
4098 	}
4099 
4100 	return 0;
4101 }
4102 
4103 struct tg3_fiber_aneginfo {
4104 	int state;
4105 #define ANEG_STATE_UNKNOWN		0
4106 #define ANEG_STATE_AN_ENABLE		1
4107 #define ANEG_STATE_RESTART_INIT		2
4108 #define ANEG_STATE_RESTART		3
4109 #define ANEG_STATE_DISABLE_LINK_OK	4
4110 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4111 #define ANEG_STATE_ABILITY_DETECT	6
4112 #define ANEG_STATE_ACK_DETECT_INIT	7
4113 #define ANEG_STATE_ACK_DETECT		8
4114 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4115 #define ANEG_STATE_COMPLETE_ACK		10
4116 #define ANEG_STATE_IDLE_DETECT_INIT	11
4117 #define ANEG_STATE_IDLE_DETECT		12
4118 #define ANEG_STATE_LINK_OK		13
4119 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4120 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4121 
4122 	u32 flags;
4123 #define MR_AN_ENABLE		0x00000001
4124 #define MR_RESTART_AN		0x00000002
4125 #define MR_AN_COMPLETE		0x00000004
4126 #define MR_PAGE_RX		0x00000008
4127 #define MR_NP_LOADED		0x00000010
4128 #define MR_TOGGLE_TX		0x00000020
4129 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4130 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4131 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4132 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4133 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4134 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4135 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4136 #define MR_TOGGLE_RX		0x00002000
4137 #define MR_NP_RX		0x00004000
4138 
4139 #define MR_LINK_OK		0x80000000
4140 
4141 	unsigned long link_time, cur_time;
4142 
4143 	u32 ability_match_cfg;
4144 	int ability_match_count;
4145 
4146 	char ability_match, idle_match, ack_match;
4147 
4148 	u32 txconfig, rxconfig;
4149 #define ANEG_CFG_NP		0x00000080
4150 #define ANEG_CFG_ACK		0x00000040
4151 #define ANEG_CFG_RF2		0x00000020
4152 #define ANEG_CFG_RF1		0x00000010
4153 #define ANEG_CFG_PS2		0x00000001
4154 #define ANEG_CFG_PS1		0x00008000
4155 #define ANEG_CFG_HD		0x00004000
4156 #define ANEG_CFG_FD		0x00002000
4157 #define ANEG_CFG_INVAL		0x00001f06
4158 
4159 };
4160 #define ANEG_OK		0
4161 #define ANEG_DONE	1
4162 #define ANEG_TIMER_ENAB	2
4163 #define ANEG_FAILED	-1
4164 
4165 #define ANEG_STATE_SETTLE_TIME	10000
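/* The fiber autoneg state machine is ticked roughly once per usec
 * (see the udelay(1) poll loop in fiber_autoneg), so this settle
 * time is on the order of 10 ms.
 */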
4166 
4167 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4168 				   struct tg3_fiber_aneginfo *ap)
4169 {
4170 	u16 flowctrl;
4171 	unsigned long delta;
4172 	u32 rx_cfg_reg;
4173 	int ret;
4174 
4175 	if (ap->state == ANEG_STATE_UNKNOWN) {
4176 		ap->rxconfig = 0;
4177 		ap->link_time = 0;
4178 		ap->cur_time = 0;
4179 		ap->ability_match_cfg = 0;
4180 		ap->ability_match_count = 0;
4181 		ap->ability_match = 0;
4182 		ap->idle_match = 0;
4183 		ap->ack_match = 0;
4184 	}
4185 	ap->cur_time++;
4186 
4187 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4188 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4189 
4190 		if (rx_cfg_reg != ap->ability_match_cfg) {
4191 			ap->ability_match_cfg = rx_cfg_reg;
4192 			ap->ability_match = 0;
4193 			ap->ability_match_count = 0;
4194 		} else {
4195 			if (++ap->ability_match_count > 1) {
4196 				ap->ability_match = 1;
4197 				ap->ability_match_cfg = rx_cfg_reg;
4198 			}
4199 		}
4200 		if (rx_cfg_reg & ANEG_CFG_ACK)
4201 			ap->ack_match = 1;
4202 		else
4203 			ap->ack_match = 0;
4204 
4205 		ap->idle_match = 0;
4206 	} else {
4207 		ap->idle_match = 1;
4208 		ap->ability_match_cfg = 0;
4209 		ap->ability_match_count = 0;
4210 		ap->ability_match = 0;
4211 		ap->ack_match = 0;
4212 
4213 		rx_cfg_reg = 0;
4214 	}
4215 
4216 	ap->rxconfig = rx_cfg_reg;
4217 	ret = ANEG_OK;
4218 
4219 	switch (ap->state) {
4220 	case ANEG_STATE_UNKNOWN:
4221 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4222 			ap->state = ANEG_STATE_AN_ENABLE;
4223 
4224 		/* fallthru */
4225 	case ANEG_STATE_AN_ENABLE:
4226 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4227 		if (ap->flags & MR_AN_ENABLE) {
4228 			ap->link_time = 0;
4229 			ap->cur_time = 0;
4230 			ap->ability_match_cfg = 0;
4231 			ap->ability_match_count = 0;
4232 			ap->ability_match = 0;
4233 			ap->idle_match = 0;
4234 			ap->ack_match = 0;
4235 
4236 			ap->state = ANEG_STATE_RESTART_INIT;
4237 		} else {
4238 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4239 		}
4240 		break;
4241 
4242 	case ANEG_STATE_RESTART_INIT:
4243 		ap->link_time = ap->cur_time;
4244 		ap->flags &= ~(MR_NP_LOADED);
4245 		ap->txconfig = 0;
4246 		tw32(MAC_TX_AUTO_NEG, 0);
4247 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4248 		tw32_f(MAC_MODE, tp->mac_mode);
4249 		udelay(40);
4250 
4251 		ret = ANEG_TIMER_ENAB;
4252 		ap->state = ANEG_STATE_RESTART;
4253 
4254 		/* fallthru */
4255 	case ANEG_STATE_RESTART:
4256 		delta = ap->cur_time - ap->link_time;
4257 		if (delta > ANEG_STATE_SETTLE_TIME)
4258 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4259 		else
4260 			ret = ANEG_TIMER_ENAB;
4261 		break;
4262 
4263 	case ANEG_STATE_DISABLE_LINK_OK:
4264 		ret = ANEG_DONE;
4265 		break;
4266 
4267 	case ANEG_STATE_ABILITY_DETECT_INIT:
4268 		ap->flags &= ~(MR_TOGGLE_TX);
4269 		ap->txconfig = ANEG_CFG_FD;
4270 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4271 		if (flowctrl & ADVERTISE_1000XPAUSE)
4272 			ap->txconfig |= ANEG_CFG_PS1;
4273 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4274 			ap->txconfig |= ANEG_CFG_PS2;
4275 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4276 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4277 		tw32_f(MAC_MODE, tp->mac_mode);
4278 		udelay(40);
4279 
4280 		ap->state = ANEG_STATE_ABILITY_DETECT;
4281 		break;
4282 
4283 	case ANEG_STATE_ABILITY_DETECT:
4284 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4285 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4286 		break;
4287 
4288 	case ANEG_STATE_ACK_DETECT_INIT:
4289 		ap->txconfig |= ANEG_CFG_ACK;
4290 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4291 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4292 		tw32_f(MAC_MODE, tp->mac_mode);
4293 		udelay(40);
4294 
4295 		ap->state = ANEG_STATE_ACK_DETECT;
4296 
4297 		/* fallthru */
4298 	case ANEG_STATE_ACK_DETECT:
4299 		if (ap->ack_match != 0) {
4300 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4301 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4302 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4303 			} else {
4304 				ap->state = ANEG_STATE_AN_ENABLE;
4305 			}
4306 		} else if (ap->ability_match != 0 &&
4307 			   ap->rxconfig == 0) {
4308 			ap->state = ANEG_STATE_AN_ENABLE;
4309 		}
4310 		break;
4311 
4312 	case ANEG_STATE_COMPLETE_ACK_INIT:
4313 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4314 			ret = ANEG_FAILED;
4315 			break;
4316 		}
4317 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4318 			       MR_LP_ADV_HALF_DUPLEX |
4319 			       MR_LP_ADV_SYM_PAUSE |
4320 			       MR_LP_ADV_ASYM_PAUSE |
4321 			       MR_LP_ADV_REMOTE_FAULT1 |
4322 			       MR_LP_ADV_REMOTE_FAULT2 |
4323 			       MR_LP_ADV_NEXT_PAGE |
4324 			       MR_TOGGLE_RX |
4325 			       MR_NP_RX);
4326 		if (ap->rxconfig & ANEG_CFG_FD)
4327 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4328 		if (ap->rxconfig & ANEG_CFG_HD)
4329 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4330 		if (ap->rxconfig & ANEG_CFG_PS1)
4331 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4332 		if (ap->rxconfig & ANEG_CFG_PS2)
4333 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4334 		if (ap->rxconfig & ANEG_CFG_RF1)
4335 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4336 		if (ap->rxconfig & ANEG_CFG_RF2)
4337 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4338 		if (ap->rxconfig & ANEG_CFG_NP)
4339 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4340 
4341 		ap->link_time = ap->cur_time;
4342 
4343 		ap->flags ^= (MR_TOGGLE_TX);
4344 		if (ap->rxconfig & 0x0008)
4345 			ap->flags |= MR_TOGGLE_RX;
4346 		if (ap->rxconfig & ANEG_CFG_NP)
4347 			ap->flags |= MR_NP_RX;
4348 		ap->flags |= MR_PAGE_RX;
4349 
4350 		ap->state = ANEG_STATE_COMPLETE_ACK;
4351 		ret = ANEG_TIMER_ENAB;
4352 		break;
4353 
4354 	case ANEG_STATE_COMPLETE_ACK:
4355 		if (ap->ability_match != 0 &&
4356 		    ap->rxconfig == 0) {
4357 			ap->state = ANEG_STATE_AN_ENABLE;
4358 			break;
4359 		}
4360 		delta = ap->cur_time - ap->link_time;
4361 		if (delta > ANEG_STATE_SETTLE_TIME) {
4362 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4363 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4364 			} else {
4365 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4366 				    !(ap->flags & MR_NP_RX)) {
4367 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4368 				} else {
4369 					ret = ANEG_FAILED;
4370 				}
4371 			}
4372 		}
4373 		break;
4374 
4375 	case ANEG_STATE_IDLE_DETECT_INIT:
4376 		ap->link_time = ap->cur_time;
4377 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4378 		tw32_f(MAC_MODE, tp->mac_mode);
4379 		udelay(40);
4380 
4381 		ap->state = ANEG_STATE_IDLE_DETECT;
4382 		ret = ANEG_TIMER_ENAB;
4383 		break;
4384 
4385 	case ANEG_STATE_IDLE_DETECT:
4386 		if (ap->ability_match != 0 &&
4387 		    ap->rxconfig == 0) {
4388 			ap->state = ANEG_STATE_AN_ENABLE;
4389 			break;
4390 		}
4391 		delta = ap->cur_time - ap->link_time;
4392 		if (delta > ANEG_STATE_SETTLE_TIME) {
4393 			/* XXX another gem from the Broadcom driver :( */
4394 			ap->state = ANEG_STATE_LINK_OK;
4395 		}
4396 		break;
4397 
4398 	case ANEG_STATE_LINK_OK:
4399 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4400 		ret = ANEG_DONE;
4401 		break;
4402 
4403 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4404 		/* ??? unimplemented */
4405 		break;
4406 
4407 	case ANEG_STATE_NEXT_PAGE_WAIT:
4408 		/* ??? unimplemented */
4409 		break;
4410 
4411 	default:
4412 		ret = ANEG_FAILED;
4413 		break;
4414 	}
4415 
4416 	return ret;
4417 }
4418 
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421 	int res = 0;
4422 	struct tg3_fiber_aneginfo aninfo;
4423 	int status = ANEG_FAILED;
4424 	unsigned int tick;
4425 	u32 tmp;
4426 
4427 	tw32_f(MAC_TX_AUTO_NEG, 0);
4428 
4429 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431 	udelay(40);
4432 
4433 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434 	udelay(40);
4435 
4436 	memset(&aninfo, 0, sizeof(aninfo));
4437 	aninfo.flags |= MR_AN_ENABLE;
4438 	aninfo.state = ANEG_STATE_UNKNOWN;
4439 	aninfo.cur_time = 0;
4440 	tick = 0;
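	/* Each pass through the state machine takes roughly 1 usec,
	 * so bound the polling at about 195 ms.
	 */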
4441 	while (++tick < 195000) {
4442 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443 		if (status == ANEG_DONE || status == ANEG_FAILED)
4444 			break;
4445 
4446 		udelay(1);
4447 	}
4448 
4449 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450 	tw32_f(MAC_MODE, tp->mac_mode);
4451 	udelay(40);
4452 
4453 	*txflags = aninfo.txconfig;
4454 	*rxflags = aninfo.flags;
4455 
4456 	if (status == ANEG_DONE &&
4457 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458 			     MR_LP_ADV_FULL_DUPLEX)))
4459 		res = 1;
4460 
4461 	return res;
4462 }
4463 
4464 static void tg3_init_bcm8002(struct tg3 *tp)
4465 {
4466 	u32 mac_status = tr32(MAC_STATUS);
4467 	int i;
4468 
4469 	/* Reset when initializing for the first time, or when we have a link. */
4470 	if (tg3_flag(tp, INIT_COMPLETE) &&
4471 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4472 		return;
4473 
4474 	/* Set PLL lock range. */
4475 	tg3_writephy(tp, 0x16, 0x8007);
4476 
4477 	/* SW reset */
4478 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4479 
4480 	/* Wait for reset to complete. */
4481 	/* XXX schedule_timeout() ... */
4482 	for (i = 0; i < 500; i++)
4483 		udelay(10);
4484 
4485 	/* Config mode; select PMA/Ch 1 regs. */
4486 	tg3_writephy(tp, 0x10, 0x8411);
4487 
4488 	/* Enable auto-lock and comdet, select txclk for tx. */
4489 	tg3_writephy(tp, 0x11, 0x0a10);
4490 
4491 	tg3_writephy(tp, 0x18, 0x00a0);
4492 	tg3_writephy(tp, 0x16, 0x41ff);
4493 
4494 	/* Assert and deassert POR. */
4495 	tg3_writephy(tp, 0x13, 0x0400);
4496 	udelay(40);
4497 	tg3_writephy(tp, 0x13, 0x0000);
4498 
4499 	tg3_writephy(tp, 0x11, 0x0a50);
4500 	udelay(40);
4501 	tg3_writephy(tp, 0x11, 0x0a10);
4502 
4503 	/* Wait for signal to stabilize */
4504 	/* XXX schedule_timeout() ... */
4505 	for (i = 0; i < 15000; i++)
4506 		udelay(10);
4507 
4508 	/* Deselect the channel register so we can read the PHYID
4509 	 * later.
4510 	 */
4511 	tg3_writephy(tp, 0x10, 0x8011);
4512 }
4513 
4514 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4515 {
4516 	u16 flowctrl;
4517 	u32 sg_dig_ctrl, sg_dig_status;
4518 	u32 serdes_cfg, expected_sg_dig_ctrl;
4519 	int workaround, port_a;
4520 	int current_link_up;
4521 
4522 	serdes_cfg = 0;
4523 	expected_sg_dig_ctrl = 0;
4524 	workaround = 0;
4525 	port_a = 1;
4526 	current_link_up = 0;
4527 
4528 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4529 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4530 		workaround = 1;
4531 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4532 			port_a = 0;
4533 
4534 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4535 		/* preserve bits 20-23 for voltage regulator */
4536 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4537 	}
4538 
4539 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4540 
4541 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4542 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4543 			if (workaround) {
4544 				u32 val = serdes_cfg;
4545 
4546 				if (port_a)
4547 					val |= 0xc010000;
4548 				else
4549 					val |= 0x4010000;
4550 				tw32_f(MAC_SERDES_CFG, val);
4551 			}
4552 
4553 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4554 		}
4555 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4556 			tg3_setup_flow_control(tp, 0, 0);
4557 			current_link_up = 1;
4558 		}
4559 		goto out;
4560 	}
4561 
4562 	/* Want auto-negotiation.  */
4563 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4564 
4565 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4566 	if (flowctrl & ADVERTISE_1000XPAUSE)
4567 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4568 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4569 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4570 
4571 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4572 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4573 		    tp->serdes_counter &&
4574 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4575 				    MAC_STATUS_RCVD_CFG)) ==
4576 		     MAC_STATUS_PCS_SYNCED)) {
4577 			tp->serdes_counter--;
4578 			current_link_up = 1;
4579 			goto out;
4580 		}
4581 restart_autoneg:
4582 		if (workaround)
4583 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4584 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4585 		udelay(5);
4586 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4587 
4588 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4589 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4590 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4591 				 MAC_STATUS_SIGNAL_DET)) {
4592 		sg_dig_status = tr32(SG_DIG_STATUS);
4593 		mac_status = tr32(MAC_STATUS);
4594 
4595 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4596 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4597 			u32 local_adv = 0, remote_adv = 0;
4598 
4599 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4600 				local_adv |= ADVERTISE_1000XPAUSE;
4601 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4602 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4603 
4604 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4605 				remote_adv |= LPA_1000XPAUSE;
4606 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4607 				remote_adv |= LPA_1000XPAUSE_ASYM;
4608 
4609 			tp->link_config.rmt_adv =
4610 					   mii_adv_to_ethtool_adv_x(remote_adv);
4611 
4612 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4613 			current_link_up = 1;
4614 			tp->serdes_counter = 0;
4615 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4616 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4617 			if (tp->serdes_counter)
4618 				tp->serdes_counter--;
4619 			else {
4620 				if (workaround) {
4621 					u32 val = serdes_cfg;
4622 
4623 					if (port_a)
4624 						val |= 0xc010000;
4625 					else
4626 						val |= 0x4010000;
4627 
4628 					tw32_f(MAC_SERDES_CFG, val);
4629 				}
4630 
4631 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4632 				udelay(40);
4633 
4634 				/* Link parallel detection - link is up only
4635 				 * if we have PCS_SYNC and are not receiving
4636 				 * config code words. */
4637 				mac_status = tr32(MAC_STATUS);
4638 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4639 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4640 					tg3_setup_flow_control(tp, 0, 0);
4641 					current_link_up = 1;
4642 					tp->phy_flags |=
4643 						TG3_PHYFLG_PARALLEL_DETECT;
4644 					tp->serdes_counter =
4645 						SERDES_PARALLEL_DET_TIMEOUT;
4646 				} else
4647 					goto restart_autoneg;
4648 			}
4649 		}
4650 	} else {
4651 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4652 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653 	}
4654 
4655 out:
4656 	return current_link_up;
4657 }
4658 
4659 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4660 {
4661 	int current_link_up = 0;
4662 
4663 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4664 		goto out;
4665 
4666 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4667 		u32 txflags, rxflags;
4668 		int i;
4669 
4670 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4671 			u32 local_adv = 0, remote_adv = 0;
4672 
4673 			if (txflags & ANEG_CFG_PS1)
4674 				local_adv |= ADVERTISE_1000XPAUSE;
4675 			if (txflags & ANEG_CFG_PS2)
4676 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4677 
4678 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4679 				remote_adv |= LPA_1000XPAUSE;
4680 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4681 				remote_adv |= LPA_1000XPAUSE_ASYM;
4682 
4683 			tp->link_config.rmt_adv =
4684 					   mii_adv_to_ethtool_adv_x(remote_adv);
4685 
4686 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4687 
4688 			current_link_up = 1;
4689 		}
4690 		for (i = 0; i < 30; i++) {
4691 			udelay(20);
4692 			tw32_f(MAC_STATUS,
4693 			       (MAC_STATUS_SYNC_CHANGED |
4694 				MAC_STATUS_CFG_CHANGED));
4695 			udelay(40);
4696 			if ((tr32(MAC_STATUS) &
4697 			     (MAC_STATUS_SYNC_CHANGED |
4698 			      MAC_STATUS_CFG_CHANGED)) == 0)
4699 				break;
4700 		}
4701 
4702 		mac_status = tr32(MAC_STATUS);
4703 		if (current_link_up == 0 &&
4704 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4705 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4706 			current_link_up = 1;
4707 	} else {
4708 		tg3_setup_flow_control(tp, 0, 0);
4709 
4710 		/* Forcing 1000FD link up. */
4711 		current_link_up = 1;
4712 
4713 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4714 		udelay(40);
4715 
4716 		tw32_f(MAC_MODE, tp->mac_mode);
4717 		udelay(40);
4718 	}
4719 
4720 out:
4721 	return current_link_up;
4722 }
4723 
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726 	u32 orig_pause_cfg;
4727 	u16 orig_active_speed;
4728 	u8 orig_active_duplex;
4729 	u32 mac_status;
4730 	int current_link_up;
4731 	int i;
4732 
4733 	orig_pause_cfg = tp->link_config.active_flowctrl;
4734 	orig_active_speed = tp->link_config.active_speed;
4735 	orig_active_duplex = tp->link_config.active_duplex;
4736 
4737 	if (!tg3_flag(tp, HW_AUTONEG) &&
4738 	    netif_carrier_ok(tp->dev) &&
4739 	    tg3_flag(tp, INIT_COMPLETE)) {
4740 		mac_status = tr32(MAC_STATUS);
4741 		mac_status &= (MAC_STATUS_PCS_SYNCED |
4742 			       MAC_STATUS_SIGNAL_DET |
4743 			       MAC_STATUS_CFG_CHANGED |
4744 			       MAC_STATUS_RCVD_CFG);
4745 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746 				   MAC_STATUS_SIGNAL_DET)) {
4747 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748 					    MAC_STATUS_CFG_CHANGED));
4749 			return 0;
4750 		}
4751 	}
4752 
4753 	tw32_f(MAC_TX_AUTO_NEG, 0);
4754 
4755 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757 	tw32_f(MAC_MODE, tp->mac_mode);
4758 	udelay(40);
4759 
4760 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761 		tg3_init_bcm8002(tp);
4762 
4763 	/* Enable link change events even while polling the serdes.  */
4764 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765 	udelay(40);
4766 
4767 	current_link_up = 0;
4768 	tp->link_config.rmt_adv = 0;
4769 	mac_status = tr32(MAC_STATUS);
4770 
4771 	if (tg3_flag(tp, HW_AUTONEG))
4772 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773 	else
4774 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775 
4776 	tp->napi[0].hw_status->status =
4777 		(SD_STATUS_UPDATED |
4778 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779 
4780 	for (i = 0; i < 100; i++) {
4781 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782 				    MAC_STATUS_CFG_CHANGED));
4783 		udelay(5);
4784 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785 					 MAC_STATUS_CFG_CHANGED |
4786 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787 			break;
4788 	}
4789 
4790 	mac_status = tr32(MAC_STATUS);
4791 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792 		current_link_up = 0;
4793 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794 		    tp->serdes_counter == 0) {
4795 			tw32_f(MAC_MODE, (tp->mac_mode |
4796 					  MAC_MODE_SEND_CONFIGS));
4797 			udelay(1);
4798 			tw32_f(MAC_MODE, tp->mac_mode);
4799 		}
4800 	}
4801 
4802 	if (current_link_up == 1) {
4803 		tp->link_config.active_speed = SPEED_1000;
4804 		tp->link_config.active_duplex = DUPLEX_FULL;
4805 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806 				    LED_CTRL_LNKLED_OVERRIDE |
4807 				    LED_CTRL_1000MBPS_ON));
4808 	} else {
4809 		tp->link_config.active_speed = SPEED_INVALID;
4810 		tp->link_config.active_duplex = DUPLEX_INVALID;
4811 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812 				    LED_CTRL_LNKLED_OVERRIDE |
4813 				    LED_CTRL_TRAFFIC_OVERRIDE));
4814 	}
4815 
4816 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4817 		if (current_link_up)
4818 			netif_carrier_on(tp->dev);
4819 		else
4820 			netif_carrier_off(tp->dev);
4821 		tg3_link_report(tp);
4822 	} else {
4823 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824 		if (orig_pause_cfg != now_pause_cfg ||
4825 		    orig_active_speed != tp->link_config.active_speed ||
4826 		    orig_active_duplex != tp->link_config.active_duplex)
4827 			tg3_link_report(tp);
4828 	}
4829 
4830 	return 0;
4831 }
4832 
4833 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4834 {
4835 	int current_link_up, err = 0;
4836 	u32 bmsr, bmcr;
4837 	u16 current_speed;
4838 	u8 current_duplex;
4839 	u32 local_adv, remote_adv;
4840 
4841 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842 	tw32_f(MAC_MODE, tp->mac_mode);
4843 	udelay(40);
4844 
4845 	tw32(MAC_EVENT, 0);
4846 
4847 	tw32_f(MAC_STATUS,
4848 	     (MAC_STATUS_SYNC_CHANGED |
4849 	      MAC_STATUS_CFG_CHANGED |
4850 	      MAC_STATUS_MI_COMPLETION |
4851 	      MAC_STATUS_LNKSTATE_CHANGED));
4852 	udelay(40);
4853 
4854 	if (force_reset)
4855 		tg3_phy_reset(tp);
4856 
4857 	current_link_up = 0;
4858 	current_speed = SPEED_INVALID;
4859 	current_duplex = DUPLEX_INVALID;
4860 	tp->link_config.rmt_adv = 0;
4861 
4862 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4863 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4864 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4865 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4866 			bmsr |= BMSR_LSTATUS;
4867 		else
4868 			bmsr &= ~BMSR_LSTATUS;
4869 	}
4870 
4871 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4872 
4873 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4874 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4875 		/* do nothing, just check for link up at the end */
4876 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4877 		u32 adv, newadv;
4878 
4879 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4880 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4881 				 ADVERTISE_1000XPAUSE |
4882 				 ADVERTISE_1000XPSE_ASYM |
4883 				 ADVERTISE_SLCT);
4884 
4885 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4886 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4887 
4888 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4889 			tg3_writephy(tp, MII_ADVERTISE, newadv);
4890 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4891 			tg3_writephy(tp, MII_BMCR, bmcr);
4892 
4893 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4895 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896 
4897 			return err;
4898 		}
4899 	} else {
4900 		u32 new_bmcr;
4901 
4902 		bmcr &= ~BMCR_SPEED1000;
4903 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4904 
4905 		if (tp->link_config.duplex == DUPLEX_FULL)
4906 			new_bmcr |= BMCR_FULLDPLX;
4907 
4908 		if (new_bmcr != bmcr) {
4909 			/* BMCR_SPEED1000 is a reserved bit that needs
4910 			 * to be set on write.
4911 			 */
4912 			new_bmcr |= BMCR_SPEED1000;
4913 
4914 			/* Force a linkdown */
4915 			if (netif_carrier_ok(tp->dev)) {
4916 				u32 adv;
4917 
4918 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4919 				adv &= ~(ADVERTISE_1000XFULL |
4920 					 ADVERTISE_1000XHALF |
4921 					 ADVERTISE_SLCT);
4922 				tg3_writephy(tp, MII_ADVERTISE, adv);
4923 				tg3_writephy(tp, MII_BMCR, bmcr |
4924 							   BMCR_ANRESTART |
4925 							   BMCR_ANENABLE);
4926 				udelay(10);
4927 				netif_carrier_off(tp->dev);
4928 			}
4929 			tg3_writephy(tp, MII_BMCR, new_bmcr);
4930 			bmcr = new_bmcr;
4931 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4932 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4933 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4934 			    ASIC_REV_5714) {
4935 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4936 					bmsr |= BMSR_LSTATUS;
4937 				else
4938 					bmsr &= ~BMSR_LSTATUS;
4939 			}
4940 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4941 		}
4942 	}
4943 
4944 	if (bmsr & BMSR_LSTATUS) {
4945 		current_speed = SPEED_1000;
4946 		current_link_up = 1;
4947 		if (bmcr & BMCR_FULLDPLX)
4948 			current_duplex = DUPLEX_FULL;
4949 		else
4950 			current_duplex = DUPLEX_HALF;
4951 
4952 		local_adv = 0;
4953 		remote_adv = 0;
4954 
4955 		if (bmcr & BMCR_ANENABLE) {
4956 			u32 common;
4957 
4958 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4959 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4960 			common = local_adv & remote_adv;
4961 			if (common & (ADVERTISE_1000XHALF |
4962 				      ADVERTISE_1000XFULL)) {
4963 				if (common & ADVERTISE_1000XFULL)
4964 					current_duplex = DUPLEX_FULL;
4965 				else
4966 					current_duplex = DUPLEX_HALF;
4967 
4968 				tp->link_config.rmt_adv =
4969 					   mii_adv_to_ethtool_adv_x(remote_adv);
4970 			} else if (!tg3_flag(tp, 5780_CLASS)) {
4971 				/* Link is up via parallel detect */
4972 			} else {
4973 				current_link_up = 0;
4974 			}
4975 		}
4976 	}
4977 
4978 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4979 		tg3_setup_flow_control(tp, local_adv, remote_adv);
4980 
4981 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4982 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4983 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4984 
4985 	tw32_f(MAC_MODE, tp->mac_mode);
4986 	udelay(40);
4987 
4988 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4989 
4990 	tp->link_config.active_speed = current_speed;
4991 	tp->link_config.active_duplex = current_duplex;
4992 
4993 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4994 		if (current_link_up)
4995 			netif_carrier_on(tp->dev);
4996 		else {
4997 			netif_carrier_off(tp->dev);
4998 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999 		}
5000 		tg3_link_report(tp);
5001 	}
5002 	return err;
5003 }
5004 
5005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5006 {
5007 	if (tp->serdes_counter) {
5008 		/* Give autoneg time to complete. */
5009 		tp->serdes_counter--;
5010 		return;
5011 	}
5012 
5013 	if (!netif_carrier_ok(tp->dev) &&
5014 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5015 		u32 bmcr;
5016 
5017 		tg3_readphy(tp, MII_BMCR, &bmcr);
5018 		if (bmcr & BMCR_ANENABLE) {
5019 			u32 phy1, phy2;
5020 
5021 			/* Select shadow register 0x1f */
5022 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5023 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5024 
5025 			/* Select expansion interrupt status register */
5026 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5027 					 MII_TG3_DSP_EXP1_INT_STAT);
5028 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5029 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5030 
5031 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5032 				/* We have signal detect and not receiving
5033 				 * config code words, link is up by parallel
5034 				 * detection.
5035 				 */
5036 
5037 				bmcr &= ~BMCR_ANENABLE;
5038 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5039 				tg3_writephy(tp, MII_BMCR, bmcr);
5040 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5041 			}
5042 		}
5043 	} else if (netif_carrier_ok(tp->dev) &&
5044 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5045 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5046 		u32 phy2;
5047 
5048 		/* Select expansion interrupt status register */
5049 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5050 				 MII_TG3_DSP_EXP1_INT_STAT);
5051 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5052 		if (phy2 & 0x20) {
5053 			u32 bmcr;
5054 
5055 			/* Config code words received, turn on autoneg. */
5056 			tg3_readphy(tp, MII_BMCR, &bmcr);
5057 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5058 
5059 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5060 
5061 		}
5062 	}
5063 }
5064 
5065 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5066 {
5067 	u32 val;
5068 	int err;
5069 
5070 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5071 		err = tg3_setup_fiber_phy(tp, force_reset);
5072 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5073 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5074 	else
5075 		err = tg3_setup_copper_phy(tp, force_reset);
5076 
5077 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5078 		u32 scale;
5079 
5080 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5081 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5082 			scale = 65;
5083 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5084 			scale = 6;
5085 		else
5086 			scale = 12;
5087 
5088 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5089 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5090 		tw32(GRC_MISC_CFG, val);
5091 	}
5092 
5093 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5094 	      (6 << TX_LENGTHS_IPG_SHIFT);
5095 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5096 		val |= tr32(MAC_TX_LENGTHS) &
5097 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5098 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5099 
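	/* Half-duplex gigabit requires the extended slot time used for
	 * carrier extension, hence the much larger value below.
	 */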
5100 	if (tp->link_config.active_speed == SPEED_1000 &&
5101 	    tp->link_config.active_duplex == DUPLEX_HALF)
5102 		tw32(MAC_TX_LENGTHS, val |
5103 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5104 	else
5105 		tw32(MAC_TX_LENGTHS, val |
5106 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5107 
5108 	if (!tg3_flag(tp, 5705_PLUS)) {
5109 		if (netif_carrier_ok(tp->dev)) {
5110 			tw32(HOSTCC_STAT_COAL_TICKS,
5111 			     tp->coal.stats_block_coalesce_usecs);
5112 		} else {
5113 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5114 		}
5115 	}
5116 
5117 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5118 		val = tr32(PCIE_PWR_MGMT_THRESH);
5119 		if (!netif_carrier_ok(tp->dev))
5120 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5121 			      tp->pwrmgmt_thresh;
5122 		else
5123 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5124 		tw32(PCIE_PWR_MGMT_THRESH, val);
5125 	}
5126 
5127 	return err;
5128 }
5129 
5130 static inline int tg3_irq_sync(struct tg3 *tp)
5131 {
5132 	return tp->irq_sync;
5133 }
5134 
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137 	int i;
5138 
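	/* Registers are stored in the dump buffer at the same byte
	 * offset they occupy in the device's register space.
	 */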
5139 	dst = (u32 *)((u8 *)dst + off);
5140 	for (i = 0; i < len; i += sizeof(u32))
5141 		*dst++ = tr32(off + i);
5142 }
5143 
5144 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5145 {
5146 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5147 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5148 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5149 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5150 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5151 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5152 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5153 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5154 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5155 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5156 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5157 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5158 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5159 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5160 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5161 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5162 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5163 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5164 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5165 
5166 	if (tg3_flag(tp, SUPPORT_MSIX))
5167 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5168 
5169 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5170 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5171 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5172 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5173 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5174 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5175 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5176 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5177 
5178 	if (!tg3_flag(tp, 5705_PLUS)) {
5179 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5180 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5181 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5182 	}
5183 
5184 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5185 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5186 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5187 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5188 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5189 
5190 	if (tg3_flag(tp, NVRAM))
5191 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5192 }
5193 
5194 static void tg3_dump_state(struct tg3 *tp)
5195 {
5196 	int i;
5197 	u32 *regs;
5198 
5199 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5200 	if (!regs) {
5201 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5202 		return;
5203 	}
5204 
5205 	if (tg3_flag(tp, PCI_EXPRESS)) {
5206 		/* Read up to but not including private PCI registers */
5207 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5208 			regs[i / sizeof(u32)] = tr32(i);
5209 	} else
5210 		tg3_dump_legacy_regs(tp, regs);
5211 
5212 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5213 		if (!regs[i + 0] && !regs[i + 1] &&
5214 		    !regs[i + 2] && !regs[i + 3])
5215 			continue;
5216 
5217 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5218 			   i * 4,
5219 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5220 	}
5221 
5222 	kfree(regs);
5223 
5224 	for (i = 0; i < tp->irq_cnt; i++) {
5225 		struct tg3_napi *tnapi = &tp->napi[i];
5226 
5227 		/* SW status block */
5228 		netdev_err(tp->dev,
5229 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5230 			   i,
5231 			   tnapi->hw_status->status,
5232 			   tnapi->hw_status->status_tag,
5233 			   tnapi->hw_status->rx_jumbo_consumer,
5234 			   tnapi->hw_status->rx_consumer,
5235 			   tnapi->hw_status->rx_mini_consumer,
5236 			   tnapi->hw_status->idx[0].rx_producer,
5237 			   tnapi->hw_status->idx[0].tx_consumer);
5238 
5239 		netdev_err(tp->dev,
5240 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5241 			   i,
5242 			   tnapi->last_tag, tnapi->last_irq_tag,
5243 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5244 			   tnapi->rx_rcb_ptr,
5245 			   tnapi->prodring.rx_std_prod_idx,
5246 			   tnapi->prodring.rx_std_cons_idx,
5247 			   tnapi->prodring.rx_jmb_prod_idx,
5248 			   tnapi->prodring.rx_jmb_cons_idx);
5249 	}
5250 }
5251 
5252 /* This is called whenever we suspect that the system chipset is re-
5253  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254  * is bogus tx completions. We try to recover by setting the
5255  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256  * in the workqueue.
5257  */
5258 static void tg3_tx_recover(struct tg3 *tp)
5259 {
5260 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5261 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5262 
5263 	netdev_warn(tp->dev,
5264 		    "The system may be re-ordering memory-mapped I/O "
5265 		    "cycles to the network device, attempting to recover. "
5266 		    "Please report the problem to the driver maintainer "
5267 		    "and include system chipset information.\n");
5268 
5269 	spin_lock(&tp->lock);
5270 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5271 	spin_unlock(&tp->lock);
5272 }
5273 
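/* Free descriptor count for the tx ring.  As an illustration, assuming
 * a 512-entry ring (TG3_TX_RING_SIZE) and tx_pending = 512, tx_prod = 10
 * and tx_cons = 500 give (10 - 500) & 511 = 22 in-flight descriptors,
 * leaving 490 available.
 */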
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276 	/* Tell compiler to fetch tx indices from memory. */
5277 	barrier();
5278 	return tnapi->tx_pending -
5279 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
5281 
5282 /* Tigon3 never reports partial packet sends.  So we do not
5283  * need special logic to handle SKBs that have not had all
5284  * of their frags sent yet, like SunGEM does.
5285  */
5286 static void tg3_tx(struct tg3_napi *tnapi)
5287 {
5288 	struct tg3 *tp = tnapi->tp;
5289 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5290 	u32 sw_idx = tnapi->tx_cons;
5291 	struct netdev_queue *txq;
5292 	int index = tnapi - tp->napi;
5293 	unsigned int pkts_compl = 0, bytes_compl = 0;
5294 
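	/* With TSS, the first napi context has no tx ring, so tx queue
	 * i is serviced by tp->napi[i + 1]; compensate here.
	 */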
5295 	if (tg3_flag(tp, ENABLE_TSS))
5296 		index--;
5297 
5298 	txq = netdev_get_tx_queue(tp->dev, index);
5299 
5300 	while (sw_idx != hw_idx) {
5301 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5302 		struct sk_buff *skb = ri->skb;
5303 		int i, tx_bug = 0;
5304 
5305 		if (unlikely(skb == NULL)) {
5306 			tg3_tx_recover(tp);
5307 			return;
5308 		}
5309 
5310 		pci_unmap_single(tp->pdev,
5311 				 dma_unmap_addr(ri, mapping),
5312 				 skb_headlen(skb),
5313 				 PCI_DMA_TODEVICE);
5314 
5315 		ri->skb = NULL;
5316 
5317 		while (ri->fragmented) {
5318 			ri->fragmented = false;
5319 			sw_idx = NEXT_TX(sw_idx);
5320 			ri = &tnapi->tx_buffers[sw_idx];
5321 		}
5322 
5323 		sw_idx = NEXT_TX(sw_idx);
5324 
5325 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5326 			ri = &tnapi->tx_buffers[sw_idx];
5327 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5328 				tx_bug = 1;
5329 
5330 			pci_unmap_page(tp->pdev,
5331 				       dma_unmap_addr(ri, mapping),
5332 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5333 				       PCI_DMA_TODEVICE);
5334 
5335 			while (ri->fragmented) {
5336 				ri->fragmented = false;
5337 				sw_idx = NEXT_TX(sw_idx);
5338 				ri = &tnapi->tx_buffers[sw_idx];
5339 			}
5340 
5341 			sw_idx = NEXT_TX(sw_idx);
5342 		}
5343 
5344 		pkts_compl++;
5345 		bytes_compl += skb->len;
5346 
5347 		dev_kfree_skb(skb);
5348 
5349 		if (unlikely(tx_bug)) {
5350 			tg3_tx_recover(tp);
5351 			return;
5352 		}
5353 	}
5354 
5355 	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5356 
5357 	tnapi->tx_cons = sw_idx;
5358 
5359 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5360 	 * before checking for netif_queue_stopped().  Without the
5361 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5362 	 * will miss it and cause the queue to be stopped forever.
5363 	 */
5364 	smp_mb();
5365 
5366 	if (unlikely(netif_tx_queue_stopped(txq) &&
5367 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5368 		__netif_tx_lock(txq, smp_processor_id());
5369 		if (netif_tx_queue_stopped(txq) &&
5370 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5371 			netif_tx_wake_queue(txq);
5372 		__netif_tx_unlock(txq);
5373 	}
5374 }
5375 
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378 	if (!ri->data)
5379 		return;
5380 
5381 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382 			 map_sz, PCI_DMA_FROMDEVICE);
5383 	kfree(ri->data);
5384 	ri->data = NULL;
5385 }
5386 
5387 /* Returns size of the rx data buffer allocated or < 0 on error.
5388  *
5389  * We only need to fill in the address because the other members
5390  * of the RX descriptor are invariant, see tg3_init_rings.
5391  *
5392  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5393  * posting buffers we only dirty the first cache line of the RX
5394  * descriptor (containing the address).  Whereas for the RX status
5395  * buffers the cpu only reads the last cacheline of the RX descriptor
5396  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397  */
5398 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5399 			    u32 opaque_key, u32 dest_idx_unmasked)
5400 {
5401 	struct tg3_rx_buffer_desc *desc;
5402 	struct ring_info *map;
5403 	u8 *data;
5404 	dma_addr_t mapping;
5405 	int skb_size, data_size, dest_idx;
5406 
5407 	switch (opaque_key) {
5408 	case RXD_OPAQUE_RING_STD:
5409 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5410 		desc = &tpr->rx_std[dest_idx];
5411 		map = &tpr->rx_std_buffers[dest_idx];
5412 		data_size = tp->rx_pkt_map_sz;
5413 		break;
5414 
5415 	case RXD_OPAQUE_RING_JUMBO:
5416 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5417 		desc = &tpr->rx_jmb[dest_idx].std;
5418 		map = &tpr->rx_jmb_buffers[dest_idx];
5419 		data_size = TG3_RX_JMB_MAP_SZ;
5420 		break;
5421 
5422 	default:
5423 		return -EINVAL;
5424 	}
5425 
5426 	/* Do not overwrite any of the map or rp information
5427 	 * until we are sure we can commit to a new buffer.
5428 	 *
5429 	 * Callers depend upon this behavior and assume that
5430 	 * we leave everything unchanged if we fail.
5431 	 */
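	/* Size the buffer so the receive path can later hand it to
	 * build_skb(): headroom plus data, with the shared info
	 * structure appended at the end.
	 */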
5432 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5433 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434 	data = kmalloc(skb_size, GFP_ATOMIC);
5435 	if (!data)
5436 		return -ENOMEM;
5437 
5438 	mapping = pci_map_single(tp->pdev,
5439 				 data + TG3_RX_OFFSET(tp),
5440 				 data_size,
5441 				 PCI_DMA_FROMDEVICE);
5442 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5443 		kfree(data);
5444 		return -EIO;
5445 	}
5446 
5447 	map->data = data;
5448 	dma_unmap_addr_set(map, mapping, mapping);
5449 
5450 	desc->addr_hi = ((u64)mapping >> 32);
5451 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5452 
5453 	return data_size;
5454 }
5455 
5456 /* We only need to move over in the address because the other
5457  * members of the RX descriptor are invariant.  See notes above
5458  * tg3_alloc_rx_data for full details.
5459  */
5460 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5461 			   struct tg3_rx_prodring_set *dpr,
5462 			   u32 opaque_key, int src_idx,
5463 			   u32 dest_idx_unmasked)
5464 {
5465 	struct tg3 *tp = tnapi->tp;
5466 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5467 	struct ring_info *src_map, *dest_map;
5468 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5469 	int dest_idx;
5470 
5471 	switch (opaque_key) {
5472 	case RXD_OPAQUE_RING_STD:
5473 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5474 		dest_desc = &dpr->rx_std[dest_idx];
5475 		dest_map = &dpr->rx_std_buffers[dest_idx];
5476 		src_desc = &spr->rx_std[src_idx];
5477 		src_map = &spr->rx_std_buffers[src_idx];
5478 		break;
5479 
5480 	case RXD_OPAQUE_RING_JUMBO:
5481 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5482 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5483 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5484 		src_desc = &spr->rx_jmb[src_idx].std;
5485 		src_map = &spr->rx_jmb_buffers[src_idx];
5486 		break;
5487 
5488 	default:
5489 		return;
5490 	}
5491 
5492 	dest_map->data = src_map->data;
5493 	dma_unmap_addr_set(dest_map, mapping,
5494 			   dma_unmap_addr(src_map, mapping));
5495 	dest_desc->addr_hi = src_desc->addr_hi;
5496 	dest_desc->addr_lo = src_desc->addr_lo;
5497 
5498 	/* Ensure that the update to the data pointer happens after the
5499 	 * physical addresses have been transferred to the new BD location.
5500 	 */
5501 	smp_wmb();
5502 
5503 	src_map->data = NULL;
5504 }
5505 
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507  * buffers to the chip, and one special ring the chip uses to report
5508  * status back to the host.
5509  *
5510  * The special ring reports the status of received packets to the
5511  * host.  The chip does not write into the original descriptor the
5512  * RX buffer was obtained from.  The chip simply takes the original
5513  * descriptor as provided by the host, updates the status and length
5514  * field, then writes this into the next status ring entry.
5515  *
5516  * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5518  * it is first placed into the on-chip RAM.  When the packet's length
5519  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
5520  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521  * which is within the range of the new packet's length is chosen.
5522  *
5523  * The "separate ring for rx status" scheme may sound odd, but it makes
5524  * sense from a cache coherency perspective.  If only the host writes
5525  * to the buffer post rings, and only the chip writes to the rx status
5526  * rings, then cache lines never move beyond shared-modified state.
5527  * If both the host and chip were to write into the same ring, cache line
5528  * eviction could occur since both entities want it in an exclusive state.
5529  */
5530 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5531 {
5532 	struct tg3 *tp = tnapi->tp;
5533 	u32 work_mask, rx_std_posted = 0;
5534 	u32 std_prod_idx, jmb_prod_idx;
5535 	u32 sw_idx = tnapi->rx_rcb_ptr;
5536 	u16 hw_idx;
5537 	int received;
5538 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5539 
5540 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5541 	/*
5542 	 * We need to order the read of hw_idx and the read of
5543 	 * the opaque cookie.
5544 	 */
5545 	rmb();
5546 	work_mask = 0;
5547 	received = 0;
5548 	std_prod_idx = tpr->rx_std_prod_idx;
5549 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5550 	while (sw_idx != hw_idx && budget > 0) {
5551 		struct ring_info *ri;
5552 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5553 		unsigned int len;
5554 		struct sk_buff *skb;
5555 		dma_addr_t dma_addr;
5556 		u32 opaque_key, desc_idx, *post_ptr;
5557 		u8 *data;
5558 
5559 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5560 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5561 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5562 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5563 			dma_addr = dma_unmap_addr(ri, mapping);
5564 			data = ri->data;
5565 			post_ptr = &std_prod_idx;
5566 			rx_std_posted++;
5567 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5568 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5569 			dma_addr = dma_unmap_addr(ri, mapping);
5570 			data = ri->data;
5571 			post_ptr = &jmb_prod_idx;
5572 		} else
5573 			goto next_pkt_nopost;
5574 
5575 		work_mask |= opaque_key;
5576 
5577 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5578 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5579 		drop_it:
5580 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5581 				       desc_idx, *post_ptr);
5582 		drop_it_no_recycle:
5583 			/* Other statistics are kept track of by the card. */
5584 			tp->rx_dropped++;
5585 			goto next_pkt;
5586 		}
5587 
5588 		prefetch(data + TG3_RX_OFFSET(tp));
5589 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5590 		      ETH_FCS_LEN;
5591 
5592 		if (len > TG3_RX_COPY_THRESH(tp)) {
5593 			int skb_size;
5594 
5595 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5596 						    *post_ptr);
5597 			if (skb_size < 0)
5598 				goto drop_it;
5599 
5600 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5601 					 PCI_DMA_FROMDEVICE);
5602 
5603 			skb = build_skb(data);
5604 			if (!skb) {
5605 				kfree(data);
5606 				goto drop_it_no_recycle;
5607 			}
5608 			skb_reserve(skb, TG3_RX_OFFSET(tp));
5609 			/* Ensure that the update to the data happens
5610 			 * after the usage of the old DMA mapping.
5611 			 */
5612 			smp_wmb();
5613 
5614 			ri->data = NULL;
5615 
5616 		} else {
5617 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5618 				       desc_idx, *post_ptr);
5619 
5620 			skb = netdev_alloc_skb(tp->dev,
5621 					       len + TG3_RAW_IP_ALIGN);
5622 			if (skb == NULL)
5623 				goto drop_it_no_recycle;
5624 
5625 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
5626 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5627 			memcpy(skb->data,
5628 			       data + TG3_RX_OFFSET(tp),
5629 			       len);
5630 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5631 		}
5632 
5633 		skb_put(skb, len);
5634 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5635 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5636 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5637 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5638 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5639 		else
5640 			skb_checksum_none_assert(skb);
5641 
5642 		skb->protocol = eth_type_trans(skb, tp->dev);
5643 
5644 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5645 		    skb->protocol != htons(ETH_P_8021Q)) {
5646 			dev_kfree_skb(skb);
5647 			goto drop_it_no_recycle;
5648 		}
5649 
5650 		if (desc->type_flags & RXD_FLAG_VLAN &&
5651 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5652 			__vlan_hwaccel_put_tag(skb,
5653 					       desc->err_vlan & RXD_VLAN_MASK);
5654 
5655 		napi_gro_receive(&tnapi->napi, skb);
5656 
5657 		received++;
5658 		budget--;
5659 
5660 next_pkt:
5661 		(*post_ptr)++;
5662 
5663 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5664 			tpr->rx_std_prod_idx = std_prod_idx &
5665 					       tp->rx_std_ring_mask;
5666 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5667 				     tpr->rx_std_prod_idx);
5668 			work_mask &= ~RXD_OPAQUE_RING_STD;
5669 			rx_std_posted = 0;
5670 		}
5671 next_pkt_nopost:
5672 		sw_idx++;
5673 		sw_idx &= tp->rx_ret_ring_mask;
5674 
5675 		/* Refresh hw_idx to see if there is new work */
5676 		if (sw_idx == hw_idx) {
5677 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5678 			rmb();
5679 		}
5680 	}
5681 
5682 	/* ACK the status ring. */
5683 	tnapi->rx_rcb_ptr = sw_idx;
5684 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5685 
5686 	/* Refill RX ring(s). */
5687 	if (!tg3_flag(tp, ENABLE_RSS)) {
5688 		if (work_mask & RXD_OPAQUE_RING_STD) {
5689 			tpr->rx_std_prod_idx = std_prod_idx &
5690 					       tp->rx_std_ring_mask;
5691 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5692 				     tpr->rx_std_prod_idx);
5693 		}
5694 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5695 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5696 					       tp->rx_jmb_ring_mask;
5697 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5698 				     tpr->rx_jmb_prod_idx);
5699 		}
5700 		mmiowb();
5701 	} else if (work_mask) {
5702 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5703 		 * updated before the producer indices can be updated.
5704 		 */
5705 		smp_wmb();
5706 
5707 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5708 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5709 
5710 		if (tnapi != &tp->napi[1])
5711 			napi_schedule(&tp->napi[1].napi);
5712 	}
5713 
5714 	return received;
5715 }
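
/* Editor's sketch (hypothetical, illustration only): the ring index
 * arithmetic used throughout tg3_rx().  All ring sizes are powers of
 * two, so masking replaces a modulo and handles wraparound; with a
 * 512-entry return ring (mask = 511), advancing from index 511 yields
 * index 0.
 */
static inline u32 tg3_ring_advance(u32 idx, u32 ring_mask)
{
	return (idx + 1) & ring_mask;	/* == (idx + 1) % (ring_mask + 1) */
}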
5716 
5717 static void tg3_poll_link(struct tg3 *tp)
5718 {
5719 	/* handle link change and other phy events */
5720 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5721 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5722 
5723 		if (sblk->status & SD_STATUS_LINK_CHG) {
5724 			sblk->status = SD_STATUS_UPDATED |
5725 				       (sblk->status & ~SD_STATUS_LINK_CHG);
5726 			spin_lock(&tp->lock);
5727 			if (tg3_flag(tp, USE_PHYLIB)) {
5728 				tw32_f(MAC_STATUS,
5729 				     (MAC_STATUS_SYNC_CHANGED |
5730 				      MAC_STATUS_CFG_CHANGED |
5731 				      MAC_STATUS_MI_COMPLETION |
5732 				      MAC_STATUS_LNKSTATE_CHANGED));
5733 				udelay(40);
5734 			} else
5735 				tg3_setup_phy(tp, 0);
5736 			spin_unlock(&tp->lock);
5737 		}
5738 	}
5739 }
5740 
5741 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5742 				struct tg3_rx_prodring_set *dpr,
5743 				struct tg3_rx_prodring_set *spr)
5744 {
5745 	u32 si, di, cpycnt, src_prod_idx;
5746 	int i, err = 0;
5747 
5748 	while (1) {
5749 		src_prod_idx = spr->rx_std_prod_idx;
5750 
5751 		/* Make sure updates to the rx_std_buffers[] entries and the
5752 		 * standard producer index are seen in the correct order.
5753 		 */
5754 		smp_rmb();
5755 
5756 		if (spr->rx_std_cons_idx == src_prod_idx)
5757 			break;
5758 
5759 		if (spr->rx_std_cons_idx < src_prod_idx)
5760 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5761 		else
5762 			cpycnt = tp->rx_std_ring_mask + 1 -
5763 				 spr->rx_std_cons_idx;
5764 
5765 		cpycnt = min(cpycnt,
5766 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5767 
5768 		si = spr->rx_std_cons_idx;
5769 		di = dpr->rx_std_prod_idx;
5770 
5771 		for (i = di; i < di + cpycnt; i++) {
5772 			if (dpr->rx_std_buffers[i].data) {
5773 				cpycnt = i - di;
5774 				err = -ENOSPC;
5775 				break;
5776 			}
5777 		}
5778 
5779 		if (!cpycnt)
5780 			break;
5781 
5782 		/* Ensure that updates to the rx_std_buffers ring and the
5783 		 * shadowed hardware producer ring from tg3_recycle_rx() are
5784 		 * ordered correctly WRT the data check above.
5785 		 */
5786 		smp_rmb();
5787 
5788 		memcpy(&dpr->rx_std_buffers[di],
5789 		       &spr->rx_std_buffers[si],
5790 		       cpycnt * sizeof(struct ring_info));
5791 
5792 		for (i = 0; i < cpycnt; i++, di++, si++) {
5793 			struct tg3_rx_buffer_desc *sbd, *dbd;
5794 			sbd = &spr->rx_std[si];
5795 			dbd = &dpr->rx_std[di];
5796 			dbd->addr_hi = sbd->addr_hi;
5797 			dbd->addr_lo = sbd->addr_lo;
5798 		}
5799 
5800 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5801 				       tp->rx_std_ring_mask;
5802 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5803 				       tp->rx_std_ring_mask;
5804 	}
5805 
5806 	while (1) {
5807 		src_prod_idx = spr->rx_jmb_prod_idx;
5808 
5809 		/* Make sure updates to the rx_jmb_buffers[] entries and
5810 		 * the jumbo producer index are seen in the correct order.
5811 		 */
5812 		smp_rmb();
5813 
5814 		if (spr->rx_jmb_cons_idx == src_prod_idx)
5815 			break;
5816 
5817 		if (spr->rx_jmb_cons_idx < src_prod_idx)
5818 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5819 		else
5820 			cpycnt = tp->rx_jmb_ring_mask + 1 -
5821 				 spr->rx_jmb_cons_idx;
5822 
5823 		cpycnt = min(cpycnt,
5824 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5825 
5826 		si = spr->rx_jmb_cons_idx;
5827 		di = dpr->rx_jmb_prod_idx;
5828 
5829 		for (i = di; i < di + cpycnt; i++) {
5830 			if (dpr->rx_jmb_buffers[i].data) {
5831 				cpycnt = i - di;
5832 				err = -ENOSPC;
5833 				break;
5834 			}
5835 		}
5836 
5837 		if (!cpycnt)
5838 			break;
5839 
5840 		/* Ensure that updates to the rx_jmb_buffers ring and the
5841 		 * shadowed hardware producer ring from tg3_recycle_rx() are
5842 		 * ordered correctly WRT the data check above.
5843 		 */
5844 		smp_rmb();
5845 
5846 		memcpy(&dpr->rx_jmb_buffers[di],
5847 		       &spr->rx_jmb_buffers[si],
5848 		       cpycnt * sizeof(struct ring_info));
5849 
5850 		for (i = 0; i < cpycnt; i++, di++, si++) {
5851 			struct tg3_rx_buffer_desc *sbd, *dbd;
5852 			sbd = &spr->rx_jmb[si].std;
5853 			dbd = &dpr->rx_jmb[di].std;
5854 			dbd->addr_hi = sbd->addr_hi;
5855 			dbd->addr_lo = sbd->addr_lo;
5856 		}
5857 
5858 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5859 				       tp->rx_jmb_ring_mask;
5860 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5861 				       tp->rx_jmb_ring_mask;
5862 	}
5863 
5864 	return err;
5865 }
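
/* Editor's worked example for the copy-count math above, assuming a
 * 512-entry standard ring (rx_std_ring_mask = 511): with
 * rx_std_cons_idx = 500 and rx_std_prod_idx = 10 the producer has
 * wrapped, so the first pass copies 512 - 500 = 12 entries up to the
 * end of the ring and the next pass copies the remaining 10 from
 * index 0.  As a hypothetical helper:
 */
static inline u32 tg3_xfer_cpycnt(u32 cons_idx, u32 prod_idx, u32 mask)
{
	/* Entries available without crossing the end of the ring. */
	return (cons_idx < prod_idx) ? prod_idx - cons_idx
				     : mask + 1 - cons_idx;
}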
5866 
5867 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5868 {
5869 	struct tg3 *tp = tnapi->tp;
5870 
5871 	/* run TX completion thread */
5872 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5873 		tg3_tx(tnapi);
5874 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5875 			return work_done;
5876 	}
5877 
5878 	/* run RX thread, within the bounds set by NAPI.
5879 	 * All RX "locking" is done by ensuring outside
5880 	 * code synchronizes with tg3->napi.poll()
5881 	 */
5882 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5883 		work_done += tg3_rx(tnapi, budget - work_done);
5884 
5885 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5886 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5887 		int i, err = 0;
5888 		u32 std_prod_idx = dpr->rx_std_prod_idx;
5889 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5890 
5891 		for (i = 1; i < tp->irq_cnt; i++)
5892 			err |= tg3_rx_prodring_xfer(tp, dpr,
5893 						    &tp->napi[i].prodring);
5894 
5895 		wmb();
5896 
5897 		if (std_prod_idx != dpr->rx_std_prod_idx)
5898 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5899 				     dpr->rx_std_prod_idx);
5900 
5901 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5902 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5903 				     dpr->rx_jmb_prod_idx);
5904 
5905 		mmiowb();
5906 
5907 		if (err)
5908 			tw32_f(HOSTCC_MODE, tp->coal_now);
5909 	}
5910 
5911 	return work_done;
5912 }
5913 
5914 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5915 {
5916 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5917 		schedule_work(&tp->reset_task);
5918 }
5919 
5920 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5921 {
5922 	cancel_work_sync(&tp->reset_task);
5923 	tg3_flag_clear(tp, RESET_TASK_PENDING);
5924 }
5925 
5926 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5927 {
5928 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5929 	struct tg3 *tp = tnapi->tp;
5930 	int work_done = 0;
5931 	struct tg3_hw_status *sblk = tnapi->hw_status;
5932 
5933 	while (1) {
5934 		work_done = tg3_poll_work(tnapi, work_done, budget);
5935 
5936 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5937 			goto tx_recovery;
5938 
5939 		if (unlikely(work_done >= budget))
5940 			break;
5941 
5942 		/* tnapi->last_tag is used when re-enabling interrupts below
5943 		 * to tell the hw how much work has been processed,
5944 		 * so we must read it before checking for more work.
5945 		 */
5946 		tnapi->last_tag = sblk->status_tag;
5947 		tnapi->last_irq_tag = tnapi->last_tag;
5948 		rmb();
5949 
5950 		/* check for RX/TX work to do */
5951 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5952 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5953 			napi_complete(napi);
5954 			/* Reenable interrupts. */
5955 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5956 			mmiowb();
5957 			break;
5958 		}
5959 	}
5960 
5961 	return work_done;
5962 
5963 tx_recovery:
5964 	/* work_done is guaranteed to be less than budget. */
5965 	napi_complete(napi);
5966 	tg3_reset_task_schedule(tp);
5967 	return work_done;
5968 }
5969 
5970 static void tg3_process_error(struct tg3 *tp)
5971 {
5972 	u32 val;
5973 	bool real_error = false;
5974 
5975 	if (tg3_flag(tp, ERROR_PROCESSED))
5976 		return;
5977 
5978 	/* Check Flow Attention register */
5979 	val = tr32(HOSTCC_FLOW_ATTN);
5980 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5981 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5982 		real_error = true;
5983 	}
5984 
5985 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5986 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5987 		real_error = true;
5988 	}
5989 
5990 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5991 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5992 		real_error = true;
5993 	}
5994 
5995 	if (!real_error)
5996 		return;
5997 
5998 	tg3_dump_state(tp);
5999 
6000 	tg3_flag_set(tp, ERROR_PROCESSED);
6001 	tg3_reset_task_schedule(tp);
6002 }
6003 
6004 static int tg3_poll(struct napi_struct *napi, int budget)
6005 {
6006 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6007 	struct tg3 *tp = tnapi->tp;
6008 	int work_done = 0;
6009 	struct tg3_hw_status *sblk = tnapi->hw_status;
6010 
6011 	while (1) {
6012 		if (sblk->status & SD_STATUS_ERROR)
6013 			tg3_process_error(tp);
6014 
6015 		tg3_poll_link(tp);
6016 
6017 		work_done = tg3_poll_work(tnapi, work_done, budget);
6018 
6019 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6020 			goto tx_recovery;
6021 
6022 		if (unlikely(work_done >= budget))
6023 			break;
6024 
6025 		if (tg3_flag(tp, TAGGED_STATUS)) {
6026 			/* tnapi->last_tag is used in tg3_int_reenable() below
6027 			 * to tell the hw how much work has been processed,
6028 			 * so we must read it before checking for more work.
6029 			 */
6030 			tnapi->last_tag = sblk->status_tag;
6031 			tnapi->last_irq_tag = tnapi->last_tag;
6032 			rmb();
6033 		} else
6034 			sblk->status &= ~SD_STATUS_UPDATED;
6035 
6036 		if (likely(!tg3_has_work(tnapi))) {
6037 			napi_complete(napi);
6038 			tg3_int_reenable(tnapi);
6039 			break;
6040 		}
6041 	}
6042 
6043 	return work_done;
6044 
6045 tx_recovery:
6046 	/* work_done is guaranteed to be less than budget. */
6047 	napi_complete(napi);
6048 	tg3_reset_task_schedule(tp);
6049 	return work_done;
6050 }
6051 
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054 	int i;
6055 
6056 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6057 		napi_disable(&tp->napi[i].napi);
6058 }
6059 
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062 	int i;
6063 
6064 	for (i = 0; i < tp->irq_cnt; i++)
6065 		napi_enable(&tp->napi[i].napi);
6066 }
6067 
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070 	int i;
6071 
6072 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073 	for (i = 1; i < tp->irq_cnt; i++)
6074 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076 
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079 	int i;
6080 
6081 	for (i = 0; i < tp->irq_cnt; i++)
6082 		netif_napi_del(&tp->napi[i].napi);
6083 }
6084 
6085 static inline void tg3_netif_stop(struct tg3 *tp)
6086 {
6087 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6088 	tg3_napi_disable(tp);
6089 	netif_tx_disable(tp->dev);
6090 }
6091 
6092 static inline void tg3_netif_start(struct tg3 *tp)
6093 {
6094 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6095 	 * appropriate so long as all callers are assured to
6096 	 * have free tx slots (such as after tg3_init_hw)
6097 	 */
6098 	netif_tx_wake_all_queues(tp->dev);
6099 
6100 	tg3_napi_enable(tp);
6101 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6102 	tg3_enable_ints(tp);
6103 }
6104 
6105 static void tg3_irq_quiesce(struct tg3 *tp)
6106 {
6107 	int i;
6108 
6109 	BUG_ON(tp->irq_sync);
6110 
6111 	tp->irq_sync = 1;
6112 	smp_mb();
6113 
6114 	for (i = 0; i < tp->irq_cnt; i++)
6115 		synchronize_irq(tp->napi[i].irq_vec);
6116 }
6117 
6118 /* Fully shut down all tg3 driver activity elsewhere in the system.
6119  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6120  * as well.  Most of the time this is only necessary when
6121  * shutting down the device.
6122  */
6123 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6124 {
6125 	spin_lock_bh(&tp->lock);
6126 	if (irq_sync)
6127 		tg3_irq_quiesce(tp);
6128 }
6129 
6130 static inline void tg3_full_unlock(struct tg3 *tp)
6131 {
6132 	spin_unlock_bh(&tp->lock);
6133 }
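
/* Typical usage (editor's sketch, mirroring tg3_change_mtu() below):
 * configuration paths take the full lock, quiescing IRQs when the
 * hardware is about to be reset:
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */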
6134 
6135 /* One-shot MSI handler - the chip automatically disables the interrupt
6136  * after sending the MSI, so the driver doesn't have to do it.
6137  */
6138 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6139 {
6140 	struct tg3_napi *tnapi = dev_id;
6141 	struct tg3 *tp = tnapi->tp;
6142 
6143 	prefetch(tnapi->hw_status);
6144 	if (tnapi->rx_rcb)
6145 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6146 
6147 	if (likely(!tg3_irq_sync(tp)))
6148 		napi_schedule(&tnapi->napi);
6149 
6150 	return IRQ_HANDLED;
6151 }
6152 
6153 /* MSI ISR - No need to check for interrupt sharing and no need to
6154  * flush status block and interrupt mailbox. PCI ordering rules
6155  * guarantee that MSI will arrive after the status block.
6156  */
6157 static irqreturn_t tg3_msi(int irq, void *dev_id)
6158 {
6159 	struct tg3_napi *tnapi = dev_id;
6160 	struct tg3 *tp = tnapi->tp;
6161 
6162 	prefetch(tnapi->hw_status);
6163 	if (tnapi->rx_rcb)
6164 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165 	/*
6166 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6167 	 * chip-internal interrupt pending events.
6168 	 * Writing non-zero to intr-mbox-0 additionally tells the
6169 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6170 	 * event coalescing.
6171 	 */
6172 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6173 	if (likely(!tg3_irq_sync(tp)))
6174 		napi_schedule(&tnapi->napi);
6175 
6176 	return IRQ_RETVAL(1);
6177 }
6178 
6179 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6180 {
6181 	struct tg3_napi *tnapi = dev_id;
6182 	struct tg3 *tp = tnapi->tp;
6183 	struct tg3_hw_status *sblk = tnapi->hw_status;
6184 	unsigned int handled = 1;
6185 
6186 	/* In INTx mode, it is possible for the interrupt to arrive at
6187 	 * the CPU before the status block posted prior to the interrupt
6188 	 * becomes visible.  Reading the PCI State register will confirm
6189 	 * whether the interrupt is ours and will flush the status block.
6190 	 */
6191 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6192 		if (tg3_flag(tp, CHIP_RESETTING) ||
6193 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6194 			handled = 0;
6195 			goto out;
6196 		}
6197 	}
6198 
6199 	/*
6200 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6201 	 * chip-internal interrupt pending events.
6202 	 * Writing non-zero to intr-mbox-0 additionally tells the
6203 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6204 	 * event coalescing.
6205 	 *
6206 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6207 	 * spurious interrupts.  The flush impacts performance but
6208 	 * excessive spurious interrupts can be worse in some cases.
6209 	 */
6210 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6211 	if (tg3_irq_sync(tp))
6212 		goto out;
6213 	sblk->status &= ~SD_STATUS_UPDATED;
6214 	if (likely(tg3_has_work(tnapi))) {
6215 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6216 		napi_schedule(&tnapi->napi);
6217 	} else {
6218 		/* No work, shared interrupt perhaps?  re-enable
6219 		 * interrupts, and flush that PCI write
6220 		 */
6221 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6222 			       0x00000000);
6223 	}
6224 out:
6225 	return IRQ_RETVAL(handled);
6226 }
6227 
6228 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6229 {
6230 	struct tg3_napi *tnapi = dev_id;
6231 	struct tg3 *tp = tnapi->tp;
6232 	struct tg3_hw_status *sblk = tnapi->hw_status;
6233 	unsigned int handled = 1;
6234 
6235 	/* In INTx mode, it is possible for the interrupt to arrive at
6236 	 * the CPU before the status block posted prior to the interrupt
6237 	 * becomes visible.  Reading the PCI State register will confirm
6238 	 * whether the interrupt is ours and will flush the status block.
6239 	 */
6240 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6241 		if (tg3_flag(tp, CHIP_RESETTING) ||
6242 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6243 			handled = 0;
6244 			goto out;
6245 		}
6246 	}
6247 
6248 	/*
6249 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6250 	 * chip-internal interrupt pending events.
6251 	 * Writing non-zero to intr-mbox-0 additionally tells the
6252 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6253 	 * event coalescing.
6254 	 *
6255 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6256 	 * spurious interrupts.  The flush impacts performance but
6257 	 * excessive spurious interrupts can be worse in some cases.
6258 	 */
6259 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6260 
6261 	/*
6262 	 * In a shared interrupt configuration, sometimes other devices'
6263 	 * interrupts will scream.  We record the current status tag here
6264 	 * so that the above check can report that the screaming interrupts
6265 	 * are unhandled.  Eventually they will be silenced.
6266 	 */
6267 	tnapi->last_irq_tag = sblk->status_tag;
6268 
6269 	if (tg3_irq_sync(tp))
6270 		goto out;
6271 
6272 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6273 
6274 	napi_schedule(&tnapi->napi);
6275 
6276 out:
6277 	return IRQ_RETVAL(handled);
6278 }
6279 
6280 /* ISR for interrupt test */
6281 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6282 {
6283 	struct tg3_napi *tnapi = dev_id;
6284 	struct tg3 *tp = tnapi->tp;
6285 	struct tg3_hw_status *sblk = tnapi->hw_status;
6286 
6287 	if ((sblk->status & SD_STATUS_UPDATED) ||
6288 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6289 		tg3_disable_ints(tp);
6290 		return IRQ_RETVAL(1);
6291 	}
6292 	return IRQ_RETVAL(0);
6293 }
6294 
6295 static int tg3_init_hw(struct tg3 *, int);
6296 static int tg3_halt(struct tg3 *, int, int);
6297 
6298 /* Restart hardware after configuration changes, self-test, etc.
6299  * Invoked with tp->lock held.
6300  */
6301 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6302 	__releases(tp->lock)
6303 	__acquires(tp->lock)
6304 {
6305 	int err;
6306 
6307 	err = tg3_init_hw(tp, reset_phy);
6308 	if (err) {
6309 		netdev_err(tp->dev,
6310 			   "Failed to re-initialize device, aborting\n");
6311 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6312 		tg3_full_unlock(tp);
6313 		del_timer_sync(&tp->timer);
6314 		tp->irq_sync = 0;
6315 		tg3_napi_enable(tp);
6316 		dev_close(tp->dev);
6317 		tg3_full_lock(tp, 0);
6318 	}
6319 	return err;
6320 }
6321 
6322 #ifdef CONFIG_NET_POLL_CONTROLLER
6323 static void tg3_poll_controller(struct net_device *dev)
6324 {
6325 	int i;
6326 	struct tg3 *tp = netdev_priv(dev);
6327 
6328 	for (i = 0; i < tp->irq_cnt; i++)
6329 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6330 }
6331 #endif
6332 
6333 static void tg3_reset_task(struct work_struct *work)
6334 {
6335 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
6336 	int err;
6337 
6338 	tg3_full_lock(tp, 0);
6339 
6340 	if (!netif_running(tp->dev)) {
6341 		tg3_flag_clear(tp, RESET_TASK_PENDING);
6342 		tg3_full_unlock(tp);
6343 		return;
6344 	}
6345 
6346 	tg3_full_unlock(tp);
6347 
6348 	tg3_phy_stop(tp);
6349 
6350 	tg3_netif_stop(tp);
6351 
6352 	tg3_full_lock(tp, 1);
6353 
6354 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6355 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
6356 		tp->write32_rx_mbox = tg3_write_flush_reg32;
6357 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
6358 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6359 	}
6360 
6361 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6362 	err = tg3_init_hw(tp, 1);
6363 	if (err)
6364 		goto out;
6365 
6366 	tg3_netif_start(tp);
6367 
6368 out:
6369 	tg3_full_unlock(tp);
6370 
6371 	if (!err)
6372 		tg3_phy_start(tp);
6373 
6374 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6375 }
6376 
6377 static void tg3_tx_timeout(struct net_device *dev)
6378 {
6379 	struct tg3 *tp = netdev_priv(dev);
6380 
6381 	if (netif_msg_tx_err(tp)) {
6382 		netdev_err(dev, "transmit timed out, resetting\n");
6383 		tg3_dump_state(tp);
6384 	}
6385 
6386 	tg3_reset_task_schedule(tp);
6387 }
6388 
6389 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6390 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6391 {
6392 	u32 base = (u32) mapping & 0xffffffff;
6393 
6394 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6395 }
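
/* Editor's worked example: with base = 0xffffff00 and len = 0x200,
 * base + len + 8 = 0x100000108, which truncates to 0x108 in 32-bit
 * arithmetic; 0x108 < base, so the test reports a 4GB crossing.  The
 * base > 0xffffdcc0 pre-check (9024 bytes below a 4GB boundary)
 * quickly rejects buffers that start too low to wrap given the
 * driver's maximum frame size.
 */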
6396 
6397 /* Test for DMA addresses > 40-bit */
6398 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6399 					  int len)
6400 {
6401 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6402 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6403 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6404 	return 0;
6405 #else
6406 	return 0;
6407 #endif
6408 }
6409 
6410 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6411 				 dma_addr_t mapping, u32 len, u32 flags,
6412 				 u32 mss, u32 vlan)
6413 {
6414 	txbd->addr_hi = ((u64) mapping >> 32);
6415 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6416 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6417 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6418 }
6419 
6420 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6421 			    dma_addr_t map, u32 len, u32 flags,
6422 			    u32 mss, u32 vlan)
6423 {
6424 	struct tg3 *tp = tnapi->tp;
6425 	bool hwbug = false;
6426 
6427 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6428 		hwbug = true;
6429 
6430 	if (tg3_4g_overflow_test(map, len))
6431 		hwbug = true;
6432 
6433 	if (tg3_40bit_overflow_test(tp, map, len))
6434 		hwbug = true;
6435 
6436 	if (tp->dma_limit) {
6437 		u32 prvidx = *entry;
6438 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6439 		while (len > tp->dma_limit && *budget) {
6440 			u32 frag_len = tp->dma_limit;
6441 			len -= tp->dma_limit;
6442 
6443 			/* Avoid the 8-byte DMA problem */
6444 			if (len <= 8) {
6445 				len += tp->dma_limit / 2;
6446 				frag_len = tp->dma_limit / 2;
6447 			}
6448 
6449 			tnapi->tx_buffers[*entry].fragmented = true;
6450 
6451 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6452 				      frag_len, tmp_flag, mss, vlan);
6453 			*budget -= 1;
6454 			prvidx = *entry;
6455 			*entry = NEXT_TX(*entry);
6456 
6457 			map += frag_len;
6458 		}
6459 
6460 		if (len) {
6461 			if (*budget) {
6462 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6463 					      len, flags, mss, vlan);
6464 				*budget -= 1;
6465 				*entry = NEXT_TX(*entry);
6466 			} else {
6467 				hwbug = true;
6468 				tnapi->tx_buffers[prvidx].fragmented = false;
6469 			}
6470 		}
6471 	} else {
6472 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6473 			      len, flags, mss, vlan);
6474 		*entry = NEXT_TX(*entry);
6475 	}
6476 
6477 	return hwbug;
6478 }
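
/* Editor's worked example for the dma_limit split above: assume
 * tp->dma_limit = 4096 and a 4100-byte fragment.  Emitting a full
 * 4096-byte BD would leave a 4-byte remainder and trip the <= 8 byte
 * DMA bug, so that pass emits dma_limit / 2 = 2048 bytes instead,
 * leaving 2052 bytes that the final descriptor can carry safely
 * (2048 + 2052 = 4100).
 */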
6479 
6480 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6481 {
6482 	int i;
6483 	struct sk_buff *skb;
6484 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6485 
6486 	skb = txb->skb;
6487 	txb->skb = NULL;
6488 
6489 	pci_unmap_single(tnapi->tp->pdev,
6490 			 dma_unmap_addr(txb, mapping),
6491 			 skb_headlen(skb),
6492 			 PCI_DMA_TODEVICE);
6493 
6494 	while (txb->fragmented) {
6495 		txb->fragmented = false;
6496 		entry = NEXT_TX(entry);
6497 		txb = &tnapi->tx_buffers[entry];
6498 	}
6499 
6500 	for (i = 0; i <= last; i++) {
6501 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6502 
6503 		entry = NEXT_TX(entry);
6504 		txb = &tnapi->tx_buffers[entry];
6505 
6506 		pci_unmap_page(tnapi->tp->pdev,
6507 			       dma_unmap_addr(txb, mapping),
6508 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6509 
6510 		while (txb->fragmented) {
6511 			txb->fragmented = false;
6512 			entry = NEXT_TX(entry);
6513 			txb = &tnapi->tx_buffers[entry];
6514 		}
6515 	}
6516 }
6517 
6518 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6519 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6520 				       struct sk_buff **pskb,
6521 				       u32 *entry, u32 *budget,
6522 				       u32 base_flags, u32 mss, u32 vlan)
6523 {
6524 	struct tg3 *tp = tnapi->tp;
6525 	struct sk_buff *new_skb, *skb = *pskb;
6526 	dma_addr_t new_addr = 0;
6527 	int ret = 0;
6528 
6529 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6530 		new_skb = skb_copy(skb, GFP_ATOMIC);
6531 	else {
6532 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6533 
6534 		new_skb = skb_copy_expand(skb,
6535 					  skb_headroom(skb) + more_headroom,
6536 					  skb_tailroom(skb), GFP_ATOMIC);
6537 	}
6538 
6539 	if (!new_skb) {
6540 		ret = -1;
6541 	} else {
6542 		/* New SKB is guaranteed to be linear. */
6543 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6544 					  PCI_DMA_TODEVICE);
6545 		/* Make sure the mapping succeeded */
6546 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6547 			dev_kfree_skb(new_skb);
6548 			ret = -1;
6549 		} else {
6550 			u32 save_entry = *entry;
6551 
6552 			base_flags |= TXD_FLAG_END;
6553 
6554 			tnapi->tx_buffers[*entry].skb = new_skb;
6555 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6556 					   mapping, new_addr);
6557 
6558 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6559 					    new_skb->len, base_flags,
6560 					    mss, vlan)) {
6561 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6562 				dev_kfree_skb(new_skb);
6563 				ret = -1;
6564 			}
6565 		}
6566 	}
6567 
6568 	dev_kfree_skb(skb);
6569 	*pskb = new_skb;
6570 	return ret;
6571 }
6572 
6573 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6574 
6575 /* Use GSO to work around a rare TSO bug that may be triggered when the
6576  * TSO header is greater than 80 bytes.
6577  */
6578 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6579 {
6580 	struct sk_buff *segs, *nskb;
6581 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6582 
6583 	/* Estimate the number of fragments in the worst case */
6584 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6585 		netif_stop_queue(tp->dev);
6586 
6587 		/* netif_tx_stop_queue() must be done before checking
6588 		 * the tx index in tg3_tx_avail() below, because in
6589 		 * tg3_tx(), we update tx index before checking for
6590 		 * netif_tx_queue_stopped().
6591 		 */
6592 		smp_mb();
6593 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6594 			return NETDEV_TX_BUSY;
6595 
6596 		netif_wake_queue(tp->dev);
6597 	}
6598 
6599 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6600 	if (IS_ERR(segs))
6601 		goto tg3_tso_bug_end;
6602 
6603 	do {
6604 		nskb = segs;
6605 		segs = segs->next;
6606 		nskb->next = NULL;
6607 		tg3_start_xmit(nskb, tp->dev);
6608 	} while (segs);
6609 
6610 tg3_tso_bug_end:
6611 	dev_kfree_skb(skb);
6612 
6613 	return NETDEV_TX_OK;
6614 }
6615 
6616 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6617  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6618  */
6619 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6620 {
6621 	struct tg3 *tp = netdev_priv(dev);
6622 	u32 len, entry, base_flags, mss, vlan = 0;
6623 	u32 budget;
6624 	int i = -1, would_hit_hwbug;
6625 	dma_addr_t mapping;
6626 	struct tg3_napi *tnapi;
6627 	struct netdev_queue *txq;
6628 	unsigned int last;
6629 
6630 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6631 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6632 	if (tg3_flag(tp, ENABLE_TSS))
6633 		tnapi++;
6634 
6635 	budget = tg3_tx_avail(tnapi);
6636 
6637 	/* We are running in BH disabled context with netif_tx_lock
6638 	 * and TX reclaim runs via tp->napi.poll inside of a software
6639 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6640 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6641 	 */
6642 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6643 		if (!netif_tx_queue_stopped(txq)) {
6644 			netif_tx_stop_queue(txq);
6645 
6646 			/* This is a hard error, log it. */
6647 			netdev_err(dev,
6648 				   "BUG! Tx Ring full when queue awake!\n");
6649 		}
6650 		return NETDEV_TX_BUSY;
6651 	}
6652 
6653 	entry = tnapi->tx_prod;
6654 	base_flags = 0;
6655 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6656 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6657 
6658 	mss = skb_shinfo(skb)->gso_size;
6659 	if (mss) {
6660 		struct iphdr *iph;
6661 		u32 tcp_opt_len, hdr_len;
6662 
6663 		if (skb_header_cloned(skb) &&
6664 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6665 			goto drop;
6666 
6667 		iph = ip_hdr(skb);
6668 		tcp_opt_len = tcp_optlen(skb);
6669 
6670 		if (skb_is_gso_v6(skb)) {
6671 			hdr_len = skb_headlen(skb) - ETH_HLEN;
6672 		} else {
6673 			u32 ip_tcp_len;
6674 
6675 			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6676 			hdr_len = ip_tcp_len + tcp_opt_len;
6677 
6678 			iph->check = 0;
6679 			iph->tot_len = htons(mss + hdr_len);
6680 		}
6681 
6682 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6683 		    tg3_flag(tp, TSO_BUG))
6684 			return tg3_tso_bug(tp, skb);
6685 
6686 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6687 			       TXD_FLAG_CPU_POST_DMA);
6688 
6689 		if (tg3_flag(tp, HW_TSO_1) ||
6690 		    tg3_flag(tp, HW_TSO_2) ||
6691 		    tg3_flag(tp, HW_TSO_3)) {
6692 			tcp_hdr(skb)->check = 0;
6693 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6694 		} else
6695 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6696 								 iph->daddr, 0,
6697 								 IPPROTO_TCP,
6698 								 0);
6699 
6700 		if (tg3_flag(tp, HW_TSO_3)) {
6701 			mss |= (hdr_len & 0xc) << 12;
6702 			if (hdr_len & 0x10)
6703 				base_flags |= 0x00000010;
6704 			base_flags |= (hdr_len & 0x3e0) << 5;
6705 		} else if (tg3_flag(tp, HW_TSO_2))
6706 			mss |= hdr_len << 9;
6707 		else if (tg3_flag(tp, HW_TSO_1) ||
6708 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6709 			if (tcp_opt_len || iph->ihl > 5) {
6710 				int tsflags;
6711 
6712 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6713 				mss |= (tsflags << 11);
6714 			}
6715 		} else {
6716 			if (tcp_opt_len || iph->ihl > 5) {
6717 				int tsflags;
6718 
6719 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6720 				base_flags |= tsflags << 12;
6721 			}
6722 		}
6723 	}
6724 
6725 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6726 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6727 		base_flags |= TXD_FLAG_JMB_PKT;
6728 
6729 	if (vlan_tx_tag_present(skb)) {
6730 		base_flags |= TXD_FLAG_VLAN;
6731 		vlan = vlan_tx_tag_get(skb);
6732 	}
6733 
6734 	len = skb_headlen(skb);
6735 
6736 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6737 	if (pci_dma_mapping_error(tp->pdev, mapping))
6738 		goto drop;
6739 
6741 	tnapi->tx_buffers[entry].skb = skb;
6742 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6743 
6744 	would_hit_hwbug = 0;
6745 
6746 	if (tg3_flag(tp, 5701_DMA_BUG))
6747 		would_hit_hwbug = 1;
6748 
6749 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6750 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6751 			    mss, vlan)) {
6752 		would_hit_hwbug = 1;
6753 	/* Now loop through additional data fragments, and queue them. */
6754 	} else if (skb_shinfo(skb)->nr_frags > 0) {
6755 		u32 tmp_mss = mss;
6756 
6757 		if (!tg3_flag(tp, HW_TSO_1) &&
6758 		    !tg3_flag(tp, HW_TSO_2) &&
6759 		    !tg3_flag(tp, HW_TSO_3))
6760 			tmp_mss = 0;
6761 
6762 		last = skb_shinfo(skb)->nr_frags - 1;
6763 		for (i = 0; i <= last; i++) {
6764 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6765 
6766 			len = skb_frag_size(frag);
6767 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6768 						   len, DMA_TO_DEVICE);
6769 
6770 			tnapi->tx_buffers[entry].skb = NULL;
6771 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6772 					   mapping);
6773 			if (dma_mapping_error(&tp->pdev->dev, mapping))
6774 				goto dma_error;
6775 
6776 			if (!budget ||
6777 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6778 					    len, base_flags |
6779 					    ((i == last) ? TXD_FLAG_END : 0),
6780 					    tmp_mss, vlan)) {
6781 				would_hit_hwbug = 1;
6782 				break;
6783 			}
6784 		}
6785 	}
6786 
6787 	if (would_hit_hwbug) {
6788 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6789 
6790 		/* If the workaround fails due to memory/mapping
6791 		 * failure, silently drop this packet.
6792 		 */
6793 		entry = tnapi->tx_prod;
6794 		budget = tg3_tx_avail(tnapi);
6795 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6796 						base_flags, mss, vlan))
6797 			goto drop_nofree;
6798 	}
6799 
6800 	skb_tx_timestamp(skb);
6801 	netdev_sent_queue(tp->dev, skb->len);
6802 
6803 	/* Packets are ready, update Tx producer idx local and on card. */
6804 	tw32_tx_mbox(tnapi->prodmbox, entry);
6805 
6806 	tnapi->tx_prod = entry;
6807 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6808 		netif_tx_stop_queue(txq);
6809 
6810 		/* netif_tx_stop_queue() must be done before checking
6811 		 * the tx index in tg3_tx_avail() below, because in
6812 		 * tg3_tx(), we update tx index before checking for
6813 		 * netif_tx_queue_stopped().
6814 		 */
6815 		smp_mb();
6816 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6817 			netif_tx_wake_queue(txq);
6818 	}
6819 
6820 	mmiowb();
6821 	return NETDEV_TX_OK;
6822 
6823 dma_error:
6824 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6825 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6826 drop:
6827 	dev_kfree_skb(skb);
6828 drop_nofree:
6829 	tp->tx_dropped++;
6830 	return NETDEV_TX_OK;
6831 }
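
/* Editor's note on the stop/wake protocol above: the producer stops
 * the queue, executes a full barrier, then re-checks tg3_tx_avail(),
 * while the consumer (tg3_tx()) updates the consumer index before
 * testing netif_tx_queue_stopped().  This pairing guarantees at least
 * one side observes the other's update, so the queue can never remain
 * stopped while enough descriptors are free.
 */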
6832 
6833 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6834 {
6835 	if (enable) {
6836 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6837 				  MAC_MODE_PORT_MODE_MASK);
6838 
6839 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6840 
6841 		if (!tg3_flag(tp, 5705_PLUS))
6842 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6843 
6844 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6845 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6846 		else
6847 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6848 	} else {
6849 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6850 
6851 		if (tg3_flag(tp, 5705_PLUS) ||
6852 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6853 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6854 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6855 	}
6856 
6857 	tw32(MAC_MODE, tp->mac_mode);
6858 	udelay(40);
6859 }
6860 
6861 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6862 {
6863 	u32 val, bmcr, mac_mode, ptest = 0;
6864 
6865 	tg3_phy_toggle_apd(tp, false);
6866 	tg3_phy_toggle_automdix(tp, 0);
6867 
6868 	if (extlpbk && tg3_phy_set_extloopbk(tp))
6869 		return -EIO;
6870 
6871 	bmcr = BMCR_FULLDPLX;
6872 	switch (speed) {
6873 	case SPEED_10:
6874 		break;
6875 	case SPEED_100:
6876 		bmcr |= BMCR_SPEED100;
6877 		break;
6878 	case SPEED_1000:
6879 	default:
6880 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6881 			speed = SPEED_100;
6882 			bmcr |= BMCR_SPEED100;
6883 		} else {
6884 			speed = SPEED_1000;
6885 			bmcr |= BMCR_SPEED1000;
6886 		}
6887 	}
6888 
6889 	if (extlpbk) {
6890 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6891 			tg3_readphy(tp, MII_CTRL1000, &val);
6892 			val |= CTL1000_AS_MASTER |
6893 			       CTL1000_ENABLE_MASTER;
6894 			tg3_writephy(tp, MII_CTRL1000, val);
6895 		} else {
6896 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6897 				MII_TG3_FET_PTEST_TRIM_2;
6898 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6899 		}
6900 	} else
6901 		bmcr |= BMCR_LOOPBACK;
6902 
6903 	tg3_writephy(tp, MII_BMCR, bmcr);
6904 
6905 	/* The write needs to be flushed for the FETs */
6906 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6907 		tg3_readphy(tp, MII_BMCR, &bmcr);
6908 
6909 	udelay(40);
6910 
6911 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6912 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6913 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6914 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
6915 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
6916 
6917 		/* The write needs to be flushed for the AC131 */
6918 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6919 	}
6920 
6921 	/* Reset to prevent losing 1st rx packet intermittently */
6922 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6923 	    tg3_flag(tp, 5780_CLASS)) {
6924 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6925 		udelay(10);
6926 		tw32_f(MAC_RX_MODE, tp->rx_mode);
6927 	}
6928 
6929 	mac_mode = tp->mac_mode &
6930 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6931 	if (speed == SPEED_1000)
6932 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
6933 	else
6934 		mac_mode |= MAC_MODE_PORT_MODE_MII;
6935 
6936 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6937 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6938 
6939 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
6940 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
6941 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6942 			mac_mode |= MAC_MODE_LINK_POLARITY;
6943 
6944 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
6945 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6946 	}
6947 
6948 	tw32(MAC_MODE, mac_mode);
6949 	udelay(40);
6950 
6951 	return 0;
6952 }
6953 
6954 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6955 {
6956 	struct tg3 *tp = netdev_priv(dev);
6957 
6958 	if (features & NETIF_F_LOOPBACK) {
6959 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6960 			return;
6961 
6962 		spin_lock_bh(&tp->lock);
6963 		tg3_mac_loopback(tp, true);
6964 		netif_carrier_on(tp->dev);
6965 		spin_unlock_bh(&tp->lock);
6966 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6967 	} else {
6968 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6969 			return;
6970 
6971 		spin_lock_bh(&tp->lock);
6972 		tg3_mac_loopback(tp, false);
6973 		/* Force link status check */
6974 		tg3_setup_phy(tp, 1);
6975 		spin_unlock_bh(&tp->lock);
6976 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6977 	}
6978 }
6979 
6980 static netdev_features_t tg3_fix_features(struct net_device *dev,
6981 	netdev_features_t features)
6982 {
6983 	struct tg3 *tp = netdev_priv(dev);
6984 
6985 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6986 		features &= ~NETIF_F_ALL_TSO;
6987 
6988 	return features;
6989 }
6990 
6991 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6992 {
6993 	netdev_features_t changed = dev->features ^ features;
6994 
6995 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6996 		tg3_set_loopback(dev, features);
6997 
6998 	return 0;
6999 }
7000 
7001 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7002 			       int new_mtu)
7003 {
7004 	dev->mtu = new_mtu;
7005 
7006 	if (new_mtu > ETH_DATA_LEN) {
7007 		if (tg3_flag(tp, 5780_CLASS)) {
7008 			netdev_update_features(dev);
7009 			tg3_flag_clear(tp, TSO_CAPABLE);
7010 		} else {
7011 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
7012 		}
7013 	} else {
7014 		if (tg3_flag(tp, 5780_CLASS)) {
7015 			tg3_flag_set(tp, TSO_CAPABLE);
7016 			netdev_update_features(dev);
7017 		}
7018 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7019 	}
7020 }
7021 
7022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7023 {
7024 	struct tg3 *tp = netdev_priv(dev);
7025 	int err;
7026 
7027 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7028 		return -EINVAL;
7029 
7030 	if (!netif_running(dev)) {
7031 		/* We'll just catch it later when the
7032 		 * device is brought up.
7033 		 */
7034 		tg3_set_mtu(dev, tp, new_mtu);
7035 		return 0;
7036 	}
7037 
7038 	tg3_phy_stop(tp);
7039 
7040 	tg3_netif_stop(tp);
7041 
7042 	tg3_full_lock(tp, 1);
7043 
7044 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7045 
7046 	tg3_set_mtu(dev, tp, new_mtu);
7047 
7048 	err = tg3_restart_hw(tp, 0);
7049 
7050 	if (!err)
7051 		tg3_netif_start(tp);
7052 
7053 	tg3_full_unlock(tp);
7054 
7055 	if (!err)
7056 		tg3_phy_start(tp);
7057 
7058 	return err;
7059 }
7060 
7061 static void tg3_rx_prodring_free(struct tg3 *tp,
7062 				 struct tg3_rx_prodring_set *tpr)
7063 {
7064 	int i;
7065 
7066 	if (tpr != &tp->napi[0].prodring) {
7067 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7068 		     i = (i + 1) & tp->rx_std_ring_mask)
7069 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7070 					tp->rx_pkt_map_sz);
7071 
7072 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7073 			for (i = tpr->rx_jmb_cons_idx;
7074 			     i != tpr->rx_jmb_prod_idx;
7075 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7076 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7077 						TG3_RX_JMB_MAP_SZ);
7078 			}
7079 		}
7080 
7081 		return;
7082 	}
7083 
7084 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7085 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7086 				tp->rx_pkt_map_sz);
7087 
7088 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7089 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7090 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7091 					TG3_RX_JMB_MAP_SZ);
7092 	}
7093 }
7094 
7095 /* Initialize rx rings for packet processing.
7096  *
7097  * The chip has been shut down and the driver detached from
7098  * the networking, so no interrupts or new tx packets will
7099  * the networking stack, so no interrupts or new tx packets will
7100  * we may not sleep.
7101  */
7102 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7103 				 struct tg3_rx_prodring_set *tpr)
7104 {
7105 	u32 i, rx_pkt_dma_sz;
7106 
7107 	tpr->rx_std_cons_idx = 0;
7108 	tpr->rx_std_prod_idx = 0;
7109 	tpr->rx_jmb_cons_idx = 0;
7110 	tpr->rx_jmb_prod_idx = 0;
7111 
7112 	if (tpr != &tp->napi[0].prodring) {
7113 		memset(&tpr->rx_std_buffers[0], 0,
7114 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7115 		if (tpr->rx_jmb_buffers)
7116 			memset(&tpr->rx_jmb_buffers[0], 0,
7117 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7118 		goto done;
7119 	}
7120 
7121 	/* Zero out all descriptors. */
7122 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7123 
7124 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7125 	if (tg3_flag(tp, 5780_CLASS) &&
7126 	    tp->dev->mtu > ETH_DATA_LEN)
7127 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7128 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7129 
7130 	/* Initialize invariants of the rings; we only set this
7131 	 * stuff once.  This works because the card does not
7132 	 * write into the rx buffer posting rings.
7133 	 */
7134 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7135 		struct tg3_rx_buffer_desc *rxd;
7136 
7137 		rxd = &tpr->rx_std[i];
7138 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7139 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7140 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7141 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7142 	}
7143 
7144 	/* Now allocate fresh rx data buffers for each rx ring. */
7145 	for (i = 0; i < tp->rx_pending; i++) {
7146 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7147 			netdev_warn(tp->dev,
7148 				    "Using a smaller RX standard ring. Only "
7149 				    "%d out of %d buffers were allocated "
7150 				    "successfully\n", i, tp->rx_pending);
7151 			if (i == 0)
7152 				goto initfail;
7153 			tp->rx_pending = i;
7154 			break;
7155 		}
7156 	}
7157 
7158 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7159 		goto done;
7160 
7161 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7162 
7163 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7164 		goto done;
7165 
7166 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7167 		struct tg3_rx_buffer_desc *rxd;
7168 
7169 		rxd = &tpr->rx_jmb[i].std;
7170 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7171 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7172 				  RXD_FLAG_JUMBO;
7173 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7174 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7175 	}
7176 
7177 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7178 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7179 			netdev_warn(tp->dev,
7180 				    "Using a smaller RX jumbo ring. Only %d "
7181 				    "out of %d buffers were allocated "
7182 				    "successfully\n", i, tp->rx_jumbo_pending);
7183 			if (i == 0)
7184 				goto initfail;
7185 			tp->rx_jumbo_pending = i;
7186 			break;
7187 		}
7188 	}
7189 
7190 done:
7191 	return 0;
7192 
7193 initfail:
7194 	tg3_rx_prodring_free(tp, tpr);
7195 	return -ENOMEM;
7196 }
7197 
7198 static void tg3_rx_prodring_fini(struct tg3 *tp,
7199 				 struct tg3_rx_prodring_set *tpr)
7200 {
7201 	kfree(tpr->rx_std_buffers);
7202 	tpr->rx_std_buffers = NULL;
7203 	kfree(tpr->rx_jmb_buffers);
7204 	tpr->rx_jmb_buffers = NULL;
7205 	if (tpr->rx_std) {
7206 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7207 				  tpr->rx_std, tpr->rx_std_mapping);
7208 		tpr->rx_std = NULL;
7209 	}
7210 	if (tpr->rx_jmb) {
7211 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7212 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7213 		tpr->rx_jmb = NULL;
7214 	}
7215 }
7216 
7217 static int tg3_rx_prodring_init(struct tg3 *tp,
7218 				struct tg3_rx_prodring_set *tpr)
7219 {
7220 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7221 				      GFP_KERNEL);
7222 	if (!tpr->rx_std_buffers)
7223 		return -ENOMEM;
7224 
7225 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7226 					 TG3_RX_STD_RING_BYTES(tp),
7227 					 &tpr->rx_std_mapping,
7228 					 GFP_KERNEL);
7229 	if (!tpr->rx_std)
7230 		goto err_out;
7231 
7232 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7233 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7234 					      GFP_KERNEL);
7235 		if (!tpr->rx_jmb_buffers)
7236 			goto err_out;
7237 
7238 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7239 						 TG3_RX_JMB_RING_BYTES(tp),
7240 						 &tpr->rx_jmb_mapping,
7241 						 GFP_KERNEL);
7242 		if (!tpr->rx_jmb)
7243 			goto err_out;
7244 	}
7245 
7246 	return 0;
7247 
7248 err_out:
7249 	tg3_rx_prodring_fini(tp, tpr);
7250 	return -ENOMEM;
7251 }
7252 
7253 /* Free up pending packets in all rx/tx rings.
7254  *
7255  * The chip has been shut down and the driver detached from
7256  * the networking stack, so no interrupts or new tx packets will
7257  * end up in the driver.  tp->{tx,}lock is not held and we are not
7258  * in an interrupt context and thus may sleep.
7259  */
7260 static void tg3_free_rings(struct tg3 *tp)
7261 {
7262 	int i, j;
7263 
7264 	for (j = 0; j < tp->irq_cnt; j++) {
7265 		struct tg3_napi *tnapi = &tp->napi[j];
7266 
7267 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7268 
7269 		if (!tnapi->tx_buffers)
7270 			continue;
7271 
7272 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7273 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7274 
7275 			if (!skb)
7276 				continue;
7277 
7278 			tg3_tx_skb_unmap(tnapi, i,
7279 					 skb_shinfo(skb)->nr_frags - 1);
7280 
7281 			dev_kfree_skb_any(skb);
7282 		}
7283 	}
7284 	netdev_reset_queue(tp->dev);
7285 }
7286 
7287 /* Initialize tx/rx rings for packet processing.
7288  *
7289  * The chip has been shut down and the driver detached from
7290  * the networking stack, so no interrupts or new tx packets will
7291  * end up in the driver.  tp->{tx,}lock are held and thus
7292  * we may not sleep.
7293  */
7294 static int tg3_init_rings(struct tg3 *tp)
7295 {
7296 	int i;
7297 
7298 	/* Free up all the SKBs. */
7299 	tg3_free_rings(tp);
7300 
7301 	for (i = 0; i < tp->irq_cnt; i++) {
7302 		struct tg3_napi *tnapi = &tp->napi[i];
7303 
7304 		tnapi->last_tag = 0;
7305 		tnapi->last_irq_tag = 0;
7306 		tnapi->hw_status->status = 0;
7307 		tnapi->hw_status->status_tag = 0;
7308 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7309 
7310 		tnapi->tx_prod = 0;
7311 		tnapi->tx_cons = 0;
7312 		if (tnapi->tx_ring)
7313 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7314 
7315 		tnapi->rx_rcb_ptr = 0;
7316 		if (tnapi->rx_rcb)
7317 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7318 
7319 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7320 			tg3_free_rings(tp);
7321 			return -ENOMEM;
7322 		}
7323 	}
7324 
7325 	return 0;
7326 }
7327 
7328 /*
7329  * Must not be invoked with interrupt sources disabled and
7330  * the hardware shut down.
7331  */
7332 static void tg3_free_consistent(struct tg3 *tp)
7333 {
7334 	int i;
7335 
7336 	for (i = 0; i < tp->irq_cnt; i++) {
7337 		struct tg3_napi *tnapi = &tp->napi[i];
7338 
7339 		if (tnapi->tx_ring) {
7340 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7341 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7342 			tnapi->tx_ring = NULL;
7343 		}
7344 
7345 		kfree(tnapi->tx_buffers);
7346 		tnapi->tx_buffers = NULL;
7347 
7348 		if (tnapi->rx_rcb) {
7349 			dma_free_coherent(&tp->pdev->dev,
7350 					  TG3_RX_RCB_RING_BYTES(tp),
7351 					  tnapi->rx_rcb,
7352 					  tnapi->rx_rcb_mapping);
7353 			tnapi->rx_rcb = NULL;
7354 		}
7355 
7356 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7357 
7358 		if (tnapi->hw_status) {
7359 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7360 					  tnapi->hw_status,
7361 					  tnapi->status_mapping);
7362 			tnapi->hw_status = NULL;
7363 		}
7364 	}
7365 
7366 	if (tp->hw_stats) {
7367 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7368 				  tp->hw_stats, tp->stats_mapping);
7369 		tp->hw_stats = NULL;
7370 	}
7371 }
7372 
7373 /*
7374  * Must not be invoked with interrupt sources disabled and
7375  * the hardware shut down.  Can sleep.
7376  */
7377 static int tg3_alloc_consistent(struct tg3 *tp)
7378 {
7379 	int i;
7380 
7381 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7382 					  sizeof(struct tg3_hw_stats),
7383 					  &tp->stats_mapping,
7384 					  GFP_KERNEL);
7385 	if (!tp->hw_stats)
7386 		goto err_out;
7387 
7388 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7389 
7390 	for (i = 0; i < tp->irq_cnt; i++) {
7391 		struct tg3_napi *tnapi = &tp->napi[i];
7392 		struct tg3_hw_status *sblk;
7393 
7394 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7395 						      TG3_HW_STATUS_SIZE,
7396 						      &tnapi->status_mapping,
7397 						      GFP_KERNEL);
7398 		if (!tnapi->hw_status)
7399 			goto err_out;
7400 
7401 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7402 		sblk = tnapi->hw_status;
7403 
7404 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7405 			goto err_out;
7406 
7407 		/* With multivector TSS, vector 0 handles no tx interrupts and
7408 		 * gets no tx resources; without TSS, only vector 0 gets a tx ring.
7409 		 */
7410 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7411 		    (i && tg3_flag(tp, ENABLE_TSS))) {
7412 			tnapi->tx_buffers = kzalloc(
7413 					       sizeof(struct tg3_tx_ring_info) *
7414 					       TG3_TX_RING_SIZE, GFP_KERNEL);
7415 			if (!tnapi->tx_buffers)
7416 				goto err_out;
7417 
7418 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7419 							    TG3_TX_RING_BYTES,
7420 							&tnapi->tx_desc_mapping,
7421 							    GFP_KERNEL);
7422 			if (!tnapi->tx_ring)
7423 				goto err_out;
7424 		}
7425 
7426 		/*
7427 		 * When RSS is enabled, the status block format changes
7428 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7429 		 * and "rx_mini_consumer" members get mapped to the
7430 		 * other three rx return ring producer indexes.
7431 		 */
7432 		switch (i) {
7433 		default:
7434 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7435 			break;
7436 		case 2:
7437 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7438 			break;
7439 		case 3:
7440 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7441 			break;
7442 		case 4:
7443 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7444 			break;
7445 		}
7446 
7447 		/*
7448 		 * If multivector RSS is enabled, vector 0 does not handle
7449 		 * rx or tx interrupts.  Don't allocate any resources for it.
7450 		 */
7451 		if (!i && tg3_flag(tp, ENABLE_RSS))
7452 			continue;
7453 
7454 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7455 						   TG3_RX_RCB_RING_BYTES(tp),
7456 						   &tnapi->rx_rcb_mapping,
7457 						   GFP_KERNEL);
7458 		if (!tnapi->rx_rcb)
7459 			goto err_out;
7460 
7461 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7462 	}
7463 
7464 	return 0;
7465 
7466 err_out:
7467 	tg3_free_consistent(tp);
7468 	return -ENOMEM;
7469 }
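/* tg3_alloc_consistent() above uses the common kernel "goto err_out"
 * idiom: every allocation is checked, and any failure takes a single
 * exit path that releases whatever was already allocated (note that
 * tg3_free_consistent() tolerates NULL members).  A minimal sketch of
 * the pattern, with hypothetical names; not part of the driver.
 */
#if 0
static int alloc_two(struct ctx *c)		/* struct ctx is hypothetical */
{
	c->a = kzalloc(sizeof(*c->a), GFP_KERNEL);
	if (!c->a)
		goto err_out;

	c->b = kzalloc(sizeof(*c->b), GFP_KERNEL);
	if (!c->b)
		goto err_out;

	return 0;

err_out:
	kfree(c->b);	/* kfree(NULL) is a no-op */
	kfree(c->a);
	c->a = NULL;
	c->b = NULL;
	return -ENOMEM;
}
#endif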
7470 
7471 #define MAX_WAIT_CNT 1000
7472 
7473 /* To stop a block, clear the enable bit and poll till it
7474  * clears.  tp->lock is held.
7475  */
7476 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7477 {
7478 	unsigned int i;
7479 	u32 val;
7480 
7481 	if (tg3_flag(tp, 5705_PLUS)) {
7482 		switch (ofs) {
7483 		case RCVLSC_MODE:
7484 		case DMAC_MODE:
7485 		case MBFREE_MODE:
7486 		case BUFMGR_MODE:
7487 		case MEMARB_MODE:
7488 			/* We can't enable/disable these bits of the
7489 			 * 5705/5750, just say success.
7490 			 */
7491 			return 0;
7492 
7493 		default:
7494 			break;
7495 		}
7496 	}
7497 
7498 	val = tr32(ofs);
7499 	val &= ~enable_bit;
7500 	tw32_f(ofs, val);
7501 
7502 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7503 		udelay(100);
7504 		val = tr32(ofs);
7505 		if ((val & enable_bit) == 0)
7506 			break;
7507 	}
7508 
7509 	if (i == MAX_WAIT_CNT && !silent) {
7510 		dev_err(&tp->pdev->dev,
7511 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7512 			ofs, enable_bit);
7513 		return -ENODEV;
7514 	}
7515 
7516 	return 0;
7517 }
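/* The poll in tg3_stop_block() budgets MAX_WAIT_CNT (1000) iterations
 * of udelay(100), i.e. roughly 100 ms, before declaring the block stuck.
 */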
7518 
7519 /* tp->lock is held. */
7520 static int tg3_abort_hw(struct tg3 *tp, int silent)
7521 {
7522 	int i, err;
7523 
7524 	tg3_disable_ints(tp);
7525 
7526 	tp->rx_mode &= ~RX_MODE_ENABLE;
7527 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7528 	udelay(10);
7529 
7530 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7531 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7532 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7533 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7534 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7535 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7536 
7537 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7538 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7539 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7540 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7541 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7542 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7543 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7544 
7545 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7546 	tw32_f(MAC_MODE, tp->mac_mode);
7547 	udelay(40);
7548 
7549 	tp->tx_mode &= ~TX_MODE_ENABLE;
7550 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7551 
7552 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7553 		udelay(100);
7554 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7555 			break;
7556 	}
7557 	if (i >= MAX_WAIT_CNT) {
7558 		dev_err(&tp->pdev->dev,
7559 			"%s timed out, TX_MODE_ENABLE will not clear "
7560 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7561 		err |= -ENODEV;
7562 	}
7563 
7564 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7565 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7566 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7567 
7568 	tw32(FTQ_RESET, 0xffffffff);
7569 	tw32(FTQ_RESET, 0x00000000);
7570 
7571 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7572 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7573 
7574 	for (i = 0; i < tp->irq_cnt; i++) {
7575 		struct tg3_napi *tnapi = &tp->napi[i];
7576 		if (tnapi->hw_status)
7577 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7578 	}
7579 
7580 	return err;
7581 }
7582 
7583 /* Save PCI command register before chip reset */
7584 static void tg3_save_pci_state(struct tg3 *tp)
7585 {
7586 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7587 }
7588 
7589 /* Restore PCI state after chip reset */
7590 static void tg3_restore_pci_state(struct tg3 *tp)
7591 {
7592 	u32 val;
7593 
7594 	/* Re-enable indirect register accesses. */
7595 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7596 			       tp->misc_host_ctrl);
7597 
7598 	/* Set MAX PCI retry to zero. */
7599 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7600 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7601 	    tg3_flag(tp, PCIX_MODE))
7602 		val |= PCISTATE_RETRY_SAME_DMA;
7603 	/* Allow reads and writes to the APE register and memory space. */
7604 	if (tg3_flag(tp, ENABLE_APE))
7605 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7606 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7607 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7608 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7609 
7610 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7611 
7612 	if (!tg3_flag(tp, PCI_EXPRESS)) {
7613 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7614 				      tp->pci_cacheline_sz);
7615 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7616 				      tp->pci_lat_timer);
7617 	}
7618 
7619 	/* Make sure PCI-X relaxed ordering bit is clear. */
7620 	if (tg3_flag(tp, PCIX_MODE)) {
7621 		u16 pcix_cmd;
7622 
7623 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7624 				     &pcix_cmd);
7625 		pcix_cmd &= ~PCI_X_CMD_ERO;
7626 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7627 				      pcix_cmd);
7628 	}
7629 
7630 	if (tg3_flag(tp, 5780_CLASS)) {
7631 
7632 		/* Chip reset on 5780 will reset MSI enable bit,
7633 		 * so need to restore it.
7634 		 */
7635 		if (tg3_flag(tp, USING_MSI)) {
7636 			u16 ctrl;
7637 
7638 			pci_read_config_word(tp->pdev,
7639 					     tp->msi_cap + PCI_MSI_FLAGS,
7640 					     &ctrl);
7641 			pci_write_config_word(tp->pdev,
7642 					      tp->msi_cap + PCI_MSI_FLAGS,
7643 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7644 			val = tr32(MSGINT_MODE);
7645 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7646 		}
7647 	}
7648 }
7649 
7650 /* tp->lock is held. */
7651 static int tg3_chip_reset(struct tg3 *tp)
7652 {
7653 	u32 val;
7654 	void (*write_op)(struct tg3 *, u32, u32);
7655 	int i, err;
7656 
7657 	tg3_nvram_lock(tp);
7658 
7659 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7660 
7661 	/* No matching tg3_nvram_unlock() after this because
7662 	 * chip reset below will undo the nvram lock.
7663 	 */
7664 	tp->nvram_lock_cnt = 0;
7665 
7666 	/* GRC_MISC_CFG core clock reset will clear the memory
7667 	 * enable bit in PCI register 4 and the MSI enable bit
7668 	 * on some chips, so we save relevant registers here.
7669 	 */
7670 	tg3_save_pci_state(tp);
7671 
7672 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7673 	    tg3_flag(tp, 5755_PLUS))
7674 		tw32(GRC_FASTBOOT_PC, 0);
7675 
7676 	/*
7677 	 * We must avoid the readl() that normally takes place.
7678 	 * It locks machines, causes machine checks, and other
7679 	 * fun things.  So, temporarily disable the 5701
7680 	 * hardware workaround, while we do the reset.
7681 	 */
7682 	write_op = tp->write32;
7683 	if (write_op == tg3_write_flush_reg32)
7684 		tp->write32 = tg3_write32;
7685 
7686 	/* Prevent the irq handler from reading or writing PCI registers
7687 	 * during chip reset when the memory enable bit in the PCI command
7688 	 * register may be cleared.  The chip does not generate interrupts
7689 	 * at this time, but the irq handler may still be called due to irq
7690 	 * sharing or irqpoll.
7691 	 */
7692 	tg3_flag_set(tp, CHIP_RESETTING);
7693 	for (i = 0; i < tp->irq_cnt; i++) {
7694 		struct tg3_napi *tnapi = &tp->napi[i];
7695 		if (tnapi->hw_status) {
7696 			tnapi->hw_status->status = 0;
7697 			tnapi->hw_status->status_tag = 0;
7698 		}
7699 		tnapi->last_tag = 0;
7700 		tnapi->last_irq_tag = 0;
7701 	}
7702 	smp_mb();
7703 
7704 	for (i = 0; i < tp->irq_cnt; i++)
7705 		synchronize_irq(tp->napi[i].irq_vec);
7706 
7707 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7708 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7709 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7710 	}
7711 
7712 	/* do the reset */
7713 	val = GRC_MISC_CFG_CORECLK_RESET;
7714 
7715 	if (tg3_flag(tp, PCI_EXPRESS)) {
7716 		/* Force PCIe 1.0a mode */
7717 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7718 		    !tg3_flag(tp, 57765_PLUS) &&
7719 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7720 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7721 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7722 
7723 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7724 			tw32(GRC_MISC_CFG, (1 << 29));
7725 			val |= (1 << 29);
7726 		}
7727 	}
7728 
7729 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7730 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7731 		tw32(GRC_VCPU_EXT_CTRL,
7732 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7733 	}
7734 
7735 	/* Manage GPHY power for all PCIe devices lacking a CPMU. */
7736 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7737 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7738 
7739 	tw32(GRC_MISC_CFG, val);
7740 
7741 	/* restore 5701 hardware bug workaround write method */
7742 	tp->write32 = write_op;
7743 
7744 	/* Unfortunately, we have to delay before the PCI read back.
7745 	 * Some 575X chips even will not respond to a PCI cfg access
7746 	 * when the reset command is given to the chip.
7747 	 *
7748 	 * How do these hardware designers expect things to work
7749 	 * properly if the PCI write is posted for a long period
7750 	 * of time?  It is always necessary to have some method by
7751 	 * which a register read back can occur to push the write
7752 	 * out which does the reset.
7753 	 *
7754 	 * For most tg3 variants the trick below was working.
7755 	 * Ho hum...
7756 	 */
7757 	udelay(120);
7758 
7759 	/* Flush PCI posted writes.  The normal MMIO registers
7760 	 * are inaccessible at this time so this is the only
7761  * way to do this reliably (actually, this is no longer
7762 	 * the case, see above).  I tried to use indirect
7763 	 * register read/write but this upset some 5701 variants.
7764 	 */
7765 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7766 
7767 	udelay(120);
7768 
7769 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7770 		u16 val16;
7771 
7772 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7773 			int i;
7774 			u32 cfg_val;
7775 
7776 			/* Wait for link training to complete.  */
7777 			for (i = 0; i < 5000; i++)
7778 				udelay(100);
7779 
7780 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7781 			pci_write_config_dword(tp->pdev, 0xc4,
7782 					       cfg_val | (1 << 15));
7783 		}
7784 
7785 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7786 		pci_read_config_word(tp->pdev,
7787 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7788 				     &val16);
7789 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7790 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7791 		/*
7792 		 * Older PCIe devices only support the 128 byte
7793 		 * MPS setting.  Enforce the restriction.
7794 		 */
7795 		if (!tg3_flag(tp, CPMU_PRESENT))
7796 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7797 		pci_write_config_word(tp->pdev,
7798 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7799 				      val16);
7800 
7801 		/* Clear error status */
7802 		pci_write_config_word(tp->pdev,
7803 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7804 				      PCI_EXP_DEVSTA_CED |
7805 				      PCI_EXP_DEVSTA_NFED |
7806 				      PCI_EXP_DEVSTA_FED |
7807 				      PCI_EXP_DEVSTA_URD);
7808 	}
7809 
7810 	tg3_restore_pci_state(tp);
7811 
7812 	tg3_flag_clear(tp, CHIP_RESETTING);
7813 	tg3_flag_clear(tp, ERROR_PROCESSED);
7814 
7815 	val = 0;
7816 	if (tg3_flag(tp, 5780_CLASS))
7817 		val = tr32(MEMARB_MODE);
7818 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7819 
7820 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7821 		tg3_stop_fw(tp);
7822 		tw32(0x5000, 0x400);
7823 	}
7824 
7825 	tw32(GRC_MODE, tp->grc_mode);
7826 
7827 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7828 		val = tr32(0xc4);
7829 
7830 		tw32(0xc4, val | (1 << 15));
7831 	}
7832 
7833 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7834 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7835 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7836 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7837 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7838 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7839 	}
7840 
7841 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7842 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7843 		val = tp->mac_mode;
7844 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7845 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7846 		val = tp->mac_mode;
7847 	} else
7848 		val = 0;
7849 
7850 	tw32_f(MAC_MODE, val);
7851 	udelay(40);
7852 
7853 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7854 
7855 	err = tg3_poll_fw(tp);
7856 	if (err)
7857 		return err;
7858 
7859 	tg3_mdio_start(tp);
7860 
7861 	if (tg3_flag(tp, PCI_EXPRESS) &&
7862 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7863 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7864 	    !tg3_flag(tp, 57765_PLUS)) {
7865 		val = tr32(0x7c00);
7866 
7867 		tw32(0x7c00, val | (1 << 25));
7868 	}
7869 
7870 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7871 		val = tr32(TG3_CPMU_CLCK_ORIDE);
7872 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7873 	}
7874 
7875 	/* Reprobe ASF enable state.  */
7876 	tg3_flag_clear(tp, ENABLE_ASF);
7877 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7878 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7879 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7880 		u32 nic_cfg;
7881 
7882 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7883 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7884 			tg3_flag_set(tp, ENABLE_ASF);
7885 			tp->last_event_jiffies = jiffies;
7886 			if (tg3_flag(tp, 5750_PLUS))
7887 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7888 		}
7889 	}
7890 
7891 	return 0;
7892 }
7893 
7894 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7895 						 struct rtnl_link_stats64 *);
7896 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7897 						struct tg3_ethtool_stats *);
7898 
7899 /* tp->lock is held. */
7900 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7901 {
7902 	int err;
7903 
7904 	tg3_stop_fw(tp);
7905 
7906 	tg3_write_sig_pre_reset(tp, kind);
7907 
7908 	tg3_abort_hw(tp, silent);
7909 	err = tg3_chip_reset(tp);
7910 
7911 	__tg3_set_mac_addr(tp, 0);
7912 
7913 	tg3_write_sig_legacy(tp, kind);
7914 	tg3_write_sig_post_reset(tp, kind);
7915 
7916 	if (tp->hw_stats) {
7917 		/* Save the stats across chip resets... */
7918 		tg3_get_stats64(tp->dev, &tp->net_stats_prev);
7919 		tg3_get_estats(tp, &tp->estats_prev);
7920 
7921 		/* And make sure the next sample is new data */
7922 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7923 	}
7924 
7925 	if (err)
7926 		return err;
7927 
7928 	return 0;
7929 }
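/* tg3_halt() snapshots the statistics into the *_prev copies before
 * zeroing the hardware block, so counters read as continuous across a
 * chip reset.  A minimal sketch of the idea, with hypothetical names:
 */
#if 0
struct persistent_ctr {
	u64 total;	/* accumulated total, survives resets */
	u32 hw;		/* stands in for a zeroed-on-reset hw counter */
};

static void ctr_save_across_reset(struct persistent_ctr *c)
{
	c->total += c->hw;	/* fold the current hw count into the total */
	c->hw = 0;		/* the real hw block is cleared by the reset */
}
#endif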
7930 
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7932 {
7933 	struct tg3 *tp = netdev_priv(dev);
7934 	struct sockaddr *addr = p;
7935 	int err = 0, skip_mac_1 = 0;
7936 
7937 	if (!is_valid_ether_addr(addr->sa_data))
7938 		return -EINVAL;
7939 
7940 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7941 
7942 	if (!netif_running(dev))
7943 		return 0;
7944 
7945 	if (tg3_flag(tp, ENABLE_ASF)) {
7946 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7947 
7948 		addr0_high = tr32(MAC_ADDR_0_HIGH);
7949 		addr0_low = tr32(MAC_ADDR_0_LOW);
7950 		addr1_high = tr32(MAC_ADDR_1_HIGH);
7951 		addr1_low = tr32(MAC_ADDR_1_LOW);
7952 
7953 		/* Skip MAC addr 1 if ASF is using it. */
7954 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955 		    !(addr1_high == 0 && addr1_low == 0))
7956 			skip_mac_1 = 1;
7957 	}
7958 	spin_lock_bh(&tp->lock);
7959 	__tg3_set_mac_addr(tp, skip_mac_1);
7960 	spin_unlock_bh(&tp->lock);
7961 
7962 	return err;
7963 }
7964 
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967 			   dma_addr_t mapping, u32 maxlen_flags,
7968 			   u32 nic_addr)
7969 {
7970 	tg3_write_mem(tp,
7971 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972 		      ((u64) mapping >> 32));
7973 	tg3_write_mem(tp,
7974 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975 		      ((u64) mapping & 0xffffffff));
7976 	tg3_write_mem(tp,
7977 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7978 		       maxlen_flags);
7979 
7980 	if (!tg3_flag(tp, 5705_PLUS))
7981 		tg3_write_mem(tp,
7982 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7983 			      nic_addr);
7984 }
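/* The TG3_64BIT_REG_HIGH/LOW writes above (and in the ring and status
 * block setup below) split a 64-bit DMA address across two 32-bit
 * registers.  A minimal sketch of the split:
 */
#if 0
static void split_dma_addr(u64 mapping, u32 *hi, u32 *lo)
{
	*hi = (u32)(mapping >> 32);		/* upper 32 bits */
	*lo = (u32)(mapping & 0xffffffff);	/* lower 32 bits */
}
#endif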
7985 
7986 static void __tg3_set_rx_mode(struct net_device *);
7987 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7988 {
7989 	int i;
7990 
7991 	if (!tg3_flag(tp, ENABLE_TSS)) {
7992 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7993 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7994 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7995 	} else {
7996 		tw32(HOSTCC_TXCOL_TICKS, 0);
7997 		tw32(HOSTCC_TXMAX_FRAMES, 0);
7998 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7999 	}
8000 
8001 	if (!tg3_flag(tp, ENABLE_RSS)) {
8002 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8003 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8004 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8005 	} else {
8006 		tw32(HOSTCC_RXCOL_TICKS, 0);
8007 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8008 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8009 	}
8010 
8011 	if (!tg3_flag(tp, 5705_PLUS)) {
8012 		u32 val = ec->stats_block_coalesce_usecs;
8013 
8014 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8015 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8016 
8017 		if (!netif_carrier_ok(tp->dev))
8018 			val = 0;
8019 
8020 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8021 	}
8022 
8023 	for (i = 0; i < tp->irq_cnt - 1; i++) {
8024 		u32 reg;
8025 
8026 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8027 		tw32(reg, ec->rx_coalesce_usecs);
8028 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8029 		tw32(reg, ec->rx_max_coalesced_frames);
8030 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8031 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8032 
8033 		if (tg3_flag(tp, ENABLE_TSS)) {
8034 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8035 			tw32(reg, ec->tx_coalesce_usecs);
8036 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8037 			tw32(reg, ec->tx_max_coalesced_frames);
8038 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8039 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8040 		}
8041 	}
8042 
8043 	for (; i < tp->irq_max - 1; i++) {
8044 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8045 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8047 
8048 		if (tg3_flag(tp, ENABLE_TSS)) {
8049 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8050 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8051 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8052 		}
8053 	}
8054 }
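/* The per-vector coalescing registers written above sit at a fixed
 * 0x18-byte stride: vector n (n >= 1) uses the *_VEC1 base address
 * plus (n - 1) * 0x18.  A sketch of the addressing, with a
 * hypothetical base value:
 */
#if 0
#define VEC1_BASE	0x100	/* hypothetical stand-in for HOSTCC_..._VEC1 */
#define VEC_STRIDE	0x18	/* register block size per extra vector */

static unsigned long vec_reg(unsigned int vec)	/* vec >= 1 */
{
	return VEC1_BASE + (vec - 1) * VEC_STRIDE;
}
#endif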
8055 
8056 /* tp->lock is held. */
8057 static void tg3_rings_reset(struct tg3 *tp)
8058 {
8059 	int i;
8060 	u32 stblk, txrcb, rxrcb, limit;
8061 	struct tg3_napi *tnapi = &tp->napi[0];
8062 
8063 	/* Disable all transmit rings but the first. */
8064 	if (!tg3_flag(tp, 5705_PLUS))
8065 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8066 	else if (tg3_flag(tp, 5717_PLUS))
8067 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8068 	else if (tg3_flag(tp, 57765_CLASS))
8069 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8070 	else
8071 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8072 
8073 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8074 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8075 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8076 			      BDINFO_FLAGS_DISABLED);
8077 
8078 
8079 	/* Disable all receive return rings but the first. */
8080 	if (tg3_flag(tp, 5717_PLUS))
8081 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8082 	else if (!tg3_flag(tp, 5705_PLUS))
8083 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8084 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8085 		 tg3_flag(tp, 57765_CLASS))
8086 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8087 	else
8088 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8089 
8090 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8091 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8092 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093 			      BDINFO_FLAGS_DISABLED);
8094 
8095 	/* Disable interrupts */
8096 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8097 	tp->napi[0].chk_msi_cnt = 0;
8098 	tp->napi[0].last_rx_cons = 0;
8099 	tp->napi[0].last_tx_cons = 0;
8100 
8101 	/* Zero mailbox registers. */
8102 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8103 		for (i = 1; i < tp->irq_max; i++) {
8104 			tp->napi[i].tx_prod = 0;
8105 			tp->napi[i].tx_cons = 0;
8106 			if (tg3_flag(tp, ENABLE_TSS))
8107 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8108 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8109 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8110 			tp->napi[i].chk_msi_cnt = 0;
8111 			tp->napi[i].last_rx_cons = 0;
8112 			tp->napi[i].last_tx_cons = 0;
8113 		}
8114 		if (!tg3_flag(tp, ENABLE_TSS))
8115 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8116 	} else {
8117 		tp->napi[0].tx_prod = 0;
8118 		tp->napi[0].tx_cons = 0;
8119 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8120 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8121 	}
8122 
8123 	/* Make sure the NIC-based send BD rings are disabled. */
8124 	if (!tg3_flag(tp, 5705_PLUS)) {
8125 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8126 		for (i = 0; i < 16; i++)
8127 			tw32_tx_mbox(mbox + i * 8, 0);
8128 	}
8129 
8130 	txrcb = NIC_SRAM_SEND_RCB;
8131 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8132 
8133 	/* Clear status block in ram. */
8134 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8135 
8136 	/* Set status block DMA address */
8137 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8138 	     ((u64) tnapi->status_mapping >> 32));
8139 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8140 	     ((u64) tnapi->status_mapping & 0xffffffff));
8141 
8142 	if (tnapi->tx_ring) {
8143 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8144 			       (TG3_TX_RING_SIZE <<
8145 				BDINFO_FLAGS_MAXLEN_SHIFT),
8146 			       NIC_SRAM_TX_BUFFER_DESC);
8147 		txrcb += TG3_BDINFO_SIZE;
8148 	}
8149 
8150 	if (tnapi->rx_rcb) {
8151 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8152 			       (tp->rx_ret_ring_mask + 1) <<
8153 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8154 		rxrcb += TG3_BDINFO_SIZE;
8155 	}
8156 
8157 	stblk = HOSTCC_STATBLCK_RING1;
8158 
8159 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8160 		u64 mapping = (u64)tnapi->status_mapping;
8161 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8162 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8163 
8164 		/* Clear status block in ram. */
8165 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8166 
8167 		if (tnapi->tx_ring) {
8168 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8169 				       (TG3_TX_RING_SIZE <<
8170 					BDINFO_FLAGS_MAXLEN_SHIFT),
8171 				       NIC_SRAM_TX_BUFFER_DESC);
8172 			txrcb += TG3_BDINFO_SIZE;
8173 		}
8174 
8175 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8176 			       ((tp->rx_ret_ring_mask + 1) <<
8177 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8178 
8179 		stblk += 8;
8180 		rxrcb += TG3_BDINFO_SIZE;
8181 	}
8182 }
8183 
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8185 {
8186 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8187 
8188 	if (!tg3_flag(tp, 5750_PLUS) ||
8189 	    tg3_flag(tp, 5780_CLASS) ||
8190 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8192 	    tg3_flag(tp, 57765_PLUS))
8193 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8194 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8195 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8196 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8197 	else
8198 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8199 
8200 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8201 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8202 
8203 	val = min(nic_rep_thresh, host_rep_thresh);
8204 	tw32(RCVBDI_STD_THRESH, val);
8205 
8206 	if (tg3_flag(tp, 57765_PLUS))
8207 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8208 
8209 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8210 		return;
8211 
8212 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8213 
8214 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8215 
8216 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8217 	tw32(RCVBDI_JUMBO_THRESH, val);
8218 
8219 	if (tg3_flag(tp, 57765_PLUS))
8220 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8221 }
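/* A worked example of the threshold math above, with hypothetical
 * numbers (the real TG3_SRAM_RX_*_BDCACHE_SIZE_* values vary by chip):
 * if bdcache_maxcnt = 8, rx_std_max_post = 32 and rx_pending = 200,
 *   nic_rep_thresh    = min(8 / 2, 32)  = 4
 *   host_rep_thresh   = max(200 / 8, 1) = 25
 *   RCVBDI_STD_THRESH = min(4, 25)      = 4
 * i.e. the NIC-side BD cache, not the host ring, is the binding limit.
 */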
8222 
8223 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8224 {
8225 	int i;
8226 
8227 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8228 		tp->rss_ind_tbl[i] =
8229 			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8230 }
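/* At this kernel revision ethtool_rxfh_indir_default(i, n) is simply
 * i % n, so the default table spreads the rx rings round-robin: with,
 * say, 4 rx return rings the entries read 0, 1, 2, 3, 0, 1, 2, 3, ...
 */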
8231 
8232 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8233 {
8234 	int i;
8235 
8236 	if (!tg3_flag(tp, SUPPORT_MSIX))
8237 		return;
8238 
8239 	if (tp->irq_cnt <= 2) {
8240 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8241 		return;
8242 	}
8243 
8244 	/* Validate table against current IRQ count */
8245 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8246 		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8247 			break;
8248 	}
8249 
8250 	if (i != TG3_RSS_INDIR_TBL_SIZE)
8251 		tg3_rss_init_dflt_indir_tbl(tp);
8252 }
8253 
8254 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8255 {
8256 	int i = 0;
8257 	u32 reg = MAC_RSS_INDIR_TBL_0;
8258 
8259 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8260 		u32 val = tp->rss_ind_tbl[i];
8261 		i++;
8262 		for (; i % 8; i++) {
8263 			val <<= 4;
8264 			val |= tp->rss_ind_tbl[i];
8265 		}
8266 		tw32(reg, val);
8267 		reg += 4;
8268 	}
8269 }
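/* The loop above packs eight 4-bit indirection-table entries into each
 * 32-bit register, first entry in the most-significant nibble.  An
 * equivalent, more explicit sketch (not part of the driver):
 */
#if 0
static u32 pack_eight_nibbles(const u8 *tbl)	/* tbl[0..7], values < 16 */
{
	u32 val = 0;
	int n;

	for (n = 0; n < 8; n++)
		val = (val << 4) | (tbl[n] & 0xf);	/* tbl[0] lands in bits 31:28 */

	return val;
}
#endif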
8270 
8271 /* tp->lock is held. */
8272 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8273 {
8274 	u32 val, rdmac_mode;
8275 	int i, err, limit;
8276 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8277 
8278 	tg3_disable_ints(tp);
8279 
8280 	tg3_stop_fw(tp);
8281 
8282 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8283 
8284 	if (tg3_flag(tp, INIT_COMPLETE))
8285 		tg3_abort_hw(tp, 1);
8286 
8287 	/* Enable MAC control of LPI */
8288 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8289 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8290 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8291 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8292 
8293 		tw32_f(TG3_CPMU_EEE_CTRL,
8294 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8295 
8296 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8297 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8298 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8299 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8300 
8301 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8302 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8303 
8304 		if (tg3_flag(tp, ENABLE_APE))
8305 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8306 
8307 		tw32_f(TG3_CPMU_EEE_MODE, val);
8308 
8309 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8310 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8311 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8312 
8313 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8314 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8315 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8316 	}
8317 
8318 	if (reset_phy)
8319 		tg3_phy_reset(tp);
8320 
8321 	err = tg3_chip_reset(tp);
8322 	if (err)
8323 		return err;
8324 
8325 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8326 
8327 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8328 		val = tr32(TG3_CPMU_CTRL);
8329 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8330 		tw32(TG3_CPMU_CTRL, val);
8331 
8332 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8333 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8334 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8335 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8336 
8337 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8338 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8339 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8340 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8341 
8342 		val = tr32(TG3_CPMU_HST_ACC);
8343 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8344 		val |= CPMU_HST_ACC_MACCLK_6_25;
8345 		tw32(TG3_CPMU_HST_ACC, val);
8346 	}
8347 
8348 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8349 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8350 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8351 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8352 		tw32(PCIE_PWR_MGMT_THRESH, val);
8353 
8354 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8355 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8356 
8357 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8358 
8359 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8360 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8361 	}
8362 
8363 	if (tg3_flag(tp, L1PLLPD_EN)) {
8364 		u32 grc_mode = tr32(GRC_MODE);
8365 
8366 		/* Access the lower 1K of PL PCIE block registers. */
8367 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8368 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8369 
8370 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8371 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8372 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8373 
8374 		tw32(GRC_MODE, grc_mode);
8375 	}
8376 
8377 	if (tg3_flag(tp, 57765_CLASS)) {
8378 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8379 			u32 grc_mode = tr32(GRC_MODE);
8380 
8381 			/* Access the lower 1K of PL PCIE block registers. */
8382 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8383 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8384 
8385 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8386 				   TG3_PCIE_PL_LO_PHYCTL5);
8387 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8388 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8389 
8390 			tw32(GRC_MODE, grc_mode);
8391 		}
8392 
8393 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8394 			u32 grc_mode = tr32(GRC_MODE);
8395 
8396 			/* Access the lower 1K of DL PCIE block registers. */
8397 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8398 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8399 
8400 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8401 				   TG3_PCIE_DL_LO_FTSMAX);
8402 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8403 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8404 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8405 
8406 			tw32(GRC_MODE, grc_mode);
8407 		}
8408 
8409 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8410 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8411 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8412 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8413 	}
8414 
8415 	/* This works around an issue with Athlon chipsets on
8416 	 * B3 tigon3 silicon.  This bit has no effect on any
8417 	 * other revision.  But do not set this on PCI Express
8418 	 * chips and don't even touch the clocks if the CPMU is present.
8419 	 */
8420 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8421 		if (!tg3_flag(tp, PCI_EXPRESS))
8422 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8423 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8424 	}
8425 
8426 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8427 	    tg3_flag(tp, PCIX_MODE)) {
8428 		val = tr32(TG3PCI_PCISTATE);
8429 		val |= PCISTATE_RETRY_SAME_DMA;
8430 		tw32(TG3PCI_PCISTATE, val);
8431 	}
8432 
8433 	if (tg3_flag(tp, ENABLE_APE)) {
8434 		/* Allow reads and writes to the
8435 		 * APE register and memory space.
8436 		 */
8437 		val = tr32(TG3PCI_PCISTATE);
8438 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8439 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8440 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8441 		tw32(TG3PCI_PCISTATE, val);
8442 	}
8443 
8444 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8445 		/* Enable some hw fixes.  */
8446 		val = tr32(TG3PCI_MSI_DATA);
8447 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8448 		tw32(TG3PCI_MSI_DATA, val);
8449 	}
8450 
8451 	/* Descriptor ring init may make accesses to the
8452 	 * NIC SRAM area to set up the TX descriptors, so we
8453 	 * can only do this after the hardware has been
8454 	 * successfully reset.
8455 	 */
8456 	err = tg3_init_rings(tp);
8457 	if (err)
8458 		return err;
8459 
8460 	if (tg3_flag(tp, 57765_PLUS)) {
8461 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8462 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8463 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8464 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8465 		if (!tg3_flag(tp, 57765_CLASS) &&
8466 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8467 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8468 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8469 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8470 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8471 		/* This value is determined during the probe time DMA
8472 		 * engine test, tg3_test_dma.
8473 		 */
8474 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8475 	}
8476 
8477 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8478 			  GRC_MODE_4X_NIC_SEND_RINGS |
8479 			  GRC_MODE_NO_TX_PHDR_CSUM |
8480 			  GRC_MODE_NO_RX_PHDR_CSUM);
8481 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8482 
8483 	/* Pseudo-header checksum is done by hardware logic and not
8484 	 * the offload processors, so make the chip do the pseudo-
8485 	 * header checksums on receive.  For transmit it is more
8486 	 * convenient to do the pseudo-header checksum in software
8487 	 * as Linux does that on transmit for us in all cases.
8488 	 */
8489 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8490 
8491 	tw32(GRC_MODE,
8492 	     tp->grc_mode |
8493 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8494 
8495 	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
8496 	val = tr32(GRC_MISC_CFG);
8497 	val &= ~0xff;
8498 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
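	/* Assuming the usual divide-by-(N + 1) prescaler behavior:
	 * 66 MHz / (65 + 1) = 1 MHz timer tick.
	 */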
8499 	tw32(GRC_MISC_CFG, val);
8500 
8501 	/* Initialize MBUF/DESC pool. */
8502 	if (tg3_flag(tp, 5750_PLUS)) {
8503 		/* Do nothing.  */
8504 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8505 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8506 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8507 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8508 		else
8509 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8510 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8511 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8512 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8513 		int fw_len;
8514 
8515 		fw_len = tp->fw_len;
8516 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8517 		tw32(BUFMGR_MB_POOL_ADDR,
8518 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8519 		tw32(BUFMGR_MB_POOL_SIZE,
8520 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8521 	}
8522 
8523 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8524 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8525 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8526 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8527 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8528 		tw32(BUFMGR_MB_HIGH_WATER,
8529 		     tp->bufmgr_config.mbuf_high_water);
8530 	} else {
8531 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8532 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8533 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8534 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8535 		tw32(BUFMGR_MB_HIGH_WATER,
8536 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8537 	}
8538 	tw32(BUFMGR_DMA_LOW_WATER,
8539 	     tp->bufmgr_config.dma_low_water);
8540 	tw32(BUFMGR_DMA_HIGH_WATER,
8541 	     tp->bufmgr_config.dma_high_water);
8542 
8543 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8544 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8545 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8546 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8547 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8548 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8549 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8550 	tw32(BUFMGR_MODE, val);
8551 	for (i = 0; i < 2000; i++) {
8552 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8553 			break;
8554 		udelay(10);
8555 	}
8556 	if (i >= 2000) {
8557 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8558 		return -ENODEV;
8559 	}
8560 
8561 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8562 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8563 
8564 	tg3_setup_rxbd_thresholds(tp);
8565 
8566 	/* Initialize TG3_BDINFO's at:
8567 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8568 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8569 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8570 	 *
8571 	 * like so:
8572 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8573 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8574 	 *                              ring attribute flags
8575 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8576 	 *
8577 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8578 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8579 	 *
8580 	 * The size of each ring is fixed in the firmware, but the location is
8581 	 * configurable.
8582 	 */
8583 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8584 	     ((u64) tpr->rx_std_mapping >> 32));
8585 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8586 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8587 	if (!tg3_flag(tp, 5717_PLUS))
8588 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8589 		     NIC_SRAM_RX_BUFFER_DESC);
8590 
8591 	/* Disable the mini ring */
8592 	if (!tg3_flag(tp, 5705_PLUS))
8593 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8594 		     BDINFO_FLAGS_DISABLED);
8595 
8596 	/* Program the jumbo buffer descriptor ring control
8597 	 * blocks on those devices that have them.
8598 	 */
8599 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8600 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8601 
8602 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8603 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8604 			     ((u64) tpr->rx_jmb_mapping >> 32));
8605 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8606 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8607 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8608 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8609 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8610 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8611 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8612 			    tg3_flag(tp, 57765_CLASS))
8613 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8614 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8615 		} else {
8616 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8617 			     BDINFO_FLAGS_DISABLED);
8618 		}
8619 
8620 		if (tg3_flag(tp, 57765_PLUS)) {
8621 			val = TG3_RX_STD_RING_SIZE(tp);
8622 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8623 			val |= (TG3_RX_STD_DMA_SZ << 2);
8624 		} else
8625 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8626 	} else
8627 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8628 
8629 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8630 
8631 	tpr->rx_std_prod_idx = tp->rx_pending;
8632 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8633 
8634 	tpr->rx_jmb_prod_idx =
8635 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8636 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8637 
8638 	tg3_rings_reset(tp);
8639 
8640 	/* Initialize MAC address and backoff seed. */
8641 	__tg3_set_mac_addr(tp, 0);
8642 
8643 	/* MTU + ethernet header + FCS + optional VLAN tag */
8644 	tw32(MAC_RX_MTU_SIZE,
8645 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
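	/* e.g. with the default 1500-byte MTU: 1500 + 14 + 4 + 4 = 1522 */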
8646 
8647 	/* The slot time is changed by tg3_setup_phy if we
8648 	 * run at gigabit with half duplex.
8649 	 */
8650 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8651 	      (6 << TX_LENGTHS_IPG_SHIFT) |
8652 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8653 
8654 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8655 		val |= tr32(MAC_TX_LENGTHS) &
8656 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8657 			TX_LENGTHS_CNT_DWN_VAL_MSK);
8658 
8659 	tw32(MAC_TX_LENGTHS, val);
8660 
8661 	/* Receive rules. */
8662 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8663 	tw32(RCVLPC_CONFIG, 0x0181);
8664 
8665 	/* Calculate RDMAC_MODE setting early, we need it to determine
8666 	 * the RCVLPC_STATE_ENABLE mask.
8667 	 */
8668 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8669 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8670 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8671 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8672 		      RDMAC_MODE_LNGREAD_ENAB);
8673 
8674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8675 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8676 
8677 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8678 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8679 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8680 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8681 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8682 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8683 
8684 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8685 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8686 		if (tg3_flag(tp, TSO_CAPABLE) &&
8687 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8688 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8689 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8690 			   !tg3_flag(tp, IS_5788)) {
8691 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8692 		}
8693 	}
8694 
8695 	if (tg3_flag(tp, PCI_EXPRESS))
8696 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8697 
8698 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8699 		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8700 
8701 	if (tg3_flag(tp, HW_TSO_1) ||
8702 	    tg3_flag(tp, HW_TSO_2) ||
8703 	    tg3_flag(tp, HW_TSO_3))
8704 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8705 
8706 	if (tg3_flag(tp, 57765_PLUS) ||
8707 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8708 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8709 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8710 
8711 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8712 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8713 
8714 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8715 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8716 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8717 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8718 	    tg3_flag(tp, 57765_PLUS)) {
8719 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
8720 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8721 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8722 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8723 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8724 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8725 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8726 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8727 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8728 		}
8729 		tw32(TG3_RDMA_RSRVCTRL_REG,
8730 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8731 	}
8732 
8733 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8734 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8735 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8736 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8737 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8738 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8739 	}
8740 
8741 	/* Receive/send statistics. */
8742 	if (tg3_flag(tp, 5750_PLUS)) {
8743 		val = tr32(RCVLPC_STATS_ENABLE);
8744 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
8745 		tw32(RCVLPC_STATS_ENABLE, val);
8746 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8747 		   tg3_flag(tp, TSO_CAPABLE)) {
8748 		val = tr32(RCVLPC_STATS_ENABLE);
8749 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8750 		tw32(RCVLPC_STATS_ENABLE, val);
8751 	} else {
8752 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8753 	}
8754 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8755 	tw32(SNDDATAI_STATSENAB, 0xffffff);
8756 	tw32(SNDDATAI_STATSCTRL,
8757 	     (SNDDATAI_SCTRL_ENABLE |
8758 	      SNDDATAI_SCTRL_FASTUPD));
8759 
8760 	/* Set up the host coalescing engine. */
8761 	tw32(HOSTCC_MODE, 0);
8762 	for (i = 0; i < 2000; i++) {
8763 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8764 			break;
8765 		udelay(10);
8766 	}
8767 
8768 	__tg3_set_coalesce(tp, &tp->coal);
8769 
8770 	if (!tg3_flag(tp, 5705_PLUS)) {
8771 		/* Status/statistics block address.  See tg3_timer,
8772 		 * the tg3_periodic_fetch_stats call there, and
8773 		 * tg3_get_stats to see how this works for 5705/5750 chips.
8774 		 */
8775 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8776 		     ((u64) tp->stats_mapping >> 32));
8777 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8778 		     ((u64) tp->stats_mapping & 0xffffffff));
8779 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8780 
8781 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8782 
8783 		/* Clear statistics and status block memory areas */
8784 		for (i = NIC_SRAM_STATS_BLK;
8785 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8786 		     i += sizeof(u32)) {
8787 			tg3_write_mem(tp, i, 0);
8788 			udelay(40);
8789 		}
8790 	}
8791 
8792 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8793 
8794 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8795 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8796 	if (!tg3_flag(tp, 5705_PLUS))
8797 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8798 
8799 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8800 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8801 		/* reset to prevent losing 1st rx packet intermittently */
8802 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8803 		udelay(10);
8804 	}
8805 
8806 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8807 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8808 			MAC_MODE_FHDE_ENABLE;
8809 	if (tg3_flag(tp, ENABLE_APE))
8810 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8811 	if (!tg3_flag(tp, 5705_PLUS) &&
8812 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8813 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8814 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8815 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8816 	udelay(40);
8817 
8818 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8819 	 * If TG3_FLAG_IS_NIC is zero, we should read the
8820 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
8821 	 * whether used as inputs or outputs, are set by boot code after
8822 	 * reset.
8823 	 */
8824 	if (!tg3_flag(tp, IS_NIC)) {
8825 		u32 gpio_mask;
8826 
8827 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8828 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8829 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8830 
8831 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8832 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8833 				     GRC_LCLCTRL_GPIO_OUTPUT3;
8834 
8835 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8836 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8837 
8838 		tp->grc_local_ctrl &= ~gpio_mask;
8839 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8840 
8841 		/* GPIO1 must be driven high for eeprom write protect */
8842 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
8843 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8844 					       GRC_LCLCTRL_GPIO_OUTPUT1);
8845 	}
8846 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8847 	udelay(100);
8848 
8849 	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8850 		val = tr32(MSGINT_MODE);
8851 		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8852 		if (!tg3_flag(tp, 1SHOT_MSI))
8853 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8854 		tw32(MSGINT_MODE, val);
8855 	}
8856 
8857 	if (!tg3_flag(tp, 5705_PLUS)) {
8858 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8859 		udelay(40);
8860 	}
8861 
8862 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8863 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8864 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8865 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8866 	       WDMAC_MODE_LNGREAD_ENAB);
8867 
8868 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8869 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8870 		if (tg3_flag(tp, TSO_CAPABLE) &&
8871 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8872 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8873 			/* nothing */
8874 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8875 			   !tg3_flag(tp, IS_5788)) {
8876 			val |= WDMAC_MODE_RX_ACCEL;
8877 		}
8878 	}
8879 
8880 	/* Enable host coalescing bug fix */
8881 	if (tg3_flag(tp, 5755_PLUS))
8882 		val |= WDMAC_MODE_STATUS_TAG_FIX;
8883 
8884 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8885 		val |= WDMAC_MODE_BURST_ALL_DATA;
8886 
8887 	tw32_f(WDMAC_MODE, val);
8888 	udelay(40);
8889 
8890 	if (tg3_flag(tp, PCIX_MODE)) {
8891 		u16 pcix_cmd;
8892 
8893 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8894 				     &pcix_cmd);
8895 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8896 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8897 			pcix_cmd |= PCI_X_CMD_READ_2K;
8898 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8899 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8900 			pcix_cmd |= PCI_X_CMD_READ_2K;
8901 		}
8902 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8903 				      pcix_cmd);
8904 	}
8905 
8906 	tw32_f(RDMAC_MODE, rdmac_mode);
8907 	udelay(40);
8908 
8909 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8910 	if (!tg3_flag(tp, 5705_PLUS))
8911 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8912 
8913 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8914 		tw32(SNDDATAC_MODE,
8915 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8916 	else
8917 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8918 
8919 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8920 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8921 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8922 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
8923 		val |= RCVDBDI_MODE_LRG_RING_SZ;
8924 	tw32(RCVDBDI_MODE, val);
8925 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8926 	if (tg3_flag(tp, HW_TSO_1) ||
8927 	    tg3_flag(tp, HW_TSO_2) ||
8928 	    tg3_flag(tp, HW_TSO_3))
8929 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8930 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8931 	if (tg3_flag(tp, ENABLE_TSS))
8932 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
8933 	tw32(SNDBDI_MODE, val);
8934 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8935 
8936 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8937 		err = tg3_load_5701_a0_firmware_fix(tp);
8938 		if (err)
8939 			return err;
8940 	}
8941 
8942 	if (tg3_flag(tp, TSO_CAPABLE)) {
8943 		err = tg3_load_tso_firmware(tp);
8944 		if (err)
8945 			return err;
8946 	}
8947 
8948 	tp->tx_mode = TX_MODE_ENABLE;
8949 
8950 	if (tg3_flag(tp, 5755_PLUS) ||
8951 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8952 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8953 
8954 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8955 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8956 		tp->tx_mode &= ~val;
8957 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8958 	}
8959 
8960 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8961 	udelay(100);
8962 
8963 	if (tg3_flag(tp, ENABLE_RSS)) {
8964 		tg3_rss_write_indir_tbl(tp);
8965 
8966 		/* Set up the "secret" hash key. */
8967 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8968 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8969 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8970 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8971 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8972 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8973 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8974 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8975 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8976 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8977 	}
8978 
8979 	tp->rx_mode = RX_MODE_ENABLE;
8980 	if (tg3_flag(tp, 5755_PLUS))
8981 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8982 
8983 	if (tg3_flag(tp, ENABLE_RSS))
8984 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
8985 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
8986 			       RX_MODE_RSS_IPV6_HASH_EN |
8987 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
8988 			       RX_MODE_RSS_IPV4_HASH_EN |
8989 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
8990 
8991 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8992 	udelay(10);
8993 
8994 	tw32(MAC_LED_CTRL, tp->led_ctrl);
8995 
8996 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8997 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8998 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8999 		udelay(10);
9000 	}
9001 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9002 	udelay(10);
9003 
9004 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9005 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9006 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9007 			/* Set drive transmission level to 1.2V  */
9008 			/* only if the signal pre-emphasis bit is not set  */
9009 			val = tr32(MAC_SERDES_CFG);
9010 			val &= 0xfffff000;
9011 			val |= 0x880;
9012 			tw32(MAC_SERDES_CFG, val);
9013 		}
9014 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9015 			tw32(MAC_SERDES_CFG, 0x616000);
9016 	}
9017 
9018 	/* Prevent chip from dropping frames when flow control
9019 	 * is enabled.
9020 	 */
9021 	if (tg3_flag(tp, 57765_CLASS))
9022 		val = 1;
9023 	else
9024 		val = 2;
9025 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9026 
9027 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9028 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9029 		/* Use hardware link auto-negotiation */
9030 		tg3_flag_set(tp, HW_AUTONEG);
9031 	}
9032 
9033 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9034 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9035 		u32 tmp;
9036 
9037 		tmp = tr32(SERDES_RX_CTRL);
9038 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9039 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9040 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9041 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9042 	}
9043 
9044 	if (!tg3_flag(tp, USE_PHYLIB)) {
9045 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9046 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9047 			tp->link_config.speed = tp->link_config.orig_speed;
9048 			tp->link_config.duplex = tp->link_config.orig_duplex;
9049 			tp->link_config.autoneg = tp->link_config.orig_autoneg;
9050 		}
9051 
9052 		err = tg3_setup_phy(tp, 0);
9053 		if (err)
9054 			return err;
9055 
9056 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9057 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9058 			u32 tmp;
9059 
9060 			/* Clear CRC stats. */
9061 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9062 				tg3_writephy(tp, MII_TG3_TEST1,
9063 					     tmp | MII_TG3_TEST1_CRC_EN);
9064 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9065 			}
9066 		}
9067 	}
9068 
9069 	__tg3_set_rx_mode(tp->dev);
9070 
9071 	/* Initialize receive rules. */
9072 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9073 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9074 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9075 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9076 
9077 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9078 		limit = 8;
9079 	else
9080 		limit = 16;
9081 	if (tg3_flag(tp, ENABLE_ASF))
9082 		limit -= 4;
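	/* The cases below fall through by design: starting at the computed
	 * limit, every unused rule/value pair from slot limit - 1 down to
	 * slot 4 is cleared (slots 0-3 are either programmed above or
	 * deliberately left alone).
	 */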
9083 	switch (limit) {
9084 	case 16:
9085 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9086 	case 15:
9087 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9088 	case 14:
9089 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9090 	case 13:
9091 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9092 	case 12:
9093 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9094 	case 11:
9095 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9096 	case 10:
9097 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9098 	case 9:
9099 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9100 	case 8:
9101 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9102 	case 7:
9103 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9104 	case 6:
9105 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9106 	case 5:
9107 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9108 	case 4:
9109 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9110 	case 3:
9111 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9112 	case 2:
9113 	case 1:
9114 
9115 	default:
9116 		break;
9117 	}
9118 
9119 	if (tg3_flag(tp, ENABLE_APE))
9120 		/* Write our heartbeat update interval to APE. */
9121 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9122 				APE_HOST_HEARTBEAT_INT_DISABLE);
9123 
9124 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9125 
9126 	return 0;
9127 }
9128 
9129 /* Called at device open time to get the chip ready for
9130  * packet processing.  Invoked with tp->lock held.
9131  */
9132 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9133 {
9134 	tg3_switch_clocks(tp);
9135 
9136 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9137 
9138 	return tg3_reset_hw(tp, reset_phy);
9139 }
9140 
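/* Fold a 32-bit hardware counter into a 64-bit software accumulator.
 * An unsigned wraparound of ->low (the sum coming out smaller than the
 * value just added) signals a carry into ->high.
 */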
9141 #define TG3_STAT_ADD32(PSTAT, REG) \
9142 do {	u32 __val = tr32(REG); \
9143 	(PSTAT)->low += __val; \
9144 	if ((PSTAT)->low < __val) \
9145 		(PSTAT)->high += 1; \
9146 } while (0)
9147 
9148 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9149 {
9150 	struct tg3_hw_stats *sp = tp->hw_stats;
9151 
9152 	if (!netif_carrier_ok(tp->dev))
9153 		return;
9154 
9155 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9156 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9157 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9158 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9159 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9160 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9161 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9162 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9163 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9164 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9165 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9166 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9167 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9168 
9169 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9170 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9171 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9172 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9173 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9174 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9175 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9176 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9177 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9178 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9179 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9180 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9181 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9182 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9183 
9184 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
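	/* The special-casing below suggests (an assumption, not documented
	 * here) that these chip revisions lack a usable RCVLPC discard
	 * counter, so mbuf low-water-mark attentions are counted as
	 * discards instead.
	 */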
9185 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9186 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9187 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9188 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9189 	} else {
9190 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9191 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9192 		if (val) {
9193 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9194 			sp->rx_discards.low += val;
9195 			if (sp->rx_discards.low < val)
9196 				sp->rx_discards.high += 1;
9197 		}
9198 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9199 	}
9200 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9201 }
9202 
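/* Work around MSIs that the chip posted but the host apparently never
 * serviced: if a vector still has work pending while its rx/tx consumer
 * indices have not moved since the last tick, and a one-tick grace
 * period has elapsed, invoke the MSI handler by hand.
 */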
9203 static void tg3_chk_missed_msi(struct tg3 *tp)
9204 {
9205 	u32 i;
9206 
9207 	for (i = 0; i < tp->irq_cnt; i++) {
9208 		struct tg3_napi *tnapi = &tp->napi[i];
9209 
9210 		if (tg3_has_work(tnapi)) {
9211 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9212 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9213 				if (tnapi->chk_msi_cnt < 1) {
9214 					tnapi->chk_msi_cnt++;
9215 					return;
9216 				}
9217 				tg3_msi(0, tnapi);
9218 			}
9219 		}
9220 		tnapi->chk_msi_cnt = 0;
9221 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9222 		tnapi->last_tx_cons = tnapi->tx_cons;
9223 	}
9224 }
9225 
9226 static void tg3_timer(unsigned long __opaque)
9227 {
9228 	struct tg3 *tp = (struct tg3 *) __opaque;
9229 
9230 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9231 		goto restart_timer;
9232 
9233 	spin_lock(&tp->lock);
9234 
9235 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9236 	    tg3_flag(tp, 57765_CLASS))
9237 		tg3_chk_missed_msi(tp);
9238 
9239 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9240 		/* All of this garbage is because, when using non-tagged
9241 		 * IRQ status, the mailbox/status_block protocol the chip
9242 		 * uses with the cpu is race prone.
9243 		 */
9244 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9245 			tw32(GRC_LOCAL_CTRL,
9246 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9247 		} else {
9248 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9249 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9250 		}
9251 
9252 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9253 			spin_unlock(&tp->lock);
9254 			tg3_reset_task_schedule(tp);
9255 			goto restart_timer;
9256 		}
9257 	}
9258 
9259 	/* This part only runs once per second. */
9260 	if (!--tp->timer_counter) {
9261 		if (tg3_flag(tp, 5705_PLUS))
9262 			tg3_periodic_fetch_stats(tp);
9263 
9264 		if (tp->setlpicnt && !--tp->setlpicnt)
9265 			tg3_phy_eee_enable(tp);
9266 
9267 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9268 			u32 mac_stat;
9269 			int phy_event;
9270 
9271 			mac_stat = tr32(MAC_STATUS);
9272 
9273 			phy_event = 0;
9274 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9275 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9276 					phy_event = 1;
9277 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9278 				phy_event = 1;
9279 
9280 			if (phy_event)
9281 				tg3_setup_phy(tp, 0);
9282 		} else if (tg3_flag(tp, POLL_SERDES)) {
9283 			u32 mac_stat = tr32(MAC_STATUS);
9284 			int need_setup = 0;
9285 
9286 			if (netif_carrier_ok(tp->dev) &&
9287 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9288 				need_setup = 1;
9289 			}
9290 			if (!netif_carrier_ok(tp->dev) &&
9291 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9292 					 MAC_STATUS_SIGNAL_DET))) {
9293 				need_setup = 1;
9294 			}
9295 			if (need_setup) {
9296 				if (!tp->serdes_counter) {
9297 					tw32_f(MAC_MODE,
9298 					     (tp->mac_mode &
9299 					      ~MAC_MODE_PORT_MODE_MASK));
9300 					udelay(40);
9301 					tw32_f(MAC_MODE, tp->mac_mode);
9302 					udelay(40);
9303 				}
9304 				tg3_setup_phy(tp, 0);
9305 			}
9306 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9307 			   tg3_flag(tp, 5780_CLASS)) {
9308 			tg3_serdes_parallel_detect(tp);
9309 		}
9310 
9311 		tp->timer_counter = tp->timer_multiplier;
9312 	}
9313 
9314 	/* Heartbeat is only sent once every 2 seconds.
9315 	 *
9316 	 * The heartbeat is to tell the ASF firmware that the host
9317 	 * driver is still alive.  In the event that the OS crashes,
9318 	 * ASF needs to reset the hardware to free up the FIFO space
9319 	 * that may be filled with rx packets destined for the host.
9320 	 * If the FIFO is full, ASF will no longer function properly.
9321 	 *
9322 	 * Unintended resets have been reported on real time kernels
9323 	 * where the timer doesn't run on time.  Netpoll will also have
9324 	 * the same problem.
9325 	 *
9326 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9327 	 * to check the ring condition when the heartbeat is expiring
9328 	 * before doing the reset.  This will prevent most unintended
9329 	 * resets.
9330 	 */
9331 	if (!--tp->asf_counter) {
9332 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9333 			tg3_wait_for_event_ack(tp);
9334 
9335 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9336 				      FWCMD_NICDRV_ALIVE3);
9337 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9338 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9339 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9340 
9341 			tg3_generate_fw_event(tp);
9342 		}
9343 		tp->asf_counter = tp->asf_multiplier;
9344 	}
9345 
9346 	spin_unlock(&tp->lock);
9347 
9348 restart_timer:
9349 	tp->timer.expires = jiffies + tp->timer_offset;
9350 	add_timer(&tp->timer);
9351 }
9352 
9353 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9354 {
9355 	irq_handler_t fn;
9356 	unsigned long flags;
9357 	char *name;
9358 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9359 
9360 	if (tp->irq_cnt == 1)
9361 		name = tp->dev->name;
9362 	else {
9363 		name = &tnapi->irq_lbl[0];
9364 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9365 		name[IFNAMSIZ-1] = 0;
9366 	}
9367 
9368 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9369 		fn = tg3_msi;
9370 		if (tg3_flag(tp, 1SHOT_MSI))
9371 			fn = tg3_msi_1shot;
9372 		flags = 0;
9373 	} else {
9374 		fn = tg3_interrupt;
9375 		if (tg3_flag(tp, TAGGED_STATUS))
9376 			fn = tg3_interrupt_tagged;
9377 		flags = IRQF_SHARED;
9378 	}
9379 
9380 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9381 }
9382 
9383 static int tg3_test_interrupt(struct tg3 *tp)
9384 {
9385 	struct tg3_napi *tnapi = &tp->napi[0];
9386 	struct net_device *dev = tp->dev;
9387 	int err, i, intr_ok = 0;
9388 	u32 val;
9389 
9390 	if (!netif_running(dev))
9391 		return -ENODEV;
9392 
9393 	tg3_disable_ints(tp);
9394 
9395 	free_irq(tnapi->irq_vec, tnapi);
9396 
9397 	/*
9398 	 * Turn off MSI one shot mode.  Otherwise this test has no
9399 	 * observable way to know whether the interrupt was delivered.
9400 	 */
9401 	if (tg3_flag(tp, 57765_PLUS)) {
9402 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9403 		tw32(MSGINT_MODE, val);
9404 	}
9405 
9406 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9407 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9408 	if (err)
9409 		return err;
9410 
9411 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9412 	tg3_enable_ints(tp);
9413 
9414 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9415 	       tnapi->coal_now);
9416 
9417 	for (i = 0; i < 5; i++) {
9418 		u32 int_mbox, misc_host_ctrl;
9419 
9420 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9421 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9422 
9423 		if ((int_mbox != 0) ||
9424 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9425 			intr_ok = 1;
9426 			break;
9427 		}
9428 
9429 		if (tg3_flag(tp, 57765_PLUS) &&
9430 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9431 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9432 
9433 		msleep(10);
9434 	}
9435 
9436 	tg3_disable_ints(tp);
9437 
9438 	free_irq(tnapi->irq_vec, tnapi);
9439 
9440 	err = tg3_request_irq(tp, 0);
9441 
9442 	if (err)
9443 		return err;
9444 
9445 	if (intr_ok) {
9446 		/* Reenable MSI one shot mode. */
9447 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9448 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9449 			tw32(MSGINT_MODE, val);
9450 		}
9451 		return 0;
9452 	}
9453 
9454 	return -EIO;
9455 }
9456 
9457 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
9458  * successfully restored.
9459  */
9460 static int tg3_test_msi(struct tg3 *tp)
9461 {
9462 	int err;
9463 	u16 pci_cmd;
9464 
9465 	if (!tg3_flag(tp, USING_MSI))
9466 		return 0;
9467 
9468 	/* Turn off SERR reporting in case MSI terminates with Master
9469 	 * Abort.
9470 	 */
9471 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9472 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9473 			      pci_cmd & ~PCI_COMMAND_SERR);
9474 
9475 	err = tg3_test_interrupt(tp);
9476 
9477 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9478 
9479 	if (!err)
9480 		return 0;
9481 
9482 	/* other failures */
9483 	if (err != -EIO)
9484 		return err;
9485 
9486 	/* MSI test failed, go back to INTx mode */
9487 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9488 		    "to INTx mode. Please report this failure to the PCI "
9489 		    "maintainer and include system chipset information\n");
9490 
9491 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9492 
9493 	pci_disable_msi(tp->pdev);
9494 
9495 	tg3_flag_clear(tp, USING_MSI);
9496 	tp->napi[0].irq_vec = tp->pdev->irq;
9497 
9498 	err = tg3_request_irq(tp, 0);
9499 	if (err)
9500 		return err;
9501 
9502 	/* Need to reset the chip because the MSI cycle may have terminated
9503 	 * with Master Abort.
9504 	 */
9505 	tg3_full_lock(tp, 1);
9506 
9507 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9508 	err = tg3_init_hw(tp, 1);
9509 
9510 	tg3_full_unlock(tp);
9511 
9512 	if (err)
9513 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9514 
9515 	return err;
9516 }
9517 
9518 static int tg3_request_firmware(struct tg3 *tp)
9519 {
9520 	const __be32 *fw_data;
9521 
9522 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9523 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9524 			   tp->fw_needed);
9525 		return -ENOENT;
9526 	}
9527 
9528 	fw_data = (void *)tp->fw->data;
9529 
9530 	/* Firmware blob starts with version numbers, followed by
9531 	 * start address and _full_ length including BSS sections
9532 	 * (which must be longer than the actual data, of course).
9533 	 */
9534 
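	/* A sketch of the assumed header layout (three big-endian words):
	 *	fw_data[0]	firmware version
	 *	fw_data[1]	load address
	 *	fw_data[2]	full image length, including BSS
	 */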
9535 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9536 	if (tp->fw_len < (tp->fw->size - 12)) {
9537 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9538 			   tp->fw_len, tp->fw_needed);
9539 		release_firmware(tp->fw);
9540 		tp->fw = NULL;
9541 		return -EINVAL;
9542 	}
9543 
9544 	/* We no longer need firmware; we have it. */
9545 	tp->fw_needed = NULL;
9546 	return 0;
9547 }
9548 
9549 static bool tg3_enable_msix(struct tg3 *tp)
9550 {
9551 	int i, rc, cpus = num_online_cpus();
9552 	struct msix_entry msix_ent[tp->irq_max];
9553 
9554 	if (cpus == 1)
9555 		/* Just fall back to the simpler MSI mode. */
9556 		return false;
9557 
9558 	/*
9559 	 * We want as many rx rings enabled as there are cpus.
9560 	 * The first MSIX vector only deals with link interrupts, etc,
9561 	 * so we add one to the number of vectors we are requesting.
9562 	 */
9563 	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9564 
9565 	for (i = 0; i < tp->irq_max; i++) {
9566 		msix_ent[i].entry  = i;
9567 		msix_ent[i].vector = 0;
9568 	}
9569 
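	/* Under the legacy pci_enable_msix() contract a negative return is
	 * a hard failure, while a positive return is the number of vectors
	 * that could actually have been allocated; retry with that smaller
	 * count.
	 */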
9570 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9571 	if (rc < 0) {
9572 		return false;
9573 	} else if (rc != 0) {
9574 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9575 			return false;
9576 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9577 			      tp->irq_cnt, rc);
9578 		tp->irq_cnt = rc;
9579 	}
9580 
9581 	for (i = 0; i < tp->irq_max; i++)
9582 		tp->napi[i].irq_vec = msix_ent[i].vector;
9583 
9584 	netif_set_real_num_tx_queues(tp->dev, 1);
9585 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9586 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9587 		pci_disable_msix(tp->pdev);
9588 		return false;
9589 	}
9590 
9591 	if (tp->irq_cnt > 1) {
9592 		tg3_flag_set(tp, ENABLE_RSS);
9593 
9594 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9595 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9596 			tg3_flag_set(tp, ENABLE_TSS);
9597 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9598 		}
9599 	}
9600 
9601 	return true;
9602 }
9603 
9604 static void tg3_ints_init(struct tg3 *tp)
9605 {
9606 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9607 	    !tg3_flag(tp, TAGGED_STATUS)) {
9608 		/* All MSI supporting chips should support tagged
9609 		 * status.  Warn and fall back to INTx if not.
9610 		 */
9611 		netdev_warn(tp->dev,
9612 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9613 		goto defcfg;
9614 	}
9615 
9616 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9617 		tg3_flag_set(tp, USING_MSIX);
9618 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9619 		tg3_flag_set(tp, USING_MSI);
9620 
9621 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9622 		u32 msi_mode = tr32(MSGINT_MODE);
9623 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9624 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9625 		if (!tg3_flag(tp, 1SHOT_MSI))
9626 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9627 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9628 	}
9629 defcfg:
9630 	if (!tg3_flag(tp, USING_MSIX)) {
9631 		tp->irq_cnt = 1;
9632 		tp->napi[0].irq_vec = tp->pdev->irq;
9633 		netif_set_real_num_tx_queues(tp->dev, 1);
9634 		netif_set_real_num_rx_queues(tp->dev, 1);
9635 	}
9636 }
9637 
9638 static void tg3_ints_fini(struct tg3 *tp)
9639 {
9640 	if (tg3_flag(tp, USING_MSIX))
9641 		pci_disable_msix(tp->pdev);
9642 	else if (tg3_flag(tp, USING_MSI))
9643 		pci_disable_msi(tp->pdev);
9644 	tg3_flag_clear(tp, USING_MSI);
9645 	tg3_flag_clear(tp, USING_MSIX);
9646 	tg3_flag_clear(tp, ENABLE_RSS);
9647 	tg3_flag_clear(tp, ENABLE_TSS);
9648 }
9649 
9650 static int tg3_open(struct net_device *dev)
9651 {
9652 	struct tg3 *tp = netdev_priv(dev);
9653 	int i, err;
9654 
9655 	if (tp->fw_needed) {
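		/* The 5701_A0 fix-up firmware is mandatory, so a load
		 * failure is fatal there; if only the TSO firmware is
		 * missing we can fall back to non-TSO operation.
		 */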
9656 		err = tg3_request_firmware(tp);
9657 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9658 			if (err)
9659 				return err;
9660 		} else if (err) {
9661 			netdev_warn(tp->dev, "TSO capability disabled\n");
9662 			tg3_flag_clear(tp, TSO_CAPABLE);
9663 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9664 			netdev_notice(tp->dev, "TSO capability restored\n");
9665 			tg3_flag_set(tp, TSO_CAPABLE);
9666 		}
9667 	}
9668 
9669 	netif_carrier_off(tp->dev);
9670 
9671 	err = tg3_power_up(tp);
9672 	if (err)
9673 		return err;
9674 
9675 	tg3_full_lock(tp, 0);
9676 
9677 	tg3_disable_ints(tp);
9678 	tg3_flag_clear(tp, INIT_COMPLETE);
9679 
9680 	tg3_full_unlock(tp);
9681 
9682 	/*
9683 	 * Set up interrupts first so we know how
9684 	 * many NAPI resources to allocate.
9685 	 */
9686 	tg3_ints_init(tp);
9687 
9688 	tg3_rss_check_indir_tbl(tp);
9689 
9690 	/* The placement of this call is tied
9691 	 * to the setup and use of Host TX descriptors.
9692 	 */
9693 	err = tg3_alloc_consistent(tp);
9694 	if (err)
9695 		goto err_out1;
9696 
9697 	tg3_napi_init(tp);
9698 
9699 	tg3_napi_enable(tp);
9700 
9701 	for (i = 0; i < tp->irq_cnt; i++) {
9702 		struct tg3_napi *tnapi = &tp->napi[i];
9703 		err = tg3_request_irq(tp, i);
9704 		if (err) {
9705 			for (i--; i >= 0; i--) {
9706 				tnapi = &tp->napi[i];
9707 				free_irq(tnapi->irq_vec, tnapi);
9708 			}
9709 			goto err_out2;
9710 		}
9711 	}
9712 
9713 	tg3_full_lock(tp, 0);
9714 
9715 	err = tg3_init_hw(tp, 1);
9716 	if (err) {
9717 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9718 		tg3_free_rings(tp);
9719 	} else {
9720 		if (tg3_flag(tp, TAGGED_STATUS) &&
9721 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9722 		    !tg3_flag(tp, 57765_CLASS))
9723 			tp->timer_offset = HZ;
9724 		else
9725 			tp->timer_offset = HZ / 10;
9726 
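		/* The counters below are expressed in timer ticks so that
		 * the once-per-second stats path and the every-2-seconds
		 * ASF heartbeat fire at the same real-time rate whether
		 * the timer period is HZ or HZ / 10.
		 */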
9727 		BUG_ON(tp->timer_offset > HZ);
9728 		tp->timer_counter = tp->timer_multiplier =
9729 			(HZ / tp->timer_offset);
9730 		tp->asf_counter = tp->asf_multiplier =
9731 			((HZ / tp->timer_offset) * 2);
9732 
9733 		init_timer(&tp->timer);
9734 		tp->timer.expires = jiffies + tp->timer_offset;
9735 		tp->timer.data = (unsigned long) tp;
9736 		tp->timer.function = tg3_timer;
9737 	}
9738 
9739 	tg3_full_unlock(tp);
9740 
9741 	if (err)
9742 		goto err_out3;
9743 
9744 	if (tg3_flag(tp, USING_MSI)) {
9745 		err = tg3_test_msi(tp);
9746 
9747 		if (err) {
9748 			tg3_full_lock(tp, 0);
9749 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9750 			tg3_free_rings(tp);
9751 			tg3_full_unlock(tp);
9752 
9753 			goto err_out2;
9754 		}
9755 
9756 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9757 			u32 val = tr32(PCIE_TRANSACTION_CFG);
9758 
9759 			tw32(PCIE_TRANSACTION_CFG,
9760 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
9761 		}
9762 	}
9763 
9764 	tg3_phy_start(tp);
9765 
9766 	tg3_full_lock(tp, 0);
9767 
9768 	add_timer(&tp->timer);
9769 	tg3_flag_set(tp, INIT_COMPLETE);
9770 	tg3_enable_ints(tp);
9771 
9772 	tg3_full_unlock(tp);
9773 
9774 	netif_tx_start_all_queues(dev);
9775 
9776 	/*
9777 	 * Reset the loopback feature if it was turned on while the device
9778 	 * was down; make sure that it is installed properly now.
9779 	 */
9780 	if (dev->features & NETIF_F_LOOPBACK)
9781 		tg3_set_loopback(dev, dev->features);
9782 
9783 	return 0;
9784 
9785 err_out3:
9786 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9787 		struct tg3_napi *tnapi = &tp->napi[i];
9788 		free_irq(tnapi->irq_vec, tnapi);
9789 	}
9790 
9791 err_out2:
9792 	tg3_napi_disable(tp);
9793 	tg3_napi_fini(tp);
9794 	tg3_free_consistent(tp);
9795 
9796 err_out1:
9797 	tg3_ints_fini(tp);
9798 	tg3_frob_aux_power(tp, false);
9799 	pci_set_power_state(tp->pdev, PCI_D3hot);
9800 	return err;
9801 }
9802 
9803 static int tg3_close(struct net_device *dev)
9804 {
9805 	int i;
9806 	struct tg3 *tp = netdev_priv(dev);
9807 
9808 	tg3_napi_disable(tp);
9809 	tg3_reset_task_cancel(tp);
9810 
9811 	netif_tx_stop_all_queues(dev);
9812 
9813 	del_timer_sync(&tp->timer);
9814 
9815 	tg3_phy_stop(tp);
9816 
9817 	tg3_full_lock(tp, 1);
9818 
9819 	tg3_disable_ints(tp);
9820 
9821 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9822 	tg3_free_rings(tp);
9823 	tg3_flag_clear(tp, INIT_COMPLETE);
9824 
9825 	tg3_full_unlock(tp);
9826 
9827 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9828 		struct tg3_napi *tnapi = &tp->napi[i];
9829 		free_irq(tnapi->irq_vec, tnapi);
9830 	}
9831 
9832 	tg3_ints_fini(tp);
9833 
9834 	/* Clear stats across close / open calls */
9835 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9836 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9837 
9838 	tg3_napi_fini(tp);
9839 
9840 	tg3_free_consistent(tp);
9841 
9842 	tg3_power_down(tp);
9843 
9844 	netif_carrier_off(tp->dev);
9845 
9846 	return 0;
9847 }
9848 
9849 static inline u64 get_stat64(tg3_stat64_t *val)
9850 {
9851 	return ((u64)val->high << 32) | ((u64)val->low);
9852 }
9853 
9854 static u64 calc_crc_errors(struct tg3 *tp)
9855 {
9856 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9857 
9858 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9859 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9860 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9861 		u32 val;
9862 
9863 		spin_lock_bh(&tp->lock);
9864 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9865 			tg3_writephy(tp, MII_TG3_TEST1,
9866 				     val | MII_TG3_TEST1_CRC_EN);
9867 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9868 		} else
9869 			val = 0;
9870 		spin_unlock_bh(&tp->lock);
9871 
9872 		tp->phy_crc_errors += val;
9873 
9874 		return tp->phy_crc_errors;
9875 	}
9876 
9877 	return get_stat64(&hw_stats->rx_fcs_errors);
9878 }
9879 
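/* Each ethtool counter is reported as the previously saved snapshot
 * (estats_prev) plus the live hardware counter, so values accumulate
 * across chip resets.
 */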
9880 #define ESTAT_ADD(member) \
9881 	estats->member =	old_estats->member + \
9882 				get_stat64(&hw_stats->member)
9883 
9884 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
9885 					       struct tg3_ethtool_stats *estats)
9886 {
9887 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9888 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9889 
9890 	if (!hw_stats)
9891 		return old_estats;
9892 
9893 	ESTAT_ADD(rx_octets);
9894 	ESTAT_ADD(rx_fragments);
9895 	ESTAT_ADD(rx_ucast_packets);
9896 	ESTAT_ADD(rx_mcast_packets);
9897 	ESTAT_ADD(rx_bcast_packets);
9898 	ESTAT_ADD(rx_fcs_errors);
9899 	ESTAT_ADD(rx_align_errors);
9900 	ESTAT_ADD(rx_xon_pause_rcvd);
9901 	ESTAT_ADD(rx_xoff_pause_rcvd);
9902 	ESTAT_ADD(rx_mac_ctrl_rcvd);
9903 	ESTAT_ADD(rx_xoff_entered);
9904 	ESTAT_ADD(rx_frame_too_long_errors);
9905 	ESTAT_ADD(rx_jabbers);
9906 	ESTAT_ADD(rx_undersize_packets);
9907 	ESTAT_ADD(rx_in_length_errors);
9908 	ESTAT_ADD(rx_out_length_errors);
9909 	ESTAT_ADD(rx_64_or_less_octet_packets);
9910 	ESTAT_ADD(rx_65_to_127_octet_packets);
9911 	ESTAT_ADD(rx_128_to_255_octet_packets);
9912 	ESTAT_ADD(rx_256_to_511_octet_packets);
9913 	ESTAT_ADD(rx_512_to_1023_octet_packets);
9914 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9915 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9916 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9917 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9918 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
9919 
9920 	ESTAT_ADD(tx_octets);
9921 	ESTAT_ADD(tx_collisions);
9922 	ESTAT_ADD(tx_xon_sent);
9923 	ESTAT_ADD(tx_xoff_sent);
9924 	ESTAT_ADD(tx_flow_control);
9925 	ESTAT_ADD(tx_mac_errors);
9926 	ESTAT_ADD(tx_single_collisions);
9927 	ESTAT_ADD(tx_mult_collisions);
9928 	ESTAT_ADD(tx_deferred);
9929 	ESTAT_ADD(tx_excessive_collisions);
9930 	ESTAT_ADD(tx_late_collisions);
9931 	ESTAT_ADD(tx_collide_2times);
9932 	ESTAT_ADD(tx_collide_3times);
9933 	ESTAT_ADD(tx_collide_4times);
9934 	ESTAT_ADD(tx_collide_5times);
9935 	ESTAT_ADD(tx_collide_6times);
9936 	ESTAT_ADD(tx_collide_7times);
9937 	ESTAT_ADD(tx_collide_8times);
9938 	ESTAT_ADD(tx_collide_9times);
9939 	ESTAT_ADD(tx_collide_10times);
9940 	ESTAT_ADD(tx_collide_11times);
9941 	ESTAT_ADD(tx_collide_12times);
9942 	ESTAT_ADD(tx_collide_13times);
9943 	ESTAT_ADD(tx_collide_14times);
9944 	ESTAT_ADD(tx_collide_15times);
9945 	ESTAT_ADD(tx_ucast_packets);
9946 	ESTAT_ADD(tx_mcast_packets);
9947 	ESTAT_ADD(tx_bcast_packets);
9948 	ESTAT_ADD(tx_carrier_sense_errors);
9949 	ESTAT_ADD(tx_discards);
9950 	ESTAT_ADD(tx_errors);
9951 
9952 	ESTAT_ADD(dma_writeq_full);
9953 	ESTAT_ADD(dma_write_prioq_full);
9954 	ESTAT_ADD(rxbds_empty);
9955 	ESTAT_ADD(rx_discards);
9956 	ESTAT_ADD(rx_errors);
9957 	ESTAT_ADD(rx_threshold_hit);
9958 
9959 	ESTAT_ADD(dma_readq_full);
9960 	ESTAT_ADD(dma_read_prioq_full);
9961 	ESTAT_ADD(tx_comp_queue_full);
9962 
9963 	ESTAT_ADD(ring_set_send_prod_index);
9964 	ESTAT_ADD(ring_status_update);
9965 	ESTAT_ADD(nic_irqs);
9966 	ESTAT_ADD(nic_avoided_irqs);
9967 	ESTAT_ADD(nic_tx_threshold_hit);
9968 
9969 	ESTAT_ADD(mbuf_lwm_thresh_hit);
9970 
9971 	return estats;
9972 }
9973 
9974 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9975 						 struct rtnl_link_stats64 *stats)
9976 {
9977 	struct tg3 *tp = netdev_priv(dev);
9978 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9979 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9980 
9981 	if (!hw_stats)
9982 		return old_stats;
9983 
9984 	stats->rx_packets = old_stats->rx_packets +
9985 		get_stat64(&hw_stats->rx_ucast_packets) +
9986 		get_stat64(&hw_stats->rx_mcast_packets) +
9987 		get_stat64(&hw_stats->rx_bcast_packets);
9988 
9989 	stats->tx_packets = old_stats->tx_packets +
9990 		get_stat64(&hw_stats->tx_ucast_packets) +
9991 		get_stat64(&hw_stats->tx_mcast_packets) +
9992 		get_stat64(&hw_stats->tx_bcast_packets);
9993 
9994 	stats->rx_bytes = old_stats->rx_bytes +
9995 		get_stat64(&hw_stats->rx_octets);
9996 	stats->tx_bytes = old_stats->tx_bytes +
9997 		get_stat64(&hw_stats->tx_octets);
9998 
9999 	stats->rx_errors = old_stats->rx_errors +
10000 		get_stat64(&hw_stats->rx_errors);
10001 	stats->tx_errors = old_stats->tx_errors +
10002 		get_stat64(&hw_stats->tx_errors) +
10003 		get_stat64(&hw_stats->tx_mac_errors) +
10004 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10005 		get_stat64(&hw_stats->tx_discards);
10006 
10007 	stats->multicast = old_stats->multicast +
10008 		get_stat64(&hw_stats->rx_mcast_packets);
10009 	stats->collisions = old_stats->collisions +
10010 		get_stat64(&hw_stats->tx_collisions);
10011 
10012 	stats->rx_length_errors = old_stats->rx_length_errors +
10013 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10014 		get_stat64(&hw_stats->rx_undersize_packets);
10015 
10016 	stats->rx_over_errors = old_stats->rx_over_errors +
10017 		get_stat64(&hw_stats->rxbds_empty);
10018 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10019 		get_stat64(&hw_stats->rx_align_errors);
10020 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10021 		get_stat64(&hw_stats->tx_discards);
10022 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10023 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10024 
10025 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10026 		calc_crc_errors(tp);
10027 
10028 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10029 		get_stat64(&hw_stats->rx_discards);
10030 
10031 	stats->rx_dropped = tp->rx_dropped;
10032 	stats->tx_dropped = tp->tx_dropped;
10033 
10034 	return stats;
10035 }
10036 
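/* Bit-serial CRC-32 using the reflected Ethernet polynomial 0xedb88320;
 * used below to hash multicast addresses into the MAC hash filter
 * registers.
 */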
10037 static inline u32 calc_crc(unsigned char *buf, int len)
10038 {
10039 	u32 reg;
10040 	u32 tmp;
10041 	int j, k;
10042 
10043 	reg = 0xffffffff;
10044 
10045 	for (j = 0; j < len; j++) {
10046 		reg ^= buf[j];
10047 
10048 		for (k = 0; k < 8; k++) {
10049 			tmp = reg & 0x01;
10050 
10051 			reg >>= 1;
10052 
10053 			if (tmp)
10054 				reg ^= 0xedb88320;
10055 		}
10056 	}
10057 
10058 	return ~reg;
10059 }
10060 
10061 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10062 {
10063 	/* accept or reject all multicast frames */
10064 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10065 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10066 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10067 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10068 }
10069 
10070 static void __tg3_set_rx_mode(struct net_device *dev)
10071 {
10072 	struct tg3 *tp = netdev_priv(dev);
10073 	u32 rx_mode;
10074 
10075 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10076 				  RX_MODE_KEEP_VLAN_TAG);
10077 
10078 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10079 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10080 	 * flag clear.
10081 	 */
10082 	if (!tg3_flag(tp, ENABLE_ASF))
10083 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10084 #endif
10085 
10086 	if (dev->flags & IFF_PROMISC) {
10087 		/* Promiscuous mode. */
10088 		rx_mode |= RX_MODE_PROMISC;
10089 	} else if (dev->flags & IFF_ALLMULTI) {
10090 		/* Accept all multicast. */
10091 		tg3_set_multi(tp, 1);
10092 	} else if (netdev_mc_empty(dev)) {
10093 		/* Reject all multicast. */
10094 		tg3_set_multi(tp, 0);
10095 	} else {
10096 		/* Accept one or more multicast(s). */
10097 		struct netdev_hw_addr *ha;
10098 		u32 mc_filter[4] = { 0, };
10099 		u32 regidx;
10100 		u32 bit;
10101 		u32 crc;
10102 
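		/* The complement of the CRC's low 7 bits selects one of
		 * the 128 hash-filter bits: bits 6:5 pick the register,
		 * bits 4:0 the bit within it.
		 */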
10103 		netdev_for_each_mc_addr(ha, dev) {
10104 			crc = calc_crc(ha->addr, ETH_ALEN);
10105 			bit = ~crc & 0x7f;
10106 			regidx = (bit & 0x60) >> 5;
10107 			bit &= 0x1f;
10108 			mc_filter[regidx] |= (1 << bit);
10109 		}
10110 
10111 		tw32(MAC_HASH_REG_0, mc_filter[0]);
10112 		tw32(MAC_HASH_REG_1, mc_filter[1]);
10113 		tw32(MAC_HASH_REG_2, mc_filter[2]);
10114 		tw32(MAC_HASH_REG_3, mc_filter[3]);
10115 	}
10116 
10117 	if (rx_mode != tp->rx_mode) {
10118 		tp->rx_mode = rx_mode;
10119 		tw32_f(MAC_RX_MODE, rx_mode);
10120 		udelay(10);
10121 	}
10122 }
10123 
10124 static void tg3_set_rx_mode(struct net_device *dev)
10125 {
10126 	struct tg3 *tp = netdev_priv(dev);
10127 
10128 	if (!netif_running(dev))
10129 		return;
10130 
10131 	tg3_full_lock(tp, 0);
10132 	__tg3_set_rx_mode(dev);
10133 	tg3_full_unlock(tp);
10134 }
10135 
10136 static int tg3_get_regs_len(struct net_device *dev)
10137 {
10138 	return TG3_REG_BLK_SIZE;
10139 }
10140 
10141 static void tg3_get_regs(struct net_device *dev,
10142 		struct ethtool_regs *regs, void *_p)
10143 {
10144 	struct tg3 *tp = netdev_priv(dev);
10145 
10146 	regs->version = 0;
10147 
10148 	memset(_p, 0, TG3_REG_BLK_SIZE);
10149 
10150 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10151 		return;
10152 
10153 	tg3_full_lock(tp, 0);
10154 
10155 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10156 
10157 	tg3_full_unlock(tp);
10158 }
10159 
10160 static int tg3_get_eeprom_len(struct net_device *dev)
10161 {
10162 	struct tg3 *tp = netdev_priv(dev);
10163 
10164 	return tp->nvram_size;
10165 }
10166 
10167 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10168 {
10169 	struct tg3 *tp = netdev_priv(dev);
10170 	int ret;
10171 	u8  *pd;
10172 	u32 i, offset, len, b_offset, b_count;
10173 	__be32 val;
10174 
10175 	if (tg3_flag(tp, NO_NVRAM))
10176 		return -EINVAL;
10177 
10178 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10179 		return -EAGAIN;
10180 
10181 	offset = eeprom->offset;
10182 	len = eeprom->len;
10183 	eeprom->len = 0;
10184 
10185 	eeprom->magic = TG3_EEPROM_MAGIC;
10186 
10187 	if (offset & 3) {
10188 		/* adjustments to start on required 4 byte boundary */
10189 		b_offset = offset & 3;
10190 		b_count = 4 - b_offset;
10191 		if (b_count > len) {
10192 			/* i.e. offset=1 len=2 */
10193 			b_count = len;
10194 		}
10195 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10196 		if (ret)
10197 			return ret;
10198 		memcpy(data, ((char *)&val) + b_offset, b_count);
10199 		len -= b_count;
10200 		offset += b_count;
10201 		eeprom->len += b_count;
10202 	}
10203 
10204 	/* read bytes up to the last 4 byte boundary */
10205 	pd = &data[eeprom->len];
10206 	for (i = 0; i < (len - (len & 3)); i += 4) {
10207 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10208 		if (ret) {
10209 			eeprom->len += i;
10210 			return ret;
10211 		}
10212 		memcpy(pd + i, &val, 4);
10213 	}
10214 	eeprom->len += i;
10215 
10216 	if (len & 3) {
10217 		/* read last bytes not ending on 4 byte boundary */
10218 		pd = &data[eeprom->len];
10219 		b_count = len & 3;
10220 		b_offset = offset + len - b_count;
10221 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10222 		if (ret)
10223 			return ret;
10224 		memcpy(pd, &val, b_count);
10225 		eeprom->len += b_count;
10226 	}
10227 	return 0;
10228 }
10229 
10230 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10231 
10232 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10233 {
10234 	struct tg3 *tp = netdev_priv(dev);
10235 	int ret;
10236 	u32 offset, len, b_offset, odd_len;
10237 	u8 *buf;
10238 	__be32 start, end;
10239 
10240 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10241 		return -EAGAIN;
10242 
10243 	if (tg3_flag(tp, NO_NVRAM) ||
10244 	    eeprom->magic != TG3_EEPROM_MAGIC)
10245 		return -EINVAL;
10246 
10247 	offset = eeprom->offset;
10248 	len = eeprom->len;
10249 
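	/* NVRAM writes must be whole, aligned 32-bit words, so unaligned
	 * requests are widened by reading back the bordering words.  A
	 * worked example, assuming offset=5 and len=6: b_offset becomes 1,
	 * the word at 4 is read into 'start', offset/len become 4/8, and
	 * the word at 8 is read into 'end' before the merged buffer is
	 * written back.
	 */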
10250 	if ((b_offset = (offset & 3))) {
10251 		/* adjustments to start on required 4 byte boundary */
10252 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10253 		if (ret)
10254 			return ret;
10255 		len += b_offset;
10256 		offset &= ~3;
10257 		if (len < 4)
10258 			len = 4;
10259 	}
10260 
10261 	odd_len = 0;
10262 	if (len & 3) {
10263 		/* adjustments to end on required 4 byte boundary */
10264 		odd_len = 1;
10265 		len = (len + 3) & ~3;
10266 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10267 		if (ret)
10268 			return ret;
10269 	}
10270 
10271 	buf = data;
10272 	if (b_offset || odd_len) {
10273 		buf = kmalloc(len, GFP_KERNEL);
10274 		if (!buf)
10275 			return -ENOMEM;
10276 		if (b_offset)
10277 			memcpy(buf, &start, 4);
10278 		if (odd_len)
10279 			memcpy(buf+len-4, &end, 4);
10280 		memcpy(buf + b_offset, data, eeprom->len);
10281 	}
10282 
10283 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10284 
10285 	if (buf != data)
10286 		kfree(buf);
10287 
10288 	return ret;
10289 }
10290 
10291 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10292 {
10293 	struct tg3 *tp = netdev_priv(dev);
10294 
10295 	if (tg3_flag(tp, USE_PHYLIB)) {
10296 		struct phy_device *phydev;
10297 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10298 			return -EAGAIN;
10299 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10300 		return phy_ethtool_gset(phydev, cmd);
10301 	}
10302 
10303 	cmd->supported = (SUPPORTED_Autoneg);
10304 
10305 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10306 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10307 				   SUPPORTED_1000baseT_Full);
10308 
10309 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10310 		cmd->supported |= (SUPPORTED_100baseT_Half |
10311 				  SUPPORTED_100baseT_Full |
10312 				  SUPPORTED_10baseT_Half |
10313 				  SUPPORTED_10baseT_Full |
10314 				  SUPPORTED_TP);
10315 		cmd->port = PORT_TP;
10316 	} else {
10317 		cmd->supported |= SUPPORTED_FIBRE;
10318 		cmd->port = PORT_FIBRE;
10319 	}
10320 
10321 	cmd->advertising = tp->link_config.advertising;
10322 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10323 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10324 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10325 				cmd->advertising |= ADVERTISED_Pause;
10326 			} else {
10327 				cmd->advertising |= ADVERTISED_Pause |
10328 						    ADVERTISED_Asym_Pause;
10329 			}
10330 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10331 			cmd->advertising |= ADVERTISED_Asym_Pause;
10332 		}
10333 	}
10334 	if (netif_running(dev) && netif_carrier_ok(dev)) {
10335 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10336 		cmd->duplex = tp->link_config.active_duplex;
10337 		cmd->lp_advertising = tp->link_config.rmt_adv;
10338 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10339 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10340 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10341 			else
10342 				cmd->eth_tp_mdix = ETH_TP_MDI;
10343 		}
10344 	} else {
10345 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10346 		cmd->duplex = DUPLEX_INVALID;
10347 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10348 	}
10349 	cmd->phy_address = tp->phy_addr;
10350 	cmd->transceiver = XCVR_INTERNAL;
10351 	cmd->autoneg = tp->link_config.autoneg;
10352 	cmd->maxtxpkt = 0;
10353 	cmd->maxrxpkt = 0;
10354 	return 0;
10355 }
10356 
10357 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10358 {
10359 	struct tg3 *tp = netdev_priv(dev);
10360 	u32 speed = ethtool_cmd_speed(cmd);
10361 
10362 	if (tg3_flag(tp, USE_PHYLIB)) {
10363 		struct phy_device *phydev;
10364 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10365 			return -EAGAIN;
10366 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10367 		return phy_ethtool_sset(phydev, cmd);
10368 	}
10369 
10370 	if (cmd->autoneg != AUTONEG_ENABLE &&
10371 	    cmd->autoneg != AUTONEG_DISABLE)
10372 		return -EINVAL;
10373 
10374 	if (cmd->autoneg == AUTONEG_DISABLE &&
10375 	    cmd->duplex != DUPLEX_FULL &&
10376 	    cmd->duplex != DUPLEX_HALF)
10377 		return -EINVAL;
10378 
10379 	if (cmd->autoneg == AUTONEG_ENABLE) {
10380 		u32 mask = ADVERTISED_Autoneg |
10381 			   ADVERTISED_Pause |
10382 			   ADVERTISED_Asym_Pause;
10383 
10384 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10385 			mask |= ADVERTISED_1000baseT_Half |
10386 				ADVERTISED_1000baseT_Full;
10387 
10388 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10389 			mask |= ADVERTISED_100baseT_Half |
10390 				ADVERTISED_100baseT_Full |
10391 				ADVERTISED_10baseT_Half |
10392 				ADVERTISED_10baseT_Full |
10393 				ADVERTISED_TP;
10394 		else
10395 			mask |= ADVERTISED_FIBRE;
10396 
10397 		if (cmd->advertising & ~mask)
10398 			return -EINVAL;
10399 
10400 		mask &= (ADVERTISED_1000baseT_Half |
10401 			 ADVERTISED_1000baseT_Full |
10402 			 ADVERTISED_100baseT_Half |
10403 			 ADVERTISED_100baseT_Full |
10404 			 ADVERTISED_10baseT_Half |
10405 			 ADVERTISED_10baseT_Full);
10406 
10407 		cmd->advertising &= mask;
10408 	} else {
10409 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10410 			if (speed != SPEED_1000)
10411 				return -EINVAL;
10412 
10413 			if (cmd->duplex != DUPLEX_FULL)
10414 				return -EINVAL;
10415 		} else {
10416 			if (speed != SPEED_100 &&
10417 			    speed != SPEED_10)
10418 				return -EINVAL;
10419 		}
10420 	}
10421 
10422 	tg3_full_lock(tp, 0);
10423 
10424 	tp->link_config.autoneg = cmd->autoneg;
10425 	if (cmd->autoneg == AUTONEG_ENABLE) {
10426 		tp->link_config.advertising = (cmd->advertising |
10427 					      ADVERTISED_Autoneg);
10428 		tp->link_config.speed = SPEED_INVALID;
10429 		tp->link_config.duplex = DUPLEX_INVALID;
10430 	} else {
10431 		tp->link_config.advertising = 0;
10432 		tp->link_config.speed = speed;
10433 		tp->link_config.duplex = cmd->duplex;
10434 	}
10435 
10436 	tp->link_config.orig_speed = tp->link_config.speed;
10437 	tp->link_config.orig_duplex = tp->link_config.duplex;
10438 	tp->link_config.orig_autoneg = tp->link_config.autoneg;
10439 
10440 	if (netif_running(dev))
10441 		tg3_setup_phy(tp, 1);
10442 
10443 	tg3_full_unlock(tp);
10444 
10445 	return 0;
10446 }
10447 
10448 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10449 {
10450 	struct tg3 *tp = netdev_priv(dev);
10451 
10452 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10453 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10454 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10455 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10456 }
10457 
10458 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10459 {
10460 	struct tg3 *tp = netdev_priv(dev);
10461 
10462 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10463 		wol->supported = WAKE_MAGIC;
10464 	else
10465 		wol->supported = 0;
10466 	wol->wolopts = 0;
10467 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10468 		wol->wolopts = WAKE_MAGIC;
10469 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10470 }
10471 
10472 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10473 {
10474 	struct tg3 *tp = netdev_priv(dev);
10475 	struct device *dp = &tp->pdev->dev;
10476 
10477 	if (wol->wolopts & ~WAKE_MAGIC)
10478 		return -EINVAL;
10479 	if ((wol->wolopts & WAKE_MAGIC) &&
10480 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10481 		return -EINVAL;
10482 
10483 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10484 
10485 	spin_lock_bh(&tp->lock);
10486 	if (device_may_wakeup(dp))
10487 		tg3_flag_set(tp, WOL_ENABLE);
10488 	else
10489 		tg3_flag_clear(tp, WOL_ENABLE);
10490 	spin_unlock_bh(&tp->lock);
10491 
10492 	return 0;
10493 }
10494 
10495 static u32 tg3_get_msglevel(struct net_device *dev)
10496 {
10497 	struct tg3 *tp = netdev_priv(dev);
10498 	return tp->msg_enable;
10499 }
10500 
10501 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10502 {
10503 	struct tg3 *tp = netdev_priv(dev);
10504 	tp->msg_enable = value;
10505 }
10506 
10507 static int tg3_nway_reset(struct net_device *dev)
10508 {
10509 	struct tg3 *tp = netdev_priv(dev);
10510 	int r;
10511 
10512 	if (!netif_running(dev))
10513 		return -EAGAIN;
10514 
10515 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10516 		return -EINVAL;
10517 
10518 	if (tg3_flag(tp, USE_PHYLIB)) {
10519 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10520 			return -EAGAIN;
10521 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10522 	} else {
10523 		u32 bmcr;
10524 
10525 		spin_lock_bh(&tp->lock);
10526 		r = -EINVAL;
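		/* The first read below is deliberately discarded;
		 * presumably it flushes a latched value so that the second
		 * read returns the current BMCR state.
		 */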
10527 		tg3_readphy(tp, MII_BMCR, &bmcr);
10528 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10529 		    ((bmcr & BMCR_ANENABLE) ||
10530 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10531 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10532 						   BMCR_ANENABLE);
10533 			r = 0;
10534 		}
10535 		spin_unlock_bh(&tp->lock);
10536 	}
10537 
10538 	return r;
10539 }
10540 
10541 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10542 {
10543 	struct tg3 *tp = netdev_priv(dev);
10544 
10545 	ering->rx_max_pending = tp->rx_std_ring_mask;
10546 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10547 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10548 	else
10549 		ering->rx_jumbo_max_pending = 0;
10550 
10551 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10552 
10553 	ering->rx_pending = tp->rx_pending;
10554 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10555 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10556 	else
10557 		ering->rx_jumbo_pending = 0;
10558 
10559 	ering->tx_pending = tp->napi[0].tx_pending;
10560 }
10561 
10562 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10563 {
10564 	struct tg3 *tp = netdev_priv(dev);
10565 	int i, irq_sync = 0, err = 0;
10566 
10567 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10568 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10569 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10570 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10571 	    (tg3_flag(tp, TSO_BUG) &&
10572 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10573 		return -EINVAL;
10574 
10575 	if (netif_running(dev)) {
10576 		tg3_phy_stop(tp);
10577 		tg3_netif_stop(tp);
10578 		irq_sync = 1;
10579 	}
10580 
10581 	tg3_full_lock(tp, irq_sync);
10582 
10583 	tp->rx_pending = ering->rx_pending;
10584 
10585 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10586 	    tp->rx_pending > 63)
10587 		tp->rx_pending = 63;
10588 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10589 
10590 	for (i = 0; i < tp->irq_max; i++)
10591 		tp->napi[i].tx_pending = ering->tx_pending;
10592 
10593 	if (netif_running(dev)) {
10594 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10595 		err = tg3_restart_hw(tp, 1);
10596 		if (!err)
10597 			tg3_netif_start(tp);
10598 	}
10599 
10600 	tg3_full_unlock(tp);
10601 
10602 	if (irq_sync && !err)
10603 		tg3_phy_start(tp);
10604 
10605 	return err;
10606 }
10607 
10608 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10609 {
10610 	struct tg3 *tp = netdev_priv(dev);
10611 
10612 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10613 
10614 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10615 		epause->rx_pause = 1;
10616 	else
10617 		epause->rx_pause = 0;
10618 
10619 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10620 		epause->tx_pause = 1;
10621 	else
10622 		epause->tx_pause = 0;
10623 }
10624 
10625 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10626 {
10627 	struct tg3 *tp = netdev_priv(dev);
10628 	int err = 0;
10629 
10630 	if (tg3_flag(tp, USE_PHYLIB)) {
10631 		u32 newadv;
10632 		struct phy_device *phydev;
10633 
10634 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10635 
10636 		if (!(phydev->supported & SUPPORTED_Pause) ||
10637 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10638 		     (epause->rx_pause != epause->tx_pause)))
10639 			return -EINVAL;
10640 
10641 		tp->link_config.flowctrl = 0;
10642 		if (epause->rx_pause) {
10643 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10644 
10645 			if (epause->tx_pause) {
10646 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10647 				newadv = ADVERTISED_Pause;
10648 			} else
10649 				newadv = ADVERTISED_Pause |
10650 					 ADVERTISED_Asym_Pause;
10651 		} else if (epause->tx_pause) {
10652 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10653 			newadv = ADVERTISED_Asym_Pause;
10654 		} else
10655 			newadv = 0;
10656 
10657 		if (epause->autoneg)
10658 			tg3_flag_set(tp, PAUSE_AUTONEG);
10659 		else
10660 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10661 
10662 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10663 			u32 oldadv = phydev->advertising &
10664 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10665 			if (oldadv != newadv) {
10666 				phydev->advertising &=
10667 					~(ADVERTISED_Pause |
10668 					  ADVERTISED_Asym_Pause);
10669 				phydev->advertising |= newadv;
10670 				if (phydev->autoneg) {
10671 					/*
10672 					 * Always renegotiate the link to
10673 					 * inform our link partner of our
10674 					 * flow control settings, even if the
10675 					 * flow control is forced.  Let
10676 					 * tg3_adjust_link() do the final
10677 					 * flow control setup.
10678 					 */
10679 					return phy_start_aneg(phydev);
10680 				}
10681 			}
10682 
10683 			if (!epause->autoneg)
10684 				tg3_setup_flow_control(tp, 0, 0);
10685 		} else {
10686 			tp->link_config.orig_advertising &=
10687 					~(ADVERTISED_Pause |
10688 					  ADVERTISED_Asym_Pause);
10689 			tp->link_config.orig_advertising |= newadv;
10690 		}
10691 	} else {
10692 		int irq_sync = 0;
10693 
10694 		if (netif_running(dev)) {
10695 			tg3_netif_stop(tp);
10696 			irq_sync = 1;
10697 		}
10698 
10699 		tg3_full_lock(tp, irq_sync);
10700 
10701 		if (epause->autoneg)
10702 			tg3_flag_set(tp, PAUSE_AUTONEG);
10703 		else
10704 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10705 		if (epause->rx_pause)
10706 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10707 		else
10708 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10709 		if (epause->tx_pause)
10710 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10711 		else
10712 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10713 
10714 		if (netif_running(dev)) {
10715 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10716 			err = tg3_restart_hw(tp, 1);
10717 			if (!err)
10718 				tg3_netif_start(tp);
10719 		}
10720 
10721 		tg3_full_unlock(tp);
10722 	}
10723 
10724 	return err;
10725 }
10726 
10727 static int tg3_get_sset_count(struct net_device *dev, int sset)
10728 {
10729 	switch (sset) {
10730 	case ETH_SS_TEST:
10731 		return TG3_NUM_TEST;
10732 	case ETH_SS_STATS:
10733 		return TG3_NUM_STATS;
10734 	default:
10735 		return -EOPNOTSUPP;
10736 	}
10737 }
10738 
10739 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10740 			 u32 *rules __always_unused)
10741 {
10742 	struct tg3 *tp = netdev_priv(dev);
10743 
10744 	if (!tg3_flag(tp, SUPPORT_MSIX))
10745 		return -EOPNOTSUPP;
10746 
10747 	switch (info->cmd) {
10748 	case ETHTOOL_GRXRINGS:
10749 		if (netif_running(tp->dev))
10750 			info->data = tp->irq_cnt;
10751 		else {
10752 			info->data = num_online_cpus();
10753 			if (info->data > TG3_IRQ_MAX_VECS_RSS)
10754 				info->data = TG3_IRQ_MAX_VECS_RSS;
10755 		}
10756 
10757 		/* The first interrupt vector only
10758 		 * handles link interrupts.
10759 		 */
10760 		info->data -= 1;
10761 		return 0;
10762 
10763 	default:
10764 		return -EOPNOTSUPP;
10765 	}
10766 }
10767 
10768 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10769 {
10770 	u32 size = 0;
10771 	struct tg3 *tp = netdev_priv(dev);
10772 
10773 	if (tg3_flag(tp, SUPPORT_MSIX))
10774 		size = TG3_RSS_INDIR_TBL_SIZE;
10775 
10776 	return size;
10777 }
10778 
10779 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10780 {
10781 	struct tg3 *tp = netdev_priv(dev);
10782 	int i;
10783 
10784 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10785 		indir[i] = tp->rss_ind_tbl[i];
10786 
10787 	return 0;
10788 }
10789 
10790 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10791 {
10792 	struct tg3 *tp = netdev_priv(dev);
10793 	size_t i;
10794 
10795 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10796 		tp->rss_ind_tbl[i] = indir[i];
10797 
10798 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10799 		return 0;
10800 
10801 	/* It is legal to write the indirection
10802 	 * table while the device is running.
10803 	 */
10804 	tg3_full_lock(tp, 0);
10805 	tg3_rss_write_indir_tbl(tp);
10806 	tg3_full_unlock(tp);
10807 
10808 	return 0;
10809 }
10810 
10811 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10812 {
10813 	switch (stringset) {
10814 	case ETH_SS_STATS:
10815 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10816 		break;
10817 	case ETH_SS_TEST:
10818 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10819 		break;
10820 	default:
10821 		WARN_ON(1);	/* we need a WARN() */
10822 		break;
10823 	}
10824 }
10825 
10826 static int tg3_set_phys_id(struct net_device *dev,
10827 			    enum ethtool_phys_id_state state)
10828 {
10829 	struct tg3 *tp = netdev_priv(dev);
10830 
10831 	if (!netif_running(tp->dev))
10832 		return -EAGAIN;
10833 
10834 	switch (state) {
10835 	case ETHTOOL_ID_ACTIVE:
10836 		return 1;	/* cycle on/off once per second */
10837 
10838 	case ETHTOOL_ID_ON:
10839 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10840 		     LED_CTRL_1000MBPS_ON |
10841 		     LED_CTRL_100MBPS_ON |
10842 		     LED_CTRL_10MBPS_ON |
10843 		     LED_CTRL_TRAFFIC_OVERRIDE |
10844 		     LED_CTRL_TRAFFIC_BLINK |
10845 		     LED_CTRL_TRAFFIC_LED);
10846 		break;
10847 
10848 	case ETHTOOL_ID_OFF:
10849 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10850 		     LED_CTRL_TRAFFIC_OVERRIDE);
10851 		break;
10852 
10853 	case ETHTOOL_ID_INACTIVE:
10854 		tw32(MAC_LED_CTRL, tp->led_ctrl);
10855 		break;
10856 	}
10857 
10858 	return 0;
10859 }
10860 
10861 static void tg3_get_ethtool_stats(struct net_device *dev,
10862 				   struct ethtool_stats *estats, u64 *tmp_stats)
10863 {
10864 	struct tg3 *tp = netdev_priv(dev);
10865 
10866 	tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10867 }
10868 
10869 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10870 {
10871 	int i;
10872 	__be32 *buf;
10873 	u32 offset = 0, len = 0;
10874 	u32 magic, val;
10875 
10876 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10877 		return NULL;
10878 
10879 	if (magic == TG3_EEPROM_MAGIC) {
10880 		for (offset = TG3_NVM_DIR_START;
10881 		     offset < TG3_NVM_DIR_END;
10882 		     offset += TG3_NVM_DIRENT_SIZE) {
10883 			if (tg3_nvram_read(tp, offset, &val))
10884 				return NULL;
10885 
10886 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10887 			    TG3_NVM_DIRTYPE_EXTVPD)
10888 				break;
10889 		}
10890 
10891 		if (offset != TG3_NVM_DIR_END) {
10892 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10893 			if (tg3_nvram_read(tp, offset + 4, &offset))
10894 				return NULL;
10895 
10896 			offset = tg3_nvram_logical_addr(tp, offset);
10897 		}
10898 	}
10899 
10900 	if (!offset || !len) {
10901 		offset = TG3_NVM_VPD_OFF;
10902 		len = TG3_NVM_VPD_LEN;
10903 	}
10904 
10905 	buf = kmalloc(len, GFP_KERNEL);
10906 	if (buf == NULL)
10907 		return NULL;
10908 
10909 	if (magic == TG3_EEPROM_MAGIC) {
10910 		for (i = 0; i < len; i += 4) {
10911 			/* The data is in little-endian format in NVRAM.
10912 			 * Use the big-endian read routines to preserve
10913 			 * the byte order as it exists in NVRAM.
10914 			 */
10915 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10916 				goto error;
10917 		}
10918 	} else {
10919 		u8 *ptr;
10920 		ssize_t cnt;
10921 		unsigned int pos = 0;
10922 
10923 		ptr = (u8 *)&buf[0];
10924 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10925 			cnt = pci_read_vpd(tp->pdev, pos,
10926 					   len - pos, ptr);
10927 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
10928 				cnt = 0;
10929 			else if (cnt < 0)
10930 				goto error;
10931 		}
10932 		if (pos != len)
10933 			goto error;
10934 	}
10935 
10936 	*vpdlen = len;
10937 
10938 	return buf;
10939 
10940 error:
10941 	kfree(buf);
10942 	return NULL;
10943 }
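
/* Usage sketch (illustrative; it mirrors what tg3_test_nvram() does below):
 * tg3_vpd_readblock() returns a kmalloc()'d copy of the VPD block that the
 * caller must kfree():
 *
 *	u32 vpdlen;
 *	__be32 *vpd = tg3_vpd_readblock(tp, &vpdlen);
 *
 *	if (vpd) {
 *		... parse up to vpdlen bytes ...
 *		kfree(vpd);
 *	}
 */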
10944 
10945 #define NVRAM_TEST_SIZE 0x100
10946 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
10947 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
10948 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
10949 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
10950 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
10951 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
10952 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10953 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10954 
10955 static int tg3_test_nvram(struct tg3 *tp)
10956 {
10957 	u32 csum, magic, len;
10958 	__be32 *buf;
10959 	int i, j, k, err = 0, size;
10960 
10961 	if (tg3_flag(tp, NO_NVRAM))
10962 		return 0;
10963 
10964 	if (tg3_nvram_read(tp, 0, &magic) != 0)
10965 		return -EIO;
10966 
10967 	if (magic == TG3_EEPROM_MAGIC)
10968 		size = NVRAM_TEST_SIZE;
10969 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10970 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10971 		    TG3_EEPROM_SB_FORMAT_1) {
10972 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10973 			case TG3_EEPROM_SB_REVISION_0:
10974 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10975 				break;
10976 			case TG3_EEPROM_SB_REVISION_2:
10977 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10978 				break;
10979 			case TG3_EEPROM_SB_REVISION_3:
10980 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10981 				break;
10982 			case TG3_EEPROM_SB_REVISION_4:
10983 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10984 				break;
10985 			case TG3_EEPROM_SB_REVISION_5:
10986 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10987 				break;
10988 			case TG3_EEPROM_SB_REVISION_6:
10989 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10990 				break;
10991 			default:
10992 				return -EIO;
10993 			}
10994 		} else
10995 			return 0;
10996 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10997 		size = NVRAM_SELFBOOT_HW_SIZE;
10998 	else
10999 		return -EIO;
11000 
11001 	buf = kmalloc(size, GFP_KERNEL);
11002 	if (buf == NULL)
11003 		return -ENOMEM;
11004 
11005 	err = -EIO;
11006 	for (i = 0, j = 0; i < size; i += 4, j++) {
11007 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11008 		if (err)
11009 			break;
11010 	}
11011 	if (i < size)
11012 		goto out;
11013 
11014 	/* Selfboot format */
11015 	magic = be32_to_cpu(buf[0]);
11016 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11017 	    TG3_EEPROM_MAGIC_FW) {
11018 		u8 *buf8 = (u8 *) buf, csum8 = 0;
11019 
11020 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11021 		    TG3_EEPROM_SB_REVISION_2) {
11022 			/* For rev 2, the csum skips the 4-byte MBA (Multiple Boot Agent) field. */
11023 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11024 				csum8 += buf8[i];
11025 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11026 				csum8 += buf8[i];
11027 		} else {
11028 			for (i = 0; i < size; i++)
11029 				csum8 += buf8[i];
11030 		}
11031 
11032 		if (csum8 == 0) {
11033 			err = 0;
11034 			goto out;
11035 		}
11036 
11037 		err = -EIO;
11038 		goto out;
11039 	}
11040 
11041 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11042 	    TG3_EEPROM_MAGIC_HW) {
11043 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11044 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11045 		u8 *buf8 = (u8 *) buf;
11046 
11047 		/* Separate the parity bits and the data bytes.  */
11048 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11049 			if ((i == 0) || (i == 8)) {
11050 				int l;
11051 				u8 msk;
11052 
11053 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11054 					parity[k++] = buf8[i] & msk;
11055 				i++;
11056 			} else if (i == 16) {
11057 				int l;
11058 				u8 msk;
11059 
11060 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11061 					parity[k++] = buf8[i] & msk;
11062 				i++;
11063 
11064 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11065 					parity[k++] = buf8[i] & msk;
11066 				i++;
11067 			}
11068 			data[j++] = buf8[i];
11069 		}
11070 
11071 		err = -EIO;
11072 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11073 			u8 hw8 = hweight8(data[i]);	/* set bits in the data byte */
11074 			/* Odd parity: byte plus its parity bit must have odd weight. */
11075 			if ((hw8 & 0x1) && parity[i])
11076 				goto out;
11077 			else if (!(hw8 & 0x1) && !parity[i])
11078 				goto out;
11079 		}
11080 		err = 0;
11081 		goto out;
11082 	}
11083 
11084 	err = -EIO;
11085 
11086 	/* Bootstrap checksum at offset 0x10 */
11087 	csum = calc_crc((unsigned char *) buf, 0x10);
11088 	if (csum != le32_to_cpu(buf[0x10/4]))
11089 		goto out;
11090 
11091 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11092 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11093 	if (csum != le32_to_cpu(buf[0xfc/4]))
11094 		goto out;
11095 
11096 	kfree(buf);
11097 
11098 	buf = tg3_vpd_readblock(tp, &len);
11099 	if (!buf)
11100 		return -ENOMEM;
11101 
11102 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11103 	if (i > 0) {
11104 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11105 		if (j < 0)
11106 			goto out;
11107 
11108 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11109 			goto out;
11110 
11111 		i += PCI_VPD_LRDT_TAG_SIZE;
11112 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11113 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11114 		if (j > 0) {
11115 			u8 csum8 = 0;
11116 
11117 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11118 
11119 			for (i = 0; i <= j; i++)
11120 				csum8 += ((u8 *)buf)[i];
11121 
11122 			if (csum8)
11123 				goto out;
11124 		}
11125 	}
11126 
11127 	err = 0;
11128 
11129 out:
11130 	kfree(buf);
11131 	return err;
11132 }
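
/* A minimal sketch of the selfboot checksum rule used above: an image is
 * accepted when all of its bytes, the stored checksum byte included, sum
 * to zero modulo 256.  The helper below is hypothetical and only restates
 * that invariant.
 */
static inline u8 tg3_sb_csum8(const u8 *buf, int len)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum += buf[i];

	return sum;	/* zero means the image checks out */
}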
11133 
11134 #define TG3_SERDES_TIMEOUT_SEC	2
11135 #define TG3_COPPER_TIMEOUT_SEC	6
11136 
11137 static int tg3_test_link(struct tg3 *tp)
11138 {
11139 	int i, max;
11140 
11141 	if (!netif_running(tp->dev))
11142 		return -ENODEV;
11143 
11144 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11145 		max = TG3_SERDES_TIMEOUT_SEC;
11146 	else
11147 		max = TG3_COPPER_TIMEOUT_SEC;
11148 
11149 	for (i = 0; i < max; i++) {
11150 		if (netif_carrier_ok(tp->dev))
11151 			return 0;
11152 
11153 		if (msleep_interruptible(1000))
11154 			break;
11155 	}
11156 
11157 	return -EIO;
11158 }
11159 
11160 /* Only test the commonly used registers */
11161 static int tg3_test_registers(struct tg3 *tp)
11162 {
11163 	int i, is_5705, is_5750;
11164 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11165 	static struct {
11166 		u16 offset;
11167 		u16 flags;
11168 #define TG3_FL_5705	0x1
11169 #define TG3_FL_NOT_5705	0x2
11170 #define TG3_FL_NOT_5788	0x4
11171 #define TG3_FL_NOT_5750	0x8
11172 		u32 read_mask;
11173 		u32 write_mask;
11174 	} reg_tbl[] = {
11175 		/* MAC Control Registers */
11176 		{ MAC_MODE, TG3_FL_NOT_5705,
11177 			0x00000000, 0x00ef6f8c },
11178 		{ MAC_MODE, TG3_FL_5705,
11179 			0x00000000, 0x01ef6b8c },
11180 		{ MAC_STATUS, TG3_FL_NOT_5705,
11181 			0x03800107, 0x00000000 },
11182 		{ MAC_STATUS, TG3_FL_5705,
11183 			0x03800100, 0x00000000 },
11184 		{ MAC_ADDR_0_HIGH, 0x0000,
11185 			0x00000000, 0x0000ffff },
11186 		{ MAC_ADDR_0_LOW, 0x0000,
11187 			0x00000000, 0xffffffff },
11188 		{ MAC_RX_MTU_SIZE, 0x0000,
11189 			0x00000000, 0x0000ffff },
11190 		{ MAC_TX_MODE, 0x0000,
11191 			0x00000000, 0x00000070 },
11192 		{ MAC_TX_LENGTHS, 0x0000,
11193 			0x00000000, 0x00003fff },
11194 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11195 			0x00000000, 0x000007fc },
11196 		{ MAC_RX_MODE, TG3_FL_5705,
11197 			0x00000000, 0x000007dc },
11198 		{ MAC_HASH_REG_0, 0x0000,
11199 			0x00000000, 0xffffffff },
11200 		{ MAC_HASH_REG_1, 0x0000,
11201 			0x00000000, 0xffffffff },
11202 		{ MAC_HASH_REG_2, 0x0000,
11203 			0x00000000, 0xffffffff },
11204 		{ MAC_HASH_REG_3, 0x0000,
11205 			0x00000000, 0xffffffff },
11206 
11207 		/* Receive Data and Receive BD Initiator Control Registers. */
11208 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11209 			0x00000000, 0xffffffff },
11210 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11211 			0x00000000, 0xffffffff },
11212 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11213 			0x00000000, 0x00000003 },
11214 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11215 			0x00000000, 0xffffffff },
11216 		{ RCVDBDI_STD_BD+0, 0x0000,
11217 			0x00000000, 0xffffffff },
11218 		{ RCVDBDI_STD_BD+4, 0x0000,
11219 			0x00000000, 0xffffffff },
11220 		{ RCVDBDI_STD_BD+8, 0x0000,
11221 			0x00000000, 0xffff0002 },
11222 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11223 			0x00000000, 0xffffffff },
11224 
11225 		/* Receive BD Initiator Control Registers. */
11226 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11227 			0x00000000, 0xffffffff },
11228 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11229 			0x00000000, 0x000003ff },
11230 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11231 			0x00000000, 0xffffffff },
11232 
11233 		/* Host Coalescing Control Registers. */
11234 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11235 			0x00000000, 0x00000004 },
11236 		{ HOSTCC_MODE, TG3_FL_5705,
11237 			0x00000000, 0x000000f6 },
11238 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11239 			0x00000000, 0xffffffff },
11240 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11241 			0x00000000, 0x000003ff },
11242 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11243 			0x00000000, 0xffffffff },
11244 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11245 			0x00000000, 0x000003ff },
11246 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11247 			0x00000000, 0xffffffff },
11248 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11249 			0x00000000, 0x000000ff },
11250 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11251 			0x00000000, 0xffffffff },
11252 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11253 			0x00000000, 0x000000ff },
11254 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11255 			0x00000000, 0xffffffff },
11256 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11257 			0x00000000, 0xffffffff },
11258 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11259 			0x00000000, 0xffffffff },
11260 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11261 			0x00000000, 0x000000ff },
11262 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11263 			0x00000000, 0xffffffff },
11264 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11265 			0x00000000, 0x000000ff },
11266 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11267 			0x00000000, 0xffffffff },
11268 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11269 			0x00000000, 0xffffffff },
11270 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11271 			0x00000000, 0xffffffff },
11272 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11273 			0x00000000, 0xffffffff },
11274 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11275 			0x00000000, 0xffffffff },
11276 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11277 			0xffffffff, 0x00000000 },
11278 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11279 			0xffffffff, 0x00000000 },
11280 
11281 		/* Buffer Manager Control Registers. */
11282 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11283 			0x00000000, 0x007fff80 },
11284 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11285 			0x00000000, 0x007fffff },
11286 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11287 			0x00000000, 0x0000003f },
11288 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11289 			0x00000000, 0x000001ff },
11290 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11291 			0x00000000, 0x000001ff },
11292 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11293 			0xffffffff, 0x00000000 },
11294 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11295 			0xffffffff, 0x00000000 },
11296 
11297 		/* Mailbox Registers */
11298 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11299 			0x00000000, 0x000001ff },
11300 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11301 			0x00000000, 0x000001ff },
11302 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11303 			0x00000000, 0x000007ff },
11304 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11305 			0x00000000, 0x000001ff },
11306 
11307 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11308 	};
11309 
11310 	is_5705 = is_5750 = 0;
11311 	if (tg3_flag(tp, 5705_PLUS)) {
11312 		is_5705 = 1;
11313 		if (tg3_flag(tp, 5750_PLUS))
11314 			is_5750 = 1;
11315 	}
11316 
11317 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11318 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11319 			continue;
11320 
11321 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11322 			continue;
11323 
11324 		if (tg3_flag(tp, IS_5788) &&
11325 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11326 			continue;
11327 
11328 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11329 			continue;
11330 
11331 		offset = (u32) reg_tbl[i].offset;
11332 		read_mask = reg_tbl[i].read_mask;
11333 		write_mask = reg_tbl[i].write_mask;
11334 
11335 		/* Save the original register content */
11336 		save_val = tr32(offset);
11337 
11338 		/* Determine the read-only value. */
11339 		read_val = save_val & read_mask;
11340 
11341 		/* Write zero to the register, then make sure the read-only bits
11342 		 * are not changed and the read/write bits are all zeros.
11343 		 */
11344 		tw32(offset, 0);
11345 
11346 		val = tr32(offset);
11347 
11348 		/* Test the read-only and read/write bits. */
11349 		if (((val & read_mask) != read_val) || (val & write_mask))
11350 			goto out;
11351 
11352 		/* Write ones to all the bits defined by RdMask and WrMask, then
11353 		 * make sure the read-only bits are not changed and the
11354 		 * read/write bits are all ones.
11355 		 */
11356 		tw32(offset, read_mask | write_mask);
11357 
11358 		val = tr32(offset);
11359 
11360 		/* Test the read-only bits. */
11361 		if ((val & read_mask) != read_val)
11362 			goto out;
11363 
11364 		/* Test the read/write bits. */
11365 		if ((val & write_mask) != write_mask)
11366 			goto out;
11367 
11368 		tw32(offset, save_val);
11369 	}
11370 
11371 	return 0;
11372 
11373 out:
11374 	if (netif_msg_hw(tp))
11375 		netdev_err(tp->dev,
11376 			   "Register test failed at offset %x\n", offset);
11377 	tw32(offset, save_val);
11378 	return -EIO;
11379 }
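
/* The loop above reduces to two probes per register.  A hypothetical
 * helper restating them (offset and masks would come from reg_tbl):
 * writing all-zeros must leave the read-only bits intact and the
 * read/write bits clear; writing all-ones must leave the read-only bits
 * intact and the read/write bits set.
 */
static inline int tg3_reg_masks_ok(struct tg3 *tp, u32 off,
				   u32 ro_mask, u32 rw_mask)
{
	u32 save = tr32(off);
	u32 ro = save & ro_mask;
	u32 val;
	int ok;

	tw32(off, 0);
	val = tr32(off);
	ok = ((val & ro_mask) == ro) && !(val & rw_mask);

	tw32(off, ro_mask | rw_mask);
	val = tr32(off);
	ok = ok && ((val & ro_mask) == ro) && ((val & rw_mask) == rw_mask);

	tw32(off, save);	/* restore the original contents */
	return ok;
}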
11380 
11381 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11382 {
11383 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11384 	int i;
11385 	u32 j;
11386 
11387 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11388 		for (j = 0; j < len; j += 4) {
11389 			u32 val;
11390 
11391 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11392 			tg3_read_mem(tp, offset + j, &val);
11393 			if (val != test_pattern[i])
11394 				return -EIO;
11395 		}
11396 	}
11397 	return 0;
11398 }
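
/* Usage sketch: each entry of the tables in tg3_test_memory() below
 * expands to a call such as
 *
 *	err = tg3_do_mem_test(tp, 0x00004000, 0x00800);
 *
 * which walks all three patterns across 0x800 bytes of on-chip memory.
 * The 0x00000000/0xffffffff/0xaa55a55a trio is a conventional choice for
 * catching bits stuck in either polarity.
 */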
11399 
11400 static int tg3_test_memory(struct tg3 *tp)
11401 {
11402 	static struct mem_entry {
11403 		u32 offset;
11404 		u32 len;
11405 	} mem_tbl_570x[] = {
11406 		{ 0x00000000, 0x00b50},
11407 		{ 0x00002000, 0x1c000},
11408 		{ 0xffffffff, 0x00000}
11409 	}, mem_tbl_5705[] = {
11410 		{ 0x00000100, 0x0000c},
11411 		{ 0x00000200, 0x00008},
11412 		{ 0x00004000, 0x00800},
11413 		{ 0x00006000, 0x01000},
11414 		{ 0x00008000, 0x02000},
11415 		{ 0x00010000, 0x0e000},
11416 		{ 0xffffffff, 0x00000}
11417 	}, mem_tbl_5755[] = {
11418 		{ 0x00000200, 0x00008},
11419 		{ 0x00004000, 0x00800},
11420 		{ 0x00006000, 0x00800},
11421 		{ 0x00008000, 0x02000},
11422 		{ 0x00010000, 0x0c000},
11423 		{ 0xffffffff, 0x00000}
11424 	}, mem_tbl_5906[] = {
11425 		{ 0x00000200, 0x00008},
11426 		{ 0x00004000, 0x00400},
11427 		{ 0x00006000, 0x00400},
11428 		{ 0x00008000, 0x01000},
11429 		{ 0x00010000, 0x01000},
11430 		{ 0xffffffff, 0x00000}
11431 	}, mem_tbl_5717[] = {
11432 		{ 0x00000200, 0x00008},
11433 		{ 0x00010000, 0x0a000},
11434 		{ 0x00020000, 0x13c00},
11435 		{ 0xffffffff, 0x00000}
11436 	}, mem_tbl_57765[] = {
11437 		{ 0x00000200, 0x00008},
11438 		{ 0x00004000, 0x00800},
11439 		{ 0x00006000, 0x09800},
11440 		{ 0x00010000, 0x0a000},
11441 		{ 0xffffffff, 0x00000}
11442 	};
11443 	struct mem_entry *mem_tbl;
11444 	int err = 0;
11445 	int i;
11446 
11447 	if (tg3_flag(tp, 5717_PLUS))
11448 		mem_tbl = mem_tbl_5717;
11449 	else if (tg3_flag(tp, 57765_CLASS))
11450 		mem_tbl = mem_tbl_57765;
11451 	else if (tg3_flag(tp, 5755_PLUS))
11452 		mem_tbl = mem_tbl_5755;
11453 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11454 		mem_tbl = mem_tbl_5906;
11455 	else if (tg3_flag(tp, 5705_PLUS))
11456 		mem_tbl = mem_tbl_5705;
11457 	else
11458 		mem_tbl = mem_tbl_570x;
11459 
11460 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11461 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11462 		if (err)
11463 			break;
11464 	}
11465 
11466 	return err;
11467 }
11468 
11469 #define TG3_TSO_MSS		500
11470 
11471 #define TG3_TSO_IP_HDR_LEN	20
11472 #define TG3_TSO_TCP_HDR_LEN	20
11473 #define TG3_TSO_TCP_OPT_LEN	12
11474 
11475 static const u8 tg3_tso_header[] = {
11476 0x08, 0x00,
11477 0x45, 0x00, 0x00, 0x00,
11478 0x00, 0x00, 0x40, 0x00,
11479 0x40, 0x06, 0x00, 0x00,
11480 0x0a, 0x00, 0x00, 0x01,
11481 0x0a, 0x00, 0x00, 0x02,
11482 0x0d, 0x00, 0xe0, 0x00,
11483 0x00, 0x00, 0x01, 0x00,
11484 0x00, 0x00, 0x02, 0x00,
11485 0x80, 0x10, 0x10, 0x00,
11486 0x14, 0x09, 0x00, 0x00,
11487 0x01, 0x01, 0x08, 0x0a,
11488 0x11, 0x11, 0x11, 0x11,
11489 0x11, 0x11, 0x11, 0x11,
11490 };
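
/* A field-by-field reading of the template above (the 12 MAC address
 * bytes that normally precede it are filled in by tg3_run_loopback()):
 *
 *	0x08 0x00		EtherType: IPv4
 *	0x45			IP version 4, header length 5 words
 *	0x40 0x00		fragment field: Don't Fragment
 *	0x40 0x06		TTL 64, protocol TCP
 *	0x0a 0x00 0x00 0x01	source 10.0.0.1
 *	0x0a 0x00 0x00 0x02	destination 10.0.0.2
 *	0x80 0x10		TCP data offset 8 words (20 bytes plus
 *				12 bytes of options), ACK flag
 *	0x01 0x01 0x08 0x0a	NOP, NOP, timestamp option (kind 8, len 10)
 *
 * The IP total length is left zero and patched at runtime; the TCP
 * checksum bytes (0x14 0x09) carry a precomputed value that is cleared
 * again when one of the HW_TSO modes recomputes it in hardware.
 */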
11491 
11492 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11493 {
11494 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11495 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11496 	u32 budget;
11497 	struct sk_buff *skb;
11498 	u8 *tx_data, *rx_data;
11499 	dma_addr_t map;
11500 	int num_pkts, tx_len, rx_len, i, err;
11501 	struct tg3_rx_buffer_desc *desc;
11502 	struct tg3_napi *tnapi, *rnapi;
11503 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11504 
11505 	tnapi = &tp->napi[0];
11506 	rnapi = &tp->napi[0];
11507 	if (tp->irq_cnt > 1) {
11508 		if (tg3_flag(tp, ENABLE_RSS))
11509 			rnapi = &tp->napi[1];
11510 		if (tg3_flag(tp, ENABLE_TSS))
11511 			tnapi = &tp->napi[1];
11512 	}
11513 	coal_now = tnapi->coal_now | rnapi->coal_now;
11514 
11515 	err = -EIO;
11516 
11517 	tx_len = pktsz;
11518 	skb = netdev_alloc_skb(tp->dev, tx_len);
11519 	if (!skb)
11520 		return -ENOMEM;
11521 
11522 	tx_data = skb_put(skb, tx_len);
11523 	memcpy(tx_data, tp->dev->dev_addr, 6);
11524 	memset(tx_data + 6, 0x0, 8);
11525 
11526 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11527 
11528 	if (tso_loopback) {
11529 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11530 
11531 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11532 			      TG3_TSO_TCP_OPT_LEN;
11533 
11534 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11535 		       sizeof(tg3_tso_header));
11536 		mss = TG3_TSO_MSS;
11537 
11538 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11539 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11540 
11541 		/* Set the total length field in the IP header */
11542 		iph->tot_len = htons((u16)(mss + hdr_len));
11543 
11544 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11545 			      TXD_FLAG_CPU_POST_DMA);
11546 
11547 		if (tg3_flag(tp, HW_TSO_1) ||
11548 		    tg3_flag(tp, HW_TSO_2) ||
11549 		    tg3_flag(tp, HW_TSO_3)) {
11550 			struct tcphdr *th;
11551 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11552 			th = (struct tcphdr *)&tx_data[val];
11553 			th->check = 0;
11554 		} else
11555 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11556 
11557 		if (tg3_flag(tp, HW_TSO_3)) {
11558 			mss |= (hdr_len & 0xc) << 12;
11559 			if (hdr_len & 0x10)
11560 				base_flags |= 0x00000010;
11561 			base_flags |= (hdr_len & 0x3e0) << 5;
11562 		} else if (tg3_flag(tp, HW_TSO_2))
11563 			mss |= hdr_len << 9;
11564 		else if (tg3_flag(tp, HW_TSO_1) ||
11565 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11566 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11567 		} else {
11568 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11569 		}
11570 
11571 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11572 	} else {
11573 		num_pkts = 1;
11574 		data_off = ETH_HLEN;
11575 	}
11576 
11577 	for (i = data_off; i < tx_len; i++)
11578 		tx_data[i] = (u8) (i & 0xff);
11579 
11580 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11581 	if (pci_dma_mapping_error(tp->pdev, map)) {
11582 		dev_kfree_skb(skb);
11583 		return -EIO;
11584 	}
11585 
11586 	val = tnapi->tx_prod;
11587 	tnapi->tx_buffers[val].skb = skb;
11588 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11589 
11590 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11591 	       rnapi->coal_now);
11592 
11593 	udelay(10);
11594 
11595 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11596 
11597 	budget = tg3_tx_avail(tnapi);
11598 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11599 			    base_flags | TXD_FLAG_END, mss, 0)) {
11600 		tnapi->tx_buffers[val].skb = NULL;
11601 		dev_kfree_skb(skb);
11602 		return -EIO;
11603 	}
11604 
11605 	tnapi->tx_prod++;
11606 
11607 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11608 	tr32_mailbox(tnapi->prodmbox);
11609 
11610 	udelay(10);
11611 
11612 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11613 	for (i = 0; i < 35; i++) {
11614 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11615 		       coal_now);
11616 
11617 		udelay(10);
11618 
11619 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11620 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11621 		if ((tx_idx == tnapi->tx_prod) &&
11622 		    (rx_idx == (rx_start_idx + num_pkts)))
11623 			break;
11624 	}
11625 
11626 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11627 	dev_kfree_skb(skb);
11628 
11629 	if (tx_idx != tnapi->tx_prod)
11630 		goto out;
11631 
11632 	if (rx_idx != rx_start_idx + num_pkts)
11633 		goto out;
11634 
11635 	val = data_off;
11636 	while (rx_idx != rx_start_idx) {
11637 		desc = &rnapi->rx_rcb[rx_start_idx++];
11638 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11639 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11640 
11641 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11642 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11643 			goto out;
11644 
11645 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11646 			 - ETH_FCS_LEN;
11647 
11648 		if (!tso_loopback) {
11649 			if (rx_len != tx_len)
11650 				goto out;
11651 
11652 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11653 				if (opaque_key != RXD_OPAQUE_RING_STD)
11654 					goto out;
11655 			} else {
11656 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11657 					goto out;
11658 			}
11659 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11660 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11661 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11662 			goto out;
11663 		}
11664 
11665 		if (opaque_key == RXD_OPAQUE_RING_STD) {
11666 			rx_data = tpr->rx_std_buffers[desc_idx].data;
11667 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11668 					     mapping);
11669 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11670 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11671 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11672 					     mapping);
11673 		} else
11674 			goto out;
11675 
11676 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11677 					    PCI_DMA_FROMDEVICE);
11678 
11679 		rx_data += TG3_RX_OFFSET(tp);
11680 		for (i = data_off; i < rx_len; i++, val++) {
11681 			if (*(rx_data + i) != (u8) (val & 0xff))
11682 				goto out;
11683 		}
11684 	}
11685 
11686 	err = 0;
11687 
11688 	/* tg3_free_rings will unmap and free the rx_data */
11689 out:
11690 	return err;
11691 }
11692 
11693 #define TG3_STD_LOOPBACK_FAILED		1
11694 #define TG3_JMB_LOOPBACK_FAILED		2
11695 #define TG3_TSO_LOOPBACK_FAILED		4
11696 #define TG3_LOOPBACK_FAILED \
11697 	(TG3_STD_LOOPBACK_FAILED | \
11698 	 TG3_JMB_LOOPBACK_FAILED | \
11699 	 TG3_TSO_LOOPBACK_FAILED)
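
/* Each u64 result slot filled in by tg3_test_loopback() is a bitmask of
 * the sub-tests that failed; e.g. a value of 5 (STD | TSO) in the PHY
 * loopback slot means the standard and TSO frames failed while the jumbo
 * frame passed.
 */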
11700 
11701 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11702 {
11703 	int err = -EIO;
11704 	u32 eee_cap;
11705 
11706 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11707 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11708 
11709 	if (!netif_running(tp->dev)) {
11710 		data[0] = TG3_LOOPBACK_FAILED;
11711 		data[1] = TG3_LOOPBACK_FAILED;
11712 		if (do_extlpbk)
11713 			data[2] = TG3_LOOPBACK_FAILED;
11714 		goto done;
11715 	}
11716 
11717 	err = tg3_reset_hw(tp, 1);
11718 	if (err) {
11719 		data[0] = TG3_LOOPBACK_FAILED;
11720 		data[1] = TG3_LOOPBACK_FAILED;
11721 		if (do_extlpbk)
11722 			data[2] = TG3_LOOPBACK_FAILED;
11723 		goto done;
11724 	}
11725 
11726 	if (tg3_flag(tp, ENABLE_RSS)) {
11727 		int i;
11728 
11729 		/* Reroute all rx packets to the 1st queue */
11730 		for (i = MAC_RSS_INDIR_TBL_0;
11731 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11732 			tw32(i, 0x0);
11733 	}
11734 
11735 	/* HW errata - MAC loopback fails in some cases on 5780.
11736 	 * Normal traffic and PHY loopback are not affected by this
11737 	 * erratum.  Also, the MAC loopback test is deprecated for
11738 	 * all newer ASIC revisions.
11739 	 */
11740 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11741 	    !tg3_flag(tp, CPMU_PRESENT)) {
11742 		tg3_mac_loopback(tp, true);
11743 
11744 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11745 			data[0] |= TG3_STD_LOOPBACK_FAILED;
11746 
11747 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11748 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11749 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11750 
11751 		tg3_mac_loopback(tp, false);
11752 	}
11753 
11754 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11755 	    !tg3_flag(tp, USE_PHYLIB)) {
11756 		int i;
11757 
11758 		tg3_phy_lpbk_set(tp, 0, false);
11759 
11760 		/* Wait for link */
11761 		for (i = 0; i < 100; i++) {
11762 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11763 				break;
11764 			mdelay(1);
11765 		}
11766 
11767 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11768 			data[1] |= TG3_STD_LOOPBACK_FAILED;
11769 		if (tg3_flag(tp, TSO_CAPABLE) &&
11770 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11771 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
11772 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11773 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11774 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
11775 
11776 		if (do_extlpbk) {
11777 			tg3_phy_lpbk_set(tp, 0, true);
11778 
11779 			/* All link indications report up, but the hardware
11780 			 * isn't really ready for about 20 msec.  Double it
11781 			 * to be sure.
11782 			 */
11783 			mdelay(40);
11784 
11785 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11786 				data[2] |= TG3_STD_LOOPBACK_FAILED;
11787 			if (tg3_flag(tp, TSO_CAPABLE) &&
11788 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11789 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
11790 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11791 			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11792 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
11793 		}
11794 
11795 		/* Re-enable gphy autopowerdown. */
11796 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11797 			tg3_phy_toggle_apd(tp, true);
11798 	}
11799 
11800 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11801 
11802 done:
11803 	tp->phy_flags |= eee_cap;
11804 
11805 	return err;
11806 }
11807 
11808 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11809 			  u64 *data)
11810 {
11811 	struct tg3 *tp = netdev_priv(dev);
11812 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11813 
11814 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11815 	    tg3_power_up(tp)) {
11816 		etest->flags |= ETH_TEST_FL_FAILED;
11817 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11818 		return;
11819 	}
11820 
11821 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11822 
11823 	if (tg3_test_nvram(tp) != 0) {
11824 		etest->flags |= ETH_TEST_FL_FAILED;
11825 		data[0] = 1;
11826 	}
11827 	if (!doextlpbk && tg3_test_link(tp)) {
11828 		etest->flags |= ETH_TEST_FL_FAILED;
11829 		data[1] = 1;
11830 	}
11831 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
11832 		int err, err2 = 0, irq_sync = 0;
11833 
11834 		if (netif_running(dev)) {
11835 			tg3_phy_stop(tp);
11836 			tg3_netif_stop(tp);
11837 			irq_sync = 1;
11838 		}
11839 
11840 		tg3_full_lock(tp, irq_sync);
11841 
11842 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11843 		err = tg3_nvram_lock(tp);
11844 		tg3_halt_cpu(tp, RX_CPU_BASE);
11845 		if (!tg3_flag(tp, 5705_PLUS))
11846 			tg3_halt_cpu(tp, TX_CPU_BASE);
11847 		if (!err)
11848 			tg3_nvram_unlock(tp);
11849 
11850 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11851 			tg3_phy_reset(tp);
11852 
11853 		if (tg3_test_registers(tp) != 0) {
11854 			etest->flags |= ETH_TEST_FL_FAILED;
11855 			data[2] = 1;
11856 		}
11857 
11858 		if (tg3_test_memory(tp) != 0) {
11859 			etest->flags |= ETH_TEST_FL_FAILED;
11860 			data[3] = 1;
11861 		}
11862 
11863 		if (doextlpbk)
11864 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11865 
11866 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
11867 			etest->flags |= ETH_TEST_FL_FAILED;
11868 
11869 		tg3_full_unlock(tp);
11870 
11871 		if (tg3_test_interrupt(tp) != 0) {
11872 			etest->flags |= ETH_TEST_FL_FAILED;
11873 			data[7] = 1;
11874 		}
11875 
11876 		tg3_full_lock(tp, 0);
11877 
11878 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11879 		if (netif_running(dev)) {
11880 			tg3_flag_set(tp, INIT_COMPLETE);
11881 			err2 = tg3_restart_hw(tp, 1);
11882 			if (!err2)
11883 				tg3_netif_start(tp);
11884 		}
11885 
11886 		tg3_full_unlock(tp);
11887 
11888 		if (irq_sync && !err2)
11889 			tg3_phy_start(tp);
11890 	}
11891 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11892 		tg3_power_down(tp);
11894 }
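
/* Illustrative shell usage (device name assumed): the self-test entry
 * point above is reached via
 *
 *	ethtool -t eth0 offline
 *
 * The offline flag enables the intrusive register/memory/loopback and
 * interrupt tests; without it only the NVRAM and link checks run.
 */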
11895 
11896 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11897 {
11898 	struct mii_ioctl_data *data = if_mii(ifr);
11899 	struct tg3 *tp = netdev_priv(dev);
11900 	int err;
11901 
11902 	if (tg3_flag(tp, USE_PHYLIB)) {
11903 		struct phy_device *phydev;
11904 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11905 			return -EAGAIN;
11906 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11907 		return phy_mii_ioctl(phydev, ifr, cmd);
11908 	}
11909 
11910 	switch (cmd) {
11911 	case SIOCGMIIPHY:
11912 		data->phy_id = tp->phy_addr;
11913 
11914 		/* fallthru */
11915 	case SIOCGMIIREG: {
11916 		u32 mii_regval;
11917 
11918 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11919 			break;			/* We have no PHY */
11920 
11921 		if (!netif_running(dev))
11922 			return -EAGAIN;
11923 
11924 		spin_lock_bh(&tp->lock);
11925 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11926 		spin_unlock_bh(&tp->lock);
11927 
11928 		data->val_out = mii_regval;
11929 
11930 		return err;
11931 	}
11932 
11933 	case SIOCSMIIREG:
11934 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11935 			break;			/* We have no PHY */
11936 
11937 		if (!netif_running(dev))
11938 			return -EAGAIN;
11939 
11940 		spin_lock_bh(&tp->lock);
11941 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11942 		spin_unlock_bh(&tp->lock);
11943 
11944 		return err;
11945 
11946 	default:
11947 		/* do nothing */
11948 		break;
11949 	}
11950 	return -EOPNOTSUPP;
11951 }
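
/* Illustrative user-space counterpart (a sketch, not driver code): the
 * SIOCGMIIPHY/SIOCGMIIREG path above is what a mii-tool style register
 * read goes through, roughly:
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(s, SIOCGMIIPHY, &ifr);		// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(s, SIOCGMIIREG, &ifr);		// mii->val_out holds BMSR
 *
 * "eth0" is an assumed interface name; error handling is omitted.
 */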
11952 
11953 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11954 {
11955 	struct tg3 *tp = netdev_priv(dev);
11956 
11957 	memcpy(ec, &tp->coal, sizeof(*ec));
11958 	return 0;
11959 }
11960 
11961 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11962 {
11963 	struct tg3 *tp = netdev_priv(dev);
11964 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11965 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11966 
11967 	if (!tg3_flag(tp, 5705_PLUS)) {
11968 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11969 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11970 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11971 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11972 	}
11973 
11974 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11975 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11976 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11977 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11978 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11979 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11980 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11981 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11982 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11983 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11984 		return -EINVAL;
11985 
11986 	/* No rx interrupts will be generated if both are zero */
11987 	if ((ec->rx_coalesce_usecs == 0) &&
11988 	    (ec->rx_max_coalesced_frames == 0))
11989 		return -EINVAL;
11990 
11991 	/* No tx interrupts will be generated if both are zero */
11992 	if ((ec->tx_coalesce_usecs == 0) &&
11993 	    (ec->tx_max_coalesced_frames == 0))
11994 		return -EINVAL;
11995 
11996 	/* Only copy relevant parameters, ignore all others. */
11997 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11998 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11999 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12000 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12001 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12002 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12003 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12004 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12005 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12006 
12007 	if (netif_running(dev)) {
12008 		tg3_full_lock(tp, 0);
12009 		__tg3_set_coalesce(tp, &tp->coal);
12010 		tg3_full_unlock(tp);
12011 	}
12012 	return 0;
12013 }
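
/* The parameters validated above map onto the standard ethtool coalescing
 * knobs, e.g. (illustrative shell usage, device name assumed):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 stats-block-usecs 1000000
 *
 * Per the checks above, rx-usecs and rx-frames must not both be zero (and
 * likewise tx-usecs/tx-frames), or that direction would never raise an
 * interrupt.
 */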
12014 
12015 static const struct ethtool_ops tg3_ethtool_ops = {
12016 	.get_settings		= tg3_get_settings,
12017 	.set_settings		= tg3_set_settings,
12018 	.get_drvinfo		= tg3_get_drvinfo,
12019 	.get_regs_len		= tg3_get_regs_len,
12020 	.get_regs		= tg3_get_regs,
12021 	.get_wol		= tg3_get_wol,
12022 	.set_wol		= tg3_set_wol,
12023 	.get_msglevel		= tg3_get_msglevel,
12024 	.set_msglevel		= tg3_set_msglevel,
12025 	.nway_reset		= tg3_nway_reset,
12026 	.get_link		= ethtool_op_get_link,
12027 	.get_eeprom_len		= tg3_get_eeprom_len,
12028 	.get_eeprom		= tg3_get_eeprom,
12029 	.set_eeprom		= tg3_set_eeprom,
12030 	.get_ringparam		= tg3_get_ringparam,
12031 	.set_ringparam		= tg3_set_ringparam,
12032 	.get_pauseparam		= tg3_get_pauseparam,
12033 	.set_pauseparam		= tg3_set_pauseparam,
12034 	.self_test		= tg3_self_test,
12035 	.get_strings		= tg3_get_strings,
12036 	.set_phys_id		= tg3_set_phys_id,
12037 	.get_ethtool_stats	= tg3_get_ethtool_stats,
12038 	.get_coalesce		= tg3_get_coalesce,
12039 	.set_coalesce		= tg3_set_coalesce,
12040 	.get_sset_count		= tg3_get_sset_count,
12041 	.get_rxnfc		= tg3_get_rxnfc,
12042 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12043 	.get_rxfh_indir		= tg3_get_rxfh_indir,
12044 	.set_rxfh_indir		= tg3_set_rxfh_indir,
12045 };
12046 
12047 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12048 {
12049 	u32 cursize, val, magic;
12050 
12051 	tp->nvram_size = EEPROM_CHIP_SIZE;
12052 
12053 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12054 		return;
12055 
12056 	if ((magic != TG3_EEPROM_MAGIC) &&
12057 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12058 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12059 		return;
12060 
12061 	/*
12062 	 * Size the chip by reading offsets at increasing powers of two.
12063 	 * When we encounter our validation signature, we know the addressing
12064 	 * has wrapped around, and thus have our chip size.
12065 	 */
12066 	cursize = 0x10;
12067 
12068 	while (cursize < tp->nvram_size) {
12069 		if (tg3_nvram_read(tp, cursize, &val) != 0)
12070 			return;
12071 
12072 		if (val == magic)
12073 			break;
12074 
12075 		cursize <<= 1;
12076 	}
12077 
12078 	tp->nvram_size = cursize;
12079 }
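
/* Worked example of the wraparound probe above: on a 16 KB part, reads at
 * 0x10, 0x20, ... return ordinary data, but the read at cursize = 0x4000
 * aliases offset 0 and returns the signature, so the loop breaks and
 * tp->nvram_size becomes 0x4000.
 */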
12080 
12081 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12082 {
12083 	u32 val;
12084 
12085 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12086 		return;
12087 
12088 	/* Selfboot format */
12089 	if (val != TG3_EEPROM_MAGIC) {
12090 		tg3_get_eeprom_size(tp);
12091 		return;
12092 	}
12093 
12094 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12095 		if (val != 0) {
12096 			/* This is confusing.  We want to operate on the
12097 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12098 			 * call will read from NVRAM and byteswap the data
12099 			 * according to the byteswapping settings for all
12100 			 * other register accesses.  This ensures the data we
12101 			 * want will always reside in the lower 16-bits.
12102 			 * However, the data in NVRAM is in LE format, which
12103 			 * means the data from the NVRAM read will always be
12104 			 * opposite the endianness of the CPU.  The 16-bit
12105 			 * byteswap then brings the data to CPU endianness.
12106 			 */
12107 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12108 			return;
12109 		}
12110 	}
12111 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12112 }
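
/* Worked example of the swab16() dance above: a 512 KB part stores the
 * size-in-KB halfword 512 (0x0200) in little-endian NVRAM.  After
 * tg3_nvram_read() the low 16 bits hold those bytes opposite the CPU's
 * endianness, i.e. 0x0002; swab16(0x0002) == 0x0200, and 0x0200 * 1024
 * gives TG3_NVRAM_SIZE_512KB.
 */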
12113 
12114 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12115 {
12116 	u32 nvcfg1;
12117 
12118 	nvcfg1 = tr32(NVRAM_CFG1);
12119 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12120 		tg3_flag_set(tp, FLASH);
12121 	} else {
12122 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12123 		tw32(NVRAM_CFG1, nvcfg1);
12124 	}
12125 
12126 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12127 	    tg3_flag(tp, 5780_CLASS)) {
12128 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12129 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12130 			tp->nvram_jedecnum = JEDEC_ATMEL;
12131 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12132 			tg3_flag_set(tp, NVRAM_BUFFERED);
12133 			break;
12134 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12135 			tp->nvram_jedecnum = JEDEC_ATMEL;
12136 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12137 			break;
12138 		case FLASH_VENDOR_ATMEL_EEPROM:
12139 			tp->nvram_jedecnum = JEDEC_ATMEL;
12140 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12141 			tg3_flag_set(tp, NVRAM_BUFFERED);
12142 			break;
12143 		case FLASH_VENDOR_ST:
12144 			tp->nvram_jedecnum = JEDEC_ST;
12145 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12146 			tg3_flag_set(tp, NVRAM_BUFFERED);
12147 			break;
12148 		case FLASH_VENDOR_SAIFUN:
12149 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12150 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12151 			break;
12152 		case FLASH_VENDOR_SST_SMALL:
12153 		case FLASH_VENDOR_SST_LARGE:
12154 			tp->nvram_jedecnum = JEDEC_SST;
12155 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12156 			break;
12157 		}
12158 	} else {
12159 		tp->nvram_jedecnum = JEDEC_ATMEL;
12160 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12161 		tg3_flag_set(tp, NVRAM_BUFFERED);
12162 	}
12163 }
12164 
12165 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12166 {
12167 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12168 	case FLASH_5752PAGE_SIZE_256:
12169 		tp->nvram_pagesize = 256;
12170 		break;
12171 	case FLASH_5752PAGE_SIZE_512:
12172 		tp->nvram_pagesize = 512;
12173 		break;
12174 	case FLASH_5752PAGE_SIZE_1K:
12175 		tp->nvram_pagesize = 1024;
12176 		break;
12177 	case FLASH_5752PAGE_SIZE_2K:
12178 		tp->nvram_pagesize = 2048;
12179 		break;
12180 	case FLASH_5752PAGE_SIZE_4K:
12181 		tp->nvram_pagesize = 4096;
12182 		break;
12183 	case FLASH_5752PAGE_SIZE_264:
12184 		tp->nvram_pagesize = 264;
12185 		break;
12186 	case FLASH_5752PAGE_SIZE_528:
12187 		tp->nvram_pagesize = 528;
12188 		break;
12189 	}
12190 }
12191 
12192 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12193 {
12194 	u32 nvcfg1;
12195 
12196 	nvcfg1 = tr32(NVRAM_CFG1);
12197 
12198 	/* NVRAM protection for TPM */
12199 	if (nvcfg1 & (1 << 27))
12200 		tg3_flag_set(tp, PROTECTED_NVRAM);
12201 
12202 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12203 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12204 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12205 		tp->nvram_jedecnum = JEDEC_ATMEL;
12206 		tg3_flag_set(tp, NVRAM_BUFFERED);
12207 		break;
12208 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12209 		tp->nvram_jedecnum = JEDEC_ATMEL;
12210 		tg3_flag_set(tp, NVRAM_BUFFERED);
12211 		tg3_flag_set(tp, FLASH);
12212 		break;
12213 	case FLASH_5752VENDOR_ST_M45PE10:
12214 	case FLASH_5752VENDOR_ST_M45PE20:
12215 	case FLASH_5752VENDOR_ST_M45PE40:
12216 		tp->nvram_jedecnum = JEDEC_ST;
12217 		tg3_flag_set(tp, NVRAM_BUFFERED);
12218 		tg3_flag_set(tp, FLASH);
12219 		break;
12220 	}
12221 
12222 	if (tg3_flag(tp, FLASH)) {
12223 		tg3_nvram_get_pagesize(tp, nvcfg1);
12224 	} else {
12225 		/* For eeprom, set pagesize to maximum eeprom size */
12226 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12227 
12228 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12229 		tw32(NVRAM_CFG1, nvcfg1);
12230 	}
12231 }
12232 
12233 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12234 {
12235 	u32 nvcfg1, protect = 0;
12236 
12237 	nvcfg1 = tr32(NVRAM_CFG1);
12238 
12239 	/* NVRAM protection for TPM */
12240 	if (nvcfg1 & (1 << 27)) {
12241 		tg3_flag_set(tp, PROTECTED_NVRAM);
12242 		protect = 1;
12243 	}
12244 
12245 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12246 	switch (nvcfg1) {
12247 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12248 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12249 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12250 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12251 		tp->nvram_jedecnum = JEDEC_ATMEL;
12252 		tg3_flag_set(tp, NVRAM_BUFFERED);
12253 		tg3_flag_set(tp, FLASH);
12254 		tp->nvram_pagesize = 264;
12255 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12256 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12257 			tp->nvram_size = (protect ? 0x3e200 :
12258 					  TG3_NVRAM_SIZE_512KB);
12259 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12260 			tp->nvram_size = (protect ? 0x1f200 :
12261 					  TG3_NVRAM_SIZE_256KB);
12262 		else
12263 			tp->nvram_size = (protect ? 0x1f200 :
12264 					  TG3_NVRAM_SIZE_128KB);
12265 		break;
12266 	case FLASH_5752VENDOR_ST_M45PE10:
12267 	case FLASH_5752VENDOR_ST_M45PE20:
12268 	case FLASH_5752VENDOR_ST_M45PE40:
12269 		tp->nvram_jedecnum = JEDEC_ST;
12270 		tg3_flag_set(tp, NVRAM_BUFFERED);
12271 		tg3_flag_set(tp, FLASH);
12272 		tp->nvram_pagesize = 256;
12273 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12274 			tp->nvram_size = (protect ?
12275 					  TG3_NVRAM_SIZE_64KB :
12276 					  TG3_NVRAM_SIZE_128KB);
12277 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12278 			tp->nvram_size = (protect ?
12279 					  TG3_NVRAM_SIZE_64KB :
12280 					  TG3_NVRAM_SIZE_256KB);
12281 		else
12282 			tp->nvram_size = (protect ?
12283 					  TG3_NVRAM_SIZE_128KB :
12284 					  TG3_NVRAM_SIZE_512KB);
12285 		break;
12286 	}
12287 }
12288 
12289 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12290 {
12291 	u32 nvcfg1;
12292 
12293 	nvcfg1 = tr32(NVRAM_CFG1);
12294 
12295 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12296 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12297 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12298 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12299 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12300 		tp->nvram_jedecnum = JEDEC_ATMEL;
12301 		tg3_flag_set(tp, NVRAM_BUFFERED);
12302 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12303 
12304 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12305 		tw32(NVRAM_CFG1, nvcfg1);
12306 		break;
12307 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12308 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12309 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12310 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12311 		tp->nvram_jedecnum = JEDEC_ATMEL;
12312 		tg3_flag_set(tp, NVRAM_BUFFERED);
12313 		tg3_flag_set(tp, FLASH);
12314 		tp->nvram_pagesize = 264;
12315 		break;
12316 	case FLASH_5752VENDOR_ST_M45PE10:
12317 	case FLASH_5752VENDOR_ST_M45PE20:
12318 	case FLASH_5752VENDOR_ST_M45PE40:
12319 		tp->nvram_jedecnum = JEDEC_ST;
12320 		tg3_flag_set(tp, NVRAM_BUFFERED);
12321 		tg3_flag_set(tp, FLASH);
12322 		tp->nvram_pagesize = 256;
12323 		break;
12324 	}
12325 }
12326 
12327 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12328 {
12329 	u32 nvcfg1, protect = 0;
12330 
12331 	nvcfg1 = tr32(NVRAM_CFG1);
12332 
12333 	/* NVRAM protection for TPM */
12334 	if (nvcfg1 & (1 << 27)) {
12335 		tg3_flag_set(tp, PROTECTED_NVRAM);
12336 		protect = 1;
12337 	}
12338 
12339 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12340 	switch (nvcfg1) {
12341 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12342 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12343 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12344 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12345 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12346 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12347 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12348 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12349 		tp->nvram_jedecnum = JEDEC_ATMEL;
12350 		tg3_flag_set(tp, NVRAM_BUFFERED);
12351 		tg3_flag_set(tp, FLASH);
12352 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12353 		tp->nvram_pagesize = 256;
12354 		break;
12355 	case FLASH_5761VENDOR_ST_A_M45PE20:
12356 	case FLASH_5761VENDOR_ST_A_M45PE40:
12357 	case FLASH_5761VENDOR_ST_A_M45PE80:
12358 	case FLASH_5761VENDOR_ST_A_M45PE16:
12359 	case FLASH_5761VENDOR_ST_M_M45PE20:
12360 	case FLASH_5761VENDOR_ST_M_M45PE40:
12361 	case FLASH_5761VENDOR_ST_M_M45PE80:
12362 	case FLASH_5761VENDOR_ST_M_M45PE16:
12363 		tp->nvram_jedecnum = JEDEC_ST;
12364 		tg3_flag_set(tp, NVRAM_BUFFERED);
12365 		tg3_flag_set(tp, FLASH);
12366 		tp->nvram_pagesize = 256;
12367 		break;
12368 	}
12369 
12370 	if (protect) {
12371 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12372 	} else {
12373 		switch (nvcfg1) {
12374 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12375 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12376 		case FLASH_5761VENDOR_ST_A_M45PE16:
12377 		case FLASH_5761VENDOR_ST_M_M45PE16:
12378 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12379 			break;
12380 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12381 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12382 		case FLASH_5761VENDOR_ST_A_M45PE80:
12383 		case FLASH_5761VENDOR_ST_M_M45PE80:
12384 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12385 			break;
12386 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12387 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12388 		case FLASH_5761VENDOR_ST_A_M45PE40:
12389 		case FLASH_5761VENDOR_ST_M_M45PE40:
12390 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12391 			break;
12392 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12393 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12394 		case FLASH_5761VENDOR_ST_A_M45PE20:
12395 		case FLASH_5761VENDOR_ST_M_M45PE20:
12396 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12397 			break;
12398 		}
12399 	}
12400 }
12401 
12402 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12403 {
12404 	tp->nvram_jedecnum = JEDEC_ATMEL;
12405 	tg3_flag_set(tp, NVRAM_BUFFERED);
12406 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12407 }
12408 
12409 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12410 {
12411 	u32 nvcfg1;
12412 
12413 	nvcfg1 = tr32(NVRAM_CFG1);
12414 
12415 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12416 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12417 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12418 		tp->nvram_jedecnum = JEDEC_ATMEL;
12419 		tg3_flag_set(tp, NVRAM_BUFFERED);
12420 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12421 
12422 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12423 		tw32(NVRAM_CFG1, nvcfg1);
12424 		return;
12425 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12426 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12427 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12428 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12429 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12430 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12431 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12432 		tp->nvram_jedecnum = JEDEC_ATMEL;
12433 		tg3_flag_set(tp, NVRAM_BUFFERED);
12434 		tg3_flag_set(tp, FLASH);
12435 
12436 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12437 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12438 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12439 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12440 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12441 			break;
12442 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12443 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12444 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12445 			break;
12446 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12447 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12448 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12449 			break;
12450 		}
12451 		break;
12452 	case FLASH_5752VENDOR_ST_M45PE10:
12453 	case FLASH_5752VENDOR_ST_M45PE20:
12454 	case FLASH_5752VENDOR_ST_M45PE40:
12455 		tp->nvram_jedecnum = JEDEC_ST;
12456 		tg3_flag_set(tp, NVRAM_BUFFERED);
12457 		tg3_flag_set(tp, FLASH);
12458 
12459 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12460 		case FLASH_5752VENDOR_ST_M45PE10:
12461 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12462 			break;
12463 		case FLASH_5752VENDOR_ST_M45PE20:
12464 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12465 			break;
12466 		case FLASH_5752VENDOR_ST_M45PE40:
12467 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12468 			break;
12469 		}
12470 		break;
12471 	default:
12472 		tg3_flag_set(tp, NO_NVRAM);
12473 		return;
12474 	}
12475 
12476 	tg3_nvram_get_pagesize(tp, nvcfg1);
12477 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12478 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12479 }
12480 
12481 
12482 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12483 {
12484 	u32 nvcfg1;
12485 
12486 	nvcfg1 = tr32(NVRAM_CFG1);
12487 
12488 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12489 	case FLASH_5717VENDOR_ATMEL_EEPROM:
12490 	case FLASH_5717VENDOR_MICRO_EEPROM:
12491 		tp->nvram_jedecnum = JEDEC_ATMEL;
12492 		tg3_flag_set(tp, NVRAM_BUFFERED);
12493 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12494 
12495 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12496 		tw32(NVRAM_CFG1, nvcfg1);
12497 		return;
12498 	case FLASH_5717VENDOR_ATMEL_MDB011D:
12499 	case FLASH_5717VENDOR_ATMEL_ADB011B:
12500 	case FLASH_5717VENDOR_ATMEL_ADB011D:
12501 	case FLASH_5717VENDOR_ATMEL_MDB021D:
12502 	case FLASH_5717VENDOR_ATMEL_ADB021B:
12503 	case FLASH_5717VENDOR_ATMEL_ADB021D:
12504 	case FLASH_5717VENDOR_ATMEL_45USPT:
12505 		tp->nvram_jedecnum = JEDEC_ATMEL;
12506 		tg3_flag_set(tp, NVRAM_BUFFERED);
12507 		tg3_flag_set(tp, FLASH);
12508 
12509 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12510 		case FLASH_5717VENDOR_ATMEL_MDB021D:
12511 			/* Detect size with tg3_nvram_get_size() */
12512 			/* Detect size with tg3_get_nvram_size() */
12513 		case FLASH_5717VENDOR_ATMEL_ADB021B:
12514 		case FLASH_5717VENDOR_ATMEL_ADB021D:
12515 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12516 			break;
12517 		default:
12518 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12519 			break;
12520 		}
12521 		break;
12522 	case FLASH_5717VENDOR_ST_M_M25PE10:
12523 	case FLASH_5717VENDOR_ST_A_M25PE10:
12524 	case FLASH_5717VENDOR_ST_M_M45PE10:
12525 	case FLASH_5717VENDOR_ST_A_M45PE10:
12526 	case FLASH_5717VENDOR_ST_M_M25PE20:
12527 	case FLASH_5717VENDOR_ST_A_M25PE20:
12528 	case FLASH_5717VENDOR_ST_M_M45PE20:
12529 	case FLASH_5717VENDOR_ST_A_M45PE20:
12530 	case FLASH_5717VENDOR_ST_25USPT:
12531 	case FLASH_5717VENDOR_ST_45USPT:
12532 		tp->nvram_jedecnum = JEDEC_ST;
12533 		tg3_flag_set(tp, NVRAM_BUFFERED);
12534 		tg3_flag_set(tp, FLASH);
12535 
12536 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12537 		case FLASH_5717VENDOR_ST_M_M25PE20:
12538 		case FLASH_5717VENDOR_ST_M_M45PE20:
12539 			/* Detect size with tg3_nvram_get_size() */
12540 			/* Detect size with tg3_get_nvram_size() */
12541 		case FLASH_5717VENDOR_ST_A_M25PE20:
12542 		case FLASH_5717VENDOR_ST_A_M45PE20:
12543 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12544 			break;
12545 		default:
12546 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12547 			break;
12548 		}
12549 		break;
12550 	default:
12551 		tg3_flag_set(tp, NO_NVRAM);
12552 		return;
12553 	}
12554 
12555 	tg3_nvram_get_pagesize(tp, nvcfg1);
12556 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12557 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12558 }
12559 
12560 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12561 {
12562 	u32 nvcfg1, nvmpinstrp;
12563 
12564 	nvcfg1 = tr32(NVRAM_CFG1);
12565 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12566 
12567 	switch (nvmpinstrp) {
12568 	case FLASH_5720_EEPROM_HD:
12569 	case FLASH_5720_EEPROM_LD:
12570 		tp->nvram_jedecnum = JEDEC_ATMEL;
12571 		tg3_flag_set(tp, NVRAM_BUFFERED);
12572 
12573 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12574 		tw32(NVRAM_CFG1, nvcfg1);
12575 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12576 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12577 		else
12578 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12579 		return;
12580 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12581 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12582 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12583 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12584 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12585 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12586 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12587 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12588 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12589 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12590 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12591 	case FLASH_5720VENDOR_ATMEL_45USPT:
12592 		tp->nvram_jedecnum = JEDEC_ATMEL;
12593 		tg3_flag_set(tp, NVRAM_BUFFERED);
12594 		tg3_flag_set(tp, FLASH);
12595 
12596 		switch (nvmpinstrp) {
12597 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12598 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12599 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12600 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12601 			break;
12602 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12603 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12604 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12605 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12606 			break;
12607 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12608 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12609 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12610 			break;
12611 		default:
12612 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12613 			break;
12614 		}
12615 		break;
12616 	case FLASH_5720VENDOR_M_ST_M25PE10:
12617 	case FLASH_5720VENDOR_M_ST_M45PE10:
12618 	case FLASH_5720VENDOR_A_ST_M25PE10:
12619 	case FLASH_5720VENDOR_A_ST_M45PE10:
12620 	case FLASH_5720VENDOR_M_ST_M25PE20:
12621 	case FLASH_5720VENDOR_M_ST_M45PE20:
12622 	case FLASH_5720VENDOR_A_ST_M25PE20:
12623 	case FLASH_5720VENDOR_A_ST_M45PE20:
12624 	case FLASH_5720VENDOR_M_ST_M25PE40:
12625 	case FLASH_5720VENDOR_M_ST_M45PE40:
12626 	case FLASH_5720VENDOR_A_ST_M25PE40:
12627 	case FLASH_5720VENDOR_A_ST_M45PE40:
12628 	case FLASH_5720VENDOR_M_ST_M25PE80:
12629 	case FLASH_5720VENDOR_M_ST_M45PE80:
12630 	case FLASH_5720VENDOR_A_ST_M25PE80:
12631 	case FLASH_5720VENDOR_A_ST_M45PE80:
12632 	case FLASH_5720VENDOR_ST_25USPT:
12633 	case FLASH_5720VENDOR_ST_45USPT:
12634 		tp->nvram_jedecnum = JEDEC_ST;
12635 		tg3_flag_set(tp, NVRAM_BUFFERED);
12636 		tg3_flag_set(tp, FLASH);
12637 
12638 		switch (nvmpinstrp) {
12639 		case FLASH_5720VENDOR_M_ST_M25PE20:
12640 		case FLASH_5720VENDOR_M_ST_M45PE20:
12641 		case FLASH_5720VENDOR_A_ST_M25PE20:
12642 		case FLASH_5720VENDOR_A_ST_M45PE20:
12643 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12644 			break;
12645 		case FLASH_5720VENDOR_M_ST_M25PE40:
12646 		case FLASH_5720VENDOR_M_ST_M45PE40:
12647 		case FLASH_5720VENDOR_A_ST_M25PE40:
12648 		case FLASH_5720VENDOR_A_ST_M45PE40:
12649 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12650 			break;
12651 		case FLASH_5720VENDOR_M_ST_M25PE80:
12652 		case FLASH_5720VENDOR_M_ST_M45PE80:
12653 		case FLASH_5720VENDOR_A_ST_M25PE80:
12654 		case FLASH_5720VENDOR_A_ST_M45PE80:
12655 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12656 			break;
12657 		default:
12658 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12659 			break;
12660 		}
12661 		break;
12662 	default:
12663 		tg3_flag_set(tp, NO_NVRAM);
12664 		return;
12665 	}
12666 
12667 	tg3_nvram_get_pagesize(tp, nvcfg1);
12668 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12669 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12670 }
12671 
12672 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12673 static void __devinit tg3_nvram_init(struct tg3 *tp)
12674 {
12675 	tw32_f(GRC_EEPROM_ADDR,
12676 	     (EEPROM_ADDR_FSM_RESET |
12677 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
12678 	       EEPROM_ADDR_CLKPERD_SHIFT)));
12679 
12680 	msleep(1);
12681 
12682 	/* Enable seeprom accesses. */
12683 	tw32_f(GRC_LOCAL_CTRL,
12684 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12685 	udelay(100);
12686 
12687 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12688 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12689 		tg3_flag_set(tp, NVRAM);
12690 
12691 		if (tg3_nvram_lock(tp)) {
12692 			netdev_warn(tp->dev,
12693 				    "Cannot get nvram lock, %s failed\n",
12694 				    __func__);
12695 			return;
12696 		}
12697 		tg3_enable_nvram_access(tp);
12698 
12699 		tp->nvram_size = 0;
12700 
12701 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12702 			tg3_get_5752_nvram_info(tp);
12703 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12704 			tg3_get_5755_nvram_info(tp);
12705 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12706 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12707 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12708 			tg3_get_5787_nvram_info(tp);
12709 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12710 			tg3_get_5761_nvram_info(tp);
12711 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12712 			tg3_get_5906_nvram_info(tp);
12713 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12714 			 tg3_flag(tp, 57765_CLASS))
12715 			tg3_get_57780_nvram_info(tp);
12716 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12717 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12718 			tg3_get_5717_nvram_info(tp);
12719 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12720 			tg3_get_5720_nvram_info(tp);
12721 		else
12722 			tg3_get_nvram_info(tp);
12723 
12724 		if (tp->nvram_size == 0)
12725 			tg3_get_nvram_size(tp);
12726 
12727 		tg3_disable_nvram_access(tp);
12728 		tg3_nvram_unlock(tp);
12729 
12730 	} else {
12731 		tg3_flag_clear(tp, NVRAM);
12732 		tg3_flag_clear(tp, NVRAM_BUFFERED);
12733 
12734 		tg3_get_eeprom_size(tp);
12735 	}
12736 }
12737 
12738 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12739 				    u32 offset, u32 len, u8 *buf)
12740 {
12741 	int i, j, rc = 0;
12742 	u32 val;
12743 
12744 	for (i = 0; i < len; i += 4) {
12745 		u32 addr;
12746 		__be32 data;
12747 
12748 		addr = offset + i;
12749 
12750 		memcpy(&data, buf + i, 4);
12751 
12752 		/*
12753 		 * The SEEPROM interface expects the data to always be opposite
12754 		 * the native endian format.  We accomplish this by reversing
12755 		 * all the operations that would have been performed on the
12756 		 * data from a call to tg3_nvram_read_be32().
12757 		 */
12758 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12759 
12760 		val = tr32(GRC_EEPROM_ADDR);
12761 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12762 
12763 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12764 			EEPROM_ADDR_READ);
12765 		tw32(GRC_EEPROM_ADDR, val |
12766 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
12767 			(addr & EEPROM_ADDR_ADDR_MASK) |
12768 			EEPROM_ADDR_START |
12769 			EEPROM_ADDR_WRITE);
12770 
12771 		for (j = 0; j < 1000; j++) {
12772 			val = tr32(GRC_EEPROM_ADDR);
12773 
12774 			if (val & EEPROM_ADDR_COMPLETE)
12775 				break;
12776 			msleep(1);
12777 		}
12778 		if (!(val & EEPROM_ADDR_COMPLETE)) {
12779 			rc = -EBUSY;
12780 			break;
12781 		}
12782 	}
12783 
12784 	return rc;
12785 }
12786 
12787 /* offset and length are dword aligned */
12788 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12789 		u8 *buf)
12790 {
12791 	int ret = 0;
12792 	u32 pagesize = tp->nvram_pagesize;
12793 	u32 pagemask = pagesize - 1;
12794 	u32 nvram_cmd;
12795 	u8 *tmp;
12796 
12797 	tmp = kmalloc(pagesize, GFP_KERNEL);
12798 	if (tmp == NULL)
12799 		return -ENOMEM;
12800 
12801 	while (len) {
12802 		int j;
12803 		u32 phy_addr, page_off, size;
12804 
12805 		phy_addr = offset & ~pagemask;
12806 
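		/* Read back the whole target page first so that bytes
		 * outside the write window survive the page erase below.
		 */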
12807 		for (j = 0; j < pagesize; j += 4) {
12808 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
12809 						  (__be32 *) (tmp + j));
12810 			if (ret)
12811 				break;
12812 		}
12813 		if (ret)
12814 			break;
12815 
12816 		page_off = offset & pagemask;
12817 		size = pagesize;
12818 		if (len < size)
12819 			size = len;
12820 
12821 		len -= size;
12822 
12823 		memcpy(tmp + page_off, buf, size);
12824 
12825 		offset = offset + (pagesize - page_off);
12826 
12827 		tg3_enable_nvram_access(tp);
12828 
12829 		/*
12830 		 * Before we can erase the flash page, we need
12831 		 * to issue a special "write enable" command.
12832 		 */
12833 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12834 
12835 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12836 			break;
12837 
12838 		/* Erase the target page */
12839 		tw32(NVRAM_ADDR, phy_addr);
12840 
12841 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12842 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12843 
12844 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12845 			break;
12846 
12847 		/* Issue another write enable to start the write. */
12848 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12849 
12850 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12851 			break;
12852 
12853 		for (j = 0; j < pagesize; j += 4) {
12854 			__be32 data;
12855 
12856 			data = *((__be32 *) (tmp + j));
12857 
12858 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
12859 
12860 			tw32(NVRAM_ADDR, phy_addr + j);
12861 
12862 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12863 				NVRAM_CMD_WR;
12864 
12865 			if (j == 0)
12866 				nvram_cmd |= NVRAM_CMD_FIRST;
12867 			else if (j == (pagesize - 4))
12868 				nvram_cmd |= NVRAM_CMD_LAST;
12869 
12870 			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12871 				break;
12872 		}
12873 		if (ret)
12874 			break;
12875 	}
12876 
12877 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12878 	tg3_nvram_exec_cmd(tp, nvram_cmd);
12879 
12880 	kfree(tmp);
12881 
12882 	return ret;
12883 }
12884 
12885 /* offset and length are dword aligned */
12886 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12887 		u8 *buf)
12888 {
12889 	int i, ret = 0;
12890 
12891 	for (i = 0; i < len; i += 4, offset += 4) {
12892 		u32 page_off, phy_addr, nvram_cmd;
12893 		__be32 data;
12894 
12895 		memcpy(&data, buf + i, 4);
12896 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
12897 
12898 		page_off = offset % tp->nvram_pagesize;
12899 
12900 		phy_addr = tg3_nvram_phys_addr(tp, offset);
12901 
12902 		tw32(NVRAM_ADDR, phy_addr);
12903 
12904 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12905 
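		/* Tag page and transfer boundaries: FIRST on the first word
		 * of a page or of the block, LAST on the last word of a
		 * page or of the whole block.
		 */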
12906 		if (page_off == 0 || i == 0)
12907 			nvram_cmd |= NVRAM_CMD_FIRST;
12908 		if (page_off == (tp->nvram_pagesize - 4))
12909 			nvram_cmd |= NVRAM_CMD_LAST;
12910 
12911 		if (i == (len - 4))
12912 			nvram_cmd |= NVRAM_CMD_LAST;
12913 
12914 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12915 		    !tg3_flag(tp, 5755_PLUS) &&
12916 		    (tp->nvram_jedecnum == JEDEC_ST) &&
12917 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
12919 			if ((ret = tg3_nvram_exec_cmd(tp,
12920 				NVRAM_CMD_WREN | NVRAM_CMD_GO |
12921 				NVRAM_CMD_DONE)))
12923 				break;
12924 		}
12925 		if (!tg3_flag(tp, FLASH)) {
12926 			/* We always do complete word writes to eeprom. */
12927 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12928 		}
12929 
12930 		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12931 			break;
12932 	}
12933 	return ret;
12934 }
12935 
12936 /* offset and length are dword aligned */
12937 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12938 {
12939 	int ret;
12940 
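	/* Boards that gate eeprom writes with GPIO1 get the write-protect
	 * line dropped here; it is restored once the update completes.
	 */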
12941 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12942 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12943 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
12944 		udelay(40);
12945 	}
12946 
12947 	if (!tg3_flag(tp, NVRAM)) {
12948 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12949 	} else {
12950 		u32 grc_mode;
12951 
12952 		ret = tg3_nvram_lock(tp);
12953 		if (ret)
12954 			return ret;
12955 
12956 		tg3_enable_nvram_access(tp);
12957 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12958 			tw32(NVRAM_WRITE1, 0x406);
12959 
12960 		grc_mode = tr32(GRC_MODE);
12961 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12962 
12963 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12964 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
12965 				buf);
12966 		} else {
12967 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12968 				buf);
12969 		}
12970 
12971 		grc_mode = tr32(GRC_MODE);
12972 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12973 
12974 		tg3_disable_nvram_access(tp);
12975 		tg3_nvram_unlock(tp);
12976 	}
12977 
12978 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12979 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12980 		udelay(40);
12981 	}
12982 
12983 	return ret;
12984 }
12985 
12986 struct subsys_tbl_ent {
12987 	u16 subsys_vendor, subsys_devid;
12988 	u32 phy_id;
12989 };
12990 
12991 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12992 	/* Broadcom boards. */
12993 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12994 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12995 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12996 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12997 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12998 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12999 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13000 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13001 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13002 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13003 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13004 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13005 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13006 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13007 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13008 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13009 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13010 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13011 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13012 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13013 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13014 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13015 
13016 	/* 3com boards. */
13017 	{ TG3PCI_SUBVENDOR_ID_3COM,
13018 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13019 	{ TG3PCI_SUBVENDOR_ID_3COM,
13020 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13021 	{ TG3PCI_SUBVENDOR_ID_3COM,
13022 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13023 	{ TG3PCI_SUBVENDOR_ID_3COM,
13024 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13025 	{ TG3PCI_SUBVENDOR_ID_3COM,
13026 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13027 
13028 	/* DELL boards. */
13029 	{ TG3PCI_SUBVENDOR_ID_DELL,
13030 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13031 	{ TG3PCI_SUBVENDOR_ID_DELL,
13032 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13033 	{ TG3PCI_SUBVENDOR_ID_DELL,
13034 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13035 	{ TG3PCI_SUBVENDOR_ID_DELL,
13036 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13037 
13038 	/* Compaq boards. */
13039 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13040 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13041 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13042 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13043 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13044 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13045 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13046 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13047 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13048 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13049 
13050 	/* IBM boards. */
13051 	{ TG3PCI_SUBVENDOR_ID_IBM,
13052 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13053 };
13054 
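/* Match the device's PCI subsystem vendor/device pair against the table
 * above; used when the eeprom provides no usable PHY ID.
 */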
13055 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13056 {
13057 	int i;
13058 
13059 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13060 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13061 		     tp->pdev->subsystem_vendor) &&
13062 		    (subsys_id_to_phy_id[i].subsys_devid ==
13063 		     tp->pdev->subsystem_device))
13064 			return &subsys_id_to_phy_id[i];
13065 	}
13066 	return NULL;
13067 }
13068 
13069 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13070 {
13071 	u32 val;
13072 
13073 	tp->phy_id = TG3_PHY_ID_INVALID;
13074 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13075 
13076 	/* Assume an onboard device and WOL capability by default.  */
13077 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13078 	tg3_flag_set(tp, WOL_CAP);
13079 
13080 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13081 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13082 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13083 			tg3_flag_set(tp, IS_NIC);
13084 		}
13085 		val = tr32(VCPU_CFGSHDW);
13086 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13087 			tg3_flag_set(tp, ASPM_WORKAROUND);
13088 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13089 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13090 			tg3_flag_set(tp, WOL_ENABLE);
13091 			device_set_wakeup_enable(&tp->pdev->dev, true);
13092 		}
13093 		goto done;
13094 	}
13095 
13096 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13097 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13098 		u32 nic_cfg, led_cfg;
13099 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13100 		int eeprom_phy_serdes = 0;
13101 
13102 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13103 		tp->nic_sram_data_cfg = nic_cfg;
13104 
13105 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13106 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13107 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13108 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13109 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13110 		    (ver > 0) && (ver < 0x100))
13111 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13112 
13113 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13114 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13115 
13116 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13117 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13118 			eeprom_phy_serdes = 1;
13119 
13120 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13121 		if (nic_phy_id != 0) {
13122 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13123 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13124 
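			/* Repack the two SRAM PHY ID words into the
			 * driver's internal TG3_PHY_ID_* layout; the same
			 * packing is applied to MII_PHYSID1/2 in
			 * tg3_phy_probe().
			 */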
13125 			eeprom_phy_id  = (id1 >> 16) << 10;
13126 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13127 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13128 		} else
13129 			eeprom_phy_id = 0;
13130 
13131 		tp->phy_id = eeprom_phy_id;
13132 		if (eeprom_phy_serdes) {
13133 			if (!tg3_flag(tp, 5705_PLUS))
13134 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13135 			else
13136 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13137 		}
13138 
13139 		if (tg3_flag(tp, 5750_PLUS))
13140 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13141 				    SHASTA_EXT_LED_MODE_MASK);
13142 		else
13143 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13144 
13145 		switch (led_cfg) {
13146 		default:
13147 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13148 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13149 			break;
13150 
13151 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13152 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13153 			break;
13154 
13155 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13156 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13157 
13158 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13159 			 * read from some older 5700/5701 bootcode.
13160 			 */
13161 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13162 			    ASIC_REV_5700 ||
13163 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13164 			    ASIC_REV_5701)
13165 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13166 
13167 			break;
13168 
13169 		case SHASTA_EXT_LED_SHARED:
13170 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13171 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13172 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13173 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13174 						 LED_CTRL_MODE_PHY_2);
13175 			break;
13176 
13177 		case SHASTA_EXT_LED_MAC:
13178 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13179 			break;
13180 
13181 		case SHASTA_EXT_LED_COMBO:
13182 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13183 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13184 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13185 						 LED_CTRL_MODE_PHY_2);
13186 			break;
13187 
13188 		}
13189 
13190 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13191 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13192 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13193 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13194 
13195 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13196 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13197 
13198 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13199 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13200 			if ((tp->pdev->subsystem_vendor ==
13201 			     PCI_VENDOR_ID_ARIMA) &&
13202 			    (tp->pdev->subsystem_device == 0x205a ||
13203 			     tp->pdev->subsystem_device == 0x2063))
13204 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13205 		} else {
13206 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13207 			tg3_flag_set(tp, IS_NIC);
13208 		}
13209 
13210 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13211 			tg3_flag_set(tp, ENABLE_ASF);
13212 			if (tg3_flag(tp, 5750_PLUS))
13213 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13214 		}
13215 
13216 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13217 		    tg3_flag(tp, 5750_PLUS))
13218 			tg3_flag_set(tp, ENABLE_APE);
13219 
13220 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13221 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13222 			tg3_flag_clear(tp, WOL_CAP);
13223 
13224 		if (tg3_flag(tp, WOL_CAP) &&
13225 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13226 			tg3_flag_set(tp, WOL_ENABLE);
13227 			device_set_wakeup_enable(&tp->pdev->dev, true);
13228 		}
13229 
13230 		if (cfg2 & (1 << 17))
13231 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13232 
13233 		/* serdes signal pre-emphasis in register 0x590 is set by
13234 		 * the bootcode if bit 18 is set. */
13235 		if (cfg2 & (1 << 18))
13236 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13237 
13238 		if ((tg3_flag(tp, 57765_PLUS) ||
13239 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13240 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13241 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13242 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13243 
13244 		if (tg3_flag(tp, PCI_EXPRESS) &&
13245 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13246 		    !tg3_flag(tp, 57765_PLUS)) {
13247 			u32 cfg3;
13248 
13249 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13250 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13251 				tg3_flag_set(tp, ASPM_WORKAROUND);
13252 		}
13253 
13254 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13255 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13256 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13257 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13258 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13259 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13260 	}
13261 done:
13262 	if (tg3_flag(tp, WOL_CAP))
13263 		device_set_wakeup_enable(&tp->pdev->dev,
13264 					 tg3_flag(tp, WOL_ENABLE));
13265 	else
13266 		device_set_wakeup_capable(&tp->pdev->dev, false);
13267 }
13268 
13269 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13270 {
13271 	int i;
13272 	u32 val;
13273 
13274 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13275 	tw32(OTP_CTRL, cmd);
13276 
13277 	/* Wait for up to 1 ms for command to execute. */
13278 	for (i = 0; i < 100; i++) {
13279 		val = tr32(OTP_STATUS);
13280 		if (val & OTP_STATUS_CMD_DONE)
13281 			break;
13282 		udelay(10);
13283 	}
13284 
13285 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13286 }
13287 
13288 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13289  * configuration is a 32-bit value that straddles the alignment boundary.
13290  * We do two 32-bit reads and then shift and merge the results.
13291  */
13292 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13293 {
13294 	u32 bhalf_otp, thalf_otp;
13295 
13296 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13297 
13298 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13299 		return 0;
13300 
13301 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13302 
13303 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13304 		return 0;
13305 
13306 	thalf_otp = tr32(OTP_READ_DATA);
13307 
13308 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13309 
13310 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13311 		return 0;
13312 
13313 	bhalf_otp = tr32(OTP_READ_DATA);
13314 
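	/* The low 16 bits of the first read form the upper half of the
	 * result; the upper 16 bits of the second read form the lower half.
	 */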
13315 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13316 }
13317 
13318 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13319 {
13320 	u32 adv = ADVERTISED_Autoneg;
13321 
13322 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13323 		adv |= ADVERTISED_1000baseT_Half |
13324 		       ADVERTISED_1000baseT_Full;
13325 
13326 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13327 		adv |= ADVERTISED_100baseT_Half |
13328 		       ADVERTISED_100baseT_Full |
13329 		       ADVERTISED_10baseT_Half |
13330 		       ADVERTISED_10baseT_Full |
13331 		       ADVERTISED_TP;
13332 	else
13333 		adv |= ADVERTISED_FIBRE;
13334 
13335 	tp->link_config.advertising = adv;
13336 	tp->link_config.speed = SPEED_INVALID;
13337 	tp->link_config.duplex = DUPLEX_INVALID;
13338 	tp->link_config.autoneg = AUTONEG_ENABLE;
13339 	tp->link_config.active_speed = SPEED_INVALID;
13340 	tp->link_config.active_duplex = DUPLEX_INVALID;
13341 	tp->link_config.orig_speed = SPEED_INVALID;
13342 	tp->link_config.orig_duplex = DUPLEX_INVALID;
13343 	tp->link_config.orig_autoneg = AUTONEG_INVALID;
13344 }
13345 
13346 static int __devinit tg3_phy_probe(struct tg3 *tp)
13347 {
13348 	u32 hw_phy_id_1, hw_phy_id_2;
13349 	u32 hw_phy_id, hw_phy_id_masked;
13350 	int err;
13351 
13352 	/* flow control autonegotiation is default behavior */
13353 	tg3_flag_set(tp, PAUSE_AUTONEG);
13354 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13355 
13356 	if (tg3_flag(tp, USE_PHYLIB))
13357 		return tg3_phy_init(tp);
13358 
13359 	/* Reading the PHY ID register can conflict with ASF
13360 	 * firmware access to the PHY hardware.
13361 	 */
13362 	err = 0;
13363 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13364 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13365 	} else {
13366 		/* Now read the physical PHY_ID from the chip and verify
13367 		 * that it is sane.  If it doesn't look good, we fall back
13368 		 * to the PHY_ID recorded in the eeprom area or, failing
13369 		 * that, the hard-coded subsystem ID table.
13370 		 */
13371 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13372 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13373 
13374 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13375 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13376 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13377 
13378 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13379 	}
13380 
13381 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13382 		tp->phy_id = hw_phy_id;
13383 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13384 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13385 		else
13386 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13387 	} else {
13388 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13389 			/* Do nothing, phy ID already set up in
13390 			 * tg3_get_eeprom_hw_cfg().
13391 			 */
13392 		} else {
13393 			struct subsys_tbl_ent *p;
13394 
13395 			/* No eeprom signature?  Try the hardcoded
13396 			 * subsys device table.
13397 			 */
13398 			p = tg3_lookup_by_subsys(tp);
13399 			if (!p)
13400 				return -ENODEV;
13401 
13402 			tp->phy_id = p->phy_id;
13403 			if (!tp->phy_id ||
13404 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13405 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13406 		}
13407 	}
13408 
13409 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13410 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13411 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13412 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13413 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13414 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13415 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13416 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13417 
13418 	tg3_phy_init_link_config(tp);
13419 
13420 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13421 	    !tg3_flag(tp, ENABLE_APE) &&
13422 	    !tg3_flag(tp, ENABLE_ASF)) {
13423 		u32 bmsr, dummy;
13424 
13425 		tg3_readphy(tp, MII_BMSR, &bmsr);
13426 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13427 		    (bmsr & BMSR_LSTATUS))
13428 			goto skip_phy_reset;
13429 
13430 		err = tg3_phy_reset(tp);
13431 		if (err)
13432 			return err;
13433 
13434 		tg3_phy_set_wirespeed(tp);
13435 
13436 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13437 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13438 					    tp->link_config.flowctrl);
13439 
13440 			tg3_writephy(tp, MII_BMCR,
13441 				     BMCR_ANENABLE | BMCR_ANRESTART);
13442 		}
13443 	}
13444 
13445 skip_phy_reset:
13446 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13447 		err = tg3_init_5401phy_dsp(tp);
13448 		if (err)
13449 			return err;
13450 
13451 		err = tg3_init_5401phy_dsp(tp);
13452 	}
13453 
13454 	return err;
13455 }
13456 
13457 static void __devinit tg3_read_vpd(struct tg3 *tp)
13458 {
13459 	u8 *vpd_data;
13460 	unsigned int block_end, rosize, len;
13461 	u32 vpdlen;
13462 	int j, i = 0;
13463 
13464 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13465 	if (!vpd_data)
13466 		goto out_no_vpd;
13467 
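	/* Find the read-only VPD block; from it we extract an optional
	 * vendor-specific firmware string (boards with mfr id "1028")
	 * and the board part number.
	 */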
13468 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13469 	if (i < 0)
13470 		goto out_not_found;
13471 
13472 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13473 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13474 	i += PCI_VPD_LRDT_TAG_SIZE;
13475 
13476 	if (block_end > vpdlen)
13477 		goto out_not_found;
13478 
13479 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13480 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13481 	if (j > 0) {
13482 		len = pci_vpd_info_field_size(&vpd_data[j]);
13483 
13484 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13485 		if (j + len > block_end || len != 4 ||
13486 		    memcmp(&vpd_data[j], "1028", 4))
13487 			goto partno;
13488 
13489 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13490 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13491 		if (j < 0)
13492 			goto partno;
13493 
13494 		len = pci_vpd_info_field_size(&vpd_data[j]);
13495 
13496 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13497 		if (j + len > block_end)
13498 			goto partno;
13499 
13500 		memcpy(tp->fw_ver, &vpd_data[j], len);
13501 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13502 	}
13503 
13504 partno:
13505 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13506 				      PCI_VPD_RO_KEYWORD_PARTNO);
13507 	if (i < 0)
13508 		goto out_not_found;
13509 
13510 	len = pci_vpd_info_field_size(&vpd_data[i]);
13511 
13512 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13513 	if (len > TG3_BPN_SIZE ||
13514 	    (len + i) > vpdlen)
13515 		goto out_not_found;
13516 
13517 	memcpy(tp->board_part_number, &vpd_data[i], len);
13518 
13519 out_not_found:
13520 	kfree(vpd_data);
13521 	if (tp->board_part_number[0])
13522 		return;
13523 
13524 out_no_vpd:
13525 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13526 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13527 			strcpy(tp->board_part_number, "BCM5717");
13528 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13529 			strcpy(tp->board_part_number, "BCM5718");
13530 		else
13531 			goto nomatch;
13532 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13533 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13534 			strcpy(tp->board_part_number, "BCM57780");
13535 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13536 			strcpy(tp->board_part_number, "BCM57760");
13537 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13538 			strcpy(tp->board_part_number, "BCM57790");
13539 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13540 			strcpy(tp->board_part_number, "BCM57788");
13541 		else
13542 			goto nomatch;
13543 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13544 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13545 			strcpy(tp->board_part_number, "BCM57761");
13546 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13547 			strcpy(tp->board_part_number, "BCM57765");
13548 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13549 			strcpy(tp->board_part_number, "BCM57781");
13550 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13551 			strcpy(tp->board_part_number, "BCM57785");
13552 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13553 			strcpy(tp->board_part_number, "BCM57791");
13554 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13555 			strcpy(tp->board_part_number, "BCM57795");
13556 		else
13557 			goto nomatch;
13558 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13559 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13560 			strcpy(tp->board_part_number, "BCM57762");
13561 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13562 			strcpy(tp->board_part_number, "BCM57766");
13563 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13564 			strcpy(tp->board_part_number, "BCM57782");
13565 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13566 			strcpy(tp->board_part_number, "BCM57786");
13567 		else
13568 			goto nomatch;
13569 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13570 		strcpy(tp->board_part_number, "BCM95906");
13571 	} else {
13572 nomatch:
13573 		strcpy(tp->board_part_number, "none");
13574 	}
13575 }
13576 
13577 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13578 {
13579 	u32 val;
13580 
13581 	if (tg3_nvram_read(tp, offset, &val) ||
13582 	    (val & 0xfc000000) != 0x0c000000 ||
13583 	    tg3_nvram_read(tp, offset + 4, &val) ||
13584 	    val != 0)
13585 		return 0;
13586 
13587 	return 1;
13588 }
13589 
13590 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13591 {
13592 	u32 val, offset, start, ver_offset;
13593 	int i, dst_off;
13594 	bool newver = false;
13595 
13596 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13597 	    tg3_nvram_read(tp, 0x4, &start))
13598 		return;
13599 
13600 	offset = tg3_nvram_logical_addr(tp, offset);
13601 
13602 	if (tg3_nvram_read(tp, offset, &val))
13603 		return;
13604 
13605 	if ((val & 0xfc000000) == 0x0c000000) {
13606 		if (tg3_nvram_read(tp, offset + 4, &val))
13607 			return;
13608 
13609 		if (val == 0)
13610 			newver = true;
13611 	}
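	/* Newer bootcode images embed a 16-byte version string; older
	 * ones only store a major/minor pair, handled in the else
	 * branch below.
	 */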
13612 
13613 	dst_off = strlen(tp->fw_ver);
13614 
13615 	if (newver) {
13616 		if (TG3_VER_SIZE - dst_off < 16 ||
13617 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13618 			return;
13619 
13620 		offset = offset + ver_offset - start;
13621 		for (i = 0; i < 16; i += 4) {
13622 			__be32 v;
13623 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13624 				return;
13625 
13626 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13627 		}
13628 	} else {
13629 		u32 major, minor;
13630 
13631 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13632 			return;
13633 
13634 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13635 			TG3_NVM_BCVER_MAJSFT;
13636 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13637 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13638 			 "v%d.%02d", major, minor);
13639 	}
13640 }
13641 
13642 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13643 {
13644 	u32 val, major, minor;
13645 
13646 	/* Use native endian representation */
13647 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13648 		return;
13649 
13650 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13651 		TG3_NVM_HWSB_CFG1_MAJSFT;
13652 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13653 		TG3_NVM_HWSB_CFG1_MINSFT;
13654 
13655 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13656 }
13657 
13658 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13659 {
13660 	u32 offset, major, minor, build;
13661 
13662 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13663 
13664 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13665 		return;
13666 
13667 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13668 	case TG3_EEPROM_SB_REVISION_0:
13669 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13670 		break;
13671 	case TG3_EEPROM_SB_REVISION_2:
13672 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13673 		break;
13674 	case TG3_EEPROM_SB_REVISION_3:
13675 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13676 		break;
13677 	case TG3_EEPROM_SB_REVISION_4:
13678 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13679 		break;
13680 	case TG3_EEPROM_SB_REVISION_5:
13681 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13682 		break;
13683 	case TG3_EEPROM_SB_REVISION_6:
13684 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13685 		break;
13686 	default:
13687 		return;
13688 	}
13689 
13690 	if (tg3_nvram_read(tp, offset, &val))
13691 		return;
13692 
13693 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13694 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13695 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13696 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13697 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13698 
13699 	if (minor > 99 || build > 26)
13700 		return;
13701 
13702 	offset = strlen(tp->fw_ver);
13703 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13704 		 " v%d.%02d", major, minor);
13705 
13706 	if (build > 0) {
13707 		offset = strlen(tp->fw_ver);
13708 		if (offset < TG3_VER_SIZE - 1)
13709 			tp->fw_ver[offset] = 'a' + build - 1;
13710 	}
13711 }
13712 
13713 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13714 {
13715 	u32 val, offset, start;
13716 	int i, vlen;
13717 
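	/* Walk the NVM directory for the ASF initialization image entry;
	 * the management firmware version string is read out of that
	 * image below.
	 */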
13718 	for (offset = TG3_NVM_DIR_START;
13719 	     offset < TG3_NVM_DIR_END;
13720 	     offset += TG3_NVM_DIRENT_SIZE) {
13721 		if (tg3_nvram_read(tp, offset, &val))
13722 			return;
13723 
13724 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13725 			break;
13726 	}
13727 
13728 	if (offset == TG3_NVM_DIR_END)
13729 		return;
13730 
13731 	if (!tg3_flag(tp, 5705_PLUS))
13732 		start = 0x08000000;
13733 	else if (tg3_nvram_read(tp, offset - 4, &start))
13734 		return;
13735 
13736 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13737 	    !tg3_fw_img_is_valid(tp, offset) ||
13738 	    tg3_nvram_read(tp, offset + 8, &val))
13739 		return;
13740 
13741 	offset += val - start;
13742 
13743 	vlen = strlen(tp->fw_ver);
13744 
13745 	tp->fw_ver[vlen++] = ',';
13746 	tp->fw_ver[vlen++] = ' ';
13747 
13748 	for (i = 0; i < 4; i++) {
13749 		__be32 v;
13750 		if (tg3_nvram_read_be32(tp, offset, &v))
13751 			return;
13752 
13753 		offset += sizeof(v);
13754 
13755 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13756 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13757 			break;
13758 		}
13759 
13760 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13761 		vlen += sizeof(v);
13762 	}
13763 }
13764 
13765 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13766 {
13767 	int vlen;
13768 	u32 apedata;
13769 	char *fwtype;
13770 
13771 	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13772 		return;
13773 
13774 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13775 	if (apedata != APE_SEG_SIG_MAGIC)
13776 		return;
13777 
13778 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13779 	if (!(apedata & APE_FW_STATUS_READY))
13780 		return;
13781 
13782 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13783 
13784 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13785 		tg3_flag_set(tp, APE_HAS_NCSI);
13786 		fwtype = "NCSI";
13787 	} else {
13788 		fwtype = "DASH";
13789 	}
13790 
13791 	vlen = strlen(tp->fw_ver);
13792 
13793 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13794 		 fwtype,
13795 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13796 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13797 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13798 		 (apedata & APE_FW_VERSION_BLDMSK));
13799 }
13800 
13801 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13802 {
13803 	u32 val;
13804 	bool vpd_vers = false;
13805 
13806 	if (tp->fw_ver[0] != 0)
13807 		vpd_vers = true;
13808 
13809 	if (tg3_flag(tp, NO_NVRAM)) {
13810 		strcat(tp->fw_ver, "sb");
13811 		return;
13812 	}
13813 
13814 	if (tg3_nvram_read(tp, 0, &val))
13815 		return;
13816 
13817 	if (val == TG3_EEPROM_MAGIC)
13818 		tg3_read_bc_ver(tp);
13819 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13820 		tg3_read_sb_ver(tp, val);
13821 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13822 		tg3_read_hwsb_ver(tp);
13823 	else
13824 		return;
13825 
13826 	if (vpd_vers)
13827 		goto done;
13828 
13829 	if (tg3_flag(tp, ENABLE_APE)) {
13830 		if (tg3_flag(tp, ENABLE_ASF))
13831 			tg3_read_dash_ver(tp);
13832 	} else if (tg3_flag(tp, ENABLE_ASF)) {
13833 		tg3_read_mgmtfw_ver(tp);
13834 	}
13835 
13836 done:
13837 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13838 }
13839 
13840 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13841 
13842 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13843 {
13844 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13845 		return TG3_RX_RET_MAX_SIZE_5717;
13846 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13847 		return TG3_RX_RET_MAX_SIZE_5700;
13848 	else
13849 		return TG3_RX_RET_MAX_SIZE_5705;
13850 }
13851 
13852 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13853 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13854 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13855 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13856 	{ },
13857 };
13858 
13859 static int __devinit tg3_get_invariants(struct tg3 *tp)
13860 {
13861 	u32 misc_ctrl_reg;
13862 	u32 pci_state_reg, grc_misc_cfg;
13863 	u32 val;
13864 	u16 pci_cmd;
13865 	int err;
13866 
13867 	/* Force memory write invalidate off.  If we leave it on,
13868 	 * then on 5700_BX chips we have to enable a workaround.
13869 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13870 	 * to match the cacheline size.  The Broadcom driver has this
13871 	 * workaround but turns MWI off all the time so never uses
13872 	 * it.  This seems to suggest that the workaround is insufficient.
13873 	 */
13874 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13875 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13876 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13877 
13878 	/* Important! -- Make sure register accesses are byteswapped
13879 	 * correctly.  Also, for those chips that require it, make
13880 	 * sure that indirect register accesses are enabled before
13881 	 * the first operation.
13882 	 */
13883 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13884 			      &misc_ctrl_reg);
13885 	tp->misc_host_ctrl |= (misc_ctrl_reg &
13886 			       MISC_HOST_CTRL_CHIPREV);
13887 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13888 			       tp->misc_host_ctrl);
13889 
13890 	tp->pci_chip_rev_id = (misc_ctrl_reg >>
13891 			       MISC_HOST_CTRL_CHIPREV_SHIFT);
13892 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13893 		u32 prod_id_asic_rev;
13894 
13895 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13896 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13897 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13898 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13899 			pci_read_config_dword(tp->pdev,
13900 					      TG3PCI_GEN2_PRODID_ASICREV,
13901 					      &prod_id_asic_rev);
13902 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13903 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13904 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13905 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13906 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13907 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13908 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13909 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13910 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13911 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13912 			pci_read_config_dword(tp->pdev,
13913 					      TG3PCI_GEN15_PRODID_ASICREV,
13914 					      &prod_id_asic_rev);
13915 		else
13916 			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13917 					      &prod_id_asic_rev);
13918 
13919 		tp->pci_chip_rev_id = prod_id_asic_rev;
13920 	}
13921 
13922 	/* Wrong chip ID in 5752 A0. This code can be removed later
13923 	 * as A0 is not in production.
13924 	 */
13925 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13926 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13927 
13928 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13929 	 * we need to disable memory and use config. cycles
13930 	 * only to access all registers. The 5702/03 chips
13931 	 * can mistakenly decode the special cycles from the
13932 	 * ICH chipsets as memory write cycles, causing corruption
13933 	 * of register and memory space. Only certain ICH bridges
13934 	 * will drive special cycles with non-zero data during the
13935 	 * address phase which can fall within the 5703's address
13936 	 * range. This is not an ICH bug as the PCI spec allows
13937 	 * non-zero address during special cycles. However, only
13938 	 * these ICH bridges are known to drive non-zero addresses
13939 	 * during special cycles.
13940 	 *
13941 	 * Since special cycles do not cross PCI bridges, we only
13942 	 * enable this workaround if the 5703 is on the secondary
13943 	 * bus of these ICH bridges.
13944 	 */
13945 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13946 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13947 		static struct tg3_dev_id {
13948 			u32	vendor;
13949 			u32	device;
13950 			u32	rev;
13951 		} ich_chipsets[] = {
13952 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13953 			  PCI_ANY_ID },
13954 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13955 			  PCI_ANY_ID },
13956 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13957 			  0xa },
13958 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13959 			  PCI_ANY_ID },
13960 			{ },
13961 		};
13962 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
13963 		struct pci_dev *bridge = NULL;
13964 
13965 		while (pci_id->vendor != 0) {
13966 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
13967 						bridge);
13968 			if (!bridge) {
13969 				pci_id++;
13970 				continue;
13971 			}
13972 			if (pci_id->rev != PCI_ANY_ID) {
13973 				if (bridge->revision > pci_id->rev)
13974 					continue;
13975 			}
13976 			if (bridge->subordinate &&
13977 			    (bridge->subordinate->number ==
13978 			     tp->pdev->bus->number)) {
13979 				tg3_flag_set(tp, ICH_WORKAROUND);
13980 				pci_dev_put(bridge);
13981 				break;
13982 			}
13983 		}
13984 	}
13985 
13986 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13987 		static struct tg3_dev_id {
13988 			u32	vendor;
13989 			u32	device;
13990 		} bridge_chipsets[] = {
13991 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13992 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13993 			{ },
13994 		};
13995 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13996 		struct pci_dev *bridge = NULL;
13997 
13998 		while (pci_id->vendor != 0) {
13999 			bridge = pci_get_device(pci_id->vendor,
14000 						pci_id->device,
14001 						bridge);
14002 			if (!bridge) {
14003 				pci_id++;
14004 				continue;
14005 			}
14006 			if (bridge->subordinate &&
14007 			    (bridge->subordinate->number <=
14008 			     tp->pdev->bus->number) &&
14009 			    (bridge->subordinate->subordinate >=
14010 			     tp->pdev->bus->number)) {
14011 				tg3_flag_set(tp, 5701_DMA_BUG);
14012 				pci_dev_put(bridge);
14013 				break;
14014 			}
14015 		}
14016 	}
14017 
14018 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14019 	 * DMA addresses > 40 bits.  This bridge may have additional
14020 	 * 57xx devices behind it in some 4-port NIC designs, for example.
14021 	 * Any tg3 device found behind the bridge will also need the 40-bit
14022 	 * DMA workaround.
14023 	 */
14024 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14025 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14026 		tg3_flag_set(tp, 5780_CLASS);
14027 		tg3_flag_set(tp, 40BIT_DMA_BUG);
14028 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14029 	} else {
14030 		struct pci_dev *bridge = NULL;
14031 
14032 		do {
14033 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14034 						PCI_DEVICE_ID_SERVERWORKS_EPB,
14035 						bridge);
14036 			if (bridge && bridge->subordinate &&
14037 			    (bridge->subordinate->number <=
14038 			     tp->pdev->bus->number) &&
14039 			    (bridge->subordinate->subordinate >=
14040 			     tp->pdev->bus->number)) {
14041 				tg3_flag_set(tp, 40BIT_DMA_BUG);
14042 				pci_dev_put(bridge);
14043 				break;
14044 			}
14045 		} while (bridge);
14046 	}
14047 
14048 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14049 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14050 		tp->pdev_peer = tg3_find_peer(tp);
14051 
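	/* Establish the inclusive chip-family flags.  Each *_PLUS flag
	 * set below feeds the next, older-generation flag, so feature
	 * tests elsewhere need only check a single flag.
	 */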
14052 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14053 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14054 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14055 		tg3_flag_set(tp, 5717_PLUS);
14056 
14057 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14058 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14059 		tg3_flag_set(tp, 57765_CLASS);
14060 
14061 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14062 		tg3_flag_set(tp, 57765_PLUS);
14063 
14064 	/* Intentionally exclude ASIC_REV_5906 */
14065 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14066 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14067 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14068 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14069 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14070 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14071 	    tg3_flag(tp, 57765_PLUS))
14072 		tg3_flag_set(tp, 5755_PLUS);
14073 
14074 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14075 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14076 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14077 	    tg3_flag(tp, 5755_PLUS) ||
14078 	    tg3_flag(tp, 5780_CLASS))
14079 		tg3_flag_set(tp, 5750_PLUS);
14080 
14081 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14082 	    tg3_flag(tp, 5750_PLUS))
14083 		tg3_flag_set(tp, 5705_PLUS);
14084 
14085 	/* Determine TSO capabilities */
14086 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14087 		; /* Do nothing. HW bug. */
14088 	else if (tg3_flag(tp, 57765_PLUS))
14089 		tg3_flag_set(tp, HW_TSO_3);
14090 	else if (tg3_flag(tp, 5755_PLUS) ||
14091 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14092 		tg3_flag_set(tp, HW_TSO_2);
14093 	else if (tg3_flag(tp, 5750_PLUS)) {
14094 		tg3_flag_set(tp, HW_TSO_1);
14095 		tg3_flag_set(tp, TSO_BUG);
14096 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14097 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14098 			tg3_flag_clear(tp, TSO_BUG);
14099 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14100 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14101 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14102 		tg3_flag_set(tp, TSO_BUG);
14103 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14104 			tp->fw_needed = FIRMWARE_TG3TSO5;
14105 		else
14106 			tp->fw_needed = FIRMWARE_TG3TSO;
14107 	}
14108 
14109 	/* Selectively allow TSO based on operating conditions */
14110 	if (tg3_flag(tp, HW_TSO_1) ||
14111 	    tg3_flag(tp, HW_TSO_2) ||
14112 	    tg3_flag(tp, HW_TSO_3) ||
14113 	    tp->fw_needed) {
14114 		/* For firmware TSO, assume ASF is disabled.
14115 		 * We'll disable TSO later if we discover ASF
14116 		 * is enabled in tg3_get_eeprom_hw_cfg().
14117 		 */
14118 		tg3_flag_set(tp, TSO_CAPABLE);
14119 	} else {
14120 		tg3_flag_clear(tp, TSO_CAPABLE);
14121 		tg3_flag_clear(tp, TSO_BUG);
14122 		tp->fw_needed = NULL;
14123 	}
14124 
14125 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14126 		tp->fw_needed = FIRMWARE_TG3;
14127 
14128 	tp->irq_max = 1;
14129 
14130 	if (tg3_flag(tp, 5750_PLUS)) {
14131 		tg3_flag_set(tp, SUPPORT_MSI);
14132 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14133 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14134 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14135 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14136 		     tp->pdev_peer == tp->pdev))
14137 			tg3_flag_clear(tp, SUPPORT_MSI);
14138 
14139 		if (tg3_flag(tp, 5755_PLUS) ||
14140 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14141 			tg3_flag_set(tp, 1SHOT_MSI);
14142 		}
14143 
14144 		if (tg3_flag(tp, 57765_PLUS)) {
14145 			tg3_flag_set(tp, SUPPORT_MSIX);
14146 			tp->irq_max = TG3_IRQ_MAX_VECS;
14147 			tg3_rss_init_dflt_indir_tbl(tp);
14148 		}
14149 	}
14150 
14151 	if (tg3_flag(tp, 5755_PLUS))
14152 		tg3_flag_set(tp, SHORT_DMA_BUG);
14153 
14154 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14155 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14156 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14157 		tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14158 
14159 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14160 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14161 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14162 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14163 
14164 	if (tg3_flag(tp, 57765_PLUS) &&
14165 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14166 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14167 
14168 	if (!tg3_flag(tp, 5705_PLUS) ||
14169 	    tg3_flag(tp, 5780_CLASS) ||
14170 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14171 		tg3_flag_set(tp, JUMBO_CAPABLE);
14172 
14173 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14174 			      &pci_state_reg);
14175 
14176 	if (pci_is_pcie(tp->pdev)) {
14177 		u16 lnkctl;
14178 
14179 		tg3_flag_set(tp, PCI_EXPRESS);
14180 
14181 		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14182 			int readrq = pcie_get_readrq(tp->pdev);
14183 			if (readrq > 2048)
14184 				pcie_set_readrq(tp->pdev, 2048);
14185 		}
14186 
14187 		pci_read_config_word(tp->pdev,
14188 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14189 				     &lnkctl);
14190 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14191 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14192 			    ASIC_REV_5906) {
14193 				tg3_flag_clear(tp, HW_TSO_2);
14194 				tg3_flag_clear(tp, TSO_CAPABLE);
14195 			}
14196 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14197 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14198 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14199 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14200 				tg3_flag_set(tp, CLKREQ_BUG);
14201 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14202 			tg3_flag_set(tp, L1PLLPD_EN);
14203 		}
14204 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14205 		/* BCM5785 devices are effectively PCIe devices, and should
14206 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14207 		 * section.
14208 		 */
14209 		tg3_flag_set(tp, PCI_EXPRESS);
14210 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14211 		   tg3_flag(tp, 5780_CLASS)) {
14212 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14213 		if (!tp->pcix_cap) {
14214 			dev_err(&tp->pdev->dev,
14215 				"Cannot find PCI-X capability, aborting\n");
14216 			return -EIO;
14217 		}
14218 
14219 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14220 			tg3_flag_set(tp, PCIX_MODE);
14221 	}
14222 
14223 	/* If we have an AMD 762 or VIA K8T800 chipset, write
14224 	 * reordering to the mailbox registers done by the host
14225 	 * controller can cause major troubles.  We read back after
14226 	 * every mailbox register write to force the writes to be
14227 	 * posted to the chip in order.
14228 	 */
14229 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14230 	    !tg3_flag(tp, PCI_EXPRESS))
14231 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14232 
14233 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14234 			     &tp->pci_cacheline_sz);
14235 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14236 			     &tp->pci_lat_timer);
14237 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14238 	    tp->pci_lat_timer < 64) {
14239 		tp->pci_lat_timer = 64;
14240 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14241 				      tp->pci_lat_timer);
14242 	}
14243 
14244 	/* Important! -- It is critical that the PCI-X hw workaround
14245 	 * situation is decided before the first MMIO register access.
14246 	 */
14247 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14248 		/* 5700 BX chips need to have their TX producer index
14249 		 * mailboxes written twice to workaround a bug.
14250 		 */
14251 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14252 
14253 		/* If we are in PCI-X mode, enable register write workaround.
14254 		 *
14255 		 * The workaround is to use indirect register accesses
14256 		 * for all chip writes not to mailbox registers.
14257 		 */
14258 		if (tg3_flag(tp, PCIX_MODE)) {
14259 			u32 pm_reg;
14260 
14261 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14262 
14263 			/* The chip can have its power management PCI config
14264 			 * space registers clobbered due to this bug.
14265 			 * So explicitly force the chip into D0 here.
14266 			 */
14267 			pci_read_config_dword(tp->pdev,
14268 					      tp->pm_cap + PCI_PM_CTRL,
14269 					      &pm_reg);
14270 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14271 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14272 			pci_write_config_dword(tp->pdev,
14273 					       tp->pm_cap + PCI_PM_CTRL,
14274 					       pm_reg);
14275 
14276 			/* Also, force SERR#/PERR# in PCI command. */
14277 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14278 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14279 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14280 		}
14281 	}
14282 
14283 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14284 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14285 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14286 		tg3_flag_set(tp, PCI_32BIT);
14287 
14288 	/* Chip-specific fixup from Broadcom driver */
14289 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14290 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14291 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14292 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14293 	}
14294 
14295 	/* Default fast path register access methods */
14296 	tp->read32 = tg3_read32;
14297 	tp->write32 = tg3_write32;
14298 	tp->read32_mbox = tg3_read32;
14299 	tp->write32_mbox = tg3_write32;
14300 	tp->write32_tx_mbox = tg3_write32;
14301 	tp->write32_rx_mbox = tg3_write32;
14302 
14303 	/* Various workaround register access methods */
14304 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14305 		tp->write32 = tg3_write_indirect_reg32;
14306 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14307 		 (tg3_flag(tp, PCI_EXPRESS) &&
14308 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14309 		/*
14310 		 * Back to back register writes can cause problems on these
14311 		 * chips, the workaround is to read back all reg writes
14312 		 * except those to mailbox regs.
14313 		 *
14314 		 * See tg3_write_indirect_reg32().
14315 		 */
14316 		tp->write32 = tg3_write_flush_reg32;
14317 	}
14318 
14319 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14320 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14321 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14322 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14323 	}
14324 
14325 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14326 		tp->read32 = tg3_read_indirect_reg32;
14327 		tp->write32 = tg3_write_indirect_reg32;
14328 		tp->read32_mbox = tg3_read_indirect_mbox;
14329 		tp->write32_mbox = tg3_write_indirect_mbox;
14330 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14331 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14332 
14333 		iounmap(tp->regs);
14334 		tp->regs = NULL;
14335 
14336 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14337 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14338 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14339 	}
14340 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14341 		tp->read32_mbox = tg3_read32_mbox_5906;
14342 		tp->write32_mbox = tg3_write32_mbox_5906;
14343 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14344 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14345 	}
14346 
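	/* Fall back to SRAM access through the PCI config-space memory
	 * window whenever plain MMIO register writes are not reliable
	 * on this chip/bus combination.
	 */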
14347 	if (tp->write32 == tg3_write_indirect_reg32 ||
14348 	    (tg3_flag(tp, PCIX_MODE) &&
14349 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14350 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14351 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14352 
14353 	/* The memory arbiter has to be enabled in order for SRAM accesses
14354 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14355 	 * sure it is enabled, but other entities such as system netboot
14356 	 * code might disable it.
14357 	 */
14358 	val = tr32(MEMARB_MODE);
14359 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14360 
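	/* Determine which PCI function this is.  The devfn value is
	 * normally authoritative, but 5704 / 5780-class chips in PCI-X
	 * mode report the function number in the PCI-X status register,
	 * and 5717/5719/5720 chips report it through the CPMU status
	 * word in SRAM when the signature matches.
	 */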
14361 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14362 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14363 	    tg3_flag(tp, 5780_CLASS)) {
14364 		if (tg3_flag(tp, PCIX_MODE)) {
14365 			pci_read_config_dword(tp->pdev,
14366 					      tp->pcix_cap + PCI_X_STATUS,
14367 					      &val);
14368 			tp->pci_fn = val & 0x7;
14369 		}
14370 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14371 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14372 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14373 		    NIC_SRAM_CPMUSTAT_SIG) {
14374 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14375 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14376 		}
14377 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14378 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14379 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14380 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14381 		    NIC_SRAM_CPMUSTAT_SIG) {
14382 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14383 				     TG3_CPMU_STATUS_FSHFT_5719;
14384 		}
14385 	}
14386 
14387 	/* Get eeprom hw config before calling tg3_set_power_state().
14388 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14389 	 * determined before calling tg3_set_power_state() so that
14390 	 * we know whether or not to switch out of Vaux power.
14391 	 * When the flag is set, it means that GPIO1 is used for eeprom
14392 	 * write protect and also implies that it is a LOM where GPIOs
14393 	 * are not used to switch power.
14394 	 */
14395 	tg3_get_eeprom_hw_cfg(tp);
14396 
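	/* Firmware-based TSO cannot be used together with ASF management
	 * firmware (both run on the NIC's internal CPUs), so drop the
	 * TSO capability, and the need for a TSO firmware image, when
	 * ASF is enabled.
	 */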
14397 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14398 		tg3_flag_clear(tp, TSO_CAPABLE);
14399 		tg3_flag_clear(tp, TSO_BUG);
14400 		tp->fw_needed = NULL;
14401 	}
14402 
14403 	if (tg3_flag(tp, ENABLE_APE)) {
14404 		/* Allow reads and writes to the
14405 		 * APE register and memory space.
14406 		 */
14407 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14408 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14409 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14410 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14411 				       pci_state_reg);
14412 
14413 		tg3_ape_lock_init(tp);
14414 	}
14415 
14416 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14417 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14418 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14419 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14420 	    tg3_flag(tp, 57765_PLUS))
14421 		tg3_flag_set(tp, CPMU_PRESENT);
14422 
14423 	/* Set up tp->grc_local_ctrl before calling
14424 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14425 	 * will bring 5700's external PHY out of reset.
14426 	 * It is also used as eeprom write protect on LOMs.
14427 	 */
14428 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14429 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14430 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14431 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14432 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14433 	/* Unused GPIO3 must be driven as output on 5752 because there
14434 	 * are no pull-up resistors on unused GPIO pins.
14435 	 */
14436 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14437 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14438 
14439 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14440 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14441 	    tg3_flag(tp, 57765_CLASS))
14442 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14443 
14444 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14445 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14446 		/* Turn off the debug UART. */
14447 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14448 		if (tg3_flag(tp, IS_NIC))
14449 			/* Keep VMain power. */
14450 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14451 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14452 	}
14453 
14454 	/* Switch out of Vaux if it is a NIC */
14455 	tg3_pwrsrc_switch_to_vmain(tp);
14456 
14457 	/* Derive initial jumbo mode from MTU assigned in
14458 	 * ether_setup() via the alloc_etherdev() call
14459 	 */
14460 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14461 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14462 
14463 	/* Determine WakeOnLan speed to use. */
14464 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14465 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14466 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14467 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14468 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14469 	} else {
14470 		tg3_flag_set(tp, WOL_SPEED_100MB);
14471 	}
14472 
14473 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14474 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14475 
14476 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
14477 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14478 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14479 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14480 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14481 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14482 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14483 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14484 
14485 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14486 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14487 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14488 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14489 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14490 
14491 	if (tg3_flag(tp, 5705_PLUS) &&
14492 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14493 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14494 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14495 	    !tg3_flag(tp, 57765_PLUS)) {
14496 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14497 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14498 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14499 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14500 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14501 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14502 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14503 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14504 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14505 		} else
14506 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14507 	}
14508 
14509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14510 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14511 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14512 		if (tp->phy_otp == 0)
14513 			tp->phy_otp = TG3_OTP_DEFAULT;
14514 	}
14515 
14516 	if (tg3_flag(tp, CPMU_PRESENT))
14517 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14518 	else
14519 		tp->mi_mode = MAC_MI_MODE_BASE;
14520 
14521 	tp->coalesce_mode = 0;
14522 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14523 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14524 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14525 
14526 	/* Set these bits to enable statistics workaround. */
14527 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14528 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14529 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14530 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14531 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14532 	}
14533 
14534 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14535 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14536 		tg3_flag_set(tp, USE_PHYLIB);
14537 
14538 	err = tg3_mdio_init(tp);
14539 	if (err)
14540 		return err;
14541 
14542 	/* Initialize data/descriptor byte/word swapping. */
14543 	val = tr32(GRC_MODE);
14544 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14545 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14546 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14547 			GRC_MODE_B2HRX_ENABLE |
14548 			GRC_MODE_HTX2B_ENABLE |
14549 			GRC_MODE_HOST_STACKUP);
14550 	else
14551 		val &= GRC_MODE_HOST_STACKUP;
14552 
14553 	tw32(GRC_MODE, val | tp->grc_mode);
14554 
14555 	tg3_switch_clocks(tp);
14556 
14557 	/* Clear this out for sanity. */
14558 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14559 
14560 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14561 			      &pci_state_reg);
14562 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14563 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14564 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14565 
14566 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14567 		    chiprevid == CHIPREV_ID_5701_B0 ||
14568 		    chiprevid == CHIPREV_ID_5701_B2 ||
14569 		    chiprevid == CHIPREV_ID_5701_B5) {
14570 			void __iomem *sram_base;
14571 
14572 			/* Write some dummy words into the SRAM status block
14573 			 * area and see if they read back correctly.  If the read-back
14574 			 * value is bad, force-enable the PCIX workaround.
14575 			 */
14576 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14577 
14578 			writel(0x00000000, sram_base);
14579 			writel(0x00000000, sram_base + 4);
14580 			writel(0xffffffff, sram_base + 4);
14581 			if (readl(sram_base) != 0x00000000)
14582 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14583 		}
14584 	}
14585 
14586 	udelay(50);
14587 	tg3_nvram_init(tp);
14588 
14589 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14590 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14591 
14592 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14593 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14594 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14595 		tg3_flag_set(tp, IS_5788);
14596 
14597 	if (!tg3_flag(tp, IS_5788) &&
14598 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14599 		tg3_flag_set(tp, TAGGED_STATUS);
14600 	if (tg3_flag(tp, TAGGED_STATUS)) {
14601 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14602 				      HOSTCC_MODE_CLRTICK_TXBD);
14603 
14604 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14605 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14606 				       tp->misc_host_ctrl);
14607 	}
14608 
14609 	/* Preserve the APE MAC_MODE bits */
14610 	if (tg3_flag(tp, ENABLE_APE))
14611 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14612 	else
14613 		tp->mac_mode = 0;
14614 
14615 	/* these are limited to 10/100 only */
14616 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14617 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14618 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14619 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14620 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14621 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14622 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14623 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14624 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14625 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14626 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14627 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14628 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14629 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14630 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14631 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14632 
14633 	err = tg3_phy_probe(tp);
14634 	if (err) {
14635 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14636 		/* ... but do not return immediately ... */
14637 		tg3_mdio_fini(tp);
14638 	}
14639 
14640 	tg3_read_vpd(tp);
14641 	tg3_read_fw_ver(tp);
14642 
14643 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14644 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14645 	} else {
14646 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14647 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14648 		else
14649 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14650 	}
14651 
14652 	/* 5700 {AX,BX} chips have a broken status block link
14653 	 * change bit implementation, so we must use the
14654 	 * status register in those cases.
14655 	 */
14656 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14657 		tg3_flag_set(tp, USE_LINKCHG_REG);
14658 	else
14659 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14660 
14661 	/* The led_ctrl is set during tg3_phy_probe, here we might
14662 	 * have to force the link status polling mechanism based
14663 	 * upon subsystem IDs.
14664 	 */
14665 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14666 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14667 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14668 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14669 		tg3_flag_set(tp, USE_LINKCHG_REG);
14670 	}
14671 
14672 	/* For all SERDES we poll the MAC status register. */
14673 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14674 		tg3_flag_set(tp, POLL_SERDES);
14675 	else
14676 		tg3_flag_clear(tp, POLL_SERDES);
14677 
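	/* Receive buffers normally get a NET_IP_ALIGN (2 byte) offset so
	 * the IP header lands word-aligned.  The 5701 in PCI-X mode
	 * cannot DMA to such 2-byte-aligned buffers, so drop the offset
	 * there and, on hosts without efficient unaligned access, max
	 * out the copy threshold so every packet is copied into an
	 * aligned buffer instead.
	 */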
14678 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14679 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14680 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14681 	    tg3_flag(tp, PCIX_MODE)) {
14682 		tp->rx_offset = NET_SKB_PAD;
14683 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14684 		tp->rx_copy_thresh = ~(u16)0;
14685 #endif
14686 	}
14687 
14688 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14689 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14690 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14691 
14692 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14693 
14694 	/* Increment the rx prod index on the rx std ring by at most
14695 	 * 8 for these chips to work around hw errata.
14696 	 */
14697 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14698 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14699 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14700 		tp->rx_std_max_post = 8;
14701 
14702 	if (tg3_flag(tp, ASPM_WORKAROUND))
14703 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14704 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14705 
14706 	return err;
14707 }
14708 
14709 #ifdef CONFIG_SPARC
14710 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14711 {
14712 	struct net_device *dev = tp->dev;
14713 	struct pci_dev *pdev = tp->pdev;
14714 	struct device_node *dp = pci_device_to_OF_node(pdev);
14715 	const unsigned char *addr;
14716 	int len;
14717 
14718 	addr = of_get_property(dp, "local-mac-address", &len);
14719 	if (addr && len == 6) {
14720 		memcpy(dev->dev_addr, addr, 6);
14721 		memcpy(dev->perm_addr, dev->dev_addr, 6);
14722 		return 0;
14723 	}
14724 	return -ENODEV;
14725 }
14726 
14727 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14728 {
14729 	struct net_device *dev = tp->dev;
14730 
14731 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14732 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14733 	return 0;
14734 }
14735 #endif
14736 
14737 static int __devinit tg3_get_device_address(struct tg3 *tp)
14738 {
14739 	struct net_device *dev = tp->dev;
14740 	u32 hi, lo, mac_offset;
14741 	int addr_ok = 0;
14742 
14743 #ifdef CONFIG_SPARC
14744 	if (!tg3_get_macaddr_sparc(tp))
14745 		return 0;
14746 #endif
14747 
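	/* Select where in NVRAM the MAC address lives; the offset depends
	 * on the chip family and, on dual-MAC capable parts, on which
	 * function/port this device is.
	 */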
14748 	mac_offset = 0x7c;
14749 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14750 	    tg3_flag(tp, 5780_CLASS)) {
14751 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14752 			mac_offset = 0xcc;
14753 		if (tg3_nvram_lock(tp))
14754 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14755 		else
14756 			tg3_nvram_unlock(tp);
14757 	} else if (tg3_flag(tp, 5717_PLUS)) {
14758 		if (tp->pci_fn & 1)
14759 			mac_offset = 0xcc;
14760 		if (tp->pci_fn > 1)
14761 			mac_offset += 0x18c;
14762 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14763 		mac_offset = 0x10;
14764 
14765 	/* First try to get it from MAC address mailbox. */
14766 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
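	/* The bootcode leaves a signature in the upper 16 bits (0x484b,
	 * apparently ASCII "HK") when it has stored a MAC address here.
	 */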
14767 	if ((hi >> 16) == 0x484b) {
14768 		dev->dev_addr[0] = (hi >>  8) & 0xff;
14769 		dev->dev_addr[1] = (hi >>  0) & 0xff;
14770 
14771 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14772 		dev->dev_addr[2] = (lo >> 24) & 0xff;
14773 		dev->dev_addr[3] = (lo >> 16) & 0xff;
14774 		dev->dev_addr[4] = (lo >>  8) & 0xff;
14775 		dev->dev_addr[5] = (lo >>  0) & 0xff;
14776 
14777 		/* Some old bootcode may report a 0 MAC address in SRAM */
14778 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14779 	}
14780 	if (!addr_ok) {
14781 		/* Next, try NVRAM. */
14782 		if (!tg3_flag(tp, NO_NVRAM) &&
14783 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14784 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
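			/* The address occupies the low 6 bytes of the
			 * big-endian 8-byte NVRAM field: skip the first
			 * two bytes of 'hi', then take all of 'lo'.
			 */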
14785 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14786 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14787 		}
14788 		/* Finally just fetch it out of the MAC control regs. */
14789 		else {
14790 			hi = tr32(MAC_ADDR_0_HIGH);
14791 			lo = tr32(MAC_ADDR_0_LOW);
14792 
14793 			dev->dev_addr[5] = lo & 0xff;
14794 			dev->dev_addr[4] = (lo >> 8) & 0xff;
14795 			dev->dev_addr[3] = (lo >> 16) & 0xff;
14796 			dev->dev_addr[2] = (lo >> 24) & 0xff;
14797 			dev->dev_addr[1] = hi & 0xff;
14798 			dev->dev_addr[0] = (hi >> 8) & 0xff;
14799 		}
14800 	}
14801 
14802 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14803 #ifdef CONFIG_SPARC
14804 		if (!tg3_get_default_macaddr_sparc(tp))
14805 			return 0;
14806 #endif
14807 		return -EINVAL;
14808 	}
14809 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14810 	return 0;
14811 }
14812 
14813 #define BOUNDARY_SINGLE_CACHELINE	1
14814 #define BOUNDARY_MULTI_CACHELINE	2
14815 
14816 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14817 {
14818 	int cacheline_size;
14819 	u8 byte;
14820 	int goal;
14821 
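	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
	 * zero means it was never set, so assume a worst-case 1024-byte
	 * cache line.
	 */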
14822 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14823 	if (byte == 0)
14824 		cacheline_size = 1024;
14825 	else
14826 		cacheline_size = (int) byte * 4;
14827 
14828 	/* On 5703 and later chips, the boundary bits have no
14829 	 * effect.
14830 	 */
14831 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14832 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14833 	    !tg3_flag(tp, PCI_EXPRESS))
14834 		goto out;
14835 
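	/* Choose a DMA boundary goal based on the host architecture:
	 * PPC64/IA64/PARISC hosts prefer bursts bounded by multiple
	 * cache lines, SPARC64/Alpha by a single cache line, and
	 * everything else leaves the boundary bits alone.
	 */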
14836 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14837 	goal = BOUNDARY_MULTI_CACHELINE;
14838 #else
14839 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14840 	goal = BOUNDARY_SINGLE_CACHELINE;
14841 #else
14842 	goal = 0;
14843 #endif
14844 #endif
14845 
14846 	if (tg3_flag(tp, 57765_PLUS)) {
14847 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14848 		goto out;
14849 	}
14850 
14851 	if (!goal)
14852 		goto out;
14853 
14854 	/* PCI controllers on most RISC systems tend to disconnect
14855 	 * when a device tries to burst across a cache-line boundary.
14856 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14857 	 *
14858 	 * Unfortunately, for PCI-E there are only limited
14859 	 * write-side controls for this, and thus for reads
14860 	 * we will still get the disconnects.  We'll also waste
14861 	 * these PCI cycles for both read and write for chips
14862 	 * other than 5700 and 5701 which do not implement the
14863 	 * boundary bits.
14864 	 */
14865 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14866 		switch (cacheline_size) {
14867 		case 16:
14868 		case 32:
14869 		case 64:
14870 		case 128:
14871 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14872 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14873 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14874 			} else {
14875 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14876 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14877 			}
14878 			break;
14879 
14880 		case 256:
14881 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14882 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14883 			break;
14884 
14885 		default:
14886 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14887 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14888 			break;
14889 		}
14890 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
14891 		switch (cacheline_size) {
14892 		case 16:
14893 		case 32:
14894 		case 64:
14895 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14896 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14897 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14898 				break;
14899 			}
14900 			/* fallthrough */
14901 		case 128:
14902 		default:
14903 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14904 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14905 			break;
14906 		}
14907 	} else {
14908 		switch (cacheline_size) {
14909 		case 16:
14910 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14911 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
14912 					DMA_RWCTRL_WRITE_BNDRY_16);
14913 				break;
14914 			}
14915 			/* fallthrough */
14916 		case 32:
14917 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14918 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
14919 					DMA_RWCTRL_WRITE_BNDRY_32);
14920 				break;
14921 			}
14922 			/* fallthrough */
14923 		case 64:
14924 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14925 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
14926 					DMA_RWCTRL_WRITE_BNDRY_64);
14927 				break;
14928 			}
14929 			/* fallthrough */
14930 		case 128:
14931 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14932 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
14933 					DMA_RWCTRL_WRITE_BNDRY_128);
14934 				break;
14935 			}
14936 			/* fallthrough */
14937 		case 256:
14938 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
14939 				DMA_RWCTRL_WRITE_BNDRY_256);
14940 			break;
14941 		case 512:
14942 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
14943 				DMA_RWCTRL_WRITE_BNDRY_512);
14944 			break;
14945 		case 1024:
14946 		default:
14947 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14948 				DMA_RWCTRL_WRITE_BNDRY_1024);
14949 			break;
14950 		}
14951 	}
14952 
14953 out:
14954 	return val;
14955 }
14956 
14957 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14958 {
14959 	struct tg3_internal_buffer_desc test_desc;
14960 	u32 sram_dma_descs;
14961 	int i, ret;
14962 
14963 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14964 
14965 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14966 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14967 	tw32(RDMAC_STATUS, 0);
14968 	tw32(WDMAC_STATUS, 0);
14969 
14970 	tw32(BUFMGR_MODE, 0);
14971 	tw32(FTQ_RESET, 0);
14972 
14973 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
14974 	test_desc.addr_lo = buf_dma & 0xffffffff;
14975 	test_desc.nic_mbuf = 0x00002100;
14976 	test_desc.len = size;
14977 
14978 	/*
14979 	 * HP ZX1 systems saw test failures with 5701 cards running at 33MHz
14980 	 * the *second* time the tg3 driver was loaded after an
14981 	 * initial scan.
14982 	 *
14983 	 * Broadcom tells me:
14984 	 *   ...the DMA engine is connected to the GRC block and a DMA
14985 	 *   reset may affect the GRC block in some unpredictable way...
14986 	 *   The behavior of resets to individual blocks has not been tested.
14987 	 *
14988 	 * Broadcom noted the GRC reset will also reset all sub-components.
14989 	 */
14990 	if (to_device) {
14991 		test_desc.cqid_sqid = (13 << 8) | 2;
14992 
14993 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14994 		udelay(40);
14995 	} else {
14996 		test_desc.cqid_sqid = (16 << 8) | 7;
14997 
14998 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14999 		udelay(40);
15000 	}
15001 	test_desc.flags = 0x00000005;
15002 
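	/* Copy the test descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI memory window in config space.
	 */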
15003 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15004 		u32 val;
15005 
15006 		val = *(((u32 *)&test_desc) + i);
15007 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15008 				       sram_dma_descs + (i * sizeof(u32)));
15009 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15010 	}
15011 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15012 
15013 	if (to_device)
15014 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15015 	else
15016 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15017 
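	/* Poll the matching completion FIFO; the transfer is done once
	 * the FIFO echoes our descriptor address.  Forty polls at 100us
	 * intervals gives the engine up to ~4ms to finish.
	 */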
15018 	ret = -ENODEV;
15019 	for (i = 0; i < 40; i++) {
15020 		u32 val;
15021 
15022 		if (to_device)
15023 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15024 		else
15025 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15026 		if ((val & 0xffff) == sram_dma_descs) {
15027 			ret = 0;
15028 			break;
15029 		}
15030 
15031 		udelay(100);
15032 	}
15033 
15034 	return ret;
15035 }
15036 
15037 #define TEST_BUFFER_SIZE	0x2000
15038 
15039 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15040 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15041 	{ },
15042 };
15043 
15044 static int __devinit tg3_test_dma(struct tg3 *tp)
15045 {
15046 	dma_addr_t buf_dma;
15047 	u32 *buf, saved_dma_rwctrl;
15048 	int ret = 0;
15049 
15050 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15051 				 &buf_dma, GFP_KERNEL);
15052 	if (!buf) {
15053 		ret = -ENOMEM;
15054 		goto out_nofree;
15055 	}
15056 
15057 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15058 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15059 
15060 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15061 
15062 	if (tg3_flag(tp, 57765_PLUS))
15063 		goto out;
15064 
15065 	if (tg3_flag(tp, PCI_EXPRESS)) {
15066 		/* DMA read watermark not used on PCIE */
15067 		tp->dma_rwctrl |= 0x00180000;
15068 	} else if (!tg3_flag(tp, PCIX_MODE)) {
15069 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15070 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15071 			tp->dma_rwctrl |= 0x003f0000;
15072 		else
15073 			tp->dma_rwctrl |= 0x003f000f;
15074 	} else {
15075 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15076 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15077 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15078 			u32 read_water = 0x7;
15079 
15080 			/* If the 5704 is behind the EPB bridge, we can
15081 			 * do the less restrictive ONE_DMA workaround for
15082 			 * better performance.
15083 			 */
15084 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15085 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15086 				tp->dma_rwctrl |= 0x8000;
15087 			else if (ccval == 0x6 || ccval == 0x7)
15088 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15089 
15090 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15091 				read_water = 4;
15092 			/* Set bit 23 to enable PCIX hw bug fix */
15093 			tp->dma_rwctrl |=
15094 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15095 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15096 				(1 << 23);
15097 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15098 			/* 5780 always in PCIX mode */
15099 			tp->dma_rwctrl |= 0x00144000;
15100 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15101 			/* 5714 always in PCIX mode */
15102 			tp->dma_rwctrl |= 0x00148000;
15103 		} else {
15104 			tp->dma_rwctrl |= 0x001b000f;
15105 		}
15106 	}
15107 
15108 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15109 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15110 		tp->dma_rwctrl &= 0xfffffff0;
15111 
15112 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15113 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15114 		/* Remove this if it causes problems for some boards. */
15115 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15116 
15117 		/* On 5700/5701 chips, we need to set this bit.
15118 		 * Otherwise the chip will issue cacheline transactions
15119 		 * to streamable DMA memory with not all the byte
15120 		 * enables turned on.  This is an error on several
15121 		 * RISC PCI controllers, in particular sparc64.
15122 		 *
15123 		 * On 5703/5704 chips, this bit has been reassigned
15124 		 * a different meaning.  In particular, it is used
15125 		 * on those chips to enable a PCI-X workaround.
15126 		 */
15127 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15128 	}
15129 
15130 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15131 
15132 #if 0
15133 	/* Unneeded, already done by tg3_get_invariants.  */
15134 	tg3_switch_clocks(tp);
15135 #endif
15136 
15137 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15138 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15139 		goto out;
15140 
15141 	/* It is best to perform the DMA test with maximum write burst size
15142 	 * to expose the 5700/5701 write DMA bug.
15143 	 */
15144 	saved_dma_rwctrl = tp->dma_rwctrl;
15145 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15146 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15147 
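	/* Repeatedly DMA a known pattern to the chip and back.  If the
	 * read-back data is corrupted, clamp the write boundary to 16
	 * bytes and retry; a second failure is fatal.
	 */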
15148 	while (1) {
15149 		u32 *p = buf, i;
15150 
15151 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15152 			p[i] = i;
15153 
15154 		/* Send the buffer to the chip. */
15155 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15156 		if (ret) {
15157 			dev_err(&tp->pdev->dev,
15158 				"%s: Buffer write failed. err = %d\n",
15159 				__func__, ret);
15160 			break;
15161 		}
15162 
15163 #if 0
15164 		/* validate data reached card RAM correctly. */
15165 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15166 			u32 val;
15167 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15168 			if (le32_to_cpu(val) != p[i]) {
15169 				dev_err(&tp->pdev->dev,
15170 					"%s: Buffer corrupted on device! "
15171 					"(%d != %d)\n", __func__, val, i);
15172 				/* ret = -ENODEV here? */
15173 			}
15174 			p[i] = 0;
15175 		}
15176 #endif
15177 		/* Now read it back. */
15178 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15179 		if (ret) {
15180 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15181 				"err = %d\n", __func__, ret);
15182 			break;
15183 		}
15184 
15185 		/* Verify it. */
15186 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15187 			if (p[i] == i)
15188 				continue;
15189 
15190 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15191 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15192 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15193 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15194 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15195 				break;
15196 			} else {
15197 				dev_err(&tp->pdev->dev,
15198 					"%s: Buffer corrupted on read back! "
15199 					"(%d != %d)\n", __func__, p[i], i);
15200 				ret = -ENODEV;
15201 				goto out;
15202 			}
15203 		}
15204 
15205 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15206 			/* Success. */
15207 			ret = 0;
15208 			break;
15209 		}
15210 	}
15211 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15212 	    DMA_RWCTRL_WRITE_BNDRY_16) {
15213 		/* DMA test passed without adjusting DMA boundary,
15214 		 * now look for chipsets that are known to expose the
15215 		 * DMA bug without failing the test.
15216 		 */
15217 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15218 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15219 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15220 		} else {
15221 			/* Safe to use the calculated DMA boundary. */
15222 			tp->dma_rwctrl = saved_dma_rwctrl;
15223 		}
15224 
15225 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15226 	}
15227 
15228 out:
15229 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15230 out_nofree:
15231 	return ret;
15232 }
15233 
15234 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15235 {
15236 	if (tg3_flag(tp, 57765_PLUS)) {
15237 		tp->bufmgr_config.mbuf_read_dma_low_water =
15238 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15239 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15240 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15241 		tp->bufmgr_config.mbuf_high_water =
15242 			DEFAULT_MB_HIGH_WATER_57765;
15243 
15244 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15245 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15246 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15247 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15248 		tp->bufmgr_config.mbuf_high_water_jumbo =
15249 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15250 	} else if (tg3_flag(tp, 5705_PLUS)) {
15251 		tp->bufmgr_config.mbuf_read_dma_low_water =
15252 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15253 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15254 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15255 		tp->bufmgr_config.mbuf_high_water =
15256 			DEFAULT_MB_HIGH_WATER_5705;
15257 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15258 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15259 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15260 			tp->bufmgr_config.mbuf_high_water =
15261 				DEFAULT_MB_HIGH_WATER_5906;
15262 		}
15263 
15264 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15265 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15266 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15267 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15268 		tp->bufmgr_config.mbuf_high_water_jumbo =
15269 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15270 	} else {
15271 		tp->bufmgr_config.mbuf_read_dma_low_water =
15272 			DEFAULT_MB_RDMA_LOW_WATER;
15273 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15274 			DEFAULT_MB_MACRX_LOW_WATER;
15275 		tp->bufmgr_config.mbuf_high_water =
15276 			DEFAULT_MB_HIGH_WATER;
15277 
15278 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15279 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15280 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15281 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15282 		tp->bufmgr_config.mbuf_high_water_jumbo =
15283 			DEFAULT_MB_HIGH_WATER_JUMBO;
15284 	}
15285 
15286 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15287 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15288 }
15289 
15290 static char * __devinit tg3_phy_string(struct tg3 *tp)
15291 {
15292 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15293 	case TG3_PHY_ID_BCM5400:	return "5400";
15294 	case TG3_PHY_ID_BCM5401:	return "5401";
15295 	case TG3_PHY_ID_BCM5411:	return "5411";
15296 	case TG3_PHY_ID_BCM5701:	return "5701";
15297 	case TG3_PHY_ID_BCM5703:	return "5703";
15298 	case TG3_PHY_ID_BCM5704:	return "5704";
15299 	case TG3_PHY_ID_BCM5705:	return "5705";
15300 	case TG3_PHY_ID_BCM5750:	return "5750";
15301 	case TG3_PHY_ID_BCM5752:	return "5752";
15302 	case TG3_PHY_ID_BCM5714:	return "5714";
15303 	case TG3_PHY_ID_BCM5780:	return "5780";
15304 	case TG3_PHY_ID_BCM5755:	return "5755";
15305 	case TG3_PHY_ID_BCM5787:	return "5787";
15306 	case TG3_PHY_ID_BCM5784:	return "5784";
15307 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15308 	case TG3_PHY_ID_BCM5906:	return "5906";
15309 	case TG3_PHY_ID_BCM5761:	return "5761";
15310 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15311 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15312 	case TG3_PHY_ID_BCM57765:	return "57765";
15313 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15314 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15315 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15316 	case 0:			return "serdes";
15317 	default:		return "unknown";
15318 	}
15319 }
15320 
15321 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15322 {
15323 	if (tg3_flag(tp, PCI_EXPRESS)) {
15324 		strcpy(str, "PCI Express");
15325 		return str;
15326 	} else if (tg3_flag(tp, PCIX_MODE)) {
15327 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15328 
15329 		strcpy(str, "PCIX:");
15330 
15331 		if ((clock_ctrl == 7) ||
15332 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15333 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15334 			strcat(str, "133MHz");
15335 		else if (clock_ctrl == 0)
15336 			strcat(str, "33MHz");
15337 		else if (clock_ctrl == 2)
15338 			strcat(str, "50MHz");
15339 		else if (clock_ctrl == 4)
15340 			strcat(str, "66MHz");
15341 		else if (clock_ctrl == 6)
15342 			strcat(str, "100MHz");
15343 	} else {
15344 		strcpy(str, "PCI:");
15345 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15346 			strcat(str, "66MHz");
15347 		else
15348 			strcat(str, "33MHz");
15349 	}
15350 	if (tg3_flag(tp, PCI_32BIT))
15351 		strcat(str, ":32-bit");
15352 	else
15353 		strcat(str, ":64-bit");
15354 	return str;
15355 }
15356 
15357 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15358 {
15359 	struct pci_dev *peer;
15360 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15361 
15362 	for (func = 0; func < 8; func++) {
15363 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15364 		if (peer && peer != tp->pdev)
15365 			break;
15366 		pci_dev_put(peer);
15367 	}
15368 	/* 5704 can be configured in single-port mode; set peer to
15369 	 * tp->pdev in that case.
15370 	 */
15371 	if (!peer) {
15372 		peer = tp->pdev;
15373 		return peer;
15374 	}
15375 
15376 	/*
15377 	 * We don't need to keep the refcount elevated; there's no way
15378 	 * to remove one half of this device without removing the other.
15379 	 */
15380 	pci_dev_put(peer);
15381 
15382 	return peer;
15383 }
15384 
15385 static void __devinit tg3_init_coal(struct tg3 *tp)
15386 {
15387 	struct ethtool_coalesce *ec = &tp->coal;
15388 
15389 	memset(ec, 0, sizeof(*ec));
15390 	ec->cmd = ETHTOOL_GCOALESCE;
15391 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15392 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15393 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15394 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15395 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15396 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15397 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15398 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15399 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15400 
15401 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15402 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15403 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15404 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15405 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15406 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15407 	}
15408 
15409 	if (tg3_flag(tp, 5705_PLUS)) {
15410 		ec->rx_coalesce_usecs_irq = 0;
15411 		ec->tx_coalesce_usecs_irq = 0;
15412 		ec->stats_block_coalesce_usecs = 0;
15413 	}
15414 }
15415 
15416 static const struct net_device_ops tg3_netdev_ops = {
15417 	.ndo_open		= tg3_open,
15418 	.ndo_stop		= tg3_close,
15419 	.ndo_start_xmit		= tg3_start_xmit,
15420 	.ndo_get_stats64	= tg3_get_stats64,
15421 	.ndo_validate_addr	= eth_validate_addr,
15422 	.ndo_set_rx_mode	= tg3_set_rx_mode,
15423 	.ndo_set_mac_address	= tg3_set_mac_addr,
15424 	.ndo_do_ioctl		= tg3_ioctl,
15425 	.ndo_tx_timeout		= tg3_tx_timeout,
15426 	.ndo_change_mtu		= tg3_change_mtu,
15427 	.ndo_fix_features	= tg3_fix_features,
15428 	.ndo_set_features	= tg3_set_features,
15429 #ifdef CONFIG_NET_POLL_CONTROLLER
15430 	.ndo_poll_controller	= tg3_poll_controller,
15431 #endif
15432 };
15433 
15434 static int __devinit tg3_init_one(struct pci_dev *pdev,
15435 				  const struct pci_device_id *ent)
15436 {
15437 	struct net_device *dev;
15438 	struct tg3 *tp;
15439 	int i, err, pm_cap;
15440 	u32 sndmbx, rcvmbx, intmbx;
15441 	char str[40];
15442 	u64 dma_mask, persist_dma_mask;
15443 	netdev_features_t features = 0;
15444 
15445 	printk_once(KERN_INFO "%s\n", version);
15446 
15447 	err = pci_enable_device(pdev);
15448 	if (err) {
15449 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15450 		return err;
15451 	}
15452 
15453 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15454 	if (err) {
15455 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15456 		goto err_out_disable_pdev;
15457 	}
15458 
15459 	pci_set_master(pdev);
15460 
15461 	/* Find power-management capability. */
15462 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15463 	if (pm_cap == 0) {
15464 		dev_err(&pdev->dev,
15465 			"Cannot find Power Management capability, aborting\n");
15466 		err = -EIO;
15467 		goto err_out_free_res;
15468 	}
15469 
15470 	err = pci_set_power_state(pdev, PCI_D0);
15471 	if (err) {
15472 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15473 		goto err_out_free_res;
15474 	}
15475 
15476 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15477 	if (!dev) {
15478 		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15479 		err = -ENOMEM;
15480 		goto err_out_power_down;
15481 	}
15482 
15483 	SET_NETDEV_DEV(dev, &pdev->dev);
15484 
15485 	tp = netdev_priv(dev);
15486 	tp->pdev = pdev;
15487 	tp->dev = dev;
15488 	tp->pm_cap = pm_cap;
15489 	tp->rx_mode = TG3_DEF_RX_MODE;
15490 	tp->tx_mode = TG3_DEF_TX_MODE;
15491 
15492 	if (tg3_debug > 0)
15493 		tp->msg_enable = tg3_debug;
15494 	else
15495 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15496 
15497 	/* The word/byte swap controls here control register access byte
15498 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15499 	 * setting below.
15500 	 */
15501 	tp->misc_host_ctrl =
15502 		MISC_HOST_CTRL_MASK_PCI_INT |
15503 		MISC_HOST_CTRL_WORD_SWAP |
15504 		MISC_HOST_CTRL_INDIR_ACCESS |
15505 		MISC_HOST_CTRL_PCISTATE_RW;
15506 
15507 	/* The NONFRM (non-frame) byte/word swap controls take effect
15508 	 * on descriptor entries, anything which isn't packet data.
15509 	 *
15510 	 * The StrongARM chips on the board (one for tx, one for rx)
15511 	 * are running in big-endian mode.
15512 	 */
15513 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15514 			GRC_MODE_WSWAP_NONFRM_DATA);
15515 #ifdef __BIG_ENDIAN
15516 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15517 #endif
15518 	spin_lock_init(&tp->lock);
15519 	spin_lock_init(&tp->indirect_lock);
15520 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15521 
15522 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15523 	if (!tp->regs) {
15524 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15525 		err = -ENOMEM;
15526 		goto err_out_free_dev;
15527 	}
15528 
15529 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15530 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15531 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15532 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15533 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15534 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15535 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15536 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15537 		tg3_flag_set(tp, ENABLE_APE);
15538 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15539 		if (!tp->aperegs) {
15540 			dev_err(&pdev->dev,
15541 				"Cannot map APE registers, aborting\n");
15542 			err = -ENOMEM;
15543 			goto err_out_iounmap;
15544 		}
15545 	}
15546 
15547 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15548 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15549 
15550 	dev->ethtool_ops = &tg3_ethtool_ops;
15551 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15552 	dev->netdev_ops = &tg3_netdev_ops;
15553 	dev->irq = pdev->irq;
15554 
15555 	err = tg3_get_invariants(tp);
15556 	if (err) {
15557 		dev_err(&pdev->dev,
15558 			"Problem fetching invariants of chip, aborting\n");
15559 		goto err_out_apeunmap;
15560 	}
15561 
15562 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15563 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15564 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15565 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15566 	 * do DMA address check in tg3_start_xmit().
15567 	 */
15568 	if (tg3_flag(tp, IS_5788))
15569 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15570 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15571 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15572 #ifdef CONFIG_HIGHMEM
15573 		dma_mask = DMA_BIT_MASK(64);
15574 #endif
15575 	} else
15576 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15577 
15578 	/* Configure DMA attributes. */
15579 	if (dma_mask > DMA_BIT_MASK(32)) {
15580 		err = pci_set_dma_mask(pdev, dma_mask);
15581 		if (!err) {
15582 			features |= NETIF_F_HIGHDMA;
15583 			err = pci_set_consistent_dma_mask(pdev,
15584 							  persist_dma_mask);
15585 			if (err < 0) {
15586 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15587 					"DMA for consistent allocations\n");
15588 				goto err_out_apeunmap;
15589 			}
15590 		}
15591 	}
15592 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15593 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15594 		if (err) {
15595 			dev_err(&pdev->dev,
15596 				"No usable DMA configuration, aborting\n");
15597 			goto err_out_apeunmap;
15598 		}
15599 	}
15600 
15601 	tg3_init_bufmgr_config(tp);
15602 
15603 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15604 
15605 	/* 5700 B0 chips do not support checksumming correctly due
15606 	 * to hardware bugs.
15607 	 */
15608 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15609 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15610 
15611 		if (tg3_flag(tp, 5755_PLUS))
15612 			features |= NETIF_F_IPV6_CSUM;
15613 	}
15614 
15615 	/* TSO is on by default on chips that support hardware TSO.
15616 	 * Firmware TSO on older chips gives lower performance, so it
15617 	 * is off by default, but can be enabled using ethtool.
15618 	 */
15619 	if ((tg3_flag(tp, HW_TSO_1) ||
15620 	     tg3_flag(tp, HW_TSO_2) ||
15621 	     tg3_flag(tp, HW_TSO_3)) &&
15622 	    (features & NETIF_F_IP_CSUM))
15623 		features |= NETIF_F_TSO;
15624 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15625 		if (features & NETIF_F_IPV6_CSUM)
15626 			features |= NETIF_F_TSO6;
15627 		if (tg3_flag(tp, HW_TSO_3) ||
15628 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15629 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15630 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15631 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15632 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15633 			features |= NETIF_F_TSO_ECN;
15634 	}
15635 
15636 	dev->features |= features;
15637 	dev->vlan_features |= features;
15638 
15639 	/*
15640 	 * Add loopback capability only for a subset of devices that support
15641 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15642 	 * loopback for the remaining devices.
15643 	 */
15644 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15645 	    !tg3_flag(tp, CPMU_PRESENT))
15646 		/* Add the loopback capability */
15647 		features |= NETIF_F_LOOPBACK;
15648 
15649 	dev->hw_features |= features;
15650 
15651 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15652 	    !tg3_flag(tp, TSO_CAPABLE) &&
15653 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15654 		tg3_flag_set(tp, MAX_RXPEND_64);
15655 		tp->rx_pending = 63;
15656 	}
15657 
15658 	err = tg3_get_device_address(tp);
15659 	if (err) {
15660 		dev_err(&pdev->dev,
15661 			"Could not obtain valid ethernet address, aborting\n");
15662 		goto err_out_apeunmap;
15663 	}
15664 
15665 	/*
15666 	 * Reset the chip in case a UNDI or EFI driver did not shut it down.
15667 	 * The DMA self test will enable WDMAC and we would otherwise see
15668 	 * (spurious) pending DMA on the PCI bus at that point.
15669 	 */
15670 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15671 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15672 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15673 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15674 	}
15675 
15676 	err = tg3_test_dma(tp);
15677 	if (err) {
15678 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15679 		goto err_out_apeunmap;
15680 	}
15681 
15682 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15683 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15684 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
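	/* Hand each NAPI context its interrupt, RX-consumer and
	 * TX-producer mailbox registers.
	 */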
15685 	for (i = 0; i < tp->irq_max; i++) {
15686 		struct tg3_napi *tnapi = &tp->napi[i];
15687 
15688 		tnapi->tp = tp;
15689 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15690 
15691 		tnapi->int_mbox = intmbx;
15692 		if (i <= 4)
15693 			intmbx += 0x8;
15694 		else
15695 			intmbx += 0x4;
15696 
15697 		tnapi->consmbox = rcvmbx;
15698 		tnapi->prodmbox = sndmbx;
15699 
15700 		if (i)
15701 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15702 		else
15703 			tnapi->coal_now = HOSTCC_MODE_NOW;
15704 
15705 		if (!tg3_flag(tp, SUPPORT_MSIX))
15706 			break;
15707 
15708 		/*
15709 		 * If we support MSIX, we'll be using RSS.  If we're using
15710 		 * RSS, the first vector only handles link interrupts and the
15711 		 * remaining vectors handle rx and tx interrupts.  Reuse the
15712 		 * mailbox values for the next iteration.  The values we setup
15713 		 * mailbox values for the next iteration.  The values we set up
15714 		 */
15715 		if (!i)
15716 			continue;
15717 
15718 		rcvmbx += 0x8;
15719 
15720 		if (sndmbx & 0x4)
15721 			sndmbx -= 0x4;
15722 		else
15723 			sndmbx += 0xc;
15724 	}
15725 
15726 	tg3_init_coal(tp);
15727 
15728 	pci_set_drvdata(pdev, dev);
15729 
15730 	if (tg3_flag(tp, 5717_PLUS)) {
15731 		/* Resume a low-power mode */
15732 		tg3_frob_aux_power(tp, false);
15733 	}
15734 
15735 	err = register_netdev(dev);
15736 	if (err) {
15737 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15738 		goto err_out_apeunmap;
15739 	}
15740 
15741 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15742 		    tp->board_part_number,
15743 		    tp->pci_chip_rev_id,
15744 		    tg3_bus_string(tp, str),
15745 		    dev->dev_addr);
15746 
15747 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15748 		struct phy_device *phydev;
15749 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15750 		netdev_info(dev,
15751 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15752 			    phydev->drv->name, dev_name(&phydev->dev));
15753 	} else {
15754 		char *ethtype;
15755 
15756 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15757 			ethtype = "10/100Base-TX";
15758 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15759 			ethtype = "1000Base-SX";
15760 		else
15761 			ethtype = "10/100/1000Base-T";
15762 
15763 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15764 			    "(WireSpeed[%d], EEE[%d])\n",
15765 			    tg3_phy_string(tp), ethtype,
15766 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15767 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15768 	}
15769 
15770 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15771 		    (dev->features & NETIF_F_RXCSUM) != 0,
15772 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15773 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15774 		    tg3_flag(tp, ENABLE_ASF) != 0,
15775 		    tg3_flag(tp, TSO_CAPABLE) != 0);
15776 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15777 		    tp->dma_rwctrl,
15778 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15779 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15780 
15781 	pci_save_state(pdev);
15782 
15783 	return 0;
15784 
15785 err_out_apeunmap:
15786 	if (tp->aperegs) {
15787 		iounmap(tp->aperegs);
15788 		tp->aperegs = NULL;
15789 	}
15790 
15791 err_out_iounmap:
15792 	if (tp->regs) {
15793 		iounmap(tp->regs);
15794 		tp->regs = NULL;
15795 	}
15796 
15797 err_out_free_dev:
15798 	free_netdev(dev);
15799 
15800 err_out_power_down:
15801 	pci_set_power_state(pdev, PCI_D3hot);
15802 
15803 err_out_free_res:
15804 	pci_release_regions(pdev);
15805 
15806 err_out_disable_pdev:
15807 	pci_disable_device(pdev);
15808 	pci_set_drvdata(pdev, NULL);
15809 	return err;
15810 }
15811 
15812 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15813 {
15814 	struct net_device *dev = pci_get_drvdata(pdev);
15815 
15816 	if (dev) {
15817 		struct tg3 *tp = netdev_priv(dev);
15818 
15819 		if (tp->fw)
15820 			release_firmware(tp->fw);
15821 
15822 		tg3_reset_task_cancel(tp);
15823 
15824 		if (tg3_flag(tp, USE_PHYLIB)) {
15825 			tg3_phy_fini(tp);
15826 			tg3_mdio_fini(tp);
15827 		}
15828 
15829 		unregister_netdev(dev);
15830 		if (tp->aperegs) {
15831 			iounmap(tp->aperegs);
15832 			tp->aperegs = NULL;
15833 		}
15834 		if (tp->regs) {
15835 			iounmap(tp->regs);
15836 			tp->regs = NULL;
15837 		}
15838 		free_netdev(dev);
15839 		pci_release_regions(pdev);
15840 		pci_disable_device(pdev);
15841 		pci_set_drvdata(pdev, NULL);
15842 	}
15843 }
15844 
15845 #ifdef CONFIG_PM_SLEEP
15846 static int tg3_suspend(struct device *device)
15847 {
15848 	struct pci_dev *pdev = to_pci_dev(device);
15849 	struct net_device *dev = pci_get_drvdata(pdev);
15850 	struct tg3 *tp = netdev_priv(dev);
15851 	int err;
15852 
15853 	if (!netif_running(dev))
15854 		return 0;
15855 
15856 	tg3_reset_task_cancel(tp);
15857 	tg3_phy_stop(tp);
15858 	tg3_netif_stop(tp);
15859 
15860 	del_timer_sync(&tp->timer);
15861 
15862 	tg3_full_lock(tp, 1);
15863 	tg3_disable_ints(tp);
15864 	tg3_full_unlock(tp);
15865 
15866 	netif_device_detach(dev);
15867 
15868 	tg3_full_lock(tp, 0);
15869 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15870 	tg3_flag_clear(tp, INIT_COMPLETE);
15871 	tg3_full_unlock(tp);
15872 
15873 	err = tg3_power_down_prepare(tp);
15874 	if (err) {
15875 		int err2;
15876 
15877 		tg3_full_lock(tp, 0);
15878 
15879 		tg3_flag_set(tp, INIT_COMPLETE);
15880 		err2 = tg3_restart_hw(tp, 1);
15881 		if (err2)
15882 			goto out;
15883 
15884 		tp->timer.expires = jiffies + tp->timer_offset;
15885 		add_timer(&tp->timer);
15886 
15887 		netif_device_attach(dev);
15888 		tg3_netif_start(tp);
15889 
15890 out:
15891 		tg3_full_unlock(tp);
15892 
15893 		if (!err2)
15894 			tg3_phy_start(tp);
15895 	}
15896 
15897 	return err;
15898 }
15899 
15900 static int tg3_resume(struct device *device)
15901 {
15902 	struct pci_dev *pdev = to_pci_dev(device);
15903 	struct net_device *dev = pci_get_drvdata(pdev);
15904 	struct tg3 *tp = netdev_priv(dev);
15905 	int err;
15906 
15907 	if (!netif_running(dev))
15908 		return 0;
15909 
15910 	netif_device_attach(dev);
15911 
15912 	tg3_full_lock(tp, 0);
15913 
15914 	tg3_flag_set(tp, INIT_COMPLETE);
15915 	err = tg3_restart_hw(tp, 1);
15916 	if (err)
15917 		goto out;
15918 
15919 	tp->timer.expires = jiffies + tp->timer_offset;
15920 	add_timer(&tp->timer);
15921 
15922 	tg3_netif_start(tp);
15923 
15924 out:
15925 	tg3_full_unlock(tp);
15926 
15927 	if (!err)
15928 		tg3_phy_start(tp);
15929 
15930 	return err;
15931 }
15932 
15933 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15934 #define TG3_PM_OPS (&tg3_pm_ops)
15935 
15936 #else
15937 
15938 #define TG3_PM_OPS NULL
15939 
15940 #endif /* CONFIG_PM_SLEEP */
15941 
15942 /**
15943  * tg3_io_error_detected - called when PCI error is detected
15944  * @pdev: Pointer to PCI device
15945  * @state: The current pci connection state
15946  *
15947  * This function is called after a PCI bus error affecting
15948  * this device has been detected.
15949  */
15950 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15951 					      pci_channel_state_t state)
15952 {
15953 	struct net_device *netdev = pci_get_drvdata(pdev);
15954 	struct tg3 *tp = netdev_priv(netdev);
15955 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15956 
15957 	netdev_info(netdev, "PCI I/O error detected\n");
15958 
15959 	rtnl_lock();
15960 
15961 	if (!netif_running(netdev))
15962 		goto done;
15963 
15964 	tg3_phy_stop(tp);
15965 
15966 	tg3_netif_stop(tp);
15967 
15968 	del_timer_sync(&tp->timer);
15969 
15970 	/* Want to make sure that the reset task doesn't run */
15971 	tg3_reset_task_cancel(tp);
15972 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15973 
15974 	netif_device_detach(netdev);
15975 
15976 	/* Clean up software state, even if MMIO is blocked */
15977 	tg3_full_lock(tp, 0);
15978 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15979 	tg3_full_unlock(tp);
15980 
15981 done:
15982 	if (state == pci_channel_io_perm_failure)
15983 		err = PCI_ERS_RESULT_DISCONNECT;
15984 	else
15985 		pci_disable_device(pdev);
15986 
15987 	rtnl_unlock();
15988 
15989 	return err;
15990 }
15991 
15992 /**
15993  * tg3_io_slot_reset - called after the pci bus has been reset.
15994  * @pdev: Pointer to PCI device
15995  *
15996  * Restart the card from scratch, as if from a cold-boot.
15997  * At this point, the card has experienced a hard reset,
15998  * followed by fixups by BIOS, and has its config space
15999  * set up identically to what it was at cold boot.
16000  */
16001 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16002 {
16003 	struct net_device *netdev = pci_get_drvdata(pdev);
16004 	struct tg3 *tp = netdev_priv(netdev);
16005 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16006 	int err;
16007 
16008 	rtnl_lock();
16009 
16010 	if (pci_enable_device(pdev)) {
16011 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16012 		goto done;
16013 	}
16014 
16015 	pci_set_master(pdev);
16016 	pci_restore_state(pdev);
16017 	pci_save_state(pdev);
16018 
16019 	if (!netif_running(netdev)) {
16020 		rc = PCI_ERS_RESULT_RECOVERED;
16021 		goto done;
16022 	}
16023 
16024 	err = tg3_power_up(tp);
16025 	if (err)
16026 		goto done;
16027 
16028 	rc = PCI_ERS_RESULT_RECOVERED;
16029 
16030 done:
16031 	rtnl_unlock();
16032 
16033 	return rc;
16034 }
16035 
16036 /**
16037  * tg3_io_resume - called when traffic can start flowing again.
16038  * @pdev: Pointer to PCI device
16039  *
16040  * This callback is called when the error recovery driver tells
16041  * us that it's OK to resume normal operation.
16042  */
16043 static void tg3_io_resume(struct pci_dev *pdev)
16044 {
16045 	struct net_device *netdev = pci_get_drvdata(pdev);
16046 	struct tg3 *tp = netdev_priv(netdev);
16047 	int err;
16048 
16049 	rtnl_lock();
16050 
16051 	if (!netif_running(netdev))
16052 		goto done;
16053 
16054 	tg3_full_lock(tp, 0);
16055 	tg3_flag_set(tp, INIT_COMPLETE);
16056 	err = tg3_restart_hw(tp, 1);
16057 	tg3_full_unlock(tp);
16058 	if (err) {
16059 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16060 		goto done;
16061 	}
16062 
16063 	netif_device_attach(netdev);
16064 
16065 	tp->timer.expires = jiffies + tp->timer_offset;
16066 	add_timer(&tp->timer);
16067 
16068 	tg3_netif_start(tp);
16069 
16070 	tg3_phy_start(tp);
16071 
16072 done:
16073 	rtnl_unlock();
16074 }
16075 
16076 static struct pci_error_handlers tg3_err_handler = {
16077 	.error_detected	= tg3_io_error_detected,
16078 	.slot_reset	= tg3_io_slot_reset,
16079 	.resume		= tg3_io_resume
16080 };
16081 
16082 static struct pci_driver tg3_driver = {
16083 	.name		= DRV_MODULE_NAME,
16084 	.id_table	= tg3_pci_tbl,
16085 	.probe		= tg3_init_one,
16086 	.remove		= __devexit_p(tg3_remove_one),
16087 	.err_handler	= &tg3_err_handler,
16088 	.driver.pm	= TG3_PM_OPS,
16089 };
16090 
16091 static int __init tg3_init(void)
16092 {
16093 	return pci_register_driver(&tg3_driver);
16094 }
16095 
16096 static void __exit tg3_cleanup(void)
16097 {
16098 	pci_unregister_driver(&tg3_driver);
16099 }
16100 
16101 module_init(tg3_init);
16102 module_exit(tg3_cleanup);
16103