xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision 079c9534a96da9a85a2a2f9715851050fbfbf749)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 
48 #include <net/checksum.h>
49 #include <net/ip.h>
50 
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55 
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60 
61 #define BAR_0	0
62 #define BAR_2	2
63 
64 #include "tg3.h"
65 
66 /* Functions & macros to verify TG3_FLAGS types */
67 
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70 	return test_bit(flag, bits);
71 }
72 
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	set_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	clear_bit(flag, bits);
81 }
82 
83 #define tg3_flag(tp, flag)				\
84 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)				\
86 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)			\
88 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 
90 #define DRV_MODULE_NAME		"tg3"
91 #define TG3_MAJ_NUM			3
92 #define TG3_MIN_NUM			122
93 #define DRV_MODULE_VERSION	\
94 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE	"December 7, 2011"
96 
97 #define RESET_KIND_SHUTDOWN	0
98 #define RESET_KIND_INIT		1
99 #define RESET_KIND_SUSPEND	2
100 
101 #define TG3_DEF_RX_MODE		0
102 #define TG3_DEF_TX_MODE		0
103 #define TG3_DEF_MSG_ENABLE	  \
104 	(NETIF_MSG_DRV		| \
105 	 NETIF_MSG_PROBE	| \
106 	 NETIF_MSG_LINK		| \
107 	 NETIF_MSG_TIMER	| \
108 	 NETIF_MSG_IFDOWN	| \
109 	 NETIF_MSG_IFUP		| \
110 	 NETIF_MSG_RX_ERR	| \
111 	 NETIF_MSG_TX_ERR)
112 
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
114 
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118 
119 #define TG3_TX_TIMEOUT			(5 * HZ)
120 
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU			60
123 #define TG3_MAX_MTU(tp)	\
124 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125 
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING		200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
138 
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145 
146 #define TG3_TX_RING_SIZE		512
147 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
148 
149 #define TG3_RX_STD_RING_BYTES(tp) \
150 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
156 				 TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
158 
159 #define TG3_DMA_BYTE_ENAB		64
160 
161 #define TG3_RX_STD_DMA_SZ		1536
162 #define TG3_RX_JMB_DMA_SZ		9046
163 
164 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
165 
166 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168 
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171 
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174 
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD		256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
189 #else
190 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
191 #endif
192 
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
197 #endif
198 
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K		2048
202 #define TG3_TX_BD_DMA_MAX_4K		4096
203 
204 #define TG3_RAW_IP_ALIGN 2
205 
206 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
207 
208 #define FIRMWARE_TG3		"tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
211 
212 static char version[] __devinitdata =
213 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214 
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222 
223 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226 
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309 	{}
310 };
311 
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313 
314 static const struct {
315 	const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317 	{ "rx_octets" },
318 	{ "rx_fragments" },
319 	{ "rx_ucast_packets" },
320 	{ "rx_mcast_packets" },
321 	{ "rx_bcast_packets" },
322 	{ "rx_fcs_errors" },
323 	{ "rx_align_errors" },
324 	{ "rx_xon_pause_rcvd" },
325 	{ "rx_xoff_pause_rcvd" },
326 	{ "rx_mac_ctrl_rcvd" },
327 	{ "rx_xoff_entered" },
328 	{ "rx_frame_too_long_errors" },
329 	{ "rx_jabbers" },
330 	{ "rx_undersize_packets" },
331 	{ "rx_in_length_errors" },
332 	{ "rx_out_length_errors" },
333 	{ "rx_64_or_less_octet_packets" },
334 	{ "rx_65_to_127_octet_packets" },
335 	{ "rx_128_to_255_octet_packets" },
336 	{ "rx_256_to_511_octet_packets" },
337 	{ "rx_512_to_1023_octet_packets" },
338 	{ "rx_1024_to_1522_octet_packets" },
339 	{ "rx_1523_to_2047_octet_packets" },
340 	{ "rx_2048_to_4095_octet_packets" },
341 	{ "rx_4096_to_8191_octet_packets" },
342 	{ "rx_8192_to_9022_octet_packets" },
343 
344 	{ "tx_octets" },
345 	{ "tx_collisions" },
346 
347 	{ "tx_xon_sent" },
348 	{ "tx_xoff_sent" },
349 	{ "tx_flow_control" },
350 	{ "tx_mac_errors" },
351 	{ "tx_single_collisions" },
352 	{ "tx_mult_collisions" },
353 	{ "tx_deferred" },
354 	{ "tx_excessive_collisions" },
355 	{ "tx_late_collisions" },
356 	{ "tx_collide_2times" },
357 	{ "tx_collide_3times" },
358 	{ "tx_collide_4times" },
359 	{ "tx_collide_5times" },
360 	{ "tx_collide_6times" },
361 	{ "tx_collide_7times" },
362 	{ "tx_collide_8times" },
363 	{ "tx_collide_9times" },
364 	{ "tx_collide_10times" },
365 	{ "tx_collide_11times" },
366 	{ "tx_collide_12times" },
367 	{ "tx_collide_13times" },
368 	{ "tx_collide_14times" },
369 	{ "tx_collide_15times" },
370 	{ "tx_ucast_packets" },
371 	{ "tx_mcast_packets" },
372 	{ "tx_bcast_packets" },
373 	{ "tx_carrier_sense_errors" },
374 	{ "tx_discards" },
375 	{ "tx_errors" },
376 
377 	{ "dma_writeq_full" },
378 	{ "dma_write_prioq_full" },
379 	{ "rxbds_empty" },
380 	{ "rx_discards" },
381 	{ "rx_errors" },
382 	{ "rx_threshold_hit" },
383 
384 	{ "dma_readq_full" },
385 	{ "dma_read_prioq_full" },
386 	{ "tx_comp_queue_full" },
387 
388 	{ "ring_set_send_prod_index" },
389 	{ "ring_status_update" },
390 	{ "nic_irqs" },
391 	{ "nic_avoided_irqs" },
392 	{ "nic_tx_threshold_hit" },
393 
394 	{ "mbuf_lwm_thresh_hit" },
395 };
396 
397 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
398 
399 
400 static const struct {
401 	const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 	{ "nvram test        (online) " },
404 	{ "link test         (online) " },
405 	{ "register test     (offline)" },
406 	{ "memory test       (offline)" },
407 	{ "mac loopback test (offline)" },
408 	{ "phy loopback test (offline)" },
409 	{ "ext loopback test (offline)" },
410 	{ "interrupt test    (offline)" },
411 };
412 
413 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
414 
415 
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418 	writel(val, tp->regs + off);
419 }
420 
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423 	return readl(tp->regs + off);
424 }
425 
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428 	writel(val, tp->aperegs + off);
429 }
430 
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433 	return readl(tp->aperegs + off);
434 }
435 
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438 	unsigned long flags;
439 
440 	spin_lock_irqsave(&tp->indirect_lock, flags);
441 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445 
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447 {
448 	writel(val, tp->regs + off);
449 	readl(tp->regs + off);
450 }
451 
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454 	unsigned long flags;
455 	u32 val;
456 
457 	spin_lock_irqsave(&tp->indirect_lock, flags);
458 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
461 	return val;
462 }
463 
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
465 {
466 	unsigned long flags;
467 
468 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 				       TG3_64BIT_REG_LOW, val);
471 		return;
472 	}
473 	if (off == TG3_RX_STD_PROD_IDX_REG) {
474 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 				       TG3_64BIT_REG_LOW, val);
476 		return;
477 	}
478 
479 	spin_lock_irqsave(&tp->indirect_lock, flags);
480 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
483 
484 	/* In indirect mode when disabling interrupts, we also need
485 	 * to clear the interrupt bit in the GRC local ctrl register.
486 	 */
487 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
488 	    (val == 0x1)) {
489 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
491 	}
492 }
493 
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496 	unsigned long flags;
497 	u32 val;
498 
499 	spin_lock_irqsave(&tp->indirect_lock, flags);
500 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
503 	return val;
504 }
505 
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 		/* Non-posted methods */
515 		tp->write32(tp, off, val);
516 	else {
517 		/* Posted method */
518 		tg3_write32(tp, off, val);
519 		if (usec_wait)
520 			udelay(usec_wait);
521 		tp->read32(tp, off);
522 	}
523 	/* Wait again after the read for the posted method to guarantee that
524 	 * the wait time is met.
525 	 */
526 	if (usec_wait)
527 		udelay(usec_wait);
528 }
529 
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532 	tp->write32_mbox(tp, off, val);
533 	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 		tp->read32_mbox(tp, off);
535 }
536 
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538 {
539 	void __iomem *mbox = tp->regs + off;
540 	writel(val, mbox);
541 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
542 		writel(val, mbox);
543 	if (tg3_flag(tp, MBOX_WRITE_REORDER))
544 		readl(mbox);
545 }
546 
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549 	return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551 
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554 	writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556 
557 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
562 
563 #define tw32(reg, val)			tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)			tp->read32(tp, reg)
567 
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
569 {
570 	unsigned long flags;
571 
572 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
574 		return;
575 
576 	spin_lock_irqsave(&tp->indirect_lock, flags);
577 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
580 
581 		/* Always leave this as zero. */
582 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
583 	} else {
584 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
586 
587 		/* Always leave this as zero. */
588 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
589 	}
590 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
591 }
592 
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
594 {
595 	unsigned long flags;
596 
597 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599 		*val = 0;
600 		return;
601 	}
602 
603 	spin_lock_irqsave(&tp->indirect_lock, flags);
604 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
607 
608 		/* Always leave this as zero. */
609 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
610 	} else {
611 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 		*val = tr32(TG3PCI_MEM_WIN_DATA);
613 
614 		/* Always leave this as zero. */
615 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
616 	}
617 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
618 }
619 
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622 	int i;
623 	u32 regbase, bit;
624 
625 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 		regbase = TG3_APE_LOCK_GRANT;
627 	else
628 		regbase = TG3_APE_PER_LOCK_GRANT;
629 
630 	/* Make sure the driver hasn't any stale locks. */
631 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632 		switch (i) {
633 		case TG3_APE_LOCK_PHY0:
634 		case TG3_APE_LOCK_PHY1:
635 		case TG3_APE_LOCK_PHY2:
636 		case TG3_APE_LOCK_PHY3:
637 			bit = APE_LOCK_GRANT_DRIVER;
638 			break;
639 		default:
640 			if (!tp->pci_fn)
641 				bit = APE_LOCK_GRANT_DRIVER;
642 			else
643 				bit = 1 << tp->pci_fn;
644 		}
645 		tg3_ape_write32(tp, regbase + 4 * i, bit);
646 	}
647 
648 }
649 
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
651 {
652 	int i, off;
653 	int ret = 0;
654 	u32 status, req, gnt, bit;
655 
656 	if (!tg3_flag(tp, ENABLE_APE))
657 		return 0;
658 
659 	switch (locknum) {
660 	case TG3_APE_LOCK_GPIO:
661 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662 			return 0;
663 	case TG3_APE_LOCK_GRC:
664 	case TG3_APE_LOCK_MEM:
665 		if (!tp->pci_fn)
666 			bit = APE_LOCK_REQ_DRIVER;
667 		else
668 			bit = 1 << tp->pci_fn;
669 		break;
670 	default:
671 		return -EINVAL;
672 	}
673 
674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 		req = TG3_APE_LOCK_REQ;
676 		gnt = TG3_APE_LOCK_GRANT;
677 	} else {
678 		req = TG3_APE_PER_LOCK_REQ;
679 		gnt = TG3_APE_PER_LOCK_GRANT;
680 	}
681 
682 	off = 4 * locknum;
683 
684 	tg3_ape_write32(tp, req + off, bit);
685 
686 	/* Wait for up to 1 millisecond to acquire lock. */
687 	for (i = 0; i < 100; i++) {
688 		status = tg3_ape_read32(tp, gnt + off);
689 		if (status == bit)
690 			break;
691 		udelay(10);
692 	}
693 
694 	if (status != bit) {
695 		/* Revoke the lock request. */
696 		tg3_ape_write32(tp, gnt + off, bit);
697 		ret = -EBUSY;
698 	}
699 
700 	return ret;
701 }
702 
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705 	u32 gnt, bit;
706 
707 	if (!tg3_flag(tp, ENABLE_APE))
708 		return;
709 
710 	switch (locknum) {
711 	case TG3_APE_LOCK_GPIO:
712 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713 			return;
714 	case TG3_APE_LOCK_GRC:
715 	case TG3_APE_LOCK_MEM:
716 		if (!tp->pci_fn)
717 			bit = APE_LOCK_GRANT_DRIVER;
718 		else
719 			bit = 1 << tp->pci_fn;
720 		break;
721 	default:
722 		return;
723 	}
724 
725 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 		gnt = TG3_APE_LOCK_GRANT;
727 	else
728 		gnt = TG3_APE_PER_LOCK_GRANT;
729 
730 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732 
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
734 {
735 	int i;
736 	u32 apedata;
737 
738 	/* NCSI does not support APE events */
739 	if (tg3_flag(tp, APE_HAS_NCSI))
740 		return;
741 
742 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 	if (apedata != APE_SEG_SIG_MAGIC)
744 		return;
745 
746 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 	if (!(apedata & APE_FW_STATUS_READY))
748 		return;
749 
750 	/* Wait for up to 1 millisecond for APE to service previous event. */
751 	for (i = 0; i < 10; i++) {
752 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753 			return;
754 
755 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
756 
757 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 					event | APE_EVENT_STATUS_EVENT_PENDING);
760 
761 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
762 
763 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
764 			break;
765 
766 		udelay(100);
767 	}
768 
769 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771 }
772 
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
774 {
775 	u32 event;
776 	u32 apedata;
777 
778 	if (!tg3_flag(tp, ENABLE_APE))
779 		return;
780 
781 	switch (kind) {
782 	case RESET_KIND_INIT:
783 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 				APE_HOST_SEG_SIG_MAGIC);
785 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 				APE_HOST_SEG_LEN_MAGIC);
787 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 				APE_HOST_BEHAV_NO_PHYLOCK);
793 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 				    TG3_APE_HOST_DRVR_STATE_START);
795 
796 		event = APE_EVENT_STATUS_STATE_START;
797 		break;
798 	case RESET_KIND_SHUTDOWN:
799 		/* With the interface we are currently using,
800 		 * APE does not track driver state.  Wiping
801 		 * out the HOST SEGMENT SIGNATURE forces
802 		 * the APE to assume OS absent status.
803 		 */
804 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
805 
806 		if (device_may_wakeup(&tp->pdev->dev) &&
807 		    tg3_flag(tp, WOL_ENABLE)) {
808 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 					    TG3_APE_HOST_WOL_SPEED_AUTO);
810 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
811 		} else
812 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
813 
814 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
815 
816 		event = APE_EVENT_STATUS_STATE_UNLOAD;
817 		break;
818 	case RESET_KIND_SUSPEND:
819 		event = APE_EVENT_STATUS_STATE_SUSPEND;
820 		break;
821 	default:
822 		return;
823 	}
824 
825 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
826 
827 	tg3_ape_send_event(tp, event);
828 }
829 
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832 	int i;
833 
834 	tw32(TG3PCI_MISC_HOST_CTRL,
835 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 	for (i = 0; i < tp->irq_max; i++)
837 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839 
840 static void tg3_enable_ints(struct tg3 *tp)
841 {
842 	int i;
843 
844 	tp->irq_sync = 0;
845 	wmb();
846 
847 	tw32(TG3PCI_MISC_HOST_CTRL,
848 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
849 
850 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 	for (i = 0; i < tp->irq_cnt; i++) {
852 		struct tg3_napi *tnapi = &tp->napi[i];
853 
854 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 		if (tg3_flag(tp, 1SHOT_MSI))
856 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
857 
858 		tp->coal_now |= tnapi->coal_now;
859 	}
860 
861 	/* Force an initial interrupt */
862 	if (!tg3_flag(tp, TAGGED_STATUS) &&
863 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
865 	else
866 		tw32(HOSTCC_MODE, tp->coal_now);
867 
868 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869 }
870 
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873 	struct tg3 *tp = tnapi->tp;
874 	struct tg3_hw_status *sblk = tnapi->hw_status;
875 	unsigned int work_exists = 0;
876 
877 	/* check for phy events */
878 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 		if (sblk->status & SD_STATUS_LINK_CHG)
880 			work_exists = 1;
881 	}
882 	/* check for RX/TX work to do */
883 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885 		work_exists = 1;
886 
887 	return work_exists;
888 }
889 
890 /* tg3_int_reenable
891  *  similar to tg3_enable_ints, but it accurately determines whether there
892  *  is new work pending and can return without flushing the PIO write
893  *  which reenables interrupts
894  */
895 static void tg3_int_reenable(struct tg3_napi *tnapi)
896 {
897 	struct tg3 *tp = tnapi->tp;
898 
899 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
900 	mmiowb();
901 
902 	/* When doing tagged status, this work check is unnecessary.
903 	 * The last_tag we write above tells the chip which piece of
904 	 * work we've completed.
905 	 */
906 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
907 		tw32(HOSTCC_MODE, tp->coalesce_mode |
908 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
909 }
910 
911 static void tg3_switch_clocks(struct tg3 *tp)
912 {
913 	u32 clock_ctrl;
914 	u32 orig_clock_ctrl;
915 
916 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
917 		return;
918 
919 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
920 
921 	orig_clock_ctrl = clock_ctrl;
922 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
923 		       CLOCK_CTRL_CLKRUN_OENABLE |
924 		       0x1f);
925 	tp->pci_clock_ctrl = clock_ctrl;
926 
927 	if (tg3_flag(tp, 5705_PLUS)) {
928 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
929 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
930 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
931 		}
932 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
933 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
934 			    clock_ctrl |
935 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
936 			    40);
937 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
938 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
939 			    40);
940 	}
941 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
942 }
943 
944 #define PHY_BUSY_LOOPS	5000
945 
946 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
947 {
948 	u32 frame_val;
949 	unsigned int loops;
950 	int ret;
951 
952 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
953 		tw32_f(MAC_MI_MODE,
954 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
955 		udelay(80);
956 	}
957 
958 	*val = 0x0;
959 
960 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
961 		      MI_COM_PHY_ADDR_MASK);
962 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
963 		      MI_COM_REG_ADDR_MASK);
964 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
965 
966 	tw32_f(MAC_MI_COM, frame_val);
967 
968 	loops = PHY_BUSY_LOOPS;
969 	while (loops != 0) {
970 		udelay(10);
971 		frame_val = tr32(MAC_MI_COM);
972 
973 		if ((frame_val & MI_COM_BUSY) == 0) {
974 			udelay(5);
975 			frame_val = tr32(MAC_MI_COM);
976 			break;
977 		}
978 		loops -= 1;
979 	}
980 
981 	ret = -EBUSY;
982 	if (loops != 0) {
983 		*val = frame_val & MI_COM_DATA_MASK;
984 		ret = 0;
985 	}
986 
987 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
988 		tw32_f(MAC_MI_MODE, tp->mi_mode);
989 		udelay(80);
990 	}
991 
992 	return ret;
993 }
994 
995 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
996 {
997 	u32 frame_val;
998 	unsigned int loops;
999 	int ret;
1000 
1001 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1002 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1003 		return 0;
1004 
1005 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1006 		tw32_f(MAC_MI_MODE,
1007 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1008 		udelay(80);
1009 	}
1010 
1011 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1012 		      MI_COM_PHY_ADDR_MASK);
1013 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1014 		      MI_COM_REG_ADDR_MASK);
1015 	frame_val |= (val & MI_COM_DATA_MASK);
1016 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1017 
1018 	tw32_f(MAC_MI_COM, frame_val);
1019 
1020 	loops = PHY_BUSY_LOOPS;
1021 	while (loops != 0) {
1022 		udelay(10);
1023 		frame_val = tr32(MAC_MI_COM);
1024 		if ((frame_val & MI_COM_BUSY) == 0) {
1025 			udelay(5);
1026 			frame_val = tr32(MAC_MI_COM);
1027 			break;
1028 		}
1029 		loops -= 1;
1030 	}
1031 
1032 	ret = -EBUSY;
1033 	if (loops != 0)
1034 		ret = 0;
1035 
1036 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1037 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1038 		udelay(80);
1039 	}
1040 
1041 	return ret;
1042 }
1043 
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046 	int err;
1047 
1048 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049 	if (err)
1050 		goto done;
1051 
1052 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053 	if (err)
1054 		goto done;
1055 
1056 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058 	if (err)
1059 		goto done;
1060 
1061 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062 
1063 done:
1064 	return err;
1065 }
1066 
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069 	int err;
1070 
1071 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072 	if (err)
1073 		goto done;
1074 
1075 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076 	if (err)
1077 		goto done;
1078 
1079 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081 	if (err)
1082 		goto done;
1083 
1084 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085 
1086 done:
1087 	return err;
1088 }
1089 
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092 	int err;
1093 
1094 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095 	if (!err)
1096 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097 
1098 	return err;
1099 }
1100 
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103 	int err;
1104 
1105 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106 	if (!err)
1107 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108 
1109 	return err;
1110 }
1111 
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114 	int err;
1115 
1116 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1119 	if (!err)
1120 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121 
1122 	return err;
1123 }
1124 
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128 		set |= MII_TG3_AUXCTL_MISC_WREN;
1129 
1130 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132 
1133 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1134 	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1135 			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1136 			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1137 
1138 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1139 	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140 			     MII_TG3_AUXCTL_ACTL_TX_6DB);
1141 
1142 static int tg3_bmcr_reset(struct tg3 *tp)
1143 {
1144 	u32 phy_control;
1145 	int limit, err;
1146 
1147 	/* OK, reset it, and poll the BMCR_RESET bit until it
1148 	 * clears or we time out.
1149 	 */
1150 	phy_control = BMCR_RESET;
1151 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1152 	if (err != 0)
1153 		return -EBUSY;
1154 
1155 	limit = 5000;
1156 	while (limit--) {
1157 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1158 		if (err != 0)
1159 			return -EBUSY;
1160 
1161 		if ((phy_control & BMCR_RESET) == 0) {
1162 			udelay(40);
1163 			break;
1164 		}
1165 		udelay(10);
1166 	}
1167 	if (limit < 0)
1168 		return -EBUSY;
1169 
1170 	return 0;
1171 }
1172 
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175 	struct tg3 *tp = bp->priv;
1176 	u32 val;
1177 
1178 	spin_lock_bh(&tp->lock);
1179 
1180 	if (tg3_readphy(tp, reg, &val))
1181 		val = -EIO;
1182 
1183 	spin_unlock_bh(&tp->lock);
1184 
1185 	return val;
1186 }
1187 
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190 	struct tg3 *tp = bp->priv;
1191 	u32 ret = 0;
1192 
1193 	spin_lock_bh(&tp->lock);
1194 
1195 	if (tg3_writephy(tp, reg, val))
1196 		ret = -EIO;
1197 
1198 	spin_unlock_bh(&tp->lock);
1199 
1200 	return ret;
1201 }
1202 
1203 static int tg3_mdio_reset(struct mii_bus *bp)
1204 {
1205 	return 0;
1206 }
1207 
1208 static void tg3_mdio_config_5785(struct tg3 *tp)
1209 {
1210 	u32 val;
1211 	struct phy_device *phydev;
1212 
1213 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1214 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1215 	case PHY_ID_BCM50610:
1216 	case PHY_ID_BCM50610M:
1217 		val = MAC_PHYCFG2_50610_LED_MODES;
1218 		break;
1219 	case PHY_ID_BCMAC131:
1220 		val = MAC_PHYCFG2_AC131_LED_MODES;
1221 		break;
1222 	case PHY_ID_RTL8211C:
1223 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1224 		break;
1225 	case PHY_ID_RTL8201E:
1226 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1227 		break;
1228 	default:
1229 		return;
1230 	}
1231 
1232 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1233 		tw32(MAC_PHYCFG2, val);
1234 
1235 		val = tr32(MAC_PHYCFG1);
1236 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1237 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1238 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1239 		tw32(MAC_PHYCFG1, val);
1240 
1241 		return;
1242 	}
1243 
1244 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1245 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1246 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1247 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1248 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1249 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1250 		       MAC_PHYCFG2_INBAND_ENABLE;
1251 
1252 	tw32(MAC_PHYCFG2, val);
1253 
1254 	val = tr32(MAC_PHYCFG1);
1255 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1256 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1257 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1258 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1259 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1260 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1261 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1262 	}
1263 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1264 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1265 	tw32(MAC_PHYCFG1, val);
1266 
1267 	val = tr32(MAC_EXT_RGMII_MODE);
1268 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1269 		 MAC_RGMII_MODE_RX_QUALITY |
1270 		 MAC_RGMII_MODE_RX_ACTIVITY |
1271 		 MAC_RGMII_MODE_RX_ENG_DET |
1272 		 MAC_RGMII_MODE_TX_ENABLE |
1273 		 MAC_RGMII_MODE_TX_LOWPWR |
1274 		 MAC_RGMII_MODE_TX_RESET);
1275 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1276 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1277 			val |= MAC_RGMII_MODE_RX_INT_B |
1278 			       MAC_RGMII_MODE_RX_QUALITY |
1279 			       MAC_RGMII_MODE_RX_ACTIVITY |
1280 			       MAC_RGMII_MODE_RX_ENG_DET;
1281 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1282 			val |= MAC_RGMII_MODE_TX_ENABLE |
1283 			       MAC_RGMII_MODE_TX_LOWPWR |
1284 			       MAC_RGMII_MODE_TX_RESET;
1285 	}
1286 	tw32(MAC_EXT_RGMII_MODE, val);
1287 }
1288 
1289 static void tg3_mdio_start(struct tg3 *tp)
1290 {
1291 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1292 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1293 	udelay(80);
1294 
1295 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1296 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297 		tg3_mdio_config_5785(tp);
1298 }
1299 
1300 static int tg3_mdio_init(struct tg3 *tp)
1301 {
1302 	int i;
1303 	u32 reg;
1304 	struct phy_device *phydev;
1305 
1306 	if (tg3_flag(tp, 5717_PLUS)) {
1307 		u32 is_serdes;
1308 
1309 		tp->phy_addr = tp->pci_fn + 1;
1310 
1311 		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1312 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1313 		else
1314 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1315 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1316 		if (is_serdes)
1317 			tp->phy_addr += 7;
1318 	} else
1319 		tp->phy_addr = TG3_PHY_MII_ADDR;
1320 
1321 	tg3_mdio_start(tp);
1322 
1323 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1324 		return 0;
1325 
1326 	tp->mdio_bus = mdiobus_alloc();
1327 	if (tp->mdio_bus == NULL)
1328 		return -ENOMEM;
1329 
1330 	tp->mdio_bus->name     = "tg3 mdio bus";
1331 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1332 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1333 	tp->mdio_bus->priv     = tp;
1334 	tp->mdio_bus->parent   = &tp->pdev->dev;
1335 	tp->mdio_bus->read     = &tg3_mdio_read;
1336 	tp->mdio_bus->write    = &tg3_mdio_write;
1337 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1338 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1339 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1340 
1341 	for (i = 0; i < PHY_MAX_ADDR; i++)
1342 		tp->mdio_bus->irq[i] = PHY_POLL;
1343 
1344 	/* The bus registration will look for all the PHYs on the mdio bus.
1345 	 * Unfortunately, it does not ensure the PHY is powered up before
1346 	 * accessing the PHY ID registers.  A chip reset is the
1347 	 * quickest way to bring the device back to an operational state..
1348 	 */
1349 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1350 		tg3_bmcr_reset(tp);
1351 
1352 	i = mdiobus_register(tp->mdio_bus);
1353 	if (i) {
1354 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355 		mdiobus_free(tp->mdio_bus);
1356 		return i;
1357 	}
1358 
1359 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1360 
1361 	if (!phydev || !phydev->drv) {
1362 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363 		mdiobus_unregister(tp->mdio_bus);
1364 		mdiobus_free(tp->mdio_bus);
1365 		return -ENODEV;
1366 	}
1367 
1368 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369 	case PHY_ID_BCM57780:
1370 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1371 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1372 		break;
1373 	case PHY_ID_BCM50610:
1374 	case PHY_ID_BCM50610M:
1375 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376 				     PHY_BRCM_RX_REFCLK_UNUSED |
1377 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1385 		/* fallthru */
1386 	case PHY_ID_RTL8211C:
1387 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1388 		break;
1389 	case PHY_ID_RTL8201E:
1390 	case PHY_ID_BCMAC131:
1391 		phydev->interface = PHY_INTERFACE_MODE_MII;
1392 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1394 		break;
1395 	}
1396 
1397 	tg3_flag_set(tp, MDIOBUS_INITED);
1398 
1399 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400 		tg3_mdio_config_5785(tp);
1401 
1402 	return 0;
1403 }
1404 
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1408 		tg3_flag_clear(tp, MDIOBUS_INITED);
1409 		mdiobus_unregister(tp->mdio_bus);
1410 		mdiobus_free(tp->mdio_bus);
1411 	}
1412 }
1413 
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417 	u32 val;
1418 
1419 	val = tr32(GRC_RX_CPU_EVENT);
1420 	val |= GRC_RX_CPU_DRIVER_EVENT;
1421 	tw32_f(GRC_RX_CPU_EVENT, val);
1422 
1423 	tp->last_event_jiffies = jiffies;
1424 }
1425 
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427 
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431 	int i;
1432 	unsigned int delay_cnt;
1433 	long time_remain;
1434 
1435 	/* If enough time has passed, no wait is necessary. */
1436 	time_remain = (long)(tp->last_event_jiffies + 1 +
1437 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438 		      (long)jiffies;
1439 	if (time_remain < 0)
1440 		return;
1441 
1442 	/* Check if we can shorten the wait time. */
1443 	delay_cnt = jiffies_to_usecs(time_remain);
1444 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446 	delay_cnt = (delay_cnt >> 3) + 1;
1447 
1448 	for (i = 0; i < delay_cnt; i++) {
1449 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450 			break;
1451 		udelay(8);
1452 	}
1453 }
1454 
1455 /* tp->lock is held. */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1457 {
1458 	u32 reg;
1459 	u32 val;
1460 
1461 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462 		return;
1463 
1464 	tg3_wait_for_event_ack(tp);
1465 
1466 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1467 
1468 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469 
1470 	val = 0;
1471 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1472 		val = reg << 16;
1473 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1474 		val |= (reg & 0xffff);
1475 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476 
1477 	val = 0;
1478 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479 		val = reg << 16;
1480 	if (!tg3_readphy(tp, MII_LPA, &reg))
1481 		val |= (reg & 0xffff);
1482 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483 
1484 	val = 0;
1485 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487 			val = reg << 16;
1488 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489 			val |= (reg & 0xffff);
1490 	}
1491 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492 
1493 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494 		val = reg << 16;
1495 	else
1496 		val = 0;
1497 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498 
1499 	tg3_generate_fw_event(tp);
1500 }
1501 
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506 		/* Wait for RX cpu to ACK the previous event. */
1507 		tg3_wait_for_event_ack(tp);
1508 
1509 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510 
1511 		tg3_generate_fw_event(tp);
1512 
1513 		/* Wait for RX cpu to ACK this event. */
1514 		tg3_wait_for_event_ack(tp);
1515 	}
1516 }
1517 
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523 
1524 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525 		switch (kind) {
1526 		case RESET_KIND_INIT:
1527 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528 				      DRV_STATE_START);
1529 			break;
1530 
1531 		case RESET_KIND_SHUTDOWN:
1532 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533 				      DRV_STATE_UNLOAD);
1534 			break;
1535 
1536 		case RESET_KIND_SUSPEND:
1537 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538 				      DRV_STATE_SUSPEND);
1539 			break;
1540 
1541 		default:
1542 			break;
1543 		}
1544 	}
1545 
1546 	if (kind == RESET_KIND_INIT ||
1547 	    kind == RESET_KIND_SUSPEND)
1548 		tg3_ape_driver_state_change(tp, kind);
1549 }
1550 
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555 		switch (kind) {
1556 		case RESET_KIND_INIT:
1557 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558 				      DRV_STATE_START_DONE);
1559 			break;
1560 
1561 		case RESET_KIND_SHUTDOWN:
1562 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563 				      DRV_STATE_UNLOAD_DONE);
1564 			break;
1565 
1566 		default:
1567 			break;
1568 		}
1569 	}
1570 
1571 	if (kind == RESET_KIND_SHUTDOWN)
1572 		tg3_ape_driver_state_change(tp, kind);
1573 }
1574 
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578 	if (tg3_flag(tp, ENABLE_ASF)) {
1579 		switch (kind) {
1580 		case RESET_KIND_INIT:
1581 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582 				      DRV_STATE_START);
1583 			break;
1584 
1585 		case RESET_KIND_SHUTDOWN:
1586 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587 				      DRV_STATE_UNLOAD);
1588 			break;
1589 
1590 		case RESET_KIND_SUSPEND:
1591 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592 				      DRV_STATE_SUSPEND);
1593 			break;
1594 
1595 		default:
1596 			break;
1597 		}
1598 	}
1599 }
1600 
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603 	int i;
1604 	u32 val;
1605 
1606 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607 		/* Wait up to 20ms for init done. */
1608 		for (i = 0; i < 200; i++) {
1609 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610 				return 0;
1611 			udelay(100);
1612 		}
1613 		return -ENODEV;
1614 	}
1615 
1616 	/* Wait for firmware initialization to complete. */
1617 	for (i = 0; i < 100000; i++) {
1618 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620 			break;
1621 		udelay(10);
1622 	}
1623 
1624 	/* Chip might not be fitted with firmware.  Some Sun onboard
1625 	 * parts are configured like that.  So don't signal the timeout
1626 	 * of the above loop as an error, but do report the lack of
1627 	 * running firmware once.
1628 	 */
1629 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1631 
1632 		netdev_info(tp->dev, "No firmware running\n");
1633 	}
1634 
1635 	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636 		/* The 57765 A0 needs a little more
1637 		 * time to do some important work.
1638 		 */
1639 		mdelay(10);
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647 	if (!netif_carrier_ok(tp->dev)) {
1648 		netif_info(tp, link, tp->dev, "Link is down\n");
1649 		tg3_ump_link_report(tp);
1650 	} else if (netif_msg_link(tp)) {
1651 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652 			    (tp->link_config.active_speed == SPEED_1000 ?
1653 			     1000 :
1654 			     (tp->link_config.active_speed == SPEED_100 ?
1655 			      100 : 10)),
1656 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1657 			     "full" : "half"));
1658 
1659 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661 			    "on" : "off",
1662 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663 			    "on" : "off");
1664 
1665 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666 			netdev_info(tp->dev, "EEE is %s\n",
1667 				    tp->setlpicnt ? "enabled" : "disabled");
1668 
1669 		tg3_ump_link_report(tp);
1670 	}
1671 }
1672 
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675 	u16 miireg;
1676 
1677 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678 		miireg = ADVERTISE_1000XPAUSE;
1679 	else if (flow_ctrl & FLOW_CTRL_TX)
1680 		miireg = ADVERTISE_1000XPSE_ASYM;
1681 	else if (flow_ctrl & FLOW_CTRL_RX)
1682 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683 	else
1684 		miireg = 0;
1685 
1686 	return miireg;
1687 }
1688 
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691 	u8 cap = 0;
1692 
1693 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696 		if (lcladv & ADVERTISE_1000XPAUSE)
1697 			cap = FLOW_CTRL_RX;
1698 		if (rmtadv & ADVERTISE_1000XPAUSE)
1699 			cap = FLOW_CTRL_TX;
1700 	}
1701 
1702 	return cap;
1703 }
1704 
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707 	u8 autoneg;
1708 	u8 flowctrl = 0;
1709 	u32 old_rx_mode = tp->rx_mode;
1710 	u32 old_tx_mode = tp->tx_mode;
1711 
1712 	if (tg3_flag(tp, USE_PHYLIB))
1713 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714 	else
1715 		autoneg = tp->link_config.autoneg;
1716 
1717 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720 		else
1721 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722 	} else
1723 		flowctrl = tp->link_config.flowctrl;
1724 
1725 	tp->link_config.active_flowctrl = flowctrl;
1726 
1727 	if (flowctrl & FLOW_CTRL_RX)
1728 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729 	else
1730 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731 
1732 	if (old_rx_mode != tp->rx_mode)
1733 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1734 
1735 	if (flowctrl & FLOW_CTRL_TX)
1736 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737 	else
1738 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739 
1740 	if (old_tx_mode != tp->tx_mode)
1741 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743 
1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746 	u8 oldflowctrl, linkmesg = 0;
1747 	u32 mac_mode, lcl_adv, rmt_adv;
1748 	struct tg3 *tp = netdev_priv(dev);
1749 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750 
1751 	spin_lock_bh(&tp->lock);
1752 
1753 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754 				    MAC_MODE_HALF_DUPLEX);
1755 
1756 	oldflowctrl = tp->link_config.active_flowctrl;
1757 
1758 	if (phydev->link) {
1759 		lcl_adv = 0;
1760 		rmt_adv = 0;
1761 
1762 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1764 		else if (phydev->speed == SPEED_1000 ||
1765 			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767 		else
1768 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1769 
1770 		if (phydev->duplex == DUPLEX_HALF)
1771 			mac_mode |= MAC_MODE_HALF_DUPLEX;
1772 		else {
1773 			lcl_adv = mii_advertise_flowctrl(
1774 				  tp->link_config.flowctrl);
1775 
1776 			if (phydev->pause)
1777 				rmt_adv = LPA_PAUSE_CAP;
1778 			if (phydev->asym_pause)
1779 				rmt_adv |= LPA_PAUSE_ASYM;
1780 		}
1781 
1782 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783 	} else
1784 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785 
1786 	if (mac_mode != tp->mac_mode) {
1787 		tp->mac_mode = mac_mode;
1788 		tw32_f(MAC_MODE, tp->mac_mode);
1789 		udelay(40);
1790 	}
1791 
1792 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793 		if (phydev->speed == SPEED_10)
1794 			tw32(MAC_MI_STAT,
1795 			     MAC_MI_STAT_10MBPS_MODE |
1796 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797 		else
1798 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799 	}
1800 
1801 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802 		tw32(MAC_TX_LENGTHS,
1803 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1805 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806 	else
1807 		tw32(MAC_TX_LENGTHS,
1808 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809 		      (6 << TX_LENGTHS_IPG_SHIFT) |
1810 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811 
1812 	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813 	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814 	    phydev->speed != tp->link_config.active_speed ||
1815 	    phydev->duplex != tp->link_config.active_duplex ||
1816 	    oldflowctrl != tp->link_config.active_flowctrl)
1817 		linkmesg = 1;
1818 
1819 	tp->link_config.active_speed = phydev->speed;
1820 	tp->link_config.active_duplex = phydev->duplex;
1821 
1822 	spin_unlock_bh(&tp->lock);
1823 
1824 	if (linkmesg)
1825 		tg3_link_report(tp);
1826 }
1827 
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830 	struct phy_device *phydev;
1831 
1832 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833 		return 0;
1834 
1835 	/* Bring the PHY back to a known state. */
1836 	tg3_bmcr_reset(tp);
1837 
1838 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839 
1840 	/* Attach the MAC to the PHY. */
1841 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842 			     phydev->dev_flags, phydev->interface);
1843 	if (IS_ERR(phydev)) {
1844 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845 		return PTR_ERR(phydev);
1846 	}
1847 
1848 	/* Mask with MAC supported features. */
1849 	switch (phydev->interface) {
1850 	case PHY_INTERFACE_MODE_GMII:
1851 	case PHY_INTERFACE_MODE_RGMII:
1852 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853 			phydev->supported &= (PHY_GBIT_FEATURES |
1854 					      SUPPORTED_Pause |
1855 					      SUPPORTED_Asym_Pause);
1856 			break;
1857 		}
1858 		/* fallthru */
1859 	case PHY_INTERFACE_MODE_MII:
1860 		phydev->supported &= (PHY_BASIC_FEATURES |
1861 				      SUPPORTED_Pause |
1862 				      SUPPORTED_Asym_Pause);
1863 		break;
1864 	default:
1865 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866 		return -EINVAL;
1867 	}
1868 
1869 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870 
1871 	phydev->advertising = phydev->supported;
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878 	struct phy_device *phydev;
1879 
1880 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881 		return;
1882 
1883 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884 
1885 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887 		phydev->speed = tp->link_config.orig_speed;
1888 		phydev->duplex = tp->link_config.orig_duplex;
1889 		phydev->autoneg = tp->link_config.orig_autoneg;
1890 		phydev->advertising = tp->link_config.orig_advertising;
1891 	}
1892 
1893 	phy_start(phydev);
1894 
1895 	phy_start_aneg(phydev);
1896 }
1897 
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901 		return;
1902 
1903 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905 
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911 	}
1912 }
1913 
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916 	int err;
1917 	u32 val;
1918 
1919 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920 		return 0;
1921 
1922 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923 		/* Cannot do read-modify-write on 5401 */
1924 		err = tg3_phy_auxctl_write(tp,
1925 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927 					   0x4c20);
1928 		goto done;
1929 	}
1930 
1931 	err = tg3_phy_auxctl_read(tp,
1932 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933 	if (err)
1934 		return err;
1935 
1936 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937 	err = tg3_phy_auxctl_write(tp,
1938 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939 
1940 done:
1941 	return err;
1942 }
1943 
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946 	u32 phytest;
1947 
1948 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949 		u32 phy;
1950 
1951 		tg3_writephy(tp, MII_TG3_FET_TEST,
1952 			     phytest | MII_TG3_FET_SHADOW_EN);
1953 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954 			if (enable)
1955 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956 			else
1957 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959 		}
1960 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961 	}
1962 }
1963 
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966 	u32 reg;
1967 
1968 	if (!tg3_flag(tp, 5705_PLUS) ||
1969 	    (tg3_flag(tp, 5717_PLUS) &&
1970 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971 		return;
1972 
1973 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974 		tg3_phy_fet_toggle_apd(tp, enable);
1975 		return;
1976 	}
1977 
1978 	reg = MII_TG3_MISC_SHDW_WREN |
1979 	      MII_TG3_MISC_SHDW_SCR5_SEL |
1980 	      MII_TG3_MISC_SHDW_SCR5_LPED |
1981 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
1983 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
1984 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986 
1987 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988 
1989 
1990 	reg = MII_TG3_MISC_SHDW_WREN |
1991 	      MII_TG3_MISC_SHDW_APD_SEL |
1992 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993 	if (enable)
1994 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995 
1996 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998 
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001 	u32 phy;
2002 
2003 	if (!tg3_flag(tp, 5705_PLUS) ||
2004 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005 		return;
2006 
2007 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008 		u32 ephy;
2009 
2010 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012 
2013 			tg3_writephy(tp, MII_TG3_FET_TEST,
2014 				     ephy | MII_TG3_FET_SHADOW_EN);
2015 			if (!tg3_readphy(tp, reg, &phy)) {
2016 				if (enable)
2017 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018 				else
2019 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020 				tg3_writephy(tp, reg, phy);
2021 			}
2022 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023 		}
2024 	} else {
2025 		int ret;
2026 
2027 		ret = tg3_phy_auxctl_read(tp,
2028 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029 		if (!ret) {
2030 			if (enable)
2031 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032 			else
2033 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034 			tg3_phy_auxctl_write(tp,
2035 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036 		}
2037 	}
2038 }
2039 
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042 	int ret;
2043 	u32 val;
2044 
2045 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046 		return;
2047 
2048 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049 	if (!ret)
2050 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053 
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056 	u32 otp, phy;
2057 
2058 	if (!tp->phy_otp)
2059 		return;
2060 
2061 	otp = tp->phy_otp;
2062 
2063 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064 		return;
2065 
2066 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069 
2070 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073 
2074 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077 
2078 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080 
2081 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083 
2084 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087 
2088 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090 
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093 	u32 val;
2094 
2095 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096 		return;
2097 
2098 	tp->setlpicnt = 0;
2099 
2100 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101 	    current_link_up == 1 &&
2102 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2103 	    (tp->link_config.active_speed == SPEED_100 ||
2104 	     tp->link_config.active_speed == SPEED_1000)) {
2105 		u32 eeectl;
2106 
2107 		if (tp->link_config.active_speed == SPEED_1000)
2108 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109 		else
2110 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111 
2112 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113 
2114 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115 				  TG3_CL45_D7_EEERES_STAT, &val);
2116 
2117 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119 			tp->setlpicnt = 2;
2120 	}
2121 
2122 	if (!tp->setlpicnt) {
2123 		if (current_link_up == 1 &&
2124 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127 		}
2128 
2129 		val = tr32(TG3_CPMU_EEE_MODE);
2130 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131 	}
2132 }
2133 
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136 	u32 val;
2137 
2138 	if (tp->link_config.active_speed == SPEED_1000 &&
2139 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141 	     tg3_flag(tp, 57765_CLASS)) &&
2142 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143 		val = MII_TG3_DSP_TAP26_ALNOKO |
2144 		      MII_TG3_DSP_TAP26_RMRXSTO;
2145 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147 	}
2148 
2149 	val = tr32(TG3_CPMU_EEE_MODE);
2150 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152 
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155 	int limit = 100;
2156 
2157 	while (limit--) {
2158 		u32 tmp32;
2159 
2160 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161 			if ((tmp32 & 0x1000) == 0)
2162 				break;
2163 		}
2164 	}
2165 	if (limit < 0)
2166 		return -EBUSY;
2167 
2168 	return 0;
2169 }
2170 
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173 	static const u32 test_pat[4][6] = {
2174 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178 	};
2179 	int chan;
2180 
2181 	for (chan = 0; chan < 4; chan++) {
2182 		int i;
2183 
2184 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185 			     (chan * 0x2000) | 0x0200);
2186 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187 
2188 		for (i = 0; i < 6; i++)
2189 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190 				     test_pat[chan][i]);
2191 
2192 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193 		if (tg3_wait_macro_done(tp)) {
2194 			*resetp = 1;
2195 			return -EBUSY;
2196 		}
2197 
2198 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199 			     (chan * 0x2000) | 0x0200);
2200 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201 		if (tg3_wait_macro_done(tp)) {
2202 			*resetp = 1;
2203 			return -EBUSY;
2204 		}
2205 
2206 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207 		if (tg3_wait_macro_done(tp)) {
2208 			*resetp = 1;
2209 			return -EBUSY;
2210 		}
2211 
2212 		for (i = 0; i < 6; i += 2) {
2213 			u32 low, high;
2214 
2215 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217 			    tg3_wait_macro_done(tp)) {
2218 				*resetp = 1;
2219 				return -EBUSY;
2220 			}
2221 			low &= 0x7fff;
2222 			high &= 0x000f;
2223 			if (low != test_pat[chan][i] ||
2224 			    high != test_pat[chan][i+1]) {
2225 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228 
2229 				return -EBUSY;
2230 			}
2231 		}
2232 	}
2233 
2234 	return 0;
2235 }
2236 
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239 	int chan;
2240 
2241 	for (chan = 0; chan < 4; chan++) {
2242 		int i;
2243 
2244 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245 			     (chan * 0x2000) | 0x0200);
2246 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247 		for (i = 0; i < 6; i++)
2248 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250 		if (tg3_wait_macro_done(tp))
2251 			return -EBUSY;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259 	u32 reg32, phy9_orig;
2260 	int retries, do_phy_reset, err;
2261 
2262 	retries = 10;
2263 	do_phy_reset = 1;
2264 	do {
2265 		if (do_phy_reset) {
2266 			err = tg3_bmcr_reset(tp);
2267 			if (err)
2268 				return err;
2269 			do_phy_reset = 0;
2270 		}
2271 
2272 		/* Disable transmitter and interrupt.  */
2273 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274 			continue;
2275 
2276 		reg32 |= 0x3000;
2277 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278 
2279 		/* Set full-duplex, 1000 mbps.  */
2280 		tg3_writephy(tp, MII_BMCR,
2281 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2282 
2283 		/* Set to master mode.  */
2284 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285 			continue;
2286 
2287 		tg3_writephy(tp, MII_CTRL1000,
2288 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289 
2290 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291 		if (err)
2292 			return err;
2293 
2294 		/* Block the PHY control access.  */
2295 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2296 
2297 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298 		if (!err)
2299 			break;
2300 	} while (--retries);
2301 
2302 	err = tg3_phy_reset_chanpat(tp);
2303 	if (err)
2304 		return err;
2305 
2306 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2307 
2308 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310 
2311 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312 
2313 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314 
2315 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316 		reg32 &= ~0x3000;
2317 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318 	} else if (!err)
2319 		err = -EBUSY;
2320 
2321 	return err;
2322 }
2323 
2324 /* This will reset the tigon3 PHY if there is no valid
2325  * link unless the FORCE argument is non-zero.
2326  */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329 	u32 val, cpmuctrl;
2330 	int err;
2331 
2332 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333 		val = tr32(GRC_MISC_CFG);
2334 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335 		udelay(40);
2336 	}
2337 	err  = tg3_readphy(tp, MII_BMSR, &val);
2338 	err |= tg3_readphy(tp, MII_BMSR, &val);
2339 	if (err != 0)
2340 		return -EBUSY;
2341 
2342 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343 		netif_carrier_off(tp->dev);
2344 		tg3_link_report(tp);
2345 	}
2346 
2347 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350 		err = tg3_phy_reset_5703_4_5(tp);
2351 		if (err)
2352 			return err;
2353 		goto out;
2354 	}
2355 
2356 	cpmuctrl = 0;
2357 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2360 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361 			tw32(TG3_CPMU_CTRL,
2362 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363 	}
2364 
2365 	err = tg3_bmcr_reset(tp);
2366 	if (err)
2367 		return err;
2368 
2369 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372 
2373 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2374 	}
2375 
2376 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2381 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382 			udelay(40);
2383 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384 		}
2385 	}
2386 
2387 	if (tg3_flag(tp, 5717_PLUS) &&
2388 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389 		return 0;
2390 
2391 	tg3_phy_apply_otp(tp);
2392 
2393 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394 		tg3_phy_toggle_apd(tp, true);
2395 	else
2396 		tg3_phy_toggle_apd(tp, false);
2397 
2398 out:
2399 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2403 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404 	}
2405 
2406 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409 	}
2410 
2411 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2414 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2415 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417 		}
2418 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423 				tg3_writephy(tp, MII_TG3_TEST1,
2424 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2425 			} else
2426 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427 
2428 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429 		}
2430 	}
2431 
2432 	/* Set Extended packet length bit (bit 14) on all chips that */
2433 	/* support jumbo frames */
2434 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435 		/* Cannot do read-modify-write on 5401 */
2436 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438 		/* Set bit 14 with read-modify-write to preserve other bits */
2439 		err = tg3_phy_auxctl_read(tp,
2440 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441 		if (!err)
2442 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444 	}
2445 
2446 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2447 	 * jumbo frames transmission.
2448 	 */
2449 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453 	}
2454 
2455 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456 		/* adjust output voltage */
2457 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458 	}
2459 
2460 	tg3_phy_toggle_automdix(tp, 1);
2461 	tg3_phy_set_wirespeed(tp);
2462 	return 0;
2463 }
2464 
2465 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2467 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2468 					  TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2474 
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2480 
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483 	u32 status, shift;
2484 
2485 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488 	else
2489 		status = tr32(TG3_CPMU_DRV_STATUS);
2490 
2491 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2493 	status |= (newstat << shift);
2494 
2495 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498 	else
2499 		tw32(TG3_CPMU_DRV_STATUS, status);
2500 
2501 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503 
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506 	if (!tg3_flag(tp, IS_NIC))
2507 		return 0;
2508 
2509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513 			return -EIO;
2514 
2515 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516 
2517 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2519 
2520 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521 	} else {
2522 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2524 	}
2525 
2526 	return 0;
2527 }
2528 
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531 	u32 grc_local_ctrl;
2532 
2533 	if (!tg3_flag(tp, IS_NIC) ||
2534 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536 		return;
2537 
2538 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539 
2540 	tw32_wait_f(GRC_LOCAL_CTRL,
2541 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2543 
2544 	tw32_wait_f(GRC_LOCAL_CTRL,
2545 		    grc_local_ctrl,
2546 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2547 
2548 	tw32_wait_f(GRC_LOCAL_CTRL,
2549 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552 
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555 	if (!tg3_flag(tp, IS_NIC))
2556 		return;
2557 
2558 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561 			    (GRC_LCLCTRL_GPIO_OE0 |
2562 			     GRC_LCLCTRL_GPIO_OE1 |
2563 			     GRC_LCLCTRL_GPIO_OE2 |
2564 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2565 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2566 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2570 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571 				     GRC_LCLCTRL_GPIO_OE1 |
2572 				     GRC_LCLCTRL_GPIO_OE2 |
2573 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2574 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2575 				     tp->grc_local_ctrl;
2576 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2578 
2579 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2582 
2583 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2586 	} else {
2587 		u32 no_gpio2;
2588 		u32 grc_local_ctrl = 0;
2589 
2590 		/* Workaround to prevent overdrawing Amps. */
2591 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594 				    grc_local_ctrl,
2595 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 		}
2597 
2598 		/* On 5753 and variants, GPIO2 cannot be used. */
2599 		no_gpio2 = tp->nic_sram_data_cfg &
2600 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2601 
2602 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603 				  GRC_LCLCTRL_GPIO_OE1 |
2604 				  GRC_LCLCTRL_GPIO_OE2 |
2605 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2606 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2607 		if (no_gpio2) {
2608 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2610 		}
2611 		tw32_wait_f(GRC_LOCAL_CTRL,
2612 			    tp->grc_local_ctrl | grc_local_ctrl,
2613 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2614 
2615 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616 
2617 		tw32_wait_f(GRC_LOCAL_CTRL,
2618 			    tp->grc_local_ctrl | grc_local_ctrl,
2619 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2620 
2621 		if (!no_gpio2) {
2622 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623 			tw32_wait_f(GRC_LOCAL_CTRL,
2624 				    tp->grc_local_ctrl | grc_local_ctrl,
2625 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 		}
2627 	}
2628 }
2629 
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632 	u32 msg = 0;
2633 
2634 	/* Serialize power state transitions */
2635 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636 		return;
2637 
2638 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639 		msg = TG3_GPIO_MSG_NEED_VAUX;
2640 
2641 	msg = tg3_set_function_status(tp, msg);
2642 
2643 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644 		goto done;
2645 
2646 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647 		tg3_pwrsrc_switch_to_vaux(tp);
2648 	else
2649 		tg3_pwrsrc_die_with_vmain(tp);
2650 
2651 done:
2652 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654 
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657 	bool need_vaux = false;
2658 
2659 	/* The GPIOs do something completely different on 57765. */
2660 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661 		return;
2662 
2663 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666 		tg3_frob_aux_power_5717(tp, include_wol ?
2667 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668 		return;
2669 	}
2670 
2671 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672 		struct net_device *dev_peer;
2673 
2674 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2675 
2676 		/* remove_one() may have been run on the peer. */
2677 		if (dev_peer) {
2678 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2679 
2680 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2681 				return;
2682 
2683 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684 			    tg3_flag(tp_peer, ENABLE_ASF))
2685 				need_vaux = true;
2686 		}
2687 	}
2688 
2689 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690 	    tg3_flag(tp, ENABLE_ASF))
2691 		need_vaux = true;
2692 
2693 	if (need_vaux)
2694 		tg3_pwrsrc_switch_to_vaux(tp);
2695 	else
2696 		tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698 
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702 		return 1;
2703 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704 		if (speed != SPEED_10)
2705 			return 1;
2706 	} else if (speed == SPEED_10)
2707 		return 1;
2708 
2709 	return 0;
2710 }
2711 
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714 
2715 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2716 {
2717 	u32 val;
2718 
2719 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2720 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2721 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2722 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2723 
2724 			sg_dig_ctrl |=
2725 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2726 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2727 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2728 		}
2729 		return;
2730 	}
2731 
2732 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2733 		tg3_bmcr_reset(tp);
2734 		val = tr32(GRC_MISC_CFG);
2735 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2736 		udelay(40);
2737 		return;
2738 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2739 		u32 phytest;
2740 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2741 			u32 phy;
2742 
2743 			tg3_writephy(tp, MII_ADVERTISE, 0);
2744 			tg3_writephy(tp, MII_BMCR,
2745 				     BMCR_ANENABLE | BMCR_ANRESTART);
2746 
2747 			tg3_writephy(tp, MII_TG3_FET_TEST,
2748 				     phytest | MII_TG3_FET_SHADOW_EN);
2749 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2750 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2751 				tg3_writephy(tp,
2752 					     MII_TG3_FET_SHDW_AUXMODE4,
2753 					     phy);
2754 			}
2755 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2756 		}
2757 		return;
2758 	} else if (do_low_power) {
2759 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2761 
2762 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2763 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2764 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2765 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2766 	}
2767 
2768 	/* The PHY should not be powered down on some chips because
2769 	 * of bugs.
2770 	 */
2771 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2772 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2773 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2774 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2775 		return;
2776 
2777 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2778 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2779 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2780 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2781 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2782 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2783 	}
2784 
2785 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2786 }
2787 
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791 	if (tg3_flag(tp, NVRAM)) {
2792 		int i;
2793 
2794 		if (tp->nvram_lock_cnt == 0) {
2795 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796 			for (i = 0; i < 8000; i++) {
2797 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798 					break;
2799 				udelay(20);
2800 			}
2801 			if (i == 8000) {
2802 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803 				return -ENODEV;
2804 			}
2805 		}
2806 		tp->nvram_lock_cnt++;
2807 	}
2808 	return 0;
2809 }
2810 
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814 	if (tg3_flag(tp, NVRAM)) {
2815 		if (tp->nvram_lock_cnt > 0)
2816 			tp->nvram_lock_cnt--;
2817 		if (tp->nvram_lock_cnt == 0)
2818 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819 	}
2820 }
2821 
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826 		u32 nvaccess = tr32(NVRAM_ACCESS);
2827 
2828 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829 	}
2830 }
2831 
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836 		u32 nvaccess = tr32(NVRAM_ACCESS);
2837 
2838 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839 	}
2840 }
2841 
2842 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2843 					u32 offset, u32 *val)
2844 {
2845 	u32 tmp;
2846 	int i;
2847 
2848 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2849 		return -EINVAL;
2850 
2851 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2852 					EEPROM_ADDR_DEVID_MASK |
2853 					EEPROM_ADDR_READ);
2854 	tw32(GRC_EEPROM_ADDR,
2855 	     tmp |
2856 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2857 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2858 	      EEPROM_ADDR_ADDR_MASK) |
2859 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2860 
2861 	for (i = 0; i < 1000; i++) {
2862 		tmp = tr32(GRC_EEPROM_ADDR);
2863 
2864 		if (tmp & EEPROM_ADDR_COMPLETE)
2865 			break;
2866 		msleep(1);
2867 	}
2868 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2869 		return -EBUSY;
2870 
2871 	tmp = tr32(GRC_EEPROM_DATA);
2872 
2873 	/*
2874 	 * The data will always be opposite the native endian
2875 	 * format.  Perform a blind byteswap to compensate.
2876 	 */
2877 	*val = swab32(tmp);
2878 
2879 	return 0;
2880 }
2881 
2882 #define NVRAM_CMD_TIMEOUT 10000
2883 
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886 	int i;
2887 
2888 	tw32(NVRAM_CMD, nvram_cmd);
2889 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890 		udelay(10);
2891 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892 			udelay(10);
2893 			break;
2894 		}
2895 	}
2896 
2897 	if (i == NVRAM_CMD_TIMEOUT)
2898 		return -EBUSY;
2899 
2900 	return 0;
2901 }
2902 
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905 	if (tg3_flag(tp, NVRAM) &&
2906 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2907 	    tg3_flag(tp, FLASH) &&
2908 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2910 
2911 		addr = ((addr / tp->nvram_pagesize) <<
2912 			ATMEL_AT45DB0X1B_PAGE_POS) +
2913 		       (addr % tp->nvram_pagesize);
2914 
2915 	return addr;
2916 }
2917 
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920 	if (tg3_flag(tp, NVRAM) &&
2921 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2922 	    tg3_flag(tp, FLASH) &&
2923 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2925 
2926 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927 			tp->nvram_pagesize) +
2928 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929 
2930 	return addr;
2931 }
2932 
2933 /* NOTE: Data read in from NVRAM is byteswapped according to
2934  * the byteswapping settings for all other register accesses.
2935  * tg3 devices are BE devices, so on a BE machine, the data
2936  * returned will be exactly as it is seen in NVRAM.  On a LE
2937  * machine, the 32-bit value will be byteswapped.
2938  */
2939 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2940 {
2941 	int ret;
2942 
2943 	if (!tg3_flag(tp, NVRAM))
2944 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2945 
2946 	offset = tg3_nvram_phys_addr(tp, offset);
2947 
2948 	if (offset > NVRAM_ADDR_MSK)
2949 		return -EINVAL;
2950 
2951 	ret = tg3_nvram_lock(tp);
2952 	if (ret)
2953 		return ret;
2954 
2955 	tg3_enable_nvram_access(tp);
2956 
2957 	tw32(NVRAM_ADDR, offset);
2958 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2959 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2960 
2961 	if (ret == 0)
2962 		*val = tr32(NVRAM_RDDATA);
2963 
2964 	tg3_disable_nvram_access(tp);
2965 
2966 	tg3_nvram_unlock(tp);
2967 
2968 	return ret;
2969 }
2970 
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974 	u32 v;
2975 	int res = tg3_nvram_read(tp, offset, &v);
2976 	if (!res)
2977 		*val = cpu_to_be32(v);
2978 	return res;
2979 }
2980 
2981 #define RX_CPU_SCRATCH_BASE	0x30000
2982 #define RX_CPU_SCRATCH_SIZE	0x04000
2983 #define TX_CPU_SCRATCH_BASE	0x34000
2984 #define TX_CPU_SCRATCH_SIZE	0x04000
2985 
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989 	int i;
2990 
2991 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992 
2993 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995 
2996 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997 		return 0;
2998 	}
2999 	if (offset == RX_CPU_BASE) {
3000 		for (i = 0; i < 10000; i++) {
3001 			tw32(offset + CPU_STATE, 0xffffffff);
3002 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3003 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004 				break;
3005 		}
3006 
3007 		tw32(offset + CPU_STATE, 0xffffffff);
3008 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3009 		udelay(10);
3010 	} else {
3011 		for (i = 0; i < 10000; i++) {
3012 			tw32(offset + CPU_STATE, 0xffffffff);
3013 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3014 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015 				break;
3016 		}
3017 	}
3018 
3019 	if (i >= 10000) {
3020 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022 		return -ENODEV;
3023 	}
3024 
3025 	/* Clear firmware's nvram arbitration. */
3026 	if (tg3_flag(tp, NVRAM))
3027 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028 	return 0;
3029 }
3030 
3031 struct fw_info {
3032 	unsigned int fw_base;
3033 	unsigned int fw_len;
3034 	const __be32 *fw_data;
3035 };
3036 
3037 /* tp->lock is held. */
3038 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3039 				 u32 cpu_scratch_base, int cpu_scratch_size,
3040 				 struct fw_info *info)
3041 {
3042 	int err, lock_err, i;
3043 	void (*write_op)(struct tg3 *, u32, u32);
3044 
3045 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3046 		netdev_err(tp->dev,
3047 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3048 			   __func__);
3049 		return -EINVAL;
3050 	}
3051 
3052 	if (tg3_flag(tp, 5705_PLUS))
3053 		write_op = tg3_write_mem;
3054 	else
3055 		write_op = tg3_write_indirect_reg32;
3056 
3057 	/* It is possible that bootcode is still loading at this point.
3058 	 * Get the nvram lock first before halting the cpu.
3059 	 */
3060 	lock_err = tg3_nvram_lock(tp);
3061 	err = tg3_halt_cpu(tp, cpu_base);
3062 	if (!lock_err)
3063 		tg3_nvram_unlock(tp);
3064 	if (err)
3065 		goto out;
3066 
3067 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3068 		write_op(tp, cpu_scratch_base + i, 0);
3069 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3070 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3071 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3072 		write_op(tp, (cpu_scratch_base +
3073 			      (info->fw_base & 0xffff) +
3074 			      (i * sizeof(u32))),
3075 			      be32_to_cpu(info->fw_data[i]));
3076 
3077 	err = 0;
3078 
3079 out:
3080 	return err;
3081 }
3082 
3083 /* tp->lock is held. */
3084 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3085 {
3086 	struct fw_info info;
3087 	const __be32 *fw_data;
3088 	int err, i;
3089 
3090 	fw_data = (void *)tp->fw->data;
3091 
3092 	/* Firmware blob starts with version numbers, followed by
3093 	   start address and length. We are setting complete length.
3094 	   length = end_address_of_bss - start_address_of_text.
3095 	   Remainder is the blob to be loaded contiguously
3096 	   from start address. */
3097 
3098 	info.fw_base = be32_to_cpu(fw_data[1]);
3099 	info.fw_len = tp->fw->size - 12;
3100 	info.fw_data = &fw_data[3];
3101 
3102 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3103 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3104 				    &info);
3105 	if (err)
3106 		return err;
3107 
3108 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3109 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3110 				    &info);
3111 	if (err)
3112 		return err;
3113 
3114 	/* Now startup only the RX cpu. */
3115 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3116 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3117 
3118 	for (i = 0; i < 5; i++) {
3119 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3120 			break;
3121 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3122 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3123 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3124 		udelay(1000);
3125 	}
3126 	if (i >= 5) {
3127 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3128 			   "should be %08x\n", __func__,
3129 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3130 		return -ENODEV;
3131 	}
3132 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3134 
3135 	return 0;
3136 }
3137 
3138 /* tp->lock is held. */
3139 static int tg3_load_tso_firmware(struct tg3 *tp)
3140 {
3141 	struct fw_info info;
3142 	const __be32 *fw_data;
3143 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3144 	int err, i;
3145 
3146 	if (tg3_flag(tp, HW_TSO_1) ||
3147 	    tg3_flag(tp, HW_TSO_2) ||
3148 	    tg3_flag(tp, HW_TSO_3))
3149 		return 0;
3150 
3151 	fw_data = (void *)tp->fw->data;
3152 
3153 	/* Firmware blob starts with version numbers, followed by
3154 	   start address and length. We are setting complete length.
3155 	   length = end_address_of_bss - start_address_of_text.
3156 	   Remainder is the blob to be loaded contiguously
3157 	   from start address. */
3158 
3159 	info.fw_base = be32_to_cpu(fw_data[1]);
3160 	cpu_scratch_size = tp->fw_len;
3161 	info.fw_len = tp->fw->size - 12;
3162 	info.fw_data = &fw_data[3];
3163 
3164 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3165 		cpu_base = RX_CPU_BASE;
3166 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3167 	} else {
3168 		cpu_base = TX_CPU_BASE;
3169 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3170 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3171 	}
3172 
3173 	err = tg3_load_firmware_cpu(tp, cpu_base,
3174 				    cpu_scratch_base, cpu_scratch_size,
3175 				    &info);
3176 	if (err)
3177 		return err;
3178 
3179 	/* Now startup the cpu. */
3180 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3181 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3182 
3183 	for (i = 0; i < 5; i++) {
3184 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3185 			break;
3186 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3187 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3188 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3189 		udelay(1000);
3190 	}
3191 	if (i >= 5) {
3192 		netdev_err(tp->dev,
3193 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3194 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3195 		return -ENODEV;
3196 	}
3197 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3198 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3199 	return 0;
3200 }
3201 
3202 
3203 /* tp->lock is held. */
3204 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3205 {
3206 	u32 addr_high, addr_low;
3207 	int i;
3208 
3209 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3210 		     tp->dev->dev_addr[1]);
3211 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3212 		    (tp->dev->dev_addr[3] << 16) |
3213 		    (tp->dev->dev_addr[4] <<  8) |
3214 		    (tp->dev->dev_addr[5] <<  0));
3215 	for (i = 0; i < 4; i++) {
3216 		if (i == 1 && skip_mac_1)
3217 			continue;
3218 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3219 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3220 	}
3221 
3222 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3223 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3224 		for (i = 0; i < 12; i++) {
3225 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3226 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3227 		}
3228 	}
3229 
3230 	addr_high = (tp->dev->dev_addr[0] +
3231 		     tp->dev->dev_addr[1] +
3232 		     tp->dev->dev_addr[2] +
3233 		     tp->dev->dev_addr[3] +
3234 		     tp->dev->dev_addr[4] +
3235 		     tp->dev->dev_addr[5]) &
3236 		TX_BACKOFF_SEED_MASK;
3237 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3238 }
3239 
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242 	/*
3243 	 * Make sure register accesses (indirect or otherwise) will function
3244 	 * correctly.
3245 	 */
3246 	pci_write_config_dword(tp->pdev,
3247 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249 
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252 	int err;
3253 
3254 	tg3_enable_register_access(tp);
3255 
3256 	err = pci_set_power_state(tp->pdev, PCI_D0);
3257 	if (!err) {
3258 		/* Switch out of Vaux if it is a NIC */
3259 		tg3_pwrsrc_switch_to_vmain(tp);
3260 	} else {
3261 		netdev_err(tp->dev, "Transition to D0 failed\n");
3262 	}
3263 
3264 	return err;
3265 }
3266 
3267 static int tg3_power_down_prepare(struct tg3 *tp)
3268 {
3269 	u32 misc_host_ctrl;
3270 	bool device_should_wake, do_low_power;
3271 
3272 	tg3_enable_register_access(tp);
3273 
3274 	/* Restore the CLKREQ setting. */
3275 	if (tg3_flag(tp, CLKREQ_BUG)) {
3276 		u16 lnkctl;
3277 
3278 		pci_read_config_word(tp->pdev,
3279 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3280 				     &lnkctl);
3281 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3282 		pci_write_config_word(tp->pdev,
3283 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3284 				      lnkctl);
3285 	}
3286 
3287 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3288 	tw32(TG3PCI_MISC_HOST_CTRL,
3289 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3290 
3291 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3292 			     tg3_flag(tp, WOL_ENABLE);
3293 
3294 	if (tg3_flag(tp, USE_PHYLIB)) {
3295 		do_low_power = false;
3296 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3297 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3298 			struct phy_device *phydev;
3299 			u32 phyid, advertising;
3300 
3301 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3302 
3303 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3304 
3305 			tp->link_config.orig_speed = phydev->speed;
3306 			tp->link_config.orig_duplex = phydev->duplex;
3307 			tp->link_config.orig_autoneg = phydev->autoneg;
3308 			tp->link_config.orig_advertising = phydev->advertising;
3309 
3310 			advertising = ADVERTISED_TP |
3311 				      ADVERTISED_Pause |
3312 				      ADVERTISED_Autoneg |
3313 				      ADVERTISED_10baseT_Half;
3314 
3315 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3316 				if (tg3_flag(tp, WOL_SPEED_100MB))
3317 					advertising |=
3318 						ADVERTISED_100baseT_Half |
3319 						ADVERTISED_100baseT_Full |
3320 						ADVERTISED_10baseT_Full;
3321 				else
3322 					advertising |= ADVERTISED_10baseT_Full;
3323 			}
3324 
3325 			phydev->advertising = advertising;
3326 
3327 			phy_start_aneg(phydev);
3328 
3329 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3330 			if (phyid != PHY_ID_BCMAC131) {
3331 				phyid &= PHY_BCM_OUI_MASK;
3332 				if (phyid == PHY_BCM_OUI_1 ||
3333 				    phyid == PHY_BCM_OUI_2 ||
3334 				    phyid == PHY_BCM_OUI_3)
3335 					do_low_power = true;
3336 			}
3337 		}
3338 	} else {
3339 		do_low_power = true;
3340 
3341 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3342 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3343 			tp->link_config.orig_speed = tp->link_config.speed;
3344 			tp->link_config.orig_duplex = tp->link_config.duplex;
3345 			tp->link_config.orig_autoneg = tp->link_config.autoneg;
3346 		}
3347 
3348 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3349 			tp->link_config.speed = SPEED_10;
3350 			tp->link_config.duplex = DUPLEX_HALF;
3351 			tp->link_config.autoneg = AUTONEG_ENABLE;
3352 			tg3_setup_phy(tp, 0);
3353 		}
3354 	}
3355 
3356 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3357 		u32 val;
3358 
3359 		val = tr32(GRC_VCPU_EXT_CTRL);
3360 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3361 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3362 		int i;
3363 		u32 val;
3364 
3365 		for (i = 0; i < 200; i++) {
3366 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3367 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3368 				break;
3369 			msleep(1);
3370 		}
3371 	}
3372 	if (tg3_flag(tp, WOL_CAP))
3373 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3374 						     WOL_DRV_STATE_SHUTDOWN |
3375 						     WOL_DRV_WOL |
3376 						     WOL_SET_MAGIC_PKT);
3377 
3378 	if (device_should_wake) {
3379 		u32 mac_mode;
3380 
3381 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3382 			if (do_low_power &&
3383 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3384 				tg3_phy_auxctl_write(tp,
3385 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3386 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3387 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3388 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3389 				udelay(40);
3390 			}
3391 
3392 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3393 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3394 			else
3395 				mac_mode = MAC_MODE_PORT_MODE_MII;
3396 
3397 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3398 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3399 			    ASIC_REV_5700) {
3400 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3401 					     SPEED_100 : SPEED_10;
3402 				if (tg3_5700_link_polarity(tp, speed))
3403 					mac_mode |= MAC_MODE_LINK_POLARITY;
3404 				else
3405 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3406 			}
3407 		} else {
3408 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3409 		}
3410 
3411 		if (!tg3_flag(tp, 5750_PLUS))
3412 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3413 
3414 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3415 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3416 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3417 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3418 
3419 		if (tg3_flag(tp, ENABLE_APE))
3420 			mac_mode |= MAC_MODE_APE_TX_EN |
3421 				    MAC_MODE_APE_RX_EN |
3422 				    MAC_MODE_TDE_ENABLE;
3423 
3424 		tw32_f(MAC_MODE, mac_mode);
3425 		udelay(100);
3426 
3427 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3428 		udelay(10);
3429 	}
3430 
3431 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3432 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3433 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3434 		u32 base_val;
3435 
3436 		base_val = tp->pci_clock_ctrl;
3437 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3438 			     CLOCK_CTRL_TXCLK_DISABLE);
3439 
3440 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3441 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3442 	} else if (tg3_flag(tp, 5780_CLASS) ||
3443 		   tg3_flag(tp, CPMU_PRESENT) ||
3444 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3445 		/* do nothing */
3446 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3447 		u32 newbits1, newbits2;
3448 
3449 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3452 				    CLOCK_CTRL_TXCLK_DISABLE |
3453 				    CLOCK_CTRL_ALTCLK);
3454 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3455 		} else if (tg3_flag(tp, 5705_PLUS)) {
3456 			newbits1 = CLOCK_CTRL_625_CORE;
3457 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3458 		} else {
3459 			newbits1 = CLOCK_CTRL_ALTCLK;
3460 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3461 		}
3462 
3463 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3464 			    40);
3465 
3466 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3467 			    40);
3468 
3469 		if (!tg3_flag(tp, 5705_PLUS)) {
3470 			u32 newbits3;
3471 
3472 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3475 					    CLOCK_CTRL_TXCLK_DISABLE |
3476 					    CLOCK_CTRL_44MHZ_CORE);
3477 			} else {
3478 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3479 			}
3480 
3481 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3482 				    tp->pci_clock_ctrl | newbits3, 40);
3483 		}
3484 	}
3485 
3486 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3487 		tg3_power_down_phy(tp, do_low_power);
3488 
3489 	tg3_frob_aux_power(tp, true);
3490 
3491 	/* Workaround for unstable PLL clock */
3492 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3493 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3494 		u32 val = tr32(0x7d00);
3495 
3496 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3497 		tw32(0x7d00, val);
3498 		if (!tg3_flag(tp, ENABLE_ASF)) {
3499 			int err;
3500 
3501 			err = tg3_nvram_lock(tp);
3502 			tg3_halt_cpu(tp, RX_CPU_BASE);
3503 			if (!err)
3504 				tg3_nvram_unlock(tp);
3505 		}
3506 	}
3507 
3508 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3509 
3510 	return 0;
3511 }
3512 
3513 static void tg3_power_down(struct tg3 *tp)
3514 {
3515 	tg3_power_down_prepare(tp);
3516 
3517 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3518 	pci_set_power_state(tp->pdev, PCI_D3hot);
3519 }
3520 
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3522 {
3523 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524 	case MII_TG3_AUX_STAT_10HALF:
3525 		*speed = SPEED_10;
3526 		*duplex = DUPLEX_HALF;
3527 		break;
3528 
3529 	case MII_TG3_AUX_STAT_10FULL:
3530 		*speed = SPEED_10;
3531 		*duplex = DUPLEX_FULL;
3532 		break;
3533 
3534 	case MII_TG3_AUX_STAT_100HALF:
3535 		*speed = SPEED_100;
3536 		*duplex = DUPLEX_HALF;
3537 		break;
3538 
3539 	case MII_TG3_AUX_STAT_100FULL:
3540 		*speed = SPEED_100;
3541 		*duplex = DUPLEX_FULL;
3542 		break;
3543 
3544 	case MII_TG3_AUX_STAT_1000HALF:
3545 		*speed = SPEED_1000;
3546 		*duplex = DUPLEX_HALF;
3547 		break;
3548 
3549 	case MII_TG3_AUX_STAT_1000FULL:
3550 		*speed = SPEED_1000;
3551 		*duplex = DUPLEX_FULL;
3552 		break;
3553 
3554 	default:
3555 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557 				 SPEED_10;
3558 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559 				  DUPLEX_HALF;
3560 			break;
3561 		}
3562 		*speed = SPEED_INVALID;
3563 		*duplex = DUPLEX_INVALID;
3564 		break;
3565 	}
3566 }
3567 
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570 	int err = 0;
3571 	u32 val, new_adv;
3572 
3573 	new_adv = ADVERTISE_CSMA;
3574 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575 	new_adv |= mii_advertise_flowctrl(flowctrl);
3576 
3577 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578 	if (err)
3579 		goto done;
3580 
3581 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583 
3584 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587 
3588 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589 		if (err)
3590 			goto done;
3591 	}
3592 
3593 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594 		goto done;
3595 
3596 	tw32(TG3_CPMU_EEE_MODE,
3597 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598 
3599 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600 	if (!err) {
3601 		u32 err2;
3602 
3603 		val = 0;
3604 		/* Advertise 100-BaseTX EEE ability */
3605 		if (advertise & ADVERTISED_100baseT_Full)
3606 			val |= MDIO_AN_EEE_ADV_100TX;
3607 		/* Advertise 1000-BaseT EEE ability */
3608 		if (advertise & ADVERTISED_1000baseT_Full)
3609 			val |= MDIO_AN_EEE_ADV_1000T;
3610 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611 		if (err)
3612 			val = 0;
3613 
3614 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615 		case ASIC_REV_5717:
3616 		case ASIC_REV_57765:
3617 		case ASIC_REV_57766:
3618 		case ASIC_REV_5719:
3619 			/* If we advertised any eee advertisements above... */
3620 			if (val)
3621 				val = MII_TG3_DSP_TAP26_ALNOKO |
3622 				      MII_TG3_DSP_TAP26_RMRXSTO |
3623 				      MII_TG3_DSP_TAP26_OPCSINPT;
3624 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625 			/* Fall through */
3626 		case ASIC_REV_5720:
3627 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629 						 MII_TG3_DSP_CH34TP2_HIBW01);
3630 		}
3631 
3632 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633 		if (!err)
3634 			err = err2;
3635 	}
3636 
3637 done:
3638 	return err;
3639 }
3640 
3641 static void tg3_phy_copper_begin(struct tg3 *tp)
3642 {
3643 	u32 new_adv;
3644 	int i;
3645 
3646 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3647 		new_adv = ADVERTISED_10baseT_Half |
3648 			  ADVERTISED_10baseT_Full;
3649 		if (tg3_flag(tp, WOL_SPEED_100MB))
3650 			new_adv |= ADVERTISED_100baseT_Half |
3651 				   ADVERTISED_100baseT_Full;
3652 
3653 		tg3_phy_autoneg_cfg(tp, new_adv,
3654 				    FLOW_CTRL_TX | FLOW_CTRL_RX);
3655 	} else if (tp->link_config.speed == SPEED_INVALID) {
3656 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3657 			tp->link_config.advertising &=
3658 				~(ADVERTISED_1000baseT_Half |
3659 				  ADVERTISED_1000baseT_Full);
3660 
3661 		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3662 				    tp->link_config.flowctrl);
3663 	} else {
3664 		/* Asking for a specific link mode. */
3665 		if (tp->link_config.speed == SPEED_1000) {
3666 			if (tp->link_config.duplex == DUPLEX_FULL)
3667 				new_adv = ADVERTISED_1000baseT_Full;
3668 			else
3669 				new_adv = ADVERTISED_1000baseT_Half;
3670 		} else if (tp->link_config.speed == SPEED_100) {
3671 			if (tp->link_config.duplex == DUPLEX_FULL)
3672 				new_adv = ADVERTISED_100baseT_Full;
3673 			else
3674 				new_adv = ADVERTISED_100baseT_Half;
3675 		} else {
3676 			if (tp->link_config.duplex == DUPLEX_FULL)
3677 				new_adv = ADVERTISED_10baseT_Full;
3678 			else
3679 				new_adv = ADVERTISED_10baseT_Half;
3680 		}
3681 
3682 		tg3_phy_autoneg_cfg(tp, new_adv,
3683 				    tp->link_config.flowctrl);
3684 	}
3685 
3686 	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3687 	    tp->link_config.speed != SPEED_INVALID) {
3688 		u32 bmcr, orig_bmcr;
3689 
3690 		tp->link_config.active_speed = tp->link_config.speed;
3691 		tp->link_config.active_duplex = tp->link_config.duplex;
3692 
3693 		bmcr = 0;
3694 		switch (tp->link_config.speed) {
3695 		default:
3696 		case SPEED_10:
3697 			break;
3698 
3699 		case SPEED_100:
3700 			bmcr |= BMCR_SPEED100;
3701 			break;
3702 
3703 		case SPEED_1000:
3704 			bmcr |= BMCR_SPEED1000;
3705 			break;
3706 		}
3707 
3708 		if (tp->link_config.duplex == DUPLEX_FULL)
3709 			bmcr |= BMCR_FULLDPLX;
3710 
3711 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3712 		    (bmcr != orig_bmcr)) {
3713 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3714 			for (i = 0; i < 1500; i++) {
3715 				u32 tmp;
3716 
3717 				udelay(10);
3718 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3719 				    tg3_readphy(tp, MII_BMSR, &tmp))
3720 					continue;
3721 				if (!(tmp & BMSR_LSTATUS)) {
3722 					udelay(40);
3723 					break;
3724 				}
3725 			}
3726 			tg3_writephy(tp, MII_BMCR, bmcr);
3727 			udelay(40);
3728 		}
3729 	} else {
3730 		tg3_writephy(tp, MII_BMCR,
3731 			     BMCR_ANENABLE | BMCR_ANRESTART);
3732 	}
3733 }
3734 
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737 	int err;
3738 
3739 	/* Turn off tap power management. */
3740 	/* Set Extended packet length bit */
3741 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742 
3743 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748 
3749 	udelay(40);
3750 
3751 	return err;
3752 }
3753 
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756 	u32 advmsk, tgtadv, advertising;
3757 
3758 	advertising = tp->link_config.advertising;
3759 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760 
3761 	advmsk = ADVERTISE_ALL;
3762 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765 	}
3766 
3767 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768 		return false;
3769 
3770 	if ((*lcladv & advmsk) != tgtadv)
3771 		return false;
3772 
3773 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774 		u32 tg3_ctrl;
3775 
3776 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777 
3778 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779 			return false;
3780 
3781 		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782 		if (tg3_ctrl != tgtadv)
3783 			return false;
3784 	}
3785 
3786 	return true;
3787 }
3788 
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791 	u32 lpeth = 0;
3792 
3793 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794 		u32 val;
3795 
3796 		if (tg3_readphy(tp, MII_STAT1000, &val))
3797 			return false;
3798 
3799 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800 	}
3801 
3802 	if (tg3_readphy(tp, MII_LPA, rmtadv))
3803 		return false;
3804 
3805 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806 	tp->link_config.rmt_adv = lpeth;
3807 
3808 	return true;
3809 }
3810 
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813 	int current_link_up;
3814 	u32 bmsr, val;
3815 	u32 lcl_adv, rmt_adv;
3816 	u16 current_speed;
3817 	u8 current_duplex;
3818 	int i, err;
3819 
3820 	tw32(MAC_EVENT, 0);
3821 
3822 	tw32_f(MAC_STATUS,
3823 	     (MAC_STATUS_SYNC_CHANGED |
3824 	      MAC_STATUS_CFG_CHANGED |
3825 	      MAC_STATUS_MI_COMPLETION |
3826 	      MAC_STATUS_LNKSTATE_CHANGED));
3827 	udelay(40);
3828 
3829 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830 		tw32_f(MAC_MI_MODE,
3831 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832 		udelay(80);
3833 	}
3834 
3835 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836 
3837 	/* Some third-party PHYs need to be reset on link going
3838 	 * down.
3839 	 */
3840 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843 	    netif_carrier_ok(tp->dev)) {
3844 		tg3_readphy(tp, MII_BMSR, &bmsr);
3845 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846 		    !(bmsr & BMSR_LSTATUS))
3847 			force_reset = 1;
3848 	}
3849 	if (force_reset)
3850 		tg3_phy_reset(tp);
3851 
3852 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853 		tg3_readphy(tp, MII_BMSR, &bmsr);
3854 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855 		    !tg3_flag(tp, INIT_COMPLETE))
3856 			bmsr = 0;
3857 
3858 		if (!(bmsr & BMSR_LSTATUS)) {
3859 			err = tg3_init_5401phy_dsp(tp);
3860 			if (err)
3861 				return err;
3862 
3863 			tg3_readphy(tp, MII_BMSR, &bmsr);
3864 			for (i = 0; i < 1000; i++) {
3865 				udelay(10);
3866 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867 				    (bmsr & BMSR_LSTATUS)) {
3868 					udelay(40);
3869 					break;
3870 				}
3871 			}
3872 
3873 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874 			    TG3_PHY_REV_BCM5401_B0 &&
3875 			    !(bmsr & BMSR_LSTATUS) &&
3876 			    tp->link_config.active_speed == SPEED_1000) {
3877 				err = tg3_phy_reset(tp);
3878 				if (!err)
3879 					err = tg3_init_5401phy_dsp(tp);
3880 				if (err)
3881 					return err;
3882 			}
3883 		}
3884 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886 		/* 5701 {A0,B0} CRC bug workaround */
3887 		tg3_writephy(tp, 0x15, 0x0a75);
3888 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891 	}
3892 
3893 	/* Clear pending interrupts... */
3894 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896 
3897 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901 
3902 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907 		else
3908 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909 	}
3910 
3911 	current_link_up = 0;
3912 	current_speed = SPEED_INVALID;
3913 	current_duplex = DUPLEX_INVALID;
3914 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915 	tp->link_config.rmt_adv = 0;
3916 
3917 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918 		err = tg3_phy_auxctl_read(tp,
3919 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920 					  &val);
3921 		if (!err && !(val & (1 << 10))) {
3922 			tg3_phy_auxctl_write(tp,
3923 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924 					     val | (1 << 10));
3925 			goto relink;
3926 		}
3927 	}
3928 
3929 	bmsr = 0;
3930 	for (i = 0; i < 100; i++) {
3931 		tg3_readphy(tp, MII_BMSR, &bmsr);
3932 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933 		    (bmsr & BMSR_LSTATUS))
3934 			break;
3935 		udelay(40);
3936 	}
3937 
3938 	if (bmsr & BMSR_LSTATUS) {
3939 		u32 aux_stat, bmcr;
3940 
3941 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942 		for (i = 0; i < 2000; i++) {
3943 			udelay(10);
3944 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945 			    aux_stat)
3946 				break;
3947 		}
3948 
3949 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950 					     &current_speed,
3951 					     &current_duplex);
3952 
3953 		bmcr = 0;
3954 		for (i = 0; i < 200; i++) {
3955 			tg3_readphy(tp, MII_BMCR, &bmcr);
3956 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957 				continue;
3958 			if (bmcr && bmcr != 0x7fff)
3959 				break;
3960 			udelay(10);
3961 		}
3962 
3963 		lcl_adv = 0;
3964 		rmt_adv = 0;
3965 
3966 		tp->link_config.active_speed = current_speed;
3967 		tp->link_config.active_duplex = current_duplex;
3968 
3969 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970 			if ((bmcr & BMCR_ANENABLE) &&
3971 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973 				current_link_up = 1;
3974 		} else {
3975 			if (!(bmcr & BMCR_ANENABLE) &&
3976 			    tp->link_config.speed == current_speed &&
3977 			    tp->link_config.duplex == current_duplex &&
3978 			    tp->link_config.flowctrl ==
3979 			    tp->link_config.active_flowctrl) {
3980 				current_link_up = 1;
3981 			}
3982 		}
3983 
3984 		if (current_link_up == 1 &&
3985 		    tp->link_config.active_duplex == DUPLEX_FULL) {
3986 			u32 reg, bit;
3987 
3988 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989 				reg = MII_TG3_FET_GEN_STAT;
3990 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991 			} else {
3992 				reg = MII_TG3_EXT_STAT;
3993 				bit = MII_TG3_EXT_STAT_MDIX;
3994 			}
3995 
3996 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998 
3999 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000 		}
4001 	}
4002 
4003 relink:
4004 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005 		tg3_phy_copper_begin(tp);
4006 
4007 		tg3_readphy(tp, MII_BMSR, &bmsr);
4008 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010 			current_link_up = 1;
4011 	}
4012 
4013 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014 	if (current_link_up == 1) {
4015 		if (tp->link_config.active_speed == SPEED_100 ||
4016 		    tp->link_config.active_speed == SPEED_10)
4017 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018 		else
4019 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022 	else
4023 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024 
4025 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4027 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028 
4029 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030 		if (current_link_up == 1 &&
4031 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033 		else
4034 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035 	}
4036 
4037 	/* ??? Without this setting Netgear GA302T PHY does not
4038 	 * ??? send/receive packets...
4039 	 */
4040 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4044 		udelay(80);
4045 	}
4046 
4047 	tw32_f(MAC_MODE, tp->mac_mode);
4048 	udelay(40);
4049 
4050 	tg3_phy_eee_adjust(tp, current_link_up);
4051 
4052 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053 		/* Polled via timer. */
4054 		tw32_f(MAC_EVENT, 0);
4055 	} else {
4056 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057 	}
4058 	udelay(40);
4059 
4060 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061 	    current_link_up == 1 &&
4062 	    tp->link_config.active_speed == SPEED_1000 &&
4063 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064 		udelay(120);
4065 		tw32_f(MAC_STATUS,
4066 		     (MAC_STATUS_SYNC_CHANGED |
4067 		      MAC_STATUS_CFG_CHANGED));
4068 		udelay(40);
4069 		tg3_write_mem(tp,
4070 			      NIC_SRAM_FIRMWARE_MBOX,
4071 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072 	}
4073 
4074 	/* Prevent send BD corruption. */
4075 	if (tg3_flag(tp, CLKREQ_BUG)) {
4076 		u16 oldlnkctl, newlnkctl;
4077 
4078 		pci_read_config_word(tp->pdev,
4079 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080 				     &oldlnkctl);
4081 		if (tp->link_config.active_speed == SPEED_100 ||
4082 		    tp->link_config.active_speed == SPEED_10)
4083 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084 		else
4085 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086 		if (newlnkctl != oldlnkctl)
4087 			pci_write_config_word(tp->pdev,
4088 					      pci_pcie_cap(tp->pdev) +
4089 					      PCI_EXP_LNKCTL, newlnkctl);
4090 	}
4091 
4092 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4093 		if (current_link_up)
4094 			netif_carrier_on(tp->dev);
4095 		else
4096 			netif_carrier_off(tp->dev);
4097 		tg3_link_report(tp);
4098 	}
4099 
4100 	return 0;
4101 }
4102 
4103 struct tg3_fiber_aneginfo {
4104 	int state;
4105 #define ANEG_STATE_UNKNOWN		0
4106 #define ANEG_STATE_AN_ENABLE		1
4107 #define ANEG_STATE_RESTART_INIT		2
4108 #define ANEG_STATE_RESTART		3
4109 #define ANEG_STATE_DISABLE_LINK_OK	4
4110 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4111 #define ANEG_STATE_ABILITY_DETECT	6
4112 #define ANEG_STATE_ACK_DETECT_INIT	7
4113 #define ANEG_STATE_ACK_DETECT		8
4114 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4115 #define ANEG_STATE_COMPLETE_ACK		10
4116 #define ANEG_STATE_IDLE_DETECT_INIT	11
4117 #define ANEG_STATE_IDLE_DETECT		12
4118 #define ANEG_STATE_LINK_OK		13
4119 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4120 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4121 
4122 	u32 flags;
4123 #define MR_AN_ENABLE		0x00000001
4124 #define MR_RESTART_AN		0x00000002
4125 #define MR_AN_COMPLETE		0x00000004
4126 #define MR_PAGE_RX		0x00000008
4127 #define MR_NP_LOADED		0x00000010
4128 #define MR_TOGGLE_TX		0x00000020
4129 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4130 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4131 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4132 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4133 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4134 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4135 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4136 #define MR_TOGGLE_RX		0x00002000
4137 #define MR_NP_RX		0x00004000
4138 
4139 #define MR_LINK_OK		0x80000000
4140 
4141 	unsigned long link_time, cur_time;
4142 
4143 	u32 ability_match_cfg;
4144 	int ability_match_count;
4145 
4146 	char ability_match, idle_match, ack_match;
4147 
4148 	u32 txconfig, rxconfig;
4149 #define ANEG_CFG_NP		0x00000080
4150 #define ANEG_CFG_ACK		0x00000040
4151 #define ANEG_CFG_RF2		0x00000020
4152 #define ANEG_CFG_RF1		0x00000010
4153 #define ANEG_CFG_PS2		0x00000001
4154 #define ANEG_CFG_PS1		0x00008000
4155 #define ANEG_CFG_HD		0x00004000
4156 #define ANEG_CFG_FD		0x00002000
4157 #define ANEG_CFG_INVAL		0x00001f06
4158 
4159 };
4160 #define ANEG_OK		0
4161 #define ANEG_DONE	1
4162 #define ANEG_TIMER_ENAB	2
4163 #define ANEG_FAILED	-1
4164 
4165 #define ANEG_STATE_SETTLE_TIME	10000
4166 
4167 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4168 				   struct tg3_fiber_aneginfo *ap)
4169 {
4170 	u16 flowctrl;
4171 	unsigned long delta;
4172 	u32 rx_cfg_reg;
4173 	int ret;
4174 
4175 	if (ap->state == ANEG_STATE_UNKNOWN) {
4176 		ap->rxconfig = 0;
4177 		ap->link_time = 0;
4178 		ap->cur_time = 0;
4179 		ap->ability_match_cfg = 0;
4180 		ap->ability_match_count = 0;
4181 		ap->ability_match = 0;
4182 		ap->idle_match = 0;
4183 		ap->ack_match = 0;
4184 	}
4185 	ap->cur_time++;
4186 
4187 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4188 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4189 
4190 		if (rx_cfg_reg != ap->ability_match_cfg) {
4191 			ap->ability_match_cfg = rx_cfg_reg;
4192 			ap->ability_match = 0;
4193 			ap->ability_match_count = 0;
4194 		} else {
4195 			if (++ap->ability_match_count > 1) {
4196 				ap->ability_match = 1;
4197 				ap->ability_match_cfg = rx_cfg_reg;
4198 			}
4199 		}
4200 		if (rx_cfg_reg & ANEG_CFG_ACK)
4201 			ap->ack_match = 1;
4202 		else
4203 			ap->ack_match = 0;
4204 
4205 		ap->idle_match = 0;
4206 	} else {
4207 		ap->idle_match = 1;
4208 		ap->ability_match_cfg = 0;
4209 		ap->ability_match_count = 0;
4210 		ap->ability_match = 0;
4211 		ap->ack_match = 0;
4212 
4213 		rx_cfg_reg = 0;
4214 	}
4215 
4216 	ap->rxconfig = rx_cfg_reg;
4217 	ret = ANEG_OK;
4218 
4219 	switch (ap->state) {
4220 	case ANEG_STATE_UNKNOWN:
4221 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4222 			ap->state = ANEG_STATE_AN_ENABLE;
4223 
4224 		/* fallthru */
4225 	case ANEG_STATE_AN_ENABLE:
4226 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4227 		if (ap->flags & MR_AN_ENABLE) {
4228 			ap->link_time = 0;
4229 			ap->cur_time = 0;
4230 			ap->ability_match_cfg = 0;
4231 			ap->ability_match_count = 0;
4232 			ap->ability_match = 0;
4233 			ap->idle_match = 0;
4234 			ap->ack_match = 0;
4235 
4236 			ap->state = ANEG_STATE_RESTART_INIT;
4237 		} else {
4238 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4239 		}
4240 		break;
4241 
4242 	case ANEG_STATE_RESTART_INIT:
4243 		ap->link_time = ap->cur_time;
4244 		ap->flags &= ~(MR_NP_LOADED);
4245 		ap->txconfig = 0;
4246 		tw32(MAC_TX_AUTO_NEG, 0);
4247 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4248 		tw32_f(MAC_MODE, tp->mac_mode);
4249 		udelay(40);
4250 
4251 		ret = ANEG_TIMER_ENAB;
4252 		ap->state = ANEG_STATE_RESTART;
4253 
4254 		/* fallthru */
4255 	case ANEG_STATE_RESTART:
4256 		delta = ap->cur_time - ap->link_time;
4257 		if (delta > ANEG_STATE_SETTLE_TIME)
4258 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4259 		else
4260 			ret = ANEG_TIMER_ENAB;
4261 		break;
4262 
4263 	case ANEG_STATE_DISABLE_LINK_OK:
4264 		ret = ANEG_DONE;
4265 		break;
4266 
4267 	case ANEG_STATE_ABILITY_DETECT_INIT:
4268 		ap->flags &= ~(MR_TOGGLE_TX);
4269 		ap->txconfig = ANEG_CFG_FD;
4270 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4271 		if (flowctrl & ADVERTISE_1000XPAUSE)
4272 			ap->txconfig |= ANEG_CFG_PS1;
4273 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4274 			ap->txconfig |= ANEG_CFG_PS2;
4275 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4276 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4277 		tw32_f(MAC_MODE, tp->mac_mode);
4278 		udelay(40);
4279 
4280 		ap->state = ANEG_STATE_ABILITY_DETECT;
4281 		break;
4282 
4283 	case ANEG_STATE_ABILITY_DETECT:
4284 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4285 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4286 		break;
4287 
4288 	case ANEG_STATE_ACK_DETECT_INIT:
4289 		ap->txconfig |= ANEG_CFG_ACK;
4290 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4291 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4292 		tw32_f(MAC_MODE, tp->mac_mode);
4293 		udelay(40);
4294 
4295 		ap->state = ANEG_STATE_ACK_DETECT;
4296 
4297 		/* fallthru */
4298 	case ANEG_STATE_ACK_DETECT:
4299 		if (ap->ack_match != 0) {
4300 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4301 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4302 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4303 			} else {
4304 				ap->state = ANEG_STATE_AN_ENABLE;
4305 			}
4306 		} else if (ap->ability_match != 0 &&
4307 			   ap->rxconfig == 0) {
4308 			ap->state = ANEG_STATE_AN_ENABLE;
4309 		}
4310 		break;
4311 
4312 	case ANEG_STATE_COMPLETE_ACK_INIT:
4313 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4314 			ret = ANEG_FAILED;
4315 			break;
4316 		}
4317 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4318 			       MR_LP_ADV_HALF_DUPLEX |
4319 			       MR_LP_ADV_SYM_PAUSE |
4320 			       MR_LP_ADV_ASYM_PAUSE |
4321 			       MR_LP_ADV_REMOTE_FAULT1 |
4322 			       MR_LP_ADV_REMOTE_FAULT2 |
4323 			       MR_LP_ADV_NEXT_PAGE |
4324 			       MR_TOGGLE_RX |
4325 			       MR_NP_RX);
4326 		if (ap->rxconfig & ANEG_CFG_FD)
4327 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4328 		if (ap->rxconfig & ANEG_CFG_HD)
4329 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4330 		if (ap->rxconfig & ANEG_CFG_PS1)
4331 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4332 		if (ap->rxconfig & ANEG_CFG_PS2)
4333 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4334 		if (ap->rxconfig & ANEG_CFG_RF1)
4335 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4336 		if (ap->rxconfig & ANEG_CFG_RF2)
4337 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4338 		if (ap->rxconfig & ANEG_CFG_NP)
4339 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4340 
4341 		ap->link_time = ap->cur_time;
4342 
4343 		ap->flags ^= (MR_TOGGLE_TX);
4344 		if (ap->rxconfig & 0x0008)
4345 			ap->flags |= MR_TOGGLE_RX;
4346 		if (ap->rxconfig & ANEG_CFG_NP)
4347 			ap->flags |= MR_NP_RX;
4348 		ap->flags |= MR_PAGE_RX;
4349 
4350 		ap->state = ANEG_STATE_COMPLETE_ACK;
4351 		ret = ANEG_TIMER_ENAB;
4352 		break;
4353 
4354 	case ANEG_STATE_COMPLETE_ACK:
4355 		if (ap->ability_match != 0 &&
4356 		    ap->rxconfig == 0) {
4357 			ap->state = ANEG_STATE_AN_ENABLE;
4358 			break;
4359 		}
4360 		delta = ap->cur_time - ap->link_time;
4361 		if (delta > ANEG_STATE_SETTLE_TIME) {
4362 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4363 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4364 			} else {
4365 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4366 				    !(ap->flags & MR_NP_RX)) {
4367 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4368 				} else {
4369 					ret = ANEG_FAILED;
4370 				}
4371 			}
4372 		}
4373 		break;
4374 
4375 	case ANEG_STATE_IDLE_DETECT_INIT:
4376 		ap->link_time = ap->cur_time;
4377 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4378 		tw32_f(MAC_MODE, tp->mac_mode);
4379 		udelay(40);
4380 
4381 		ap->state = ANEG_STATE_IDLE_DETECT;
4382 		ret = ANEG_TIMER_ENAB;
4383 		break;
4384 
4385 	case ANEG_STATE_IDLE_DETECT:
4386 		if (ap->ability_match != 0 &&
4387 		    ap->rxconfig == 0) {
4388 			ap->state = ANEG_STATE_AN_ENABLE;
4389 			break;
4390 		}
4391 		delta = ap->cur_time - ap->link_time;
4392 		if (delta > ANEG_STATE_SETTLE_TIME) {
4393 			/* XXX another gem from the Broadcom driver :( */
4394 			ap->state = ANEG_STATE_LINK_OK;
4395 		}
4396 		break;
4397 
4398 	case ANEG_STATE_LINK_OK:
4399 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4400 		ret = ANEG_DONE;
4401 		break;
4402 
4403 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4404 		/* ??? unimplemented */
4405 		break;
4406 
4407 	case ANEG_STATE_NEXT_PAGE_WAIT:
4408 		/* ??? unimplemented */
4409 		break;
4410 
4411 	default:
4412 		ret = ANEG_FAILED;
4413 		break;
4414 	}
4415 
4416 	return ret;
4417 }
4418 
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421 	int res = 0;
4422 	struct tg3_fiber_aneginfo aninfo;
4423 	int status = ANEG_FAILED;
4424 	unsigned int tick;
4425 	u32 tmp;
4426 
4427 	tw32_f(MAC_TX_AUTO_NEG, 0);
4428 
4429 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431 	udelay(40);
4432 
4433 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434 	udelay(40);
4435 
4436 	memset(&aninfo, 0, sizeof(aninfo));
4437 	aninfo.flags |= MR_AN_ENABLE;
4438 	aninfo.state = ANEG_STATE_UNKNOWN;
4439 	aninfo.cur_time = 0;
4440 	tick = 0;
4441 	while (++tick < 195000) {
4442 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443 		if (status == ANEG_DONE || status == ANEG_FAILED)
4444 			break;
4445 
4446 		udelay(1);
4447 	}
4448 
4449 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450 	tw32_f(MAC_MODE, tp->mac_mode);
4451 	udelay(40);
4452 
4453 	*txflags = aninfo.txconfig;
4454 	*rxflags = aninfo.flags;
4455 
4456 	if (status == ANEG_DONE &&
4457 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458 			     MR_LP_ADV_FULL_DUPLEX)))
4459 		res = 1;
4460 
4461 	return res;
4462 }
4463 
4464 static void tg3_init_bcm8002(struct tg3 *tp)
4465 {
4466 	u32 mac_status = tr32(MAC_STATUS);
4467 	int i;
4468 
4469 	/* Reset when initting first time or we have a link. */
4470 	if (tg3_flag(tp, INIT_COMPLETE) &&
4471 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4472 		return;
4473 
4474 	/* Set PLL lock range. */
4475 	tg3_writephy(tp, 0x16, 0x8007);
4476 
4477 	/* SW reset */
4478 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4479 
4480 	/* Wait for reset to complete. */
4481 	/* XXX schedule_timeout() ... */
4482 	for (i = 0; i < 500; i++)
4483 		udelay(10);
4484 
4485 	/* Config mode; select PMA/Ch 1 regs. */
4486 	tg3_writephy(tp, 0x10, 0x8411);
4487 
4488 	/* Enable auto-lock and comdet, select txclk for tx. */
4489 	tg3_writephy(tp, 0x11, 0x0a10);
4490 
4491 	tg3_writephy(tp, 0x18, 0x00a0);
4492 	tg3_writephy(tp, 0x16, 0x41ff);
4493 
4494 	/* Assert and deassert POR. */
4495 	tg3_writephy(tp, 0x13, 0x0400);
4496 	udelay(40);
4497 	tg3_writephy(tp, 0x13, 0x0000);
4498 
4499 	tg3_writephy(tp, 0x11, 0x0a50);
4500 	udelay(40);
4501 	tg3_writephy(tp, 0x11, 0x0a10);
4502 
4503 	/* Wait for signal to stabilize */
4504 	/* XXX schedule_timeout() ... */
4505 	for (i = 0; i < 15000; i++)
4506 		udelay(10);
4507 
4508 	/* Deselect the channel register so we can read the PHYID
4509 	 * later.
4510 	 */
4511 	tg3_writephy(tp, 0x10, 0x8011);
4512 }
4513 
4514 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4515 {
4516 	u16 flowctrl;
4517 	u32 sg_dig_ctrl, sg_dig_status;
4518 	u32 serdes_cfg, expected_sg_dig_ctrl;
4519 	int workaround, port_a;
4520 	int current_link_up;
4521 
4522 	serdes_cfg = 0;
4523 	expected_sg_dig_ctrl = 0;
4524 	workaround = 0;
4525 	port_a = 1;
4526 	current_link_up = 0;
4527 
4528 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4529 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4530 		workaround = 1;
4531 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4532 			port_a = 0;
4533 
4534 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
4535 		/* preserve bits 20-23 for voltage regulator */
4536 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4537 	}
4538 
4539 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4540 
4541 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4542 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4543 			if (workaround) {
4544 				u32 val = serdes_cfg;
4545 
4546 				if (port_a)
4547 					val |= 0xc010000;
4548 				else
4549 					val |= 0x4010000;
4550 				tw32_f(MAC_SERDES_CFG, val);
4551 			}
4552 
4553 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4554 		}
4555 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4556 			tg3_setup_flow_control(tp, 0, 0);
4557 			current_link_up = 1;
4558 		}
4559 		goto out;
4560 	}
4561 
4562 	/* Want auto-negotiation.  */
4563 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4564 
4565 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4566 	if (flowctrl & ADVERTISE_1000XPAUSE)
4567 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4568 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4569 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4570 
4571 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4572 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4573 		    tp->serdes_counter &&
4574 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4575 				    MAC_STATUS_RCVD_CFG)) ==
4576 		     MAC_STATUS_PCS_SYNCED)) {
4577 			tp->serdes_counter--;
4578 			current_link_up = 1;
4579 			goto out;
4580 		}
4581 restart_autoneg:
4582 		if (workaround)
4583 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4584 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4585 		udelay(5);
4586 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4587 
4588 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4589 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4590 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4591 				 MAC_STATUS_SIGNAL_DET)) {
4592 		sg_dig_status = tr32(SG_DIG_STATUS);
4593 		mac_status = tr32(MAC_STATUS);
4594 
4595 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4596 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4597 			u32 local_adv = 0, remote_adv = 0;
4598 
4599 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4600 				local_adv |= ADVERTISE_1000XPAUSE;
4601 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4602 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4603 
4604 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4605 				remote_adv |= LPA_1000XPAUSE;
4606 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4607 				remote_adv |= LPA_1000XPAUSE_ASYM;
4608 
4609 			tp->link_config.rmt_adv =
4610 					   mii_adv_to_ethtool_adv_x(remote_adv);
4611 
4612 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4613 			current_link_up = 1;
4614 			tp->serdes_counter = 0;
4615 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4616 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4617 			if (tp->serdes_counter)
4618 				tp->serdes_counter--;
4619 			else {
4620 				if (workaround) {
4621 					u32 val = serdes_cfg;
4622 
4623 					if (port_a)
4624 						val |= 0xc010000;
4625 					else
4626 						val |= 0x4010000;
4627 
4628 					tw32_f(MAC_SERDES_CFG, val);
4629 				}
4630 
4631 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4632 				udelay(40);
4633 
4634 				/* Link parallel detection - link is up */
4635 				/* only if we have PCS_SYNC and not */
4636 				/* receiving config code words */
4637 				mac_status = tr32(MAC_STATUS);
4638 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4639 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4640 					tg3_setup_flow_control(tp, 0, 0);
4641 					current_link_up = 1;
4642 					tp->phy_flags |=
4643 						TG3_PHYFLG_PARALLEL_DETECT;
4644 					tp->serdes_counter =
4645 						SERDES_PARALLEL_DET_TIMEOUT;
4646 				} else
4647 					goto restart_autoneg;
4648 			}
4649 		}
4650 	} else {
4651 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4652 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653 	}
4654 
4655 out:
4656 	return current_link_up;
4657 }
4658 
4659 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4660 {
4661 	int current_link_up = 0;
4662 
4663 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4664 		goto out;
4665 
4666 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4667 		u32 txflags, rxflags;
4668 		int i;
4669 
4670 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4671 			u32 local_adv = 0, remote_adv = 0;
4672 
4673 			if (txflags & ANEG_CFG_PS1)
4674 				local_adv |= ADVERTISE_1000XPAUSE;
4675 			if (txflags & ANEG_CFG_PS2)
4676 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4677 
4678 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4679 				remote_adv |= LPA_1000XPAUSE;
4680 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4681 				remote_adv |= LPA_1000XPAUSE_ASYM;
4682 
4683 			tp->link_config.rmt_adv =
4684 					   mii_adv_to_ethtool_adv_x(remote_adv);
4685 
4686 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4687 
4688 			current_link_up = 1;
4689 		}
4690 		for (i = 0; i < 30; i++) {
4691 			udelay(20);
4692 			tw32_f(MAC_STATUS,
4693 			       (MAC_STATUS_SYNC_CHANGED |
4694 				MAC_STATUS_CFG_CHANGED));
4695 			udelay(40);
4696 			if ((tr32(MAC_STATUS) &
4697 			     (MAC_STATUS_SYNC_CHANGED |
4698 			      MAC_STATUS_CFG_CHANGED)) == 0)
4699 				break;
4700 		}
4701 
4702 		mac_status = tr32(MAC_STATUS);
4703 		if (current_link_up == 0 &&
4704 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4705 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4706 			current_link_up = 1;
4707 	} else {
4708 		tg3_setup_flow_control(tp, 0, 0);
4709 
4710 		/* Forcing 1000FD link up. */
4711 		current_link_up = 1;
4712 
4713 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4714 		udelay(40);
4715 
4716 		tw32_f(MAC_MODE, tp->mac_mode);
4717 		udelay(40);
4718 	}
4719 
4720 out:
4721 	return current_link_up;
4722 }
4723 
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726 	u32 orig_pause_cfg;
4727 	u16 orig_active_speed;
4728 	u8 orig_active_duplex;
4729 	u32 mac_status;
4730 	int current_link_up;
4731 	int i;
4732 
4733 	orig_pause_cfg = tp->link_config.active_flowctrl;
4734 	orig_active_speed = tp->link_config.active_speed;
4735 	orig_active_duplex = tp->link_config.active_duplex;
4736 
4737 	if (!tg3_flag(tp, HW_AUTONEG) &&
4738 	    netif_carrier_ok(tp->dev) &&
4739 	    tg3_flag(tp, INIT_COMPLETE)) {
4740 		mac_status = tr32(MAC_STATUS);
4741 		mac_status &= (MAC_STATUS_PCS_SYNCED |
4742 			       MAC_STATUS_SIGNAL_DET |
4743 			       MAC_STATUS_CFG_CHANGED |
4744 			       MAC_STATUS_RCVD_CFG);
4745 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746 				   MAC_STATUS_SIGNAL_DET)) {
4747 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748 					    MAC_STATUS_CFG_CHANGED));
4749 			return 0;
4750 		}
4751 	}
4752 
4753 	tw32_f(MAC_TX_AUTO_NEG, 0);
4754 
4755 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757 	tw32_f(MAC_MODE, tp->mac_mode);
4758 	udelay(40);
4759 
4760 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761 		tg3_init_bcm8002(tp);
4762 
4763 	/* Enable link change event even when serdes polling.  */
4764 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765 	udelay(40);
4766 
4767 	current_link_up = 0;
4768 	tp->link_config.rmt_adv = 0;
4769 	mac_status = tr32(MAC_STATUS);
4770 
4771 	if (tg3_flag(tp, HW_AUTONEG))
4772 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773 	else
4774 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775 
4776 	tp->napi[0].hw_status->status =
4777 		(SD_STATUS_UPDATED |
4778 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779 
4780 	for (i = 0; i < 100; i++) {
4781 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782 				    MAC_STATUS_CFG_CHANGED));
4783 		udelay(5);
4784 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785 					 MAC_STATUS_CFG_CHANGED |
4786 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787 			break;
4788 	}
4789 
4790 	mac_status = tr32(MAC_STATUS);
4791 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792 		current_link_up = 0;
4793 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794 		    tp->serdes_counter == 0) {
4795 			tw32_f(MAC_MODE, (tp->mac_mode |
4796 					  MAC_MODE_SEND_CONFIGS));
4797 			udelay(1);
4798 			tw32_f(MAC_MODE, tp->mac_mode);
4799 		}
4800 	}
4801 
4802 	if (current_link_up == 1) {
4803 		tp->link_config.active_speed = SPEED_1000;
4804 		tp->link_config.active_duplex = DUPLEX_FULL;
4805 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806 				    LED_CTRL_LNKLED_OVERRIDE |
4807 				    LED_CTRL_1000MBPS_ON));
4808 	} else {
4809 		tp->link_config.active_speed = SPEED_INVALID;
4810 		tp->link_config.active_duplex = DUPLEX_INVALID;
4811 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812 				    LED_CTRL_LNKLED_OVERRIDE |
4813 				    LED_CTRL_TRAFFIC_OVERRIDE));
4814 	}
4815 
4816 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4817 		if (current_link_up)
4818 			netif_carrier_on(tp->dev);
4819 		else
4820 			netif_carrier_off(tp->dev);
4821 		tg3_link_report(tp);
4822 	} else {
4823 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824 		if (orig_pause_cfg != now_pause_cfg ||
4825 		    orig_active_speed != tp->link_config.active_speed ||
4826 		    orig_active_duplex != tp->link_config.active_duplex)
4827 			tg3_link_report(tp);
4828 	}
4829 
4830 	return 0;
4831 }
4832 
4833 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4834 {
4835 	int current_link_up, err = 0;
4836 	u32 bmsr, bmcr;
4837 	u16 current_speed;
4838 	u8 current_duplex;
4839 	u32 local_adv, remote_adv;
4840 
4841 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842 	tw32_f(MAC_MODE, tp->mac_mode);
4843 	udelay(40);
4844 
4845 	tw32(MAC_EVENT, 0);
4846 
4847 	tw32_f(MAC_STATUS,
4848 	     (MAC_STATUS_SYNC_CHANGED |
4849 	      MAC_STATUS_CFG_CHANGED |
4850 	      MAC_STATUS_MI_COMPLETION |
4851 	      MAC_STATUS_LNKSTATE_CHANGED));
4852 	udelay(40);
4853 
4854 	if (force_reset)
4855 		tg3_phy_reset(tp);
4856 
4857 	current_link_up = 0;
4858 	current_speed = SPEED_INVALID;
4859 	current_duplex = DUPLEX_INVALID;
4860 	tp->link_config.rmt_adv = 0;
4861 
4862 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4863 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4864 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4865 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4866 			bmsr |= BMSR_LSTATUS;
4867 		else
4868 			bmsr &= ~BMSR_LSTATUS;
4869 	}
4870 
4871 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4872 
4873 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4874 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4875 		/* do nothing, just check for link up at the end */
4876 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4877 		u32 adv, newadv;
4878 
4879 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4880 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4881 				 ADVERTISE_1000XPAUSE |
4882 				 ADVERTISE_1000XPSE_ASYM |
4883 				 ADVERTISE_SLCT);
4884 
4885 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4886 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4887 
4888 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4889 			tg3_writephy(tp, MII_ADVERTISE, newadv);
4890 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4891 			tg3_writephy(tp, MII_BMCR, bmcr);
4892 
4893 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4895 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896 
4897 			return err;
4898 		}
4899 	} else {
4900 		u32 new_bmcr;
4901 
4902 		bmcr &= ~BMCR_SPEED1000;
4903 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4904 
4905 		if (tp->link_config.duplex == DUPLEX_FULL)
4906 			new_bmcr |= BMCR_FULLDPLX;
4907 
4908 		if (new_bmcr != bmcr) {
4909 			/* BMCR_SPEED1000 is a reserved bit that needs
4910 			 * to be set on write.
4911 			 */
4912 			new_bmcr |= BMCR_SPEED1000;
4913 
4914 			/* Force a linkdown */
4915 			if (netif_carrier_ok(tp->dev)) {
4916 				u32 adv;
4917 
4918 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4919 				adv &= ~(ADVERTISE_1000XFULL |
4920 					 ADVERTISE_1000XHALF |
4921 					 ADVERTISE_SLCT);
4922 				tg3_writephy(tp, MII_ADVERTISE, adv);
4923 				tg3_writephy(tp, MII_BMCR, bmcr |
4924 							   BMCR_ANRESTART |
4925 							   BMCR_ANENABLE);
4926 				udelay(10);
4927 				netif_carrier_off(tp->dev);
4928 			}
4929 			tg3_writephy(tp, MII_BMCR, new_bmcr);
4930 			bmcr = new_bmcr;
4931 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4932 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4933 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4934 			    ASIC_REV_5714) {
4935 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4936 					bmsr |= BMSR_LSTATUS;
4937 				else
4938 					bmsr &= ~BMSR_LSTATUS;
4939 			}
4940 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4941 		}
4942 	}
4943 
4944 	if (bmsr & BMSR_LSTATUS) {
4945 		current_speed = SPEED_1000;
4946 		current_link_up = 1;
4947 		if (bmcr & BMCR_FULLDPLX)
4948 			current_duplex = DUPLEX_FULL;
4949 		else
4950 			current_duplex = DUPLEX_HALF;
4951 
4952 		local_adv = 0;
4953 		remote_adv = 0;
4954 
4955 		if (bmcr & BMCR_ANENABLE) {
4956 			u32 common;
4957 
4958 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4959 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4960 			common = local_adv & remote_adv;
4961 			if (common & (ADVERTISE_1000XHALF |
4962 				      ADVERTISE_1000XFULL)) {
4963 				if (common & ADVERTISE_1000XFULL)
4964 					current_duplex = DUPLEX_FULL;
4965 				else
4966 					current_duplex = DUPLEX_HALF;
4967 
4968 				tp->link_config.rmt_adv =
4969 					   mii_adv_to_ethtool_adv_x(remote_adv);
4970 			} else if (!tg3_flag(tp, 5780_CLASS)) {
4971 				/* Link is up via parallel detect */
4972 			} else {
4973 				current_link_up = 0;
4974 			}
4975 		}
4976 	}
4977 
4978 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4979 		tg3_setup_flow_control(tp, local_adv, remote_adv);
4980 
4981 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4982 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4983 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4984 
4985 	tw32_f(MAC_MODE, tp->mac_mode);
4986 	udelay(40);
4987 
4988 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4989 
4990 	tp->link_config.active_speed = current_speed;
4991 	tp->link_config.active_duplex = current_duplex;
4992 
4993 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4994 		if (current_link_up)
4995 			netif_carrier_on(tp->dev);
4996 		else {
4997 			netif_carrier_off(tp->dev);
4998 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999 		}
5000 		tg3_link_report(tp);
5001 	}
5002 	return err;
5003 }
5004 
5005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5006 {
5007 	if (tp->serdes_counter) {
5008 		/* Give autoneg time to complete. */
5009 		tp->serdes_counter--;
5010 		return;
5011 	}
5012 
5013 	if (!netif_carrier_ok(tp->dev) &&
5014 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5015 		u32 bmcr;
5016 
5017 		tg3_readphy(tp, MII_BMCR, &bmcr);
5018 		if (bmcr & BMCR_ANENABLE) {
5019 			u32 phy1, phy2;
5020 
5021 			/* Select shadow register 0x1f */
5022 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5023 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5024 
5025 			/* Select expansion interrupt status register */
5026 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5027 					 MII_TG3_DSP_EXP1_INT_STAT);
5028 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5029 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5030 
5031 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5032 				/* We have signal detect and not receiving
5033 				 * config code words, link is up by parallel
5034 				 * detection.
5035 				 */
5036 
5037 				bmcr &= ~BMCR_ANENABLE;
5038 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5039 				tg3_writephy(tp, MII_BMCR, bmcr);
5040 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5041 			}
5042 		}
5043 	} else if (netif_carrier_ok(tp->dev) &&
5044 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5045 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5046 		u32 phy2;
5047 
5048 		/* Select expansion interrupt status register */
5049 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5050 				 MII_TG3_DSP_EXP1_INT_STAT);
5051 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5052 		if (phy2 & 0x20) {
5053 			u32 bmcr;
5054 
5055 			/* Config code words received, turn on autoneg. */
5056 			tg3_readphy(tp, MII_BMCR, &bmcr);
5057 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5058 
5059 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5060 
5061 		}
5062 	}
5063 }
5064 
5065 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5066 {
5067 	u32 val;
5068 	int err;
5069 
5070 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5071 		err = tg3_setup_fiber_phy(tp, force_reset);
5072 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5073 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5074 	else
5075 		err = tg3_setup_copper_phy(tp, force_reset);
5076 
5077 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5078 		u32 scale;
5079 
5080 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5081 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5082 			scale = 65;
5083 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5084 			scale = 6;
5085 		else
5086 			scale = 12;
5087 
5088 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5089 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5090 		tw32(GRC_MISC_CFG, val);
5091 	}
5092 
5093 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5094 	      (6 << TX_LENGTHS_IPG_SHIFT);
5095 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5096 		val |= tr32(MAC_TX_LENGTHS) &
5097 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5098 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5099 
5100 	if (tp->link_config.active_speed == SPEED_1000 &&
5101 	    tp->link_config.active_duplex == DUPLEX_HALF)
5102 		tw32(MAC_TX_LENGTHS, val |
5103 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5104 	else
5105 		tw32(MAC_TX_LENGTHS, val |
5106 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5107 
5108 	if (!tg3_flag(tp, 5705_PLUS)) {
5109 		if (netif_carrier_ok(tp->dev)) {
5110 			tw32(HOSTCC_STAT_COAL_TICKS,
5111 			     tp->coal.stats_block_coalesce_usecs);
5112 		} else {
5113 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5114 		}
5115 	}
5116 
5117 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5118 		val = tr32(PCIE_PWR_MGMT_THRESH);
5119 		if (!netif_carrier_ok(tp->dev))
5120 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5121 			      tp->pwrmgmt_thresh;
5122 		else
5123 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5124 		tw32(PCIE_PWR_MGMT_THRESH, val);
5125 	}
5126 
5127 	return err;
5128 }
5129 
5130 static inline int tg3_irq_sync(struct tg3 *tp)
5131 {
5132 	return tp->irq_sync;
5133 }
5134 
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137 	int i;
5138 
5139 	dst = (u32 *)((u8 *)dst + off);
5140 	for (i = 0; i < len; i += sizeof(u32))
5141 		*dst++ = tr32(off + i);
5142 }
5143 
5144 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5145 {
5146 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5147 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5148 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5149 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5150 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5151 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5152 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5153 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5154 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5155 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5156 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5157 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5158 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5159 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5160 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5161 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5162 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5163 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5164 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5165 
5166 	if (tg3_flag(tp, SUPPORT_MSIX))
5167 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5168 
5169 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5170 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5171 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5172 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5173 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5174 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5175 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5176 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5177 
5178 	if (!tg3_flag(tp, 5705_PLUS)) {
5179 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5180 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5181 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5182 	}
5183 
5184 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5185 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5186 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5187 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5188 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5189 
5190 	if (tg3_flag(tp, NVRAM))
5191 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5192 }
5193 
5194 static void tg3_dump_state(struct tg3 *tp)
5195 {
5196 	int i;
5197 	u32 *regs;
5198 
5199 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5200 	if (!regs) {
5201 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5202 		return;
5203 	}
5204 
5205 	if (tg3_flag(tp, PCI_EXPRESS)) {
5206 		/* Read up to but not including private PCI registers */
5207 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5208 			regs[i / sizeof(u32)] = tr32(i);
5209 	} else
5210 		tg3_dump_legacy_regs(tp, regs);
5211 
5212 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5213 		if (!regs[i + 0] && !regs[i + 1] &&
5214 		    !regs[i + 2] && !regs[i + 3])
5215 			continue;
5216 
5217 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5218 			   i * 4,
5219 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5220 	}
5221 
5222 	kfree(regs);
5223 
5224 	for (i = 0; i < tp->irq_cnt; i++) {
5225 		struct tg3_napi *tnapi = &tp->napi[i];
5226 
5227 		/* SW status block */
5228 		netdev_err(tp->dev,
5229 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5230 			   i,
5231 			   tnapi->hw_status->status,
5232 			   tnapi->hw_status->status_tag,
5233 			   tnapi->hw_status->rx_jumbo_consumer,
5234 			   tnapi->hw_status->rx_consumer,
5235 			   tnapi->hw_status->rx_mini_consumer,
5236 			   tnapi->hw_status->idx[0].rx_producer,
5237 			   tnapi->hw_status->idx[0].tx_consumer);
5238 
5239 		netdev_err(tp->dev,
5240 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5241 			   i,
5242 			   tnapi->last_tag, tnapi->last_irq_tag,
5243 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5244 			   tnapi->rx_rcb_ptr,
5245 			   tnapi->prodring.rx_std_prod_idx,
5246 			   tnapi->prodring.rx_std_cons_idx,
5247 			   tnapi->prodring.rx_jmb_prod_idx,
5248 			   tnapi->prodring.rx_jmb_cons_idx);
5249 	}
5250 }
5251 
5252 /* This is called whenever we suspect that the system chipset is re-
5253  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254  * is bogus tx completions. We try to recover by setting the
5255  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256  * in the workqueue.
5257  */
5258 static void tg3_tx_recover(struct tg3 *tp)
5259 {
5260 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5261 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5262 
5263 	netdev_warn(tp->dev,
5264 		    "The system may be re-ordering memory-mapped I/O "
5265 		    "cycles to the network device, attempting to recover. "
5266 		    "Please report the problem to the driver maintainer "
5267 		    "and include system chipset information.\n");
5268 
5269 	spin_lock(&tp->lock);
5270 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5271 	spin_unlock(&tp->lock);
5272 }
5273 
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276 	/* Tell compiler to fetch tx indices from memory. */
5277 	barrier();
5278 	return tnapi->tx_pending -
5279 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
5281 
5282 /* Tigon3 never reports partial packet sends.  So we do not
5283  * need special logic to handle SKBs that have not had all
5284  * of their frags sent yet, like SunGEM does.
5285  */
5286 static void tg3_tx(struct tg3_napi *tnapi)
5287 {
5288 	struct tg3 *tp = tnapi->tp;
5289 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5290 	u32 sw_idx = tnapi->tx_cons;
5291 	struct netdev_queue *txq;
5292 	int index = tnapi - tp->napi;
5293 	unsigned int pkts_compl = 0, bytes_compl = 0;
5294 
5295 	if (tg3_flag(tp, ENABLE_TSS))
5296 		index--;
5297 
5298 	txq = netdev_get_tx_queue(tp->dev, index);
5299 
5300 	while (sw_idx != hw_idx) {
5301 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5302 		struct sk_buff *skb = ri->skb;
5303 		int i, tx_bug = 0;
5304 
5305 		if (unlikely(skb == NULL)) {
5306 			tg3_tx_recover(tp);
5307 			return;
5308 		}
5309 
5310 		pci_unmap_single(tp->pdev,
5311 				 dma_unmap_addr(ri, mapping),
5312 				 skb_headlen(skb),
5313 				 PCI_DMA_TODEVICE);
5314 
5315 		ri->skb = NULL;
5316 
5317 		while (ri->fragmented) {
5318 			ri->fragmented = false;
5319 			sw_idx = NEXT_TX(sw_idx);
5320 			ri = &tnapi->tx_buffers[sw_idx];
5321 		}
5322 
5323 		sw_idx = NEXT_TX(sw_idx);
5324 
5325 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5326 			ri = &tnapi->tx_buffers[sw_idx];
5327 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5328 				tx_bug = 1;
5329 
5330 			pci_unmap_page(tp->pdev,
5331 				       dma_unmap_addr(ri, mapping),
5332 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5333 				       PCI_DMA_TODEVICE);
5334 
5335 			while (ri->fragmented) {
5336 				ri->fragmented = false;
5337 				sw_idx = NEXT_TX(sw_idx);
5338 				ri = &tnapi->tx_buffers[sw_idx];
5339 			}
5340 
5341 			sw_idx = NEXT_TX(sw_idx);
5342 		}
5343 
5344 		pkts_compl++;
5345 		bytes_compl += skb->len;
5346 
5347 		dev_kfree_skb(skb);
5348 
5349 		if (unlikely(tx_bug)) {
5350 			tg3_tx_recover(tp);
5351 			return;
5352 		}
5353 	}
5354 
5355 	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5356 
5357 	tnapi->tx_cons = sw_idx;
5358 
5359 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5360 	 * before checking for netif_queue_stopped().  Without the
5361 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5362 	 * will miss it and cause the queue to be stopped forever.
5363 	 */
5364 	smp_mb();
5365 
5366 	if (unlikely(netif_tx_queue_stopped(txq) &&
5367 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5368 		__netif_tx_lock(txq, smp_processor_id());
5369 		if (netif_tx_queue_stopped(txq) &&
5370 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5371 			netif_tx_wake_queue(txq);
5372 		__netif_tx_unlock(txq);
5373 	}
5374 }
5375 
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378 	if (!ri->data)
5379 		return;
5380 
5381 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382 			 map_sz, PCI_DMA_FROMDEVICE);
5383 	kfree(ri->data);
5384 	ri->data = NULL;
5385 }
5386 
5387 /* Returns size of skb allocated or < 0 on error.
5388  *
5389  * We only need to fill in the address because the other members
5390  * of the RX descriptor are invariant, see tg3_init_rings.
5391  *
5392  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5393  * posting buffers we only dirty the first cache line of the RX
5394  * descriptor (containing the address).  Whereas for the RX status
5395  * buffers the cpu only reads the last cacheline of the RX descriptor
5396  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397  */
5398 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5399 			    u32 opaque_key, u32 dest_idx_unmasked)
5400 {
5401 	struct tg3_rx_buffer_desc *desc;
5402 	struct ring_info *map;
5403 	u8 *data;
5404 	dma_addr_t mapping;
5405 	int skb_size, data_size, dest_idx;
5406 
5407 	switch (opaque_key) {
5408 	case RXD_OPAQUE_RING_STD:
5409 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5410 		desc = &tpr->rx_std[dest_idx];
5411 		map = &tpr->rx_std_buffers[dest_idx];
5412 		data_size = tp->rx_pkt_map_sz;
5413 		break;
5414 
5415 	case RXD_OPAQUE_RING_JUMBO:
5416 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5417 		desc = &tpr->rx_jmb[dest_idx].std;
5418 		map = &tpr->rx_jmb_buffers[dest_idx];
5419 		data_size = TG3_RX_JMB_MAP_SZ;
5420 		break;
5421 
5422 	default:
5423 		return -EINVAL;
5424 	}
5425 
5426 	/* Do not overwrite any of the map or rp information
5427 	 * until we are sure we can commit to a new buffer.
5428 	 *
5429 	 * Callers depend upon this behavior and assume that
5430 	 * we leave everything unchanged if we fail.
5431 	 */
5432 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5433 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434 	data = kmalloc(skb_size, GFP_ATOMIC);
5435 	if (!data)
5436 		return -ENOMEM;
5437 
5438 	mapping = pci_map_single(tp->pdev,
5439 				 data + TG3_RX_OFFSET(tp),
5440 				 data_size,
5441 				 PCI_DMA_FROMDEVICE);
5442 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5443 		kfree(data);
5444 		return -EIO;
5445 	}
5446 
5447 	map->data = data;
5448 	dma_unmap_addr_set(map, mapping, mapping);
5449 
5450 	desc->addr_hi = ((u64)mapping >> 32);
5451 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5452 
5453 	return data_size;
5454 }
5455 
5456 /* We only need to move over in the address because the other
5457  * members of the RX descriptor are invariant.  See notes above
5458  * tg3_alloc_rx_data for full details.
5459  */
5460 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5461 			   struct tg3_rx_prodring_set *dpr,
5462 			   u32 opaque_key, int src_idx,
5463 			   u32 dest_idx_unmasked)
5464 {
5465 	struct tg3 *tp = tnapi->tp;
5466 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5467 	struct ring_info *src_map, *dest_map;
5468 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5469 	int dest_idx;
5470 
5471 	switch (opaque_key) {
5472 	case RXD_OPAQUE_RING_STD:
5473 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5474 		dest_desc = &dpr->rx_std[dest_idx];
5475 		dest_map = &dpr->rx_std_buffers[dest_idx];
5476 		src_desc = &spr->rx_std[src_idx];
5477 		src_map = &spr->rx_std_buffers[src_idx];
5478 		break;
5479 
5480 	case RXD_OPAQUE_RING_JUMBO:
5481 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5482 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5483 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5484 		src_desc = &spr->rx_jmb[src_idx].std;
5485 		src_map = &spr->rx_jmb_buffers[src_idx];
5486 		break;
5487 
5488 	default:
5489 		return;
5490 	}
5491 
5492 	dest_map->data = src_map->data;
5493 	dma_unmap_addr_set(dest_map, mapping,
5494 			   dma_unmap_addr(src_map, mapping));
5495 	dest_desc->addr_hi = src_desc->addr_hi;
5496 	dest_desc->addr_lo = src_desc->addr_lo;
5497 
5498 	/* Ensure that the update to the skb happens after the physical
5499 	 * addresses have been transferred to the new BD location.
5500 	 */
5501 	smp_wmb();
5502 
5503 	src_map->data = NULL;
5504 }
5505 
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507  * buffers to the chip, and one special ring the chip uses to report
5508  * status back to the host.
5509  *
5510  * The special ring reports the status of received packets to the
5511  * host.  The chip does not write into the original descriptor the
5512  * RX buffer was obtained from.  The chip simply takes the original
5513  * descriptor as provided by the host, updates the status and length
5514  * field, then writes this into the next status ring entry.
5515  *
5516  * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5518  * it is first placed into the on-chip ram.  When the packet's length
5519  * is known, it walks down the TG3_BDINFO entries to select the ring.
5520  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521  * which is within the range of the new packet's length is chosen.
5522  *
5523  * The "separate ring for rx status" scheme may sound queer, but it makes
5524  * sense from a cache coherency perspective.  If only the host writes
5525  * to the buffer post rings, and only the chip writes to the rx status
5526  * rings, then cache lines never move beyond shared-modified state.
5527  * If both the host and chip were to write into the same ring, cache line
5528  * eviction could occur since both entities want it in an exclusive state.
5529  */
5530 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5531 {
5532 	struct tg3 *tp = tnapi->tp;
5533 	u32 work_mask, rx_std_posted = 0;
5534 	u32 std_prod_idx, jmb_prod_idx;
5535 	u32 sw_idx = tnapi->rx_rcb_ptr;
5536 	u16 hw_idx;
5537 	int received;
5538 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5539 
5540 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5541 	/*
5542 	 * We need to order the read of hw_idx and the read of
5543 	 * the opaque cookie.
5544 	 */
5545 	rmb();
5546 	work_mask = 0;
5547 	received = 0;
5548 	std_prod_idx = tpr->rx_std_prod_idx;
5549 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5550 	while (sw_idx != hw_idx && budget > 0) {
5551 		struct ring_info *ri;
5552 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5553 		unsigned int len;
5554 		struct sk_buff *skb;
5555 		dma_addr_t dma_addr;
5556 		u32 opaque_key, desc_idx, *post_ptr;
5557 		u8 *data;
5558 
5559 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5560 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5561 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5562 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5563 			dma_addr = dma_unmap_addr(ri, mapping);
5564 			data = ri->data;
5565 			post_ptr = &std_prod_idx;
5566 			rx_std_posted++;
5567 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5568 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5569 			dma_addr = dma_unmap_addr(ri, mapping);
5570 			data = ri->data;
5571 			post_ptr = &jmb_prod_idx;
5572 		} else
5573 			goto next_pkt_nopost;
5574 
5575 		work_mask |= opaque_key;
5576 
5577 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5578 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5579 		drop_it:
5580 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5581 				       desc_idx, *post_ptr);
5582 		drop_it_no_recycle:
5583 			/* Other statistics kept track of by card. */
5584 			tp->rx_dropped++;
5585 			goto next_pkt;
5586 		}
5587 
5588 		prefetch(data + TG3_RX_OFFSET(tp));
5589 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5590 		      ETH_FCS_LEN;
5591 
5592 		if (len > TG3_RX_COPY_THRESH(tp)) {
5593 			int skb_size;
5594 
5595 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5596 						    *post_ptr);
5597 			if (skb_size < 0)
5598 				goto drop_it;
5599 
5600 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5601 					 PCI_DMA_FROMDEVICE);
5602 
5603 			skb = build_skb(data);
5604 			if (!skb) {
5605 				kfree(data);
5606 				goto drop_it_no_recycle;
5607 			}
5608 			skb_reserve(skb, TG3_RX_OFFSET(tp));
5609 			/* Ensure that the update to the data happens
5610 			 * after the usage of the old DMA mapping.
5611 			 */
5612 			smp_wmb();
5613 
5614 			ri->data = NULL;
5615 
5616 		} else {
5617 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5618 				       desc_idx, *post_ptr);
5619 
5620 			skb = netdev_alloc_skb(tp->dev,
5621 					       len + TG3_RAW_IP_ALIGN);
5622 			if (skb == NULL)
5623 				goto drop_it_no_recycle;
5624 
5625 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
5626 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5627 			memcpy(skb->data,
5628 			       data + TG3_RX_OFFSET(tp),
5629 			       len);
5630 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5631 		}
5632 
5633 		skb_put(skb, len);
5634 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5635 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5636 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5637 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5638 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5639 		else
5640 			skb_checksum_none_assert(skb);
5641 
5642 		skb->protocol = eth_type_trans(skb, tp->dev);
5643 
5644 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5645 		    skb->protocol != htons(ETH_P_8021Q)) {
5646 			dev_kfree_skb(skb);
5647 			goto drop_it_no_recycle;
5648 		}
5649 
5650 		if (desc->type_flags & RXD_FLAG_VLAN &&
5651 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5652 			__vlan_hwaccel_put_tag(skb,
5653 					       desc->err_vlan & RXD_VLAN_MASK);
5654 
5655 		napi_gro_receive(&tnapi->napi, skb);
5656 
5657 		received++;
5658 		budget--;
5659 
5660 next_pkt:
5661 		(*post_ptr)++;
5662 
5663 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5664 			tpr->rx_std_prod_idx = std_prod_idx &
5665 					       tp->rx_std_ring_mask;
5666 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5667 				     tpr->rx_std_prod_idx);
5668 			work_mask &= ~RXD_OPAQUE_RING_STD;
5669 			rx_std_posted = 0;
5670 		}
5671 next_pkt_nopost:
5672 		sw_idx++;
5673 		sw_idx &= tp->rx_ret_ring_mask;
5674 
5675 		/* Refresh hw_idx to see if there is new work */
5676 		if (sw_idx == hw_idx) {
5677 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5678 			rmb();
5679 		}
5680 	}
5681 
5682 	/* ACK the status ring. */
5683 	tnapi->rx_rcb_ptr = sw_idx;
5684 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5685 
5686 	/* Refill RX ring(s). */
5687 	if (!tg3_flag(tp, ENABLE_RSS)) {
5688 		if (work_mask & RXD_OPAQUE_RING_STD) {
5689 			tpr->rx_std_prod_idx = std_prod_idx &
5690 					       tp->rx_std_ring_mask;
5691 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5692 				     tpr->rx_std_prod_idx);
5693 		}
5694 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5695 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5696 					       tp->rx_jmb_ring_mask;
5697 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5698 				     tpr->rx_jmb_prod_idx);
5699 		}
5700 		mmiowb();
5701 	} else if (work_mask) {
5702 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5703 		 * updated before the producer indices can be updated.
5704 		 */
5705 		smp_wmb();
5706 
5707 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5708 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5709 
5710 		if (tnapi != &tp->napi[1])
5711 			napi_schedule(&tp->napi[1].napi);
5712 	}
5713 
5714 	return received;
5715 }
5716 
5717 static void tg3_poll_link(struct tg3 *tp)
5718 {
5719 	/* handle link change and other phy events */
5720 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5721 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5722 
5723 		if (sblk->status & SD_STATUS_LINK_CHG) {
5724 			sblk->status = SD_STATUS_UPDATED |
5725 				       (sblk->status & ~SD_STATUS_LINK_CHG);
5726 			spin_lock(&tp->lock);
5727 			if (tg3_flag(tp, USE_PHYLIB)) {
5728 				tw32_f(MAC_STATUS,
5729 				     (MAC_STATUS_SYNC_CHANGED |
5730 				      MAC_STATUS_CFG_CHANGED |
5731 				      MAC_STATUS_MI_COMPLETION |
5732 				      MAC_STATUS_LNKSTATE_CHANGED));
5733 				udelay(40);
5734 			} else
5735 				tg3_setup_phy(tp, 0);
5736 			spin_unlock(&tp->lock);
5737 		}
5738 	}
5739 }
5740 
5741 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5742 				struct tg3_rx_prodring_set *dpr,
5743 				struct tg3_rx_prodring_set *spr)
5744 {
5745 	u32 si, di, cpycnt, src_prod_idx;
5746 	int i, err = 0;
5747 
5748 	while (1) {
5749 		src_prod_idx = spr->rx_std_prod_idx;
5750 
5751 		/* Make sure updates to the rx_std_buffers[] entries and the
5752 		 * standard producer index are seen in the correct order.
5753 		 */
5754 		smp_rmb();
5755 
5756 		if (spr->rx_std_cons_idx == src_prod_idx)
5757 			break;
5758 
5759 		if (spr->rx_std_cons_idx < src_prod_idx)
5760 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5761 		else
5762 			cpycnt = tp->rx_std_ring_mask + 1 -
5763 				 spr->rx_std_cons_idx;
5764 
5765 		cpycnt = min(cpycnt,
5766 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5767 
5768 		si = spr->rx_std_cons_idx;
5769 		di = dpr->rx_std_prod_idx;
5770 
5771 		for (i = di; i < di + cpycnt; i++) {
5772 			if (dpr->rx_std_buffers[i].data) {
5773 				cpycnt = i - di;
5774 				err = -ENOSPC;
5775 				break;
5776 			}
5777 		}
5778 
5779 		if (!cpycnt)
5780 			break;
5781 
5782 		/* Ensure that updates to the rx_std_buffers ring and the
5783 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5784 		 * ordered correctly WRT the skb check above.
5785 		 */
5786 		smp_rmb();
5787 
5788 		memcpy(&dpr->rx_std_buffers[di],
5789 		       &spr->rx_std_buffers[si],
5790 		       cpycnt * sizeof(struct ring_info));
5791 
5792 		for (i = 0; i < cpycnt; i++, di++, si++) {
5793 			struct tg3_rx_buffer_desc *sbd, *dbd;
5794 			sbd = &spr->rx_std[si];
5795 			dbd = &dpr->rx_std[di];
5796 			dbd->addr_hi = sbd->addr_hi;
5797 			dbd->addr_lo = sbd->addr_lo;
5798 		}
5799 
5800 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5801 				       tp->rx_std_ring_mask;
5802 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5803 				       tp->rx_std_ring_mask;
5804 	}
5805 
5806 	while (1) {
5807 		src_prod_idx = spr->rx_jmb_prod_idx;
5808 
5809 		/* Make sure updates to the rx_jmb_buffers[] entries and
5810 		 * the jumbo producer index are seen in the correct order.
5811 		 */
5812 		smp_rmb();
5813 
5814 		if (spr->rx_jmb_cons_idx == src_prod_idx)
5815 			break;
5816 
5817 		if (spr->rx_jmb_cons_idx < src_prod_idx)
5818 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5819 		else
5820 			cpycnt = tp->rx_jmb_ring_mask + 1 -
5821 				 spr->rx_jmb_cons_idx;
5822 
5823 		cpycnt = min(cpycnt,
5824 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5825 
5826 		si = spr->rx_jmb_cons_idx;
5827 		di = dpr->rx_jmb_prod_idx;
5828 
5829 		for (i = di; i < di + cpycnt; i++) {
5830 			if (dpr->rx_jmb_buffers[i].data) {
5831 				cpycnt = i - di;
5832 				err = -ENOSPC;
5833 				break;
5834 			}
5835 		}
5836 
5837 		if (!cpycnt)
5838 			break;
5839 
5840 		/* Ensure that updates to the rx_jmb_buffers ring and the
5841 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5842 		 * ordered correctly WRT the skb check above.
5843 		 */
5844 		smp_rmb();
5845 
5846 		memcpy(&dpr->rx_jmb_buffers[di],
5847 		       &spr->rx_jmb_buffers[si],
5848 		       cpycnt * sizeof(struct ring_info));
5849 
5850 		for (i = 0; i < cpycnt; i++, di++, si++) {
5851 			struct tg3_rx_buffer_desc *sbd, *dbd;
5852 			sbd = &spr->rx_jmb[si].std;
5853 			dbd = &dpr->rx_jmb[di].std;
5854 			dbd->addr_hi = sbd->addr_hi;
5855 			dbd->addr_lo = sbd->addr_lo;
5856 		}
5857 
5858 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5859 				       tp->rx_jmb_ring_mask;
5860 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5861 				       tp->rx_jmb_ring_mask;
5862 	}
5863 
5864 	return err;
5865 }
5866 
5867 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5868 {
5869 	struct tg3 *tp = tnapi->tp;
5870 
5871 	/* run TX completion thread */
5872 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5873 		tg3_tx(tnapi);
5874 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5875 			return work_done;
5876 	}
5877 
5878 	/* run RX thread, within the bounds set by NAPI.
5879 	 * All RX "locking" is done by ensuring outside
5880 	 * code synchronizes with tg3->napi.poll()
5881 	 */
5882 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5883 		work_done += tg3_rx(tnapi, budget - work_done);
5884 
5885 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5886 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5887 		int i, err = 0;
5888 		u32 std_prod_idx = dpr->rx_std_prod_idx;
5889 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5890 
5891 		for (i = 1; i < tp->irq_cnt; i++)
5892 			err |= tg3_rx_prodring_xfer(tp, dpr,
5893 						    &tp->napi[i].prodring);
5894 
5895 		wmb();
5896 
5897 		if (std_prod_idx != dpr->rx_std_prod_idx)
5898 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5899 				     dpr->rx_std_prod_idx);
5900 
5901 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5902 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5903 				     dpr->rx_jmb_prod_idx);
5904 
5905 		mmiowb();
5906 
5907 		if (err)
5908 			tw32_f(HOSTCC_MODE, tp->coal_now);
5909 	}
5910 
5911 	return work_done;
5912 }
5913 
5914 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5915 {
5916 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5917 		schedule_work(&tp->reset_task);
5918 }
5919 
5920 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5921 {
5922 	cancel_work_sync(&tp->reset_task);
5923 	tg3_flag_clear(tp, RESET_TASK_PENDING);
5924 }
5925 
5926 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5927 {
5928 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5929 	struct tg3 *tp = tnapi->tp;
5930 	int work_done = 0;
5931 	struct tg3_hw_status *sblk = tnapi->hw_status;
5932 
5933 	while (1) {
5934 		work_done = tg3_poll_work(tnapi, work_done, budget);
5935 
5936 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5937 			goto tx_recovery;
5938 
5939 		if (unlikely(work_done >= budget))
5940 			break;
5941 
5942 		/* tp->last_tag is used in tg3_int_reenable() below
5943 		 * to tell the hw how much work has been processed,
5944 		 * so we must read it before checking for more work.
5945 		 */
5946 		tnapi->last_tag = sblk->status_tag;
5947 		tnapi->last_irq_tag = tnapi->last_tag;
5948 		rmb();
5949 
5950 		/* check for RX/TX work to do */
5951 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5952 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5953 			napi_complete(napi);
5954 			/* Reenable interrupts. */
5955 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5956 			mmiowb();
5957 			break;
5958 		}
5959 	}
5960 
5961 	return work_done;
5962 
5963 tx_recovery:
5964 	/* work_done is guaranteed to be less than budget. */
5965 	napi_complete(napi);
5966 	tg3_reset_task_schedule(tp);
5967 	return work_done;
5968 }
5969 
5970 static void tg3_process_error(struct tg3 *tp)
5971 {
5972 	u32 val;
5973 	bool real_error = false;
5974 
5975 	if (tg3_flag(tp, ERROR_PROCESSED))
5976 		return;
5977 
5978 	/* Check Flow Attention register */
5979 	val = tr32(HOSTCC_FLOW_ATTN);
5980 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5981 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5982 		real_error = true;
5983 	}
5984 
5985 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5986 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5987 		real_error = true;
5988 	}
5989 
5990 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5991 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5992 		real_error = true;
5993 	}
5994 
5995 	if (!real_error)
5996 		return;
5997 
5998 	tg3_dump_state(tp);
5999 
6000 	tg3_flag_set(tp, ERROR_PROCESSED);
6001 	tg3_reset_task_schedule(tp);
6002 }
6003 
6004 static int tg3_poll(struct napi_struct *napi, int budget)
6005 {
6006 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6007 	struct tg3 *tp = tnapi->tp;
6008 	int work_done = 0;
6009 	struct tg3_hw_status *sblk = tnapi->hw_status;
6010 
6011 	while (1) {
6012 		if (sblk->status & SD_STATUS_ERROR)
6013 			tg3_process_error(tp);
6014 
6015 		tg3_poll_link(tp);
6016 
6017 		work_done = tg3_poll_work(tnapi, work_done, budget);
6018 
6019 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6020 			goto tx_recovery;
6021 
6022 		if (unlikely(work_done >= budget))
6023 			break;
6024 
6025 		if (tg3_flag(tp, TAGGED_STATUS)) {
6026 			/* tp->last_tag is used in tg3_int_reenable() below
6027 			 * to tell the hw how much work has been processed,
6028 			 * so we must read it before checking for more work.
6029 			 */
6030 			tnapi->last_tag = sblk->status_tag;
6031 			tnapi->last_irq_tag = tnapi->last_tag;
6032 			rmb();
6033 		} else
6034 			sblk->status &= ~SD_STATUS_UPDATED;
6035 
6036 		if (likely(!tg3_has_work(tnapi))) {
6037 			napi_complete(napi);
6038 			tg3_int_reenable(tnapi);
6039 			break;
6040 		}
6041 	}
6042 
6043 	return work_done;
6044 
6045 tx_recovery:
6046 	/* work_done is guaranteed to be less than budget. */
6047 	napi_complete(napi);
6048 	tg3_reset_task_schedule(tp);
6049 	return work_done;
6050 }
6051 
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054 	int i;
6055 
6056 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6057 		napi_disable(&tp->napi[i].napi);
6058 }
6059 
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062 	int i;
6063 
6064 	for (i = 0; i < tp->irq_cnt; i++)
6065 		napi_enable(&tp->napi[i].napi);
6066 }
6067 
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070 	int i;
6071 
6072 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073 	for (i = 1; i < tp->irq_cnt; i++)
6074 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076 
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079 	int i;
6080 
6081 	for (i = 0; i < tp->irq_cnt; i++)
6082 		netif_napi_del(&tp->napi[i].napi);
6083 }
6084 
6085 static inline void tg3_netif_stop(struct tg3 *tp)
6086 {
6087 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6088 	tg3_napi_disable(tp);
6089 	netif_tx_disable(tp->dev);
6090 }
6091 
6092 static inline void tg3_netif_start(struct tg3 *tp)
6093 {
6094 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6095 	 * appropriate so long as all callers are assured to
6096 	 * have free tx slots (such as after tg3_init_hw)
6097 	 */
6098 	netif_tx_wake_all_queues(tp->dev);
6099 
6100 	tg3_napi_enable(tp);
6101 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6102 	tg3_enable_ints(tp);
6103 }
6104 
6105 static void tg3_irq_quiesce(struct tg3 *tp)
6106 {
6107 	int i;
6108 
6109 	BUG_ON(tp->irq_sync);
6110 
6111 	tp->irq_sync = 1;
6112 	smp_mb();
6113 
6114 	for (i = 0; i < tp->irq_cnt; i++)
6115 		synchronize_irq(tp->napi[i].irq_vec);
6116 }
6117 
6118 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6119  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6120  * with as well.  Most of the time, this is not necessary except when
6121  * shutting down the device.
6122  */
6123 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6124 {
6125 	spin_lock_bh(&tp->lock);
6126 	if (irq_sync)
6127 		tg3_irq_quiesce(tp);
6128 }
6129 
6130 static inline void tg3_full_unlock(struct tg3 *tp)
6131 {
6132 	spin_unlock_bh(&tp->lock);
6133 }
6134 
6135 /* One-shot MSI handler - Chip automatically disables interrupt
6136  * after sending MSI so driver doesn't have to do it.
6137  */
6138 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6139 {
6140 	struct tg3_napi *tnapi = dev_id;
6141 	struct tg3 *tp = tnapi->tp;
6142 
6143 	prefetch(tnapi->hw_status);
6144 	if (tnapi->rx_rcb)
6145 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6146 
6147 	if (likely(!tg3_irq_sync(tp)))
6148 		napi_schedule(&tnapi->napi);
6149 
6150 	return IRQ_HANDLED;
6151 }
6152 
6153 /* MSI ISR - No need to check for interrupt sharing and no need to
6154  * flush status block and interrupt mailbox. PCI ordering rules
6155  * guarantee that MSI will arrive after the status block.
6156  */
6157 static irqreturn_t tg3_msi(int irq, void *dev_id)
6158 {
6159 	struct tg3_napi *tnapi = dev_id;
6160 	struct tg3 *tp = tnapi->tp;
6161 
6162 	prefetch(tnapi->hw_status);
6163 	if (tnapi->rx_rcb)
6164 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165 	/*
6166 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6167 	 * chip-internal interrupt pending events.
6168 	 * Writing non-zero to intr-mbox-0 additional tells the
6169 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6170 	 * event coalescing.
6171 	 */
6172 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6173 	if (likely(!tg3_irq_sync(tp)))
6174 		napi_schedule(&tnapi->napi);
6175 
6176 	return IRQ_RETVAL(1);
6177 }
6178 
6179 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6180 {
6181 	struct tg3_napi *tnapi = dev_id;
6182 	struct tg3 *tp = tnapi->tp;
6183 	struct tg3_hw_status *sblk = tnapi->hw_status;
6184 	unsigned int handled = 1;
6185 
6186 	/* In INTx mode, it is possible for the interrupt to arrive at
6187 	 * the CPU before the status block posted prior to the interrupt.
6188 	 * Reading the PCI State register will confirm whether the
6189 	 * interrupt is ours and will flush the status block.
6190 	 */
6191 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6192 		if (tg3_flag(tp, CHIP_RESETTING) ||
6193 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6194 			handled = 0;
6195 			goto out;
6196 		}
6197 	}
6198 
6199 	/*
6200 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6201 	 * chip-internal interrupt pending events.
6202 	 * Writing non-zero to intr-mbox-0 additional tells the
6203 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6204 	 * event coalescing.
6205 	 *
6206 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6207 	 * spurious interrupts.  The flush impacts performance but
6208 	 * excessive spurious interrupts can be worse in some cases.
6209 	 */
6210 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6211 	if (tg3_irq_sync(tp))
6212 		goto out;
6213 	sblk->status &= ~SD_STATUS_UPDATED;
6214 	if (likely(tg3_has_work(tnapi))) {
6215 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6216 		napi_schedule(&tnapi->napi);
6217 	} else {
6218 		/* No work, shared interrupt perhaps?  re-enable
6219 		 * interrupts, and flush that PCI write
6220 		 */
6221 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6222 			       0x00000000);
6223 	}
6224 out:
6225 	return IRQ_RETVAL(handled);
6226 }
6227 
6228 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6229 {
6230 	struct tg3_napi *tnapi = dev_id;
6231 	struct tg3 *tp = tnapi->tp;
6232 	struct tg3_hw_status *sblk = tnapi->hw_status;
6233 	unsigned int handled = 1;
6234 
6235 	/* In INTx mode, it is possible for the interrupt to arrive at
6236 	 * the CPU before the status block posted prior to the interrupt.
6237 	 * Reading the PCI State register will confirm whether the
6238 	 * interrupt is ours and will flush the status block.
6239 	 */
6240 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6241 		if (tg3_flag(tp, CHIP_RESETTING) ||
6242 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6243 			handled = 0;
6244 			goto out;
6245 		}
6246 	}
6247 
6248 	/*
6249 	 * writing any value to intr-mbox-0 clears PCI INTA# and
6250 	 * chip-internal interrupt pending events.
6251 	 * writing non-zero to intr-mbox-0 additional tells the
6252 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6253 	 * event coalescing.
6254 	 *
6255 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6256 	 * spurious interrupts.  The flush impacts performance but
6257 	 * excessive spurious interrupts can be worse in some cases.
6258 	 */
6259 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6260 
6261 	/*
6262 	 * In a shared interrupt configuration, sometimes other devices'
6263 	 * interrupts will scream.  We record the current status tag here
6264 	 * so that the above check can report that the screaming interrupts
6265 	 * are unhandled.  Eventually they will be silenced.
6266 	 */
6267 	tnapi->last_irq_tag = sblk->status_tag;
6268 
6269 	if (tg3_irq_sync(tp))
6270 		goto out;
6271 
6272 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6273 
6274 	napi_schedule(&tnapi->napi);
6275 
6276 out:
6277 	return IRQ_RETVAL(handled);
6278 }
6279 
6280 /* ISR for interrupt test */
6281 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6282 {
6283 	struct tg3_napi *tnapi = dev_id;
6284 	struct tg3 *tp = tnapi->tp;
6285 	struct tg3_hw_status *sblk = tnapi->hw_status;
6286 
6287 	if ((sblk->status & SD_STATUS_UPDATED) ||
6288 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6289 		tg3_disable_ints(tp);
6290 		return IRQ_RETVAL(1);
6291 	}
6292 	return IRQ_RETVAL(0);
6293 }
6294 
6295 static int tg3_init_hw(struct tg3 *, int);
6296 static int tg3_halt(struct tg3 *, int, int);
6297 
6298 /* Restart hardware after configuration changes, self-test, etc.
6299  * Invoked with tp->lock held.
6300  */
6301 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6302 	__releases(tp->lock)
6303 	__acquires(tp->lock)
6304 {
6305 	int err;
6306 
6307 	err = tg3_init_hw(tp, reset_phy);
6308 	if (err) {
6309 		netdev_err(tp->dev,
6310 			   "Failed to re-initialize device, aborting\n");
6311 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6312 		tg3_full_unlock(tp);
6313 		del_timer_sync(&tp->timer);
6314 		tp->irq_sync = 0;
6315 		tg3_napi_enable(tp);
6316 		dev_close(tp->dev);
6317 		tg3_full_lock(tp, 0);
6318 	}
6319 	return err;
6320 }
6321 
6322 #ifdef CONFIG_NET_POLL_CONTROLLER
6323 static void tg3_poll_controller(struct net_device *dev)
6324 {
6325 	int i;
6326 	struct tg3 *tp = netdev_priv(dev);
6327 
6328 	for (i = 0; i < tp->irq_cnt; i++)
6329 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6330 }
6331 #endif
6332 
6333 static void tg3_reset_task(struct work_struct *work)
6334 {
6335 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
6336 	int err;
6337 
6338 	tg3_full_lock(tp, 0);
6339 
6340 	if (!netif_running(tp->dev)) {
6341 		tg3_flag_clear(tp, RESET_TASK_PENDING);
6342 		tg3_full_unlock(tp);
6343 		return;
6344 	}
6345 
6346 	tg3_full_unlock(tp);
6347 
6348 	tg3_phy_stop(tp);
6349 
6350 	tg3_netif_stop(tp);
6351 
6352 	tg3_full_lock(tp, 1);
6353 
6354 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6355 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
6356 		tp->write32_rx_mbox = tg3_write_flush_reg32;
6357 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
6358 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6359 	}
6360 
6361 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6362 	err = tg3_init_hw(tp, 1);
6363 	if (err)
6364 		goto out;
6365 
6366 	tg3_netif_start(tp);
6367 
6368 out:
6369 	tg3_full_unlock(tp);
6370 
6371 	if (!err)
6372 		tg3_phy_start(tp);
6373 
6374 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6375 }
6376 
6377 static void tg3_tx_timeout(struct net_device *dev)
6378 {
6379 	struct tg3 *tp = netdev_priv(dev);
6380 
6381 	if (netif_msg_tx_err(tp)) {
6382 		netdev_err(dev, "transmit timed out, resetting\n");
6383 		tg3_dump_state(tp);
6384 	}
6385 
6386 	tg3_reset_task_schedule(tp);
6387 }
6388 
6389 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6390 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6391 {
6392 	u32 base = (u32) mapping & 0xffffffff;
6393 
6394 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6395 }
6396 
6397 /* Test for DMA addresses > 40-bit */
6398 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6399 					  int len)
6400 {
6401 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6402 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6403 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6404 	return 0;
6405 #else
6406 	return 0;
6407 #endif
6408 }
6409 
6410 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6411 				 dma_addr_t mapping, u32 len, u32 flags,
6412 				 u32 mss, u32 vlan)
6413 {
6414 	txbd->addr_hi = ((u64) mapping >> 32);
6415 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6416 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6417 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6418 }
6419 
6420 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6421 			    dma_addr_t map, u32 len, u32 flags,
6422 			    u32 mss, u32 vlan)
6423 {
6424 	struct tg3 *tp = tnapi->tp;
6425 	bool hwbug = false;
6426 
6427 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6428 		hwbug = true;
6429 
6430 	if (tg3_4g_overflow_test(map, len))
6431 		hwbug = true;
6432 
6433 	if (tg3_40bit_overflow_test(tp, map, len))
6434 		hwbug = true;
6435 
6436 	if (tp->dma_limit) {
6437 		u32 prvidx = *entry;
6438 		u32 tmp_flag = flags & ~TXD_FLAG_END;
6439 		while (len > tp->dma_limit && *budget) {
6440 			u32 frag_len = tp->dma_limit;
6441 			len -= tp->dma_limit;
6442 
6443 			/* Avoid the 8byte DMA problem */
6444 			if (len <= 8) {
6445 				len += tp->dma_limit / 2;
6446 				frag_len = tp->dma_limit / 2;
6447 			}
6448 
6449 			tnapi->tx_buffers[*entry].fragmented = true;
6450 
6451 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6452 				      frag_len, tmp_flag, mss, vlan);
6453 			*budget -= 1;
6454 			prvidx = *entry;
6455 			*entry = NEXT_TX(*entry);
6456 
6457 			map += frag_len;
6458 		}
6459 
6460 		if (len) {
6461 			if (*budget) {
6462 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6463 					      len, flags, mss, vlan);
6464 				*budget -= 1;
6465 				*entry = NEXT_TX(*entry);
6466 			} else {
6467 				hwbug = true;
6468 				tnapi->tx_buffers[prvidx].fragmented = false;
6469 			}
6470 		}
6471 	} else {
6472 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6473 			      len, flags, mss, vlan);
6474 		*entry = NEXT_TX(*entry);
6475 	}
6476 
6477 	return hwbug;
6478 }
6479 
6480 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6481 {
6482 	int i;
6483 	struct sk_buff *skb;
6484 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6485 
6486 	skb = txb->skb;
6487 	txb->skb = NULL;
6488 
6489 	pci_unmap_single(tnapi->tp->pdev,
6490 			 dma_unmap_addr(txb, mapping),
6491 			 skb_headlen(skb),
6492 			 PCI_DMA_TODEVICE);
6493 
6494 	while (txb->fragmented) {
6495 		txb->fragmented = false;
6496 		entry = NEXT_TX(entry);
6497 		txb = &tnapi->tx_buffers[entry];
6498 	}
6499 
6500 	for (i = 0; i <= last; i++) {
6501 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6502 
6503 		entry = NEXT_TX(entry);
6504 		txb = &tnapi->tx_buffers[entry];
6505 
6506 		pci_unmap_page(tnapi->tp->pdev,
6507 			       dma_unmap_addr(txb, mapping),
6508 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6509 
6510 		while (txb->fragmented) {
6511 			txb->fragmented = false;
6512 			entry = NEXT_TX(entry);
6513 			txb = &tnapi->tx_buffers[entry];
6514 		}
6515 	}
6516 }
6517 
6518 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6519 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6520 				       struct sk_buff **pskb,
6521 				       u32 *entry, u32 *budget,
6522 				       u32 base_flags, u32 mss, u32 vlan)
6523 {
6524 	struct tg3 *tp = tnapi->tp;
6525 	struct sk_buff *new_skb, *skb = *pskb;
6526 	dma_addr_t new_addr = 0;
6527 	int ret = 0;
6528 
6529 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6530 		new_skb = skb_copy(skb, GFP_ATOMIC);
6531 	else {
6532 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6533 
6534 		new_skb = skb_copy_expand(skb,
6535 					  skb_headroom(skb) + more_headroom,
6536 					  skb_tailroom(skb), GFP_ATOMIC);
6537 	}
6538 
6539 	if (!new_skb) {
6540 		ret = -1;
6541 	} else {
6542 		/* New SKB is guaranteed to be linear. */
6543 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6544 					  PCI_DMA_TODEVICE);
6545 		/* Make sure the mapping succeeded */
6546 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6547 			dev_kfree_skb(new_skb);
6548 			ret = -1;
6549 		} else {
6550 			u32 save_entry = *entry;
6551 
6552 			base_flags |= TXD_FLAG_END;
6553 
6554 			tnapi->tx_buffers[*entry].skb = new_skb;
6555 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6556 					   mapping, new_addr);
6557 
6558 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6559 					    new_skb->len, base_flags,
6560 					    mss, vlan)) {
6561 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6562 				dev_kfree_skb(new_skb);
6563 				ret = -1;
6564 			}
6565 		}
6566 	}
6567 
6568 	dev_kfree_skb(skb);
6569 	*pskb = new_skb;
6570 	return ret;
6571 }
6572 
6573 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6574 
6575 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6576  * TSO header is greater than 80 bytes.
6577  */
6578 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6579 {
6580 	struct sk_buff *segs, *nskb;
6581 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6582 
6583 	/* Estimate the number of fragments in the worst case */
6584 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6585 		netif_stop_queue(tp->dev);
6586 
6587 		/* netif_tx_stop_queue() must be done before checking
6588 		 * checking tx index in tg3_tx_avail() below, because in
6589 		 * tg3_tx(), we update tx index before checking for
6590 		 * netif_tx_queue_stopped().
6591 		 */
6592 		smp_mb();
6593 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6594 			return NETDEV_TX_BUSY;
6595 
6596 		netif_wake_queue(tp->dev);
6597 	}
6598 
6599 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6600 	if (IS_ERR(segs))
6601 		goto tg3_tso_bug_end;
6602 
6603 	do {
6604 		nskb = segs;
6605 		segs = segs->next;
6606 		nskb->next = NULL;
6607 		tg3_start_xmit(nskb, tp->dev);
6608 	} while (segs);
6609 
6610 tg3_tso_bug_end:
6611 	dev_kfree_skb(skb);
6612 
6613 	return NETDEV_TX_OK;
6614 }
6615 
6616 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6617  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6618  */
6619 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6620 {
6621 	struct tg3 *tp = netdev_priv(dev);
6622 	u32 len, entry, base_flags, mss, vlan = 0;
6623 	u32 budget;
6624 	int i = -1, would_hit_hwbug;
6625 	dma_addr_t mapping;
6626 	struct tg3_napi *tnapi;
6627 	struct netdev_queue *txq;
6628 	unsigned int last;
6629 
6630 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6631 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6632 	if (tg3_flag(tp, ENABLE_TSS))
6633 		tnapi++;
6634 
6635 	budget = tg3_tx_avail(tnapi);
6636 
6637 	/* We are running in BH disabled context with netif_tx_lock
6638 	 * and TX reclaim runs via tp->napi.poll inside of a software
6639 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6640 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6641 	 */
6642 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6643 		if (!netif_tx_queue_stopped(txq)) {
6644 			netif_tx_stop_queue(txq);
6645 
6646 			/* This is a hard error, log it. */
6647 			netdev_err(dev,
6648 				   "BUG! Tx Ring full when queue awake!\n");
6649 		}
6650 		return NETDEV_TX_BUSY;
6651 	}
6652 
6653 	entry = tnapi->tx_prod;
6654 	base_flags = 0;
6655 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6656 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6657 
6658 	mss = skb_shinfo(skb)->gso_size;
6659 	if (mss) {
6660 		struct iphdr *iph;
6661 		u32 tcp_opt_len, hdr_len;
6662 
6663 		if (skb_header_cloned(skb) &&
6664 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6665 			goto drop;
6666 
6667 		iph = ip_hdr(skb);
6668 		tcp_opt_len = tcp_optlen(skb);
6669 
6670 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6671 
6672 		if (!skb_is_gso_v6(skb)) {
6673 			iph->check = 0;
6674 			iph->tot_len = htons(mss + hdr_len);
6675 		}
6676 
6677 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6678 		    tg3_flag(tp, TSO_BUG))
6679 			return tg3_tso_bug(tp, skb);
6680 
6681 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6682 			       TXD_FLAG_CPU_POST_DMA);
6683 
6684 		if (tg3_flag(tp, HW_TSO_1) ||
6685 		    tg3_flag(tp, HW_TSO_2) ||
6686 		    tg3_flag(tp, HW_TSO_3)) {
6687 			tcp_hdr(skb)->check = 0;
6688 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6689 		} else
6690 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6691 								 iph->daddr, 0,
6692 								 IPPROTO_TCP,
6693 								 0);
6694 
6695 		if (tg3_flag(tp, HW_TSO_3)) {
6696 			mss |= (hdr_len & 0xc) << 12;
6697 			if (hdr_len & 0x10)
6698 				base_flags |= 0x00000010;
6699 			base_flags |= (hdr_len & 0x3e0) << 5;
6700 		} else if (tg3_flag(tp, HW_TSO_2))
6701 			mss |= hdr_len << 9;
6702 		else if (tg3_flag(tp, HW_TSO_1) ||
6703 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6704 			if (tcp_opt_len || iph->ihl > 5) {
6705 				int tsflags;
6706 
6707 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6708 				mss |= (tsflags << 11);
6709 			}
6710 		} else {
6711 			if (tcp_opt_len || iph->ihl > 5) {
6712 				int tsflags;
6713 
6714 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6715 				base_flags |= tsflags << 12;
6716 			}
6717 		}
6718 	}
6719 
6720 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6721 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6722 		base_flags |= TXD_FLAG_JMB_PKT;
6723 
6724 	if (vlan_tx_tag_present(skb)) {
6725 		base_flags |= TXD_FLAG_VLAN;
6726 		vlan = vlan_tx_tag_get(skb);
6727 	}
6728 
6729 	len = skb_headlen(skb);
6730 
6731 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6732 	if (pci_dma_mapping_error(tp->pdev, mapping))
6733 		goto drop;
6734 
6735 
6736 	tnapi->tx_buffers[entry].skb = skb;
6737 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6738 
6739 	would_hit_hwbug = 0;
6740 
6741 	if (tg3_flag(tp, 5701_DMA_BUG))
6742 		would_hit_hwbug = 1;
6743 
6744 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6745 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6746 			    mss, vlan)) {
6747 		would_hit_hwbug = 1;
6748 	/* Now loop through additional data fragments, and queue them. */
6749 	} else if (skb_shinfo(skb)->nr_frags > 0) {
6750 		u32 tmp_mss = mss;
6751 
6752 		if (!tg3_flag(tp, HW_TSO_1) &&
6753 		    !tg3_flag(tp, HW_TSO_2) &&
6754 		    !tg3_flag(tp, HW_TSO_3))
6755 			tmp_mss = 0;
6756 
6757 		last = skb_shinfo(skb)->nr_frags - 1;
6758 		for (i = 0; i <= last; i++) {
6759 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6760 
6761 			len = skb_frag_size(frag);
6762 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6763 						   len, DMA_TO_DEVICE);
6764 
6765 			tnapi->tx_buffers[entry].skb = NULL;
6766 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6767 					   mapping);
6768 			if (dma_mapping_error(&tp->pdev->dev, mapping))
6769 				goto dma_error;
6770 
6771 			if (!budget ||
6772 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6773 					    len, base_flags |
6774 					    ((i == last) ? TXD_FLAG_END : 0),
6775 					    tmp_mss, vlan)) {
6776 				would_hit_hwbug = 1;
6777 				break;
6778 			}
6779 		}
6780 	}
6781 
6782 	if (would_hit_hwbug) {
6783 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6784 
6785 		/* If the workaround fails due to memory/mapping
6786 		 * failure, silently drop this packet.
6787 		 */
6788 		entry = tnapi->tx_prod;
6789 		budget = tg3_tx_avail(tnapi);
6790 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6791 						base_flags, mss, vlan))
6792 			goto drop_nofree;
6793 	}
6794 
6795 	skb_tx_timestamp(skb);
6796 	netdev_sent_queue(tp->dev, skb->len);
6797 
6798 	/* Packets are ready, update Tx producer idx local and on card. */
6799 	tw32_tx_mbox(tnapi->prodmbox, entry);
6800 
6801 	tnapi->tx_prod = entry;
6802 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6803 		netif_tx_stop_queue(txq);
6804 
6805 		/* netif_tx_stop_queue() must be done before checking
6806 		 * checking tx index in tg3_tx_avail() below, because in
6807 		 * tg3_tx(), we update tx index before checking for
6808 		 * netif_tx_queue_stopped().
6809 		 */
6810 		smp_mb();
6811 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6812 			netif_tx_wake_queue(txq);
6813 	}
6814 
6815 	mmiowb();
6816 	return NETDEV_TX_OK;
6817 
6818 dma_error:
6819 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6820 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6821 drop:
6822 	dev_kfree_skb(skb);
6823 drop_nofree:
6824 	tp->tx_dropped++;
6825 	return NETDEV_TX_OK;
6826 }
6827 
6828 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6829 {
6830 	if (enable) {
6831 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6832 				  MAC_MODE_PORT_MODE_MASK);
6833 
6834 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6835 
6836 		if (!tg3_flag(tp, 5705_PLUS))
6837 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6838 
6839 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6840 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6841 		else
6842 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6843 	} else {
6844 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6845 
6846 		if (tg3_flag(tp, 5705_PLUS) ||
6847 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6848 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6849 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6850 	}
6851 
6852 	tw32(MAC_MODE, tp->mac_mode);
6853 	udelay(40);
6854 }
6855 
6856 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6857 {
6858 	u32 val, bmcr, mac_mode, ptest = 0;
6859 
6860 	tg3_phy_toggle_apd(tp, false);
6861 	tg3_phy_toggle_automdix(tp, 0);
6862 
6863 	if (extlpbk && tg3_phy_set_extloopbk(tp))
6864 		return -EIO;
6865 
6866 	bmcr = BMCR_FULLDPLX;
6867 	switch (speed) {
6868 	case SPEED_10:
6869 		break;
6870 	case SPEED_100:
6871 		bmcr |= BMCR_SPEED100;
6872 		break;
6873 	case SPEED_1000:
6874 	default:
6875 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6876 			speed = SPEED_100;
6877 			bmcr |= BMCR_SPEED100;
6878 		} else {
6879 			speed = SPEED_1000;
6880 			bmcr |= BMCR_SPEED1000;
6881 		}
6882 	}
6883 
6884 	if (extlpbk) {
6885 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6886 			tg3_readphy(tp, MII_CTRL1000, &val);
6887 			val |= CTL1000_AS_MASTER |
6888 			       CTL1000_ENABLE_MASTER;
6889 			tg3_writephy(tp, MII_CTRL1000, val);
6890 		} else {
6891 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6892 				MII_TG3_FET_PTEST_TRIM_2;
6893 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6894 		}
6895 	} else
6896 		bmcr |= BMCR_LOOPBACK;
6897 
6898 	tg3_writephy(tp, MII_BMCR, bmcr);
6899 
6900 	/* The write needs to be flushed for the FETs */
6901 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6902 		tg3_readphy(tp, MII_BMCR, &bmcr);
6903 
6904 	udelay(40);
6905 
6906 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6907 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6908 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6909 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
6910 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
6911 
6912 		/* The write needs to be flushed for the AC131 */
6913 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6914 	}
6915 
6916 	/* Reset to prevent losing 1st rx packet intermittently */
6917 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6918 	    tg3_flag(tp, 5780_CLASS)) {
6919 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6920 		udelay(10);
6921 		tw32_f(MAC_RX_MODE, tp->rx_mode);
6922 	}
6923 
6924 	mac_mode = tp->mac_mode &
6925 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6926 	if (speed == SPEED_1000)
6927 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
6928 	else
6929 		mac_mode |= MAC_MODE_PORT_MODE_MII;
6930 
6931 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6932 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6933 
6934 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
6935 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
6936 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6937 			mac_mode |= MAC_MODE_LINK_POLARITY;
6938 
6939 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
6940 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6941 	}
6942 
6943 	tw32(MAC_MODE, mac_mode);
6944 	udelay(40);
6945 
6946 	return 0;
6947 }
6948 
6949 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6950 {
6951 	struct tg3 *tp = netdev_priv(dev);
6952 
6953 	if (features & NETIF_F_LOOPBACK) {
6954 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6955 			return;
6956 
6957 		spin_lock_bh(&tp->lock);
6958 		tg3_mac_loopback(tp, true);
6959 		netif_carrier_on(tp->dev);
6960 		spin_unlock_bh(&tp->lock);
6961 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6962 	} else {
6963 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6964 			return;
6965 
6966 		spin_lock_bh(&tp->lock);
6967 		tg3_mac_loopback(tp, false);
6968 		/* Force link status check */
6969 		tg3_setup_phy(tp, 1);
6970 		spin_unlock_bh(&tp->lock);
6971 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6972 	}
6973 }
6974 
6975 static netdev_features_t tg3_fix_features(struct net_device *dev,
6976 	netdev_features_t features)
6977 {
6978 	struct tg3 *tp = netdev_priv(dev);
6979 
6980 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6981 		features &= ~NETIF_F_ALL_TSO;
6982 
6983 	return features;
6984 }
6985 
6986 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6987 {
6988 	netdev_features_t changed = dev->features ^ features;
6989 
6990 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6991 		tg3_set_loopback(dev, features);
6992 
6993 	return 0;
6994 }
6995 
6996 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6997 			       int new_mtu)
6998 {
6999 	dev->mtu = new_mtu;
7000 
7001 	if (new_mtu > ETH_DATA_LEN) {
7002 		if (tg3_flag(tp, 5780_CLASS)) {
7003 			netdev_update_features(dev);
7004 			tg3_flag_clear(tp, TSO_CAPABLE);
7005 		} else {
7006 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
7007 		}
7008 	} else {
7009 		if (tg3_flag(tp, 5780_CLASS)) {
7010 			tg3_flag_set(tp, TSO_CAPABLE);
7011 			netdev_update_features(dev);
7012 		}
7013 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7014 	}
7015 }
7016 
7017 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7018 {
7019 	struct tg3 *tp = netdev_priv(dev);
7020 	int err;
7021 
7022 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7023 		return -EINVAL;
7024 
7025 	if (!netif_running(dev)) {
7026 		/* We'll just catch it later when the
7027 		 * device is up'd.
7028 		 */
7029 		tg3_set_mtu(dev, tp, new_mtu);
7030 		return 0;
7031 	}
7032 
7033 	tg3_phy_stop(tp);
7034 
7035 	tg3_netif_stop(tp);
7036 
7037 	tg3_full_lock(tp, 1);
7038 
7039 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7040 
7041 	tg3_set_mtu(dev, tp, new_mtu);
7042 
7043 	err = tg3_restart_hw(tp, 0);
7044 
7045 	if (!err)
7046 		tg3_netif_start(tp);
7047 
7048 	tg3_full_unlock(tp);
7049 
7050 	if (!err)
7051 		tg3_phy_start(tp);
7052 
7053 	return err;
7054 }
7055 
7056 static void tg3_rx_prodring_free(struct tg3 *tp,
7057 				 struct tg3_rx_prodring_set *tpr)
7058 {
7059 	int i;
7060 
7061 	if (tpr != &tp->napi[0].prodring) {
7062 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7063 		     i = (i + 1) & tp->rx_std_ring_mask)
7064 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7065 					tp->rx_pkt_map_sz);
7066 
7067 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7068 			for (i = tpr->rx_jmb_cons_idx;
7069 			     i != tpr->rx_jmb_prod_idx;
7070 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7071 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7072 						TG3_RX_JMB_MAP_SZ);
7073 			}
7074 		}
7075 
7076 		return;
7077 	}
7078 
7079 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7080 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7081 				tp->rx_pkt_map_sz);
7082 
7083 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7084 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7085 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7086 					TG3_RX_JMB_MAP_SZ);
7087 	}
7088 }
7089 
7090 /* Initialize rx rings for packet processing.
7091  *
7092  * The chip has been shut down and the driver detached from
7093  * the networking, so no interrupts or new tx packets will
7094  * end up in the driver.  tp->{tx,}lock are held and thus
7095  * we may not sleep.
7096  */
7097 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7098 				 struct tg3_rx_prodring_set *tpr)
7099 {
7100 	u32 i, rx_pkt_dma_sz;
7101 
7102 	tpr->rx_std_cons_idx = 0;
7103 	tpr->rx_std_prod_idx = 0;
7104 	tpr->rx_jmb_cons_idx = 0;
7105 	tpr->rx_jmb_prod_idx = 0;
7106 
7107 	if (tpr != &tp->napi[0].prodring) {
7108 		memset(&tpr->rx_std_buffers[0], 0,
7109 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7110 		if (tpr->rx_jmb_buffers)
7111 			memset(&tpr->rx_jmb_buffers[0], 0,
7112 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7113 		goto done;
7114 	}
7115 
7116 	/* Zero out all descriptors. */
7117 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7118 
7119 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7120 	if (tg3_flag(tp, 5780_CLASS) &&
7121 	    tp->dev->mtu > ETH_DATA_LEN)
7122 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7123 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7124 
7125 	/* Initialize invariants of the rings, we only set this
7126 	 * stuff once.  This works because the card does not
7127 	 * write into the rx buffer posting rings.
7128 	 */
7129 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7130 		struct tg3_rx_buffer_desc *rxd;
7131 
7132 		rxd = &tpr->rx_std[i];
7133 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7134 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7135 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7136 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7137 	}
7138 
7139 	/* Now allocate fresh SKBs for each rx ring. */
7140 	for (i = 0; i < tp->rx_pending; i++) {
7141 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7142 			netdev_warn(tp->dev,
7143 				    "Using a smaller RX standard ring. Only "
7144 				    "%d out of %d buffers were allocated "
7145 				    "successfully\n", i, tp->rx_pending);
7146 			if (i == 0)
7147 				goto initfail;
7148 			tp->rx_pending = i;
7149 			break;
7150 		}
7151 	}
7152 
7153 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7154 		goto done;
7155 
7156 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7157 
7158 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7159 		goto done;
7160 
7161 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7162 		struct tg3_rx_buffer_desc *rxd;
7163 
7164 		rxd = &tpr->rx_jmb[i].std;
7165 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7166 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7167 				  RXD_FLAG_JUMBO;
7168 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7169 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7170 	}
7171 
7172 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7173 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7174 			netdev_warn(tp->dev,
7175 				    "Using a smaller RX jumbo ring. Only %d "
7176 				    "out of %d buffers were allocated "
7177 				    "successfully\n", i, tp->rx_jumbo_pending);
7178 			if (i == 0)
7179 				goto initfail;
7180 			tp->rx_jumbo_pending = i;
7181 			break;
7182 		}
7183 	}
7184 
7185 done:
7186 	return 0;
7187 
7188 initfail:
7189 	tg3_rx_prodring_free(tp, tpr);
7190 	return -ENOMEM;
7191 }
7192 
7193 static void tg3_rx_prodring_fini(struct tg3 *tp,
7194 				 struct tg3_rx_prodring_set *tpr)
7195 {
7196 	kfree(tpr->rx_std_buffers);
7197 	tpr->rx_std_buffers = NULL;
7198 	kfree(tpr->rx_jmb_buffers);
7199 	tpr->rx_jmb_buffers = NULL;
7200 	if (tpr->rx_std) {
7201 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7202 				  tpr->rx_std, tpr->rx_std_mapping);
7203 		tpr->rx_std = NULL;
7204 	}
7205 	if (tpr->rx_jmb) {
7206 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7207 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7208 		tpr->rx_jmb = NULL;
7209 	}
7210 }
7211 
7212 static int tg3_rx_prodring_init(struct tg3 *tp,
7213 				struct tg3_rx_prodring_set *tpr)
7214 {
7215 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7216 				      GFP_KERNEL);
7217 	if (!tpr->rx_std_buffers)
7218 		return -ENOMEM;
7219 
7220 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7221 					 TG3_RX_STD_RING_BYTES(tp),
7222 					 &tpr->rx_std_mapping,
7223 					 GFP_KERNEL);
7224 	if (!tpr->rx_std)
7225 		goto err_out;
7226 
7227 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7228 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7229 					      GFP_KERNEL);
7230 		if (!tpr->rx_jmb_buffers)
7231 			goto err_out;
7232 
7233 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7234 						 TG3_RX_JMB_RING_BYTES(tp),
7235 						 &tpr->rx_jmb_mapping,
7236 						 GFP_KERNEL);
7237 		if (!tpr->rx_jmb)
7238 			goto err_out;
7239 	}
7240 
7241 	return 0;
7242 
7243 err_out:
7244 	tg3_rx_prodring_fini(tp, tpr);
7245 	return -ENOMEM;
7246 }
7247 
7248 /* Free up pending packets in all rx/tx rings.
7249  *
7250  * The chip has been shut down and the driver detached from
7251  * the networking, so no interrupts or new tx packets will
7252  * end up in the driver.  tp->{tx,}lock is not held and we are not
7253  * in an interrupt context and thus may sleep.
7254  */
7255 static void tg3_free_rings(struct tg3 *tp)
7256 {
7257 	int i, j;
7258 
7259 	for (j = 0; j < tp->irq_cnt; j++) {
7260 		struct tg3_napi *tnapi = &tp->napi[j];
7261 
7262 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7263 
7264 		if (!tnapi->tx_buffers)
7265 			continue;
7266 
7267 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7268 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7269 
7270 			if (!skb)
7271 				continue;
7272 
7273 			tg3_tx_skb_unmap(tnapi, i,
7274 					 skb_shinfo(skb)->nr_frags - 1);
7275 
7276 			dev_kfree_skb_any(skb);
7277 		}
7278 	}
7279 	netdev_reset_queue(tp->dev);
7280 }
7281 
7282 /* Initialize tx/rx rings for packet processing.
7283  *
7284  * The chip has been shut down and the driver detached from
7285  * the networking, so no interrupts or new tx packets will
7286  * end up in the driver.  tp->{tx,}lock are held and thus
7287  * we may not sleep.
7288  */
7289 static int tg3_init_rings(struct tg3 *tp)
7290 {
7291 	int i;
7292 
7293 	/* Free up all the SKBs. */
7294 	tg3_free_rings(tp);
7295 
7296 	for (i = 0; i < tp->irq_cnt; i++) {
7297 		struct tg3_napi *tnapi = &tp->napi[i];
7298 
7299 		tnapi->last_tag = 0;
7300 		tnapi->last_irq_tag = 0;
7301 		tnapi->hw_status->status = 0;
7302 		tnapi->hw_status->status_tag = 0;
7303 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7304 
7305 		tnapi->tx_prod = 0;
7306 		tnapi->tx_cons = 0;
7307 		if (tnapi->tx_ring)
7308 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7309 
7310 		tnapi->rx_rcb_ptr = 0;
7311 		if (tnapi->rx_rcb)
7312 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7313 
7314 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7315 			tg3_free_rings(tp);
7316 			return -ENOMEM;
7317 		}
7318 	}
7319 
7320 	return 0;
7321 }
7322 
7323 /*
7324  * Must not be invoked with interrupt sources disabled and
7325  * the hardware shutdown down.
7326  */
7327 static void tg3_free_consistent(struct tg3 *tp)
7328 {
7329 	int i;
7330 
7331 	for (i = 0; i < tp->irq_cnt; i++) {
7332 		struct tg3_napi *tnapi = &tp->napi[i];
7333 
7334 		if (tnapi->tx_ring) {
7335 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7336 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7337 			tnapi->tx_ring = NULL;
7338 		}
7339 
7340 		kfree(tnapi->tx_buffers);
7341 		tnapi->tx_buffers = NULL;
7342 
7343 		if (tnapi->rx_rcb) {
7344 			dma_free_coherent(&tp->pdev->dev,
7345 					  TG3_RX_RCB_RING_BYTES(tp),
7346 					  tnapi->rx_rcb,
7347 					  tnapi->rx_rcb_mapping);
7348 			tnapi->rx_rcb = NULL;
7349 		}
7350 
7351 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7352 
7353 		if (tnapi->hw_status) {
7354 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7355 					  tnapi->hw_status,
7356 					  tnapi->status_mapping);
7357 			tnapi->hw_status = NULL;
7358 		}
7359 	}
7360 
7361 	if (tp->hw_stats) {
7362 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7363 				  tp->hw_stats, tp->stats_mapping);
7364 		tp->hw_stats = NULL;
7365 	}
7366 }
7367 
7368 /*
7369  * Must not be invoked with interrupt sources disabled and
7370  * the hardware shutdown down.  Can sleep.
7371  */
7372 static int tg3_alloc_consistent(struct tg3 *tp)
7373 {
7374 	int i;
7375 
7376 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7377 					  sizeof(struct tg3_hw_stats),
7378 					  &tp->stats_mapping,
7379 					  GFP_KERNEL);
7380 	if (!tp->hw_stats)
7381 		goto err_out;
7382 
7383 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7384 
7385 	for (i = 0; i < tp->irq_cnt; i++) {
7386 		struct tg3_napi *tnapi = &tp->napi[i];
7387 		struct tg3_hw_status *sblk;
7388 
7389 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7390 						      TG3_HW_STATUS_SIZE,
7391 						      &tnapi->status_mapping,
7392 						      GFP_KERNEL);
7393 		if (!tnapi->hw_status)
7394 			goto err_out;
7395 
7396 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7397 		sblk = tnapi->hw_status;
7398 
7399 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7400 			goto err_out;
7401 
7402 		/* If multivector TSS is enabled, vector 0 does not handle
7403 		 * tx interrupts.  Don't allocate any resources for it.
7404 		 */
7405 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7406 		    (i && tg3_flag(tp, ENABLE_TSS))) {
7407 			tnapi->tx_buffers = kzalloc(
7408 					       sizeof(struct tg3_tx_ring_info) *
7409 					       TG3_TX_RING_SIZE, GFP_KERNEL);
7410 			if (!tnapi->tx_buffers)
7411 				goto err_out;
7412 
7413 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7414 							    TG3_TX_RING_BYTES,
7415 							&tnapi->tx_desc_mapping,
7416 							    GFP_KERNEL);
7417 			if (!tnapi->tx_ring)
7418 				goto err_out;
7419 		}
7420 
7421 		/*
7422 		 * When RSS is enabled, the status block format changes
7423 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7424 		 * and "rx_mini_consumer" members get mapped to the
7425 		 * other three rx return ring producer indexes.
7426 		 */
7427 		switch (i) {
7428 		default:
7429 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7430 			break;
7431 		case 2:
7432 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7433 			break;
7434 		case 3:
7435 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7436 			break;
7437 		case 4:
7438 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7439 			break;
7440 		}
7441 
7442 		/*
7443 		 * If multivector RSS is enabled, vector 0 does not handle
7444 		 * rx or tx interrupts.  Don't allocate any resources for it.
7445 		 */
7446 		if (!i && tg3_flag(tp, ENABLE_RSS))
7447 			continue;
7448 
7449 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7450 						   TG3_RX_RCB_RING_BYTES(tp),
7451 						   &tnapi->rx_rcb_mapping,
7452 						   GFP_KERNEL);
7453 		if (!tnapi->rx_rcb)
7454 			goto err_out;
7455 
7456 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7457 	}
7458 
7459 	return 0;
7460 
7461 err_out:
7462 	tg3_free_consistent(tp);
7463 	return -ENOMEM;
7464 }
7465 
7466 #define MAX_WAIT_CNT 1000
7467 
7468 /* To stop a block, clear the enable bit and poll till it
7469  * clears.  tp->lock is held.
7470  */
7471 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7472 {
7473 	unsigned int i;
7474 	u32 val;
7475 
7476 	if (tg3_flag(tp, 5705_PLUS)) {
7477 		switch (ofs) {
7478 		case RCVLSC_MODE:
7479 		case DMAC_MODE:
7480 		case MBFREE_MODE:
7481 		case BUFMGR_MODE:
7482 		case MEMARB_MODE:
7483 			/* We can't enable/disable these bits of the
7484 			 * 5705/5750, just say success.
7485 			 */
7486 			return 0;
7487 
7488 		default:
7489 			break;
7490 		}
7491 	}
7492 
7493 	val = tr32(ofs);
7494 	val &= ~enable_bit;
7495 	tw32_f(ofs, val);
7496 
7497 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7498 		udelay(100);
7499 		val = tr32(ofs);
7500 		if ((val & enable_bit) == 0)
7501 			break;
7502 	}
7503 
7504 	if (i == MAX_WAIT_CNT && !silent) {
7505 		dev_err(&tp->pdev->dev,
7506 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7507 			ofs, enable_bit);
7508 		return -ENODEV;
7509 	}
7510 
7511 	return 0;
7512 }
7513 
7514 /* tp->lock is held. */
7515 static int tg3_abort_hw(struct tg3 *tp, int silent)
7516 {
7517 	int i, err;
7518 
7519 	tg3_disable_ints(tp);
7520 
7521 	tp->rx_mode &= ~RX_MODE_ENABLE;
7522 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7523 	udelay(10);
7524 
7525 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7526 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7527 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7528 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7529 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7530 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7531 
7532 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7533 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7534 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7535 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7536 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7537 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7538 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7539 
7540 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7541 	tw32_f(MAC_MODE, tp->mac_mode);
7542 	udelay(40);
7543 
7544 	tp->tx_mode &= ~TX_MODE_ENABLE;
7545 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7546 
7547 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7548 		udelay(100);
7549 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7550 			break;
7551 	}
7552 	if (i >= MAX_WAIT_CNT) {
7553 		dev_err(&tp->pdev->dev,
7554 			"%s timed out, TX_MODE_ENABLE will not clear "
7555 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7556 		err |= -ENODEV;
7557 	}
7558 
7559 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7560 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7561 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7562 
7563 	tw32(FTQ_RESET, 0xffffffff);
7564 	tw32(FTQ_RESET, 0x00000000);
7565 
7566 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7567 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7568 
7569 	for (i = 0; i < tp->irq_cnt; i++) {
7570 		struct tg3_napi *tnapi = &tp->napi[i];
7571 		if (tnapi->hw_status)
7572 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7573 	}
7574 
7575 	return err;
7576 }
7577 
7578 /* Save PCI command register before chip reset */
7579 static void tg3_save_pci_state(struct tg3 *tp)
7580 {
7581 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7582 }
7583 
7584 /* Restore PCI state after chip reset */
7585 static void tg3_restore_pci_state(struct tg3 *tp)
7586 {
7587 	u32 val;
7588 
7589 	/* Re-enable indirect register accesses. */
7590 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7591 			       tp->misc_host_ctrl);
7592 
7593 	/* Set MAX PCI retry to zero. */
7594 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7595 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7596 	    tg3_flag(tp, PCIX_MODE))
7597 		val |= PCISTATE_RETRY_SAME_DMA;
7598 	/* Allow reads and writes to the APE register and memory space. */
7599 	if (tg3_flag(tp, ENABLE_APE))
7600 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7601 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7602 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7603 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7604 
7605 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7606 
7607 	if (!tg3_flag(tp, PCI_EXPRESS)) {
7608 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7609 				      tp->pci_cacheline_sz);
7610 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7611 				      tp->pci_lat_timer);
7612 	}
7613 
7614 	/* Make sure PCI-X relaxed ordering bit is clear. */
7615 	if (tg3_flag(tp, PCIX_MODE)) {
7616 		u16 pcix_cmd;
7617 
7618 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7619 				     &pcix_cmd);
7620 		pcix_cmd &= ~PCI_X_CMD_ERO;
7621 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7622 				      pcix_cmd);
7623 	}
7624 
7625 	if (tg3_flag(tp, 5780_CLASS)) {
7626 
7627 		/* Chip reset on 5780 will reset MSI enable bit,
7628 		 * so need to restore it.
7629 		 */
7630 		if (tg3_flag(tp, USING_MSI)) {
7631 			u16 ctrl;
7632 
7633 			pci_read_config_word(tp->pdev,
7634 					     tp->msi_cap + PCI_MSI_FLAGS,
7635 					     &ctrl);
7636 			pci_write_config_word(tp->pdev,
7637 					      tp->msi_cap + PCI_MSI_FLAGS,
7638 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7639 			val = tr32(MSGINT_MODE);
7640 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7641 		}
7642 	}
7643 }
7644 
7645 /* tp->lock is held. */
7646 static int tg3_chip_reset(struct tg3 *tp)
7647 {
7648 	u32 val;
7649 	void (*write_op)(struct tg3 *, u32, u32);
7650 	int i, err;
7651 
7652 	tg3_nvram_lock(tp);
7653 
7654 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7655 
7656 	/* No matching tg3_nvram_unlock() after this because
7657 	 * chip reset below will undo the nvram lock.
7658 	 */
7659 	tp->nvram_lock_cnt = 0;
7660 
7661 	/* GRC_MISC_CFG core clock reset will clear the memory
7662 	 * enable bit in PCI register 4 and the MSI enable bit
7663 	 * on some chips, so we save relevant registers here.
7664 	 */
7665 	tg3_save_pci_state(tp);
7666 
7667 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7668 	    tg3_flag(tp, 5755_PLUS))
7669 		tw32(GRC_FASTBOOT_PC, 0);
7670 
7671 	/*
7672 	 * We must avoid the readl() that normally takes place.
7673 	 * It locks machines, causes machine checks, and other
7674 	 * fun things.  So, temporarily disable the 5701
7675 	 * hardware workaround, while we do the reset.
7676 	 */
7677 	write_op = tp->write32;
7678 	if (write_op == tg3_write_flush_reg32)
7679 		tp->write32 = tg3_write32;
7680 
7681 	/* Prevent the irq handler from reading or writing PCI registers
7682 	 * during chip reset when the memory enable bit in the PCI command
7683 	 * register may be cleared.  The chip does not generate interrupt
7684 	 * at this time, but the irq handler may still be called due to irq
7685 	 * sharing or irqpoll.
7686 	 */
7687 	tg3_flag_set(tp, CHIP_RESETTING);
7688 	for (i = 0; i < tp->irq_cnt; i++) {
7689 		struct tg3_napi *tnapi = &tp->napi[i];
7690 		if (tnapi->hw_status) {
7691 			tnapi->hw_status->status = 0;
7692 			tnapi->hw_status->status_tag = 0;
7693 		}
7694 		tnapi->last_tag = 0;
7695 		tnapi->last_irq_tag = 0;
7696 	}
7697 	smp_mb();
7698 
7699 	for (i = 0; i < tp->irq_cnt; i++)
7700 		synchronize_irq(tp->napi[i].irq_vec);
7701 
7702 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7703 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7704 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7705 	}
7706 
7707 	/* do the reset */
7708 	val = GRC_MISC_CFG_CORECLK_RESET;
7709 
7710 	if (tg3_flag(tp, PCI_EXPRESS)) {
7711 		/* Force PCIe 1.0a mode */
7712 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7713 		    !tg3_flag(tp, 57765_PLUS) &&
7714 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7715 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7716 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7717 
7718 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7719 			tw32(GRC_MISC_CFG, (1 << 29));
7720 			val |= (1 << 29);
7721 		}
7722 	}
7723 
7724 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7725 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7726 		tw32(GRC_VCPU_EXT_CTRL,
7727 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7728 	}
7729 
7730 	/* Manage gphy power for all CPMU absent PCIe devices. */
7731 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7732 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7733 
7734 	tw32(GRC_MISC_CFG, val);
7735 
7736 	/* restore 5701 hardware bug workaround write method */
7737 	tp->write32 = write_op;
7738 
7739 	/* Unfortunately, we have to delay before the PCI read back.
7740 	 * Some 575X chips even will not respond to a PCI cfg access
7741 	 * when the reset command is given to the chip.
7742 	 *
7743 	 * How do these hardware designers expect things to work
7744 	 * properly if the PCI write is posted for a long period
7745 	 * of time?  It is always necessary to have some method by
7746 	 * which a register read back can occur to push the write
7747 	 * out which does the reset.
7748 	 *
7749 	 * For most tg3 variants the trick below was working.
7750 	 * Ho hum...
7751 	 */
7752 	udelay(120);
7753 
7754 	/* Flush PCI posted writes.  The normal MMIO registers
7755 	 * are inaccessible at this time so this is the only
7756 	 * way to make this reliably (actually, this is no longer
7757 	 * the case, see above).  I tried to use indirect
7758 	 * register read/write but this upset some 5701 variants.
7759 	 */
7760 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7761 
7762 	udelay(120);
7763 
7764 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7765 		u16 val16;
7766 
7767 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7768 			int i;
7769 			u32 cfg_val;
7770 
7771 			/* Wait for link training to complete.  */
7772 			for (i = 0; i < 5000; i++)
7773 				udelay(100);
7774 
7775 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7776 			pci_write_config_dword(tp->pdev, 0xc4,
7777 					       cfg_val | (1 << 15));
7778 		}
7779 
7780 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7781 		pci_read_config_word(tp->pdev,
7782 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7783 				     &val16);
7784 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7785 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7786 		/*
7787 		 * Older PCIe devices only support the 128 byte
7788 		 * MPS setting.  Enforce the restriction.
7789 		 */
7790 		if (!tg3_flag(tp, CPMU_PRESENT))
7791 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7792 		pci_write_config_word(tp->pdev,
7793 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7794 				      val16);
7795 
7796 		/* Clear error status */
7797 		pci_write_config_word(tp->pdev,
7798 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7799 				      PCI_EXP_DEVSTA_CED |
7800 				      PCI_EXP_DEVSTA_NFED |
7801 				      PCI_EXP_DEVSTA_FED |
7802 				      PCI_EXP_DEVSTA_URD);
7803 	}
7804 
7805 	tg3_restore_pci_state(tp);
7806 
7807 	tg3_flag_clear(tp, CHIP_RESETTING);
7808 	tg3_flag_clear(tp, ERROR_PROCESSED);
7809 
7810 	val = 0;
7811 	if (tg3_flag(tp, 5780_CLASS))
7812 		val = tr32(MEMARB_MODE);
7813 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7814 
7815 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7816 		tg3_stop_fw(tp);
7817 		tw32(0x5000, 0x400);
7818 	}
7819 
7820 	tw32(GRC_MODE, tp->grc_mode);
7821 
7822 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7823 		val = tr32(0xc4);
7824 
7825 		tw32(0xc4, val | (1 << 15));
7826 	}
7827 
7828 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7829 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7830 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7831 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7832 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7833 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7834 	}
7835 
7836 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7837 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7838 		val = tp->mac_mode;
7839 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7840 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7841 		val = tp->mac_mode;
7842 	} else
7843 		val = 0;
7844 
7845 	tw32_f(MAC_MODE, val);
7846 	udelay(40);
7847 
7848 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7849 
7850 	err = tg3_poll_fw(tp);
7851 	if (err)
7852 		return err;
7853 
7854 	tg3_mdio_start(tp);
7855 
7856 	if (tg3_flag(tp, PCI_EXPRESS) &&
7857 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7858 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7859 	    !tg3_flag(tp, 57765_PLUS)) {
7860 		val = tr32(0x7c00);
7861 
7862 		tw32(0x7c00, val | (1 << 25));
7863 	}
7864 
7865 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7866 		val = tr32(TG3_CPMU_CLCK_ORIDE);
7867 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7868 	}
7869 
7870 	/* Reprobe ASF enable state.  */
7871 	tg3_flag_clear(tp, ENABLE_ASF);
7872 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7873 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7874 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7875 		u32 nic_cfg;
7876 
7877 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7878 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7879 			tg3_flag_set(tp, ENABLE_ASF);
7880 			tp->last_event_jiffies = jiffies;
7881 			if (tg3_flag(tp, 5750_PLUS))
7882 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7883 		}
7884 	}
7885 
7886 	return 0;
7887 }
7888 
7889 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7890 						 struct rtnl_link_stats64 *);
7891 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7892 						struct tg3_ethtool_stats *);
7893 
7894 /* tp->lock is held. */
7895 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7896 {
7897 	int err;
7898 
7899 	tg3_stop_fw(tp);
7900 
7901 	tg3_write_sig_pre_reset(tp, kind);
7902 
7903 	tg3_abort_hw(tp, silent);
7904 	err = tg3_chip_reset(tp);
7905 
7906 	__tg3_set_mac_addr(tp, 0);
7907 
7908 	tg3_write_sig_legacy(tp, kind);
7909 	tg3_write_sig_post_reset(tp, kind);
7910 
7911 	if (tp->hw_stats) {
7912 		/* Save the stats across chip resets... */
7913 		tg3_get_stats64(tp->dev, &tp->net_stats_prev),
7914 		tg3_get_estats(tp, &tp->estats_prev);
7915 
7916 		/* And make sure the next sample is new data */
7917 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7918 	}
7919 
7920 	if (err)
7921 		return err;
7922 
7923 	return 0;
7924 }
7925 
7926 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7927 {
7928 	struct tg3 *tp = netdev_priv(dev);
7929 	struct sockaddr *addr = p;
7930 	int err = 0, skip_mac_1 = 0;
7931 
7932 	if (!is_valid_ether_addr(addr->sa_data))
7933 		return -EINVAL;
7934 
7935 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7936 
7937 	if (!netif_running(dev))
7938 		return 0;
7939 
7940 	if (tg3_flag(tp, ENABLE_ASF)) {
7941 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7942 
7943 		addr0_high = tr32(MAC_ADDR_0_HIGH);
7944 		addr0_low = tr32(MAC_ADDR_0_LOW);
7945 		addr1_high = tr32(MAC_ADDR_1_HIGH);
7946 		addr1_low = tr32(MAC_ADDR_1_LOW);
7947 
7948 		/* Skip MAC addr 1 if ASF is using it. */
7949 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7950 		    !(addr1_high == 0 && addr1_low == 0))
7951 			skip_mac_1 = 1;
7952 	}
7953 	spin_lock_bh(&tp->lock);
7954 	__tg3_set_mac_addr(tp, skip_mac_1);
7955 	spin_unlock_bh(&tp->lock);
7956 
7957 	return err;
7958 }
7959 
7960 /* tp->lock is held. */
7961 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7962 			   dma_addr_t mapping, u32 maxlen_flags,
7963 			   u32 nic_addr)
7964 {
7965 	tg3_write_mem(tp,
7966 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7967 		      ((u64) mapping >> 32));
7968 	tg3_write_mem(tp,
7969 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7970 		      ((u64) mapping & 0xffffffff));
7971 	tg3_write_mem(tp,
7972 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7973 		       maxlen_flags);
7974 
7975 	if (!tg3_flag(tp, 5705_PLUS))
7976 		tg3_write_mem(tp,
7977 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7978 			      nic_addr);
7979 }
7980 
7981 static void __tg3_set_rx_mode(struct net_device *);
7982 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7983 {
7984 	int i;
7985 
7986 	if (!tg3_flag(tp, ENABLE_TSS)) {
7987 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7988 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7989 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7990 	} else {
7991 		tw32(HOSTCC_TXCOL_TICKS, 0);
7992 		tw32(HOSTCC_TXMAX_FRAMES, 0);
7993 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7994 	}
7995 
7996 	if (!tg3_flag(tp, ENABLE_RSS)) {
7997 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7998 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7999 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8000 	} else {
8001 		tw32(HOSTCC_RXCOL_TICKS, 0);
8002 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8003 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8004 	}
8005 
8006 	if (!tg3_flag(tp, 5705_PLUS)) {
8007 		u32 val = ec->stats_block_coalesce_usecs;
8008 
8009 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8010 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8011 
8012 		if (!netif_carrier_ok(tp->dev))
8013 			val = 0;
8014 
8015 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8016 	}
8017 
8018 	for (i = 0; i < tp->irq_cnt - 1; i++) {
8019 		u32 reg;
8020 
8021 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8022 		tw32(reg, ec->rx_coalesce_usecs);
8023 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8024 		tw32(reg, ec->rx_max_coalesced_frames);
8025 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8026 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8027 
8028 		if (tg3_flag(tp, ENABLE_TSS)) {
8029 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8030 			tw32(reg, ec->tx_coalesce_usecs);
8031 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8032 			tw32(reg, ec->tx_max_coalesced_frames);
8033 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8034 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8035 		}
8036 	}
8037 
8038 	for (; i < tp->irq_max - 1; i++) {
8039 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8040 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8041 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8042 
8043 		if (tg3_flag(tp, ENABLE_TSS)) {
8044 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8045 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8047 		}
8048 	}
8049 }
8050 
8051 /* tp->lock is held. */
8052 static void tg3_rings_reset(struct tg3 *tp)
8053 {
8054 	int i;
8055 	u32 stblk, txrcb, rxrcb, limit;
8056 	struct tg3_napi *tnapi = &tp->napi[0];
8057 
8058 	/* Disable all transmit rings but the first. */
8059 	if (!tg3_flag(tp, 5705_PLUS))
8060 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8061 	else if (tg3_flag(tp, 5717_PLUS))
8062 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8063 	else if (tg3_flag(tp, 57765_CLASS))
8064 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8065 	else
8066 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8067 
8068 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8069 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8070 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8071 			      BDINFO_FLAGS_DISABLED);
8072 
8073 
8074 	/* Disable all receive return rings but the first. */
8075 	if (tg3_flag(tp, 5717_PLUS))
8076 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8077 	else if (!tg3_flag(tp, 5705_PLUS))
8078 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8079 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8080 		 tg3_flag(tp, 57765_CLASS))
8081 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8082 	else
8083 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8084 
8085 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8086 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8087 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8088 			      BDINFO_FLAGS_DISABLED);
8089 
8090 	/* Disable interrupts */
8091 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8092 	tp->napi[0].chk_msi_cnt = 0;
8093 	tp->napi[0].last_rx_cons = 0;
8094 	tp->napi[0].last_tx_cons = 0;
8095 
8096 	/* Zero mailbox registers. */
8097 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8098 		for (i = 1; i < tp->irq_max; i++) {
8099 			tp->napi[i].tx_prod = 0;
8100 			tp->napi[i].tx_cons = 0;
8101 			if (tg3_flag(tp, ENABLE_TSS))
8102 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8103 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8104 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8105 			tp->napi[i].chk_msi_cnt = 0;
8106 			tp->napi[i].last_rx_cons = 0;
8107 			tp->napi[i].last_tx_cons = 0;
8108 		}
8109 		if (!tg3_flag(tp, ENABLE_TSS))
8110 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8111 	} else {
8112 		tp->napi[0].tx_prod = 0;
8113 		tp->napi[0].tx_cons = 0;
8114 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8115 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8116 	}
8117 
8118 	/* Make sure the NIC-based send BD rings are disabled. */
8119 	if (!tg3_flag(tp, 5705_PLUS)) {
8120 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8121 		for (i = 0; i < 16; i++)
8122 			tw32_tx_mbox(mbox + i * 8, 0);
8123 	}
8124 
8125 	txrcb = NIC_SRAM_SEND_RCB;
8126 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8127 
8128 	/* Clear status block in ram. */
8129 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8130 
8131 	/* Set status block DMA address */
8132 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8133 	     ((u64) tnapi->status_mapping >> 32));
8134 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8135 	     ((u64) tnapi->status_mapping & 0xffffffff));
8136 
8137 	if (tnapi->tx_ring) {
8138 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8139 			       (TG3_TX_RING_SIZE <<
8140 				BDINFO_FLAGS_MAXLEN_SHIFT),
8141 			       NIC_SRAM_TX_BUFFER_DESC);
8142 		txrcb += TG3_BDINFO_SIZE;
8143 	}
8144 
8145 	if (tnapi->rx_rcb) {
8146 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8147 			       (tp->rx_ret_ring_mask + 1) <<
8148 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8149 		rxrcb += TG3_BDINFO_SIZE;
8150 	}
8151 
8152 	stblk = HOSTCC_STATBLCK_RING1;
8153 
8154 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8155 		u64 mapping = (u64)tnapi->status_mapping;
8156 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8157 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8158 
8159 		/* Clear status block in ram. */
8160 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8161 
8162 		if (tnapi->tx_ring) {
8163 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8164 				       (TG3_TX_RING_SIZE <<
8165 					BDINFO_FLAGS_MAXLEN_SHIFT),
8166 				       NIC_SRAM_TX_BUFFER_DESC);
8167 			txrcb += TG3_BDINFO_SIZE;
8168 		}
8169 
8170 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8171 			       ((tp->rx_ret_ring_mask + 1) <<
8172 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8173 
8174 		stblk += 8;
8175 		rxrcb += TG3_BDINFO_SIZE;
8176 	}
8177 }
8178 
8179 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8180 {
8181 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8182 
8183 	if (!tg3_flag(tp, 5750_PLUS) ||
8184 	    tg3_flag(tp, 5780_CLASS) ||
8185 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8186 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8187 	    tg3_flag(tp, 57765_PLUS))
8188 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8189 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8190 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8191 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8192 	else
8193 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8194 
8195 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8196 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8197 
8198 	val = min(nic_rep_thresh, host_rep_thresh);
8199 	tw32(RCVBDI_STD_THRESH, val);
8200 
8201 	if (tg3_flag(tp, 57765_PLUS))
8202 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8203 
8204 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8205 		return;
8206 
8207 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8208 
8209 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8210 
8211 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8212 	tw32(RCVBDI_JUMBO_THRESH, val);
8213 
8214 	if (tg3_flag(tp, 57765_PLUS))
8215 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8216 }
8217 
8218 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8219 {
8220 	int i;
8221 
8222 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8223 		tp->rss_ind_tbl[i] =
8224 			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8225 }
8226 
8227 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8228 {
8229 	int i;
8230 
8231 	if (!tg3_flag(tp, SUPPORT_MSIX))
8232 		return;
8233 
8234 	if (tp->irq_cnt <= 2) {
8235 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8236 		return;
8237 	}
8238 
8239 	/* Validate table against current IRQ count */
8240 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8241 		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8242 			break;
8243 	}
8244 
8245 	if (i != TG3_RSS_INDIR_TBL_SIZE)
8246 		tg3_rss_init_dflt_indir_tbl(tp);
8247 }
8248 
8249 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8250 {
8251 	int i = 0;
8252 	u32 reg = MAC_RSS_INDIR_TBL_0;
8253 
8254 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
8255 		u32 val = tp->rss_ind_tbl[i];
8256 		i++;
8257 		for (; i % 8; i++) {
8258 			val <<= 4;
8259 			val |= tp->rss_ind_tbl[i];
8260 		}
8261 		tw32(reg, val);
8262 		reg += 4;
8263 	}
8264 }
8265 
8266 /* tp->lock is held. */
8267 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8268 {
8269 	u32 val, rdmac_mode;
8270 	int i, err, limit;
8271 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8272 
8273 	tg3_disable_ints(tp);
8274 
8275 	tg3_stop_fw(tp);
8276 
8277 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8278 
8279 	if (tg3_flag(tp, INIT_COMPLETE))
8280 		tg3_abort_hw(tp, 1);
8281 
8282 	/* Enable MAC control of LPI */
8283 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8284 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8285 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8286 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8287 
8288 		tw32_f(TG3_CPMU_EEE_CTRL,
8289 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8290 
8291 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8292 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8293 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8294 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8295 
8296 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8297 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8298 
8299 		if (tg3_flag(tp, ENABLE_APE))
8300 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8301 
8302 		tw32_f(TG3_CPMU_EEE_MODE, val);
8303 
8304 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8305 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8306 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8307 
8308 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8309 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8310 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8311 	}
8312 
8313 	if (reset_phy)
8314 		tg3_phy_reset(tp);
8315 
8316 	err = tg3_chip_reset(tp);
8317 	if (err)
8318 		return err;
8319 
8320 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8321 
8322 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8323 		val = tr32(TG3_CPMU_CTRL);
8324 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8325 		tw32(TG3_CPMU_CTRL, val);
8326 
8327 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8328 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8329 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8330 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8331 
8332 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8333 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8334 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8335 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8336 
8337 		val = tr32(TG3_CPMU_HST_ACC);
8338 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8339 		val |= CPMU_HST_ACC_MACCLK_6_25;
8340 		tw32(TG3_CPMU_HST_ACC, val);
8341 	}
8342 
8343 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8344 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8345 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8346 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8347 		tw32(PCIE_PWR_MGMT_THRESH, val);
8348 
8349 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8350 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8351 
8352 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8353 
8354 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8355 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8356 	}
8357 
8358 	if (tg3_flag(tp, L1PLLPD_EN)) {
8359 		u32 grc_mode = tr32(GRC_MODE);
8360 
8361 		/* Access the lower 1K of PL PCIE block registers. */
8362 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8363 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8364 
8365 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8366 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8367 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8368 
8369 		tw32(GRC_MODE, grc_mode);
8370 	}
8371 
8372 	if (tg3_flag(tp, 57765_CLASS)) {
8373 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8374 			u32 grc_mode = tr32(GRC_MODE);
8375 
8376 			/* Access the lower 1K of PL PCIE block registers. */
8377 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8378 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8379 
8380 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8381 				   TG3_PCIE_PL_LO_PHYCTL5);
8382 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8383 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8384 
8385 			tw32(GRC_MODE, grc_mode);
8386 		}
8387 
8388 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8389 			u32 grc_mode = tr32(GRC_MODE);
8390 
8391 			/* Access the lower 1K of DL PCIE block registers. */
8392 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8393 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8394 
8395 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8396 				   TG3_PCIE_DL_LO_FTSMAX);
8397 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8398 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8399 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8400 
8401 			tw32(GRC_MODE, grc_mode);
8402 		}
8403 
8404 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8405 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8406 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8407 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8408 	}
8409 
8410 	/* This works around an issue with Athlon chipsets on
8411 	 * B3 tigon3 silicon.  This bit has no effect on any
8412 	 * other revision.  But do not set this on PCI Express
8413 	 * chips and don't even touch the clocks if the CPMU is present.
8414 	 */
8415 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8416 		if (!tg3_flag(tp, PCI_EXPRESS))
8417 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8418 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8419 	}
8420 
8421 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8422 	    tg3_flag(tp, PCIX_MODE)) {
8423 		val = tr32(TG3PCI_PCISTATE);
8424 		val |= PCISTATE_RETRY_SAME_DMA;
8425 		tw32(TG3PCI_PCISTATE, val);
8426 	}
8427 
8428 	if (tg3_flag(tp, ENABLE_APE)) {
8429 		/* Allow reads and writes to the
8430 		 * APE register and memory space.
8431 		 */
8432 		val = tr32(TG3PCI_PCISTATE);
8433 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8434 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8435 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8436 		tw32(TG3PCI_PCISTATE, val);
8437 	}
8438 
8439 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8440 		/* Enable some hw fixes.  */
8441 		val = tr32(TG3PCI_MSI_DATA);
8442 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8443 		tw32(TG3PCI_MSI_DATA, val);
8444 	}
8445 
8446 	/* Descriptor ring init may make accesses to the
8447 	 * NIC SRAM area to setup the TX descriptors, so we
8448 	 * can only do this after the hardware has been
8449 	 * successfully reset.
8450 	 */
8451 	err = tg3_init_rings(tp);
8452 	if (err)
8453 		return err;
8454 
8455 	if (tg3_flag(tp, 57765_PLUS)) {
8456 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8457 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8458 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8459 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8460 		if (!tg3_flag(tp, 57765_CLASS) &&
8461 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8462 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8463 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8464 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8465 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8466 		/* This value is determined during the probe time DMA
8467 		 * engine test, tg3_test_dma.
8468 		 */
8469 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8470 	}
8471 
8472 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8473 			  GRC_MODE_4X_NIC_SEND_RINGS |
8474 			  GRC_MODE_NO_TX_PHDR_CSUM |
8475 			  GRC_MODE_NO_RX_PHDR_CSUM);
8476 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8477 
8478 	/* Pseudo-header checksum is done by hardware logic and not
8479 	 * the offload processers, so make the chip do the pseudo-
8480 	 * header checksums on receive.  For transmit it is more
8481 	 * convenient to do the pseudo-header checksum in software
8482 	 * as Linux does that on transmit for us in all cases.
8483 	 */
8484 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8485 
8486 	tw32(GRC_MODE,
8487 	     tp->grc_mode |
8488 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8489 
8490 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
8491 	val = tr32(GRC_MISC_CFG);
8492 	val &= ~0xff;
8493 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8494 	tw32(GRC_MISC_CFG, val);
8495 
8496 	/* Initialize MBUF/DESC pool. */
8497 	if (tg3_flag(tp, 5750_PLUS)) {
8498 		/* Do nothing.  */
8499 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8500 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8501 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8502 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8503 		else
8504 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8505 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8506 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8507 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8508 		int fw_len;
8509 
8510 		fw_len = tp->fw_len;
8511 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8512 		tw32(BUFMGR_MB_POOL_ADDR,
8513 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8514 		tw32(BUFMGR_MB_POOL_SIZE,
8515 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8516 	}
8517 
8518 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8519 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8520 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8521 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8522 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8523 		tw32(BUFMGR_MB_HIGH_WATER,
8524 		     tp->bufmgr_config.mbuf_high_water);
8525 	} else {
8526 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8527 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8528 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8529 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8530 		tw32(BUFMGR_MB_HIGH_WATER,
8531 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8532 	}
8533 	tw32(BUFMGR_DMA_LOW_WATER,
8534 	     tp->bufmgr_config.dma_low_water);
8535 	tw32(BUFMGR_DMA_HIGH_WATER,
8536 	     tp->bufmgr_config.dma_high_water);
8537 
8538 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8539 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8540 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8541 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8542 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8543 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8544 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8545 	tw32(BUFMGR_MODE, val);
8546 	for (i = 0; i < 2000; i++) {
8547 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8548 			break;
8549 		udelay(10);
8550 	}
8551 	if (i >= 2000) {
8552 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8553 		return -ENODEV;
8554 	}
8555 
8556 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8557 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8558 
8559 	tg3_setup_rxbd_thresholds(tp);
8560 
8561 	/* Initialize TG3_BDINFO's at:
8562 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8563 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8564 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8565 	 *
8566 	 * like so:
8567 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8568 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8569 	 *                              ring attribute flags
8570 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8571 	 *
8572 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8573 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8574 	 *
8575 	 * The size of each ring is fixed in the firmware, but the location is
8576 	 * configurable.
8577 	 */
8578 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8579 	     ((u64) tpr->rx_std_mapping >> 32));
8580 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8581 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8582 	if (!tg3_flag(tp, 5717_PLUS))
8583 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8584 		     NIC_SRAM_RX_BUFFER_DESC);
8585 
8586 	/* Disable the mini ring */
8587 	if (!tg3_flag(tp, 5705_PLUS))
8588 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8589 		     BDINFO_FLAGS_DISABLED);
8590 
8591 	/* Program the jumbo buffer descriptor ring control
8592 	 * blocks on those devices that have them.
8593 	 */
8594 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8595 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8596 
8597 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8598 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8599 			     ((u64) tpr->rx_jmb_mapping >> 32));
8600 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8601 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8602 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8603 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8604 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8605 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8606 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8607 			    tg3_flag(tp, 57765_CLASS))
8608 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8609 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8610 		} else {
8611 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8612 			     BDINFO_FLAGS_DISABLED);
8613 		}
8614 
8615 		if (tg3_flag(tp, 57765_PLUS)) {
8616 			val = TG3_RX_STD_RING_SIZE(tp);
8617 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8618 			val |= (TG3_RX_STD_DMA_SZ << 2);
8619 		} else
8620 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8621 	} else
8622 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8623 
8624 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8625 
8626 	tpr->rx_std_prod_idx = tp->rx_pending;
8627 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8628 
8629 	tpr->rx_jmb_prod_idx =
8630 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8631 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8632 
8633 	tg3_rings_reset(tp);
8634 
8635 	/* Initialize MAC address and backoff seed. */
8636 	__tg3_set_mac_addr(tp, 0);
8637 
8638 	/* MTU + ethernet header + FCS + optional VLAN tag */
8639 	tw32(MAC_RX_MTU_SIZE,
8640 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8641 
8642 	/* The slot time is changed by tg3_setup_phy if we
8643 	 * run at gigabit with half duplex.
8644 	 */
8645 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8646 	      (6 << TX_LENGTHS_IPG_SHIFT) |
8647 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8648 
8649 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8650 		val |= tr32(MAC_TX_LENGTHS) &
8651 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8652 			TX_LENGTHS_CNT_DWN_VAL_MSK);
8653 
8654 	tw32(MAC_TX_LENGTHS, val);
8655 
8656 	/* Receive rules. */
8657 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8658 	tw32(RCVLPC_CONFIG, 0x0181);
8659 
8660 	/* Calculate RDMAC_MODE setting early, we need it to determine
8661 	 * the RCVLPC_STATE_ENABLE mask.
8662 	 */
8663 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8664 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8665 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8666 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8667 		      RDMAC_MODE_LNGREAD_ENAB);
8668 
8669 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8670 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8671 
8672 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8673 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8674 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8675 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8676 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8677 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8678 
8679 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8680 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8681 		if (tg3_flag(tp, TSO_CAPABLE) &&
8682 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8683 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8684 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8685 			   !tg3_flag(tp, IS_5788)) {
8686 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8687 		}
8688 	}
8689 
8690 	if (tg3_flag(tp, PCI_EXPRESS))
8691 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8692 
8693 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8694 		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8695 
8696 	if (tg3_flag(tp, HW_TSO_1) ||
8697 	    tg3_flag(tp, HW_TSO_2) ||
8698 	    tg3_flag(tp, HW_TSO_3))
8699 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8700 
8701 	if (tg3_flag(tp, 57765_PLUS) ||
8702 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8703 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8704 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8705 
8706 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8707 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8708 
8709 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8710 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8711 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8712 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8713 	    tg3_flag(tp, 57765_PLUS)) {
8714 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
8715 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8716 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8717 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8718 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8719 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8720 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8721 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8722 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8723 		}
8724 		tw32(TG3_RDMA_RSRVCTRL_REG,
8725 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8726 	}
8727 
8728 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8729 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8730 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8731 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8732 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8733 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8734 	}
8735 
8736 	/* Receive/send statistics. */
8737 	if (tg3_flag(tp, 5750_PLUS)) {
8738 		val = tr32(RCVLPC_STATS_ENABLE);
8739 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
8740 		tw32(RCVLPC_STATS_ENABLE, val);
8741 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8742 		   tg3_flag(tp, TSO_CAPABLE)) {
8743 		val = tr32(RCVLPC_STATS_ENABLE);
8744 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8745 		tw32(RCVLPC_STATS_ENABLE, val);
8746 	} else {
8747 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8748 	}
8749 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8750 	tw32(SNDDATAI_STATSENAB, 0xffffff);
8751 	tw32(SNDDATAI_STATSCTRL,
8752 	     (SNDDATAI_SCTRL_ENABLE |
8753 	      SNDDATAI_SCTRL_FASTUPD));
8754 
8755 	/* Setup host coalescing engine. */
8756 	tw32(HOSTCC_MODE, 0);
8757 	for (i = 0; i < 2000; i++) {
8758 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8759 			break;
8760 		udelay(10);
8761 	}
8762 
8763 	__tg3_set_coalesce(tp, &tp->coal);
8764 
8765 	if (!tg3_flag(tp, 5705_PLUS)) {
8766 		/* Status/statistics block address.  See tg3_timer,
8767 		 * the tg3_periodic_fetch_stats call there, and
8768 		 * tg3_get_stats to see how this works for 5705/5750 chips.
8769 		 */
8770 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8771 		     ((u64) tp->stats_mapping >> 32));
8772 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8773 		     ((u64) tp->stats_mapping & 0xffffffff));
8774 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8775 
8776 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8777 
8778 		/* Clear statistics and status block memory areas */
8779 		for (i = NIC_SRAM_STATS_BLK;
8780 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8781 		     i += sizeof(u32)) {
8782 			tg3_write_mem(tp, i, 0);
8783 			udelay(40);
8784 		}
8785 	}
8786 
8787 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8788 
8789 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8790 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8791 	if (!tg3_flag(tp, 5705_PLUS))
8792 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8793 
8794 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8795 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8796 		/* reset to prevent losing 1st rx packet intermittently */
8797 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8798 		udelay(10);
8799 	}
8800 
8801 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8802 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8803 			MAC_MODE_FHDE_ENABLE;
8804 	if (tg3_flag(tp, ENABLE_APE))
8805 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8806 	if (!tg3_flag(tp, 5705_PLUS) &&
8807 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8808 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8809 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8810 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8811 	udelay(40);
8812 
8813 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8814 	 * If TG3_FLAG_IS_NIC is zero, we should read the
8815 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
8816 	 * whether used as inputs or outputs, are set by boot code after
8817 	 * reset.
8818 	 */
8819 	if (!tg3_flag(tp, IS_NIC)) {
8820 		u32 gpio_mask;
8821 
8822 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8823 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8824 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8825 
8826 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8827 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8828 				     GRC_LCLCTRL_GPIO_OUTPUT3;
8829 
8830 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8831 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8832 
8833 		tp->grc_local_ctrl &= ~gpio_mask;
8834 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8835 
8836 		/* GPIO1 must be driven high for eeprom write protect */
8837 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
8838 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8839 					       GRC_LCLCTRL_GPIO_OUTPUT1);
8840 	}
8841 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8842 	udelay(100);
8843 
8844 	if (tg3_flag(tp, USING_MSIX)) {
8845 		val = tr32(MSGINT_MODE);
8846 		val |= MSGINT_MODE_ENABLE;
8847 		if (tp->irq_cnt > 1)
8848 			val |= MSGINT_MODE_MULTIVEC_EN;
8849 		if (!tg3_flag(tp, 1SHOT_MSI))
8850 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8851 		tw32(MSGINT_MODE, val);
8852 	}
8853 
8854 	if (!tg3_flag(tp, 5705_PLUS)) {
8855 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8856 		udelay(40);
8857 	}
8858 
8859 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8860 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8861 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8862 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8863 	       WDMAC_MODE_LNGREAD_ENAB);
8864 
8865 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8866 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8867 		if (tg3_flag(tp, TSO_CAPABLE) &&
8868 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8869 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8870 			/* nothing */
8871 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8872 			   !tg3_flag(tp, IS_5788)) {
8873 			val |= WDMAC_MODE_RX_ACCEL;
8874 		}
8875 	}
8876 
8877 	/* Enable host coalescing bug fix */
8878 	if (tg3_flag(tp, 5755_PLUS))
8879 		val |= WDMAC_MODE_STATUS_TAG_FIX;
8880 
8881 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8882 		val |= WDMAC_MODE_BURST_ALL_DATA;
8883 
8884 	tw32_f(WDMAC_MODE, val);
8885 	udelay(40);
8886 
8887 	if (tg3_flag(tp, PCIX_MODE)) {
8888 		u16 pcix_cmd;
8889 
8890 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8891 				     &pcix_cmd);
8892 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8893 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8894 			pcix_cmd |= PCI_X_CMD_READ_2K;
8895 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8896 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8897 			pcix_cmd |= PCI_X_CMD_READ_2K;
8898 		}
8899 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8900 				      pcix_cmd);
8901 	}
8902 
8903 	tw32_f(RDMAC_MODE, rdmac_mode);
8904 	udelay(40);
8905 
8906 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8907 	if (!tg3_flag(tp, 5705_PLUS))
8908 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8909 
8910 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8911 		tw32(SNDDATAC_MODE,
8912 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8913 	else
8914 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8915 
8916 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8917 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8918 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8919 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
8920 		val |= RCVDBDI_MODE_LRG_RING_SZ;
8921 	tw32(RCVDBDI_MODE, val);
8922 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8923 	if (tg3_flag(tp, HW_TSO_1) ||
8924 	    tg3_flag(tp, HW_TSO_2) ||
8925 	    tg3_flag(tp, HW_TSO_3))
8926 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8927 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8928 	if (tg3_flag(tp, ENABLE_TSS))
8929 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
8930 	tw32(SNDBDI_MODE, val);
8931 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8932 
8933 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8934 		err = tg3_load_5701_a0_firmware_fix(tp);
8935 		if (err)
8936 			return err;
8937 	}
8938 
8939 	if (tg3_flag(tp, TSO_CAPABLE)) {
8940 		err = tg3_load_tso_firmware(tp);
8941 		if (err)
8942 			return err;
8943 	}
8944 
8945 	tp->tx_mode = TX_MODE_ENABLE;
8946 
8947 	if (tg3_flag(tp, 5755_PLUS) ||
8948 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8949 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8950 
8951 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8952 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8953 		tp->tx_mode &= ~val;
8954 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8955 	}
8956 
8957 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8958 	udelay(100);
8959 
8960 	if (tg3_flag(tp, ENABLE_RSS)) {
8961 		tg3_rss_write_indir_tbl(tp);
8962 
8963 		/* Setup the "secret" hash key. */
8964 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8965 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8966 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8967 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8968 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8969 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8970 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8971 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8972 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8973 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8974 	}
8975 
8976 	tp->rx_mode = RX_MODE_ENABLE;
8977 	if (tg3_flag(tp, 5755_PLUS))
8978 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8979 
8980 	if (tg3_flag(tp, ENABLE_RSS))
8981 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
8982 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
8983 			       RX_MODE_RSS_IPV6_HASH_EN |
8984 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
8985 			       RX_MODE_RSS_IPV4_HASH_EN |
8986 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
8987 
8988 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8989 	udelay(10);
8990 
8991 	tw32(MAC_LED_CTRL, tp->led_ctrl);
8992 
8993 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8994 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8995 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8996 		udelay(10);
8997 	}
8998 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8999 	udelay(10);
9000 
9001 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9002 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9003 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9004 			/* Set drive transmission level to 1.2V  */
9005 			/* only if the signal pre-emphasis bit is not set  */
9006 			val = tr32(MAC_SERDES_CFG);
9007 			val &= 0xfffff000;
9008 			val |= 0x880;
9009 			tw32(MAC_SERDES_CFG, val);
9010 		}
9011 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9012 			tw32(MAC_SERDES_CFG, 0x616000);
9013 	}
9014 
9015 	/* Prevent chip from dropping frames when flow control
9016 	 * is enabled.
9017 	 */
9018 	if (tg3_flag(tp, 57765_CLASS))
9019 		val = 1;
9020 	else
9021 		val = 2;
9022 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9023 
9024 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9025 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9026 		/* Use hardware link auto-negotiation */
9027 		tg3_flag_set(tp, HW_AUTONEG);
9028 	}
9029 
9030 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9031 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9032 		u32 tmp;
9033 
9034 		tmp = tr32(SERDES_RX_CTRL);
9035 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9036 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9037 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9038 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9039 	}
9040 
9041 	if (!tg3_flag(tp, USE_PHYLIB)) {
9042 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9043 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9044 			tp->link_config.speed = tp->link_config.orig_speed;
9045 			tp->link_config.duplex = tp->link_config.orig_duplex;
9046 			tp->link_config.autoneg = tp->link_config.orig_autoneg;
9047 		}
9048 
9049 		err = tg3_setup_phy(tp, 0);
9050 		if (err)
9051 			return err;
9052 
9053 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9054 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9055 			u32 tmp;
9056 
9057 			/* Clear CRC stats. */
9058 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9059 				tg3_writephy(tp, MII_TG3_TEST1,
9060 					     tmp | MII_TG3_TEST1_CRC_EN);
9061 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9062 			}
9063 		}
9064 	}
9065 
9066 	__tg3_set_rx_mode(tp->dev);
9067 
9068 	/* Initialize receive rules. */
9069 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9070 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9071 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9072 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9073 
9074 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9075 		limit = 8;
9076 	else
9077 		limit = 16;
9078 	if (tg3_flag(tp, ENABLE_ASF))
9079 		limit -= 4;
9080 	switch (limit) {
9081 	case 16:
9082 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9083 	case 15:
9084 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9085 	case 14:
9086 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9087 	case 13:
9088 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9089 	case 12:
9090 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9091 	case 11:
9092 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9093 	case 10:
9094 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9095 	case 9:
9096 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9097 	case 8:
9098 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9099 	case 7:
9100 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9101 	case 6:
9102 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9103 	case 5:
9104 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9105 	case 4:
9106 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9107 	case 3:
9108 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9109 	case 2:
9110 	case 1:
9111 
9112 	default:
9113 		break;
9114 	}
9115 
9116 	if (tg3_flag(tp, ENABLE_APE))
9117 		/* Write our heartbeat update interval to APE. */
9118 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9119 				APE_HOST_HEARTBEAT_INT_DISABLE);
9120 
9121 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9122 
9123 	return 0;
9124 }
9125 
9126 /* Called at device open time to get the chip ready for
9127  * packet processing.  Invoked with tp->lock held.
9128  */
9129 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9130 {
9131 	tg3_switch_clocks(tp);
9132 
9133 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9134 
9135 	return tg3_reset_hw(tp, reset_phy);
9136 }
9137 
9138 #define TG3_STAT_ADD32(PSTAT, REG) \
9139 do {	u32 __val = tr32(REG); \
9140 	(PSTAT)->low += __val; \
9141 	if ((PSTAT)->low < __val) \
9142 		(PSTAT)->high += 1; \
9143 } while (0)
9144 
9145 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9146 {
9147 	struct tg3_hw_stats *sp = tp->hw_stats;
9148 
9149 	if (!netif_carrier_ok(tp->dev))
9150 		return;
9151 
9152 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9153 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9154 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9155 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9156 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9157 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9158 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9159 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9160 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9161 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9162 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9163 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9164 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9165 
9166 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9167 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9168 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9169 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9170 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9171 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9172 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9173 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9174 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9175 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9176 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9177 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9178 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9179 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9180 
9181 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9182 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9183 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9184 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9185 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9186 	} else {
9187 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9188 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9189 		if (val) {
9190 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9191 			sp->rx_discards.low += val;
9192 			if (sp->rx_discards.low < val)
9193 				sp->rx_discards.high += 1;
9194 		}
9195 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9196 	}
9197 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9198 }
9199 
9200 static void tg3_chk_missed_msi(struct tg3 *tp)
9201 {
9202 	u32 i;
9203 
9204 	for (i = 0; i < tp->irq_cnt; i++) {
9205 		struct tg3_napi *tnapi = &tp->napi[i];
9206 
9207 		if (tg3_has_work(tnapi)) {
9208 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9209 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9210 				if (tnapi->chk_msi_cnt < 1) {
9211 					tnapi->chk_msi_cnt++;
9212 					return;
9213 				}
9214 				tg3_msi(0, tnapi);
9215 			}
9216 		}
9217 		tnapi->chk_msi_cnt = 0;
9218 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9219 		tnapi->last_tx_cons = tnapi->tx_cons;
9220 	}
9221 }
9222 
9223 static void tg3_timer(unsigned long __opaque)
9224 {
9225 	struct tg3 *tp = (struct tg3 *) __opaque;
9226 
9227 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9228 		goto restart_timer;
9229 
9230 	spin_lock(&tp->lock);
9231 
9232 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9233 	    tg3_flag(tp, 57765_CLASS))
9234 		tg3_chk_missed_msi(tp);
9235 
9236 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9237 		/* All of this garbage is because when using non-tagged
9238 		 * IRQ status the mailbox/status_block protocol the chip
9239 		 * uses with the cpu is race prone.
9240 		 */
9241 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9242 			tw32(GRC_LOCAL_CTRL,
9243 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9244 		} else {
9245 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9246 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9247 		}
9248 
9249 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9250 			spin_unlock(&tp->lock);
9251 			tg3_reset_task_schedule(tp);
9252 			goto restart_timer;
9253 		}
9254 	}
9255 
9256 	/* This part only runs once per second. */
9257 	if (!--tp->timer_counter) {
9258 		if (tg3_flag(tp, 5705_PLUS))
9259 			tg3_periodic_fetch_stats(tp);
9260 
9261 		if (tp->setlpicnt && !--tp->setlpicnt)
9262 			tg3_phy_eee_enable(tp);
9263 
9264 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9265 			u32 mac_stat;
9266 			int phy_event;
9267 
9268 			mac_stat = tr32(MAC_STATUS);
9269 
9270 			phy_event = 0;
9271 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9272 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9273 					phy_event = 1;
9274 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9275 				phy_event = 1;
9276 
9277 			if (phy_event)
9278 				tg3_setup_phy(tp, 0);
9279 		} else if (tg3_flag(tp, POLL_SERDES)) {
9280 			u32 mac_stat = tr32(MAC_STATUS);
9281 			int need_setup = 0;
9282 
9283 			if (netif_carrier_ok(tp->dev) &&
9284 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9285 				need_setup = 1;
9286 			}
9287 			if (!netif_carrier_ok(tp->dev) &&
9288 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9289 					 MAC_STATUS_SIGNAL_DET))) {
9290 				need_setup = 1;
9291 			}
9292 			if (need_setup) {
9293 				if (!tp->serdes_counter) {
9294 					tw32_f(MAC_MODE,
9295 					     (tp->mac_mode &
9296 					      ~MAC_MODE_PORT_MODE_MASK));
9297 					udelay(40);
9298 					tw32_f(MAC_MODE, tp->mac_mode);
9299 					udelay(40);
9300 				}
9301 				tg3_setup_phy(tp, 0);
9302 			}
9303 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9304 			   tg3_flag(tp, 5780_CLASS)) {
9305 			tg3_serdes_parallel_detect(tp);
9306 		}
9307 
9308 		tp->timer_counter = tp->timer_multiplier;
9309 	}
9310 
9311 	/* Heartbeat is only sent once every 2 seconds.
9312 	 *
9313 	 * The heartbeat is to tell the ASF firmware that the host
9314 	 * driver is still alive.  In the event that the OS crashes,
9315 	 * ASF needs to reset the hardware to free up the FIFO space
9316 	 * that may be filled with rx packets destined for the host.
9317 	 * If the FIFO is full, ASF will no longer function properly.
9318 	 *
9319 	 * Unintended resets have been reported on real time kernels
9320 	 * where the timer doesn't run on time.  Netpoll will also have
9321 	 * same problem.
9322 	 *
9323 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9324 	 * to check the ring condition when the heartbeat is expiring
9325 	 * before doing the reset.  This will prevent most unintended
9326 	 * resets.
9327 	 */
9328 	if (!--tp->asf_counter) {
9329 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9330 			tg3_wait_for_event_ack(tp);
9331 
9332 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9333 				      FWCMD_NICDRV_ALIVE3);
9334 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9335 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9336 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9337 
9338 			tg3_generate_fw_event(tp);
9339 		}
9340 		tp->asf_counter = tp->asf_multiplier;
9341 	}
9342 
9343 	spin_unlock(&tp->lock);
9344 
9345 restart_timer:
9346 	tp->timer.expires = jiffies + tp->timer_offset;
9347 	add_timer(&tp->timer);
9348 }
9349 
9350 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9351 {
9352 	irq_handler_t fn;
9353 	unsigned long flags;
9354 	char *name;
9355 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9356 
9357 	if (tp->irq_cnt == 1)
9358 		name = tp->dev->name;
9359 	else {
9360 		name = &tnapi->irq_lbl[0];
9361 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9362 		name[IFNAMSIZ-1] = 0;
9363 	}
9364 
9365 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9366 		fn = tg3_msi;
9367 		if (tg3_flag(tp, 1SHOT_MSI))
9368 			fn = tg3_msi_1shot;
9369 		flags = 0;
9370 	} else {
9371 		fn = tg3_interrupt;
9372 		if (tg3_flag(tp, TAGGED_STATUS))
9373 			fn = tg3_interrupt_tagged;
9374 		flags = IRQF_SHARED;
9375 	}
9376 
9377 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9378 }
9379 
9380 static int tg3_test_interrupt(struct tg3 *tp)
9381 {
9382 	struct tg3_napi *tnapi = &tp->napi[0];
9383 	struct net_device *dev = tp->dev;
9384 	int err, i, intr_ok = 0;
9385 	u32 val;
9386 
9387 	if (!netif_running(dev))
9388 		return -ENODEV;
9389 
9390 	tg3_disable_ints(tp);
9391 
9392 	free_irq(tnapi->irq_vec, tnapi);
9393 
9394 	/*
9395 	 * Turn off MSI one shot mode.  Otherwise this test has no
9396 	 * observable way to know whether the interrupt was delivered.
9397 	 */
9398 	if (tg3_flag(tp, 57765_PLUS)) {
9399 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9400 		tw32(MSGINT_MODE, val);
9401 	}
9402 
9403 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9404 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9405 	if (err)
9406 		return err;
9407 
9408 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9409 	tg3_enable_ints(tp);
9410 
9411 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9412 	       tnapi->coal_now);
9413 
9414 	for (i = 0; i < 5; i++) {
9415 		u32 int_mbox, misc_host_ctrl;
9416 
9417 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9418 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9419 
9420 		if ((int_mbox != 0) ||
9421 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9422 			intr_ok = 1;
9423 			break;
9424 		}
9425 
9426 		if (tg3_flag(tp, 57765_PLUS) &&
9427 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9428 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9429 
9430 		msleep(10);
9431 	}
9432 
9433 	tg3_disable_ints(tp);
9434 
9435 	free_irq(tnapi->irq_vec, tnapi);
9436 
9437 	err = tg3_request_irq(tp, 0);
9438 
9439 	if (err)
9440 		return err;
9441 
9442 	if (intr_ok) {
9443 		/* Reenable MSI one shot mode. */
9444 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9445 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9446 			tw32(MSGINT_MODE, val);
9447 		}
9448 		return 0;
9449 	}
9450 
9451 	return -EIO;
9452 }
9453 
9454 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9455  * successfully restored
9456  */
9457 static int tg3_test_msi(struct tg3 *tp)
9458 {
9459 	int err;
9460 	u16 pci_cmd;
9461 
9462 	if (!tg3_flag(tp, USING_MSI))
9463 		return 0;
9464 
9465 	/* Turn off SERR reporting in case MSI terminates with Master
9466 	 * Abort.
9467 	 */
9468 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9469 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9470 			      pci_cmd & ~PCI_COMMAND_SERR);
9471 
9472 	err = tg3_test_interrupt(tp);
9473 
9474 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9475 
9476 	if (!err)
9477 		return 0;
9478 
9479 	/* other failures */
9480 	if (err != -EIO)
9481 		return err;
9482 
9483 	/* MSI test failed, go back to INTx mode */
9484 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9485 		    "to INTx mode. Please report this failure to the PCI "
9486 		    "maintainer and include system chipset information\n");
9487 
9488 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9489 
9490 	pci_disable_msi(tp->pdev);
9491 
9492 	tg3_flag_clear(tp, USING_MSI);
9493 	tp->napi[0].irq_vec = tp->pdev->irq;
9494 
9495 	err = tg3_request_irq(tp, 0);
9496 	if (err)
9497 		return err;
9498 
9499 	/* Need to reset the chip because the MSI cycle may have terminated
9500 	 * with Master Abort.
9501 	 */
9502 	tg3_full_lock(tp, 1);
9503 
9504 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9505 	err = tg3_init_hw(tp, 1);
9506 
9507 	tg3_full_unlock(tp);
9508 
9509 	if (err)
9510 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9511 
9512 	return err;
9513 }
9514 
9515 static int tg3_request_firmware(struct tg3 *tp)
9516 {
9517 	const __be32 *fw_data;
9518 
9519 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9520 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9521 			   tp->fw_needed);
9522 		return -ENOENT;
9523 	}
9524 
9525 	fw_data = (void *)tp->fw->data;
9526 
9527 	/* Firmware blob starts with version numbers, followed by
9528 	 * start address and _full_ length including BSS sections
9529 	 * (which must be longer than the actual data, of course
9530 	 */
9531 
9532 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9533 	if (tp->fw_len < (tp->fw->size - 12)) {
9534 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9535 			   tp->fw_len, tp->fw_needed);
9536 		release_firmware(tp->fw);
9537 		tp->fw = NULL;
9538 		return -EINVAL;
9539 	}
9540 
9541 	/* We no longer need firmware; we have it. */
9542 	tp->fw_needed = NULL;
9543 	return 0;
9544 }
9545 
9546 static bool tg3_enable_msix(struct tg3 *tp)
9547 {
9548 	int i, rc;
9549 	struct msix_entry msix_ent[tp->irq_max];
9550 
9551 	tp->irq_cnt = num_online_cpus();
9552 	if (tp->irq_cnt > 1) {
9553 		/* We want as many rx rings enabled as there are cpus.
9554 		 * In multiqueue MSI-X mode, the first MSI-X vector
9555 		 * only deals with link interrupts, etc, so we add
9556 		 * one to the number of vectors we are requesting.
9557 		 */
9558 		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9559 	}
9560 
9561 	for (i = 0; i < tp->irq_max; i++) {
9562 		msix_ent[i].entry  = i;
9563 		msix_ent[i].vector = 0;
9564 	}
9565 
9566 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9567 	if (rc < 0) {
9568 		return false;
9569 	} else if (rc != 0) {
9570 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9571 			return false;
9572 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9573 			      tp->irq_cnt, rc);
9574 		tp->irq_cnt = rc;
9575 	}
9576 
9577 	for (i = 0; i < tp->irq_max; i++)
9578 		tp->napi[i].irq_vec = msix_ent[i].vector;
9579 
9580 	netif_set_real_num_tx_queues(tp->dev, 1);
9581 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9582 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9583 		pci_disable_msix(tp->pdev);
9584 		return false;
9585 	}
9586 
9587 	if (tp->irq_cnt > 1) {
9588 		tg3_flag_set(tp, ENABLE_RSS);
9589 
9590 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9591 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9592 			tg3_flag_set(tp, ENABLE_TSS);
9593 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9594 		}
9595 	}
9596 
9597 	return true;
9598 }
9599 
9600 static void tg3_ints_init(struct tg3 *tp)
9601 {
9602 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9603 	    !tg3_flag(tp, TAGGED_STATUS)) {
9604 		/* All MSI supporting chips should support tagged
9605 		 * status.  Assert that this is the case.
9606 		 */
9607 		netdev_warn(tp->dev,
9608 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9609 		goto defcfg;
9610 	}
9611 
9612 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9613 		tg3_flag_set(tp, USING_MSIX);
9614 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9615 		tg3_flag_set(tp, USING_MSI);
9616 
9617 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9618 		u32 msi_mode = tr32(MSGINT_MODE);
9619 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9620 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9621 		if (!tg3_flag(tp, 1SHOT_MSI))
9622 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9623 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9624 	}
9625 defcfg:
9626 	if (!tg3_flag(tp, USING_MSIX)) {
9627 		tp->irq_cnt = 1;
9628 		tp->napi[0].irq_vec = tp->pdev->irq;
9629 		netif_set_real_num_tx_queues(tp->dev, 1);
9630 		netif_set_real_num_rx_queues(tp->dev, 1);
9631 	}
9632 }
9633 
9634 static void tg3_ints_fini(struct tg3 *tp)
9635 {
9636 	if (tg3_flag(tp, USING_MSIX))
9637 		pci_disable_msix(tp->pdev);
9638 	else if (tg3_flag(tp, USING_MSI))
9639 		pci_disable_msi(tp->pdev);
9640 	tg3_flag_clear(tp, USING_MSI);
9641 	tg3_flag_clear(tp, USING_MSIX);
9642 	tg3_flag_clear(tp, ENABLE_RSS);
9643 	tg3_flag_clear(tp, ENABLE_TSS);
9644 }
9645 
9646 static int tg3_open(struct net_device *dev)
9647 {
9648 	struct tg3 *tp = netdev_priv(dev);
9649 	int i, err;
9650 
9651 	if (tp->fw_needed) {
9652 		err = tg3_request_firmware(tp);
9653 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9654 			if (err)
9655 				return err;
9656 		} else if (err) {
9657 			netdev_warn(tp->dev, "TSO capability disabled\n");
9658 			tg3_flag_clear(tp, TSO_CAPABLE);
9659 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9660 			netdev_notice(tp->dev, "TSO capability restored\n");
9661 			tg3_flag_set(tp, TSO_CAPABLE);
9662 		}
9663 	}
9664 
9665 	netif_carrier_off(tp->dev);
9666 
9667 	err = tg3_power_up(tp);
9668 	if (err)
9669 		return err;
9670 
9671 	tg3_full_lock(tp, 0);
9672 
9673 	tg3_disable_ints(tp);
9674 	tg3_flag_clear(tp, INIT_COMPLETE);
9675 
9676 	tg3_full_unlock(tp);
9677 
9678 	/*
9679 	 * Setup interrupts first so we know how
9680 	 * many NAPI resources to allocate
9681 	 */
9682 	tg3_ints_init(tp);
9683 
9684 	tg3_rss_check_indir_tbl(tp);
9685 
9686 	/* The placement of this call is tied
9687 	 * to the setup and use of Host TX descriptors.
9688 	 */
9689 	err = tg3_alloc_consistent(tp);
9690 	if (err)
9691 		goto err_out1;
9692 
9693 	tg3_napi_init(tp);
9694 
9695 	tg3_napi_enable(tp);
9696 
9697 	for (i = 0; i < tp->irq_cnt; i++) {
9698 		struct tg3_napi *tnapi = &tp->napi[i];
9699 		err = tg3_request_irq(tp, i);
9700 		if (err) {
9701 			for (i--; i >= 0; i--) {
9702 				tnapi = &tp->napi[i];
9703 				free_irq(tnapi->irq_vec, tnapi);
9704 			}
9705 			goto err_out2;
9706 		}
9707 	}
9708 
9709 	tg3_full_lock(tp, 0);
9710 
9711 	err = tg3_init_hw(tp, 1);
9712 	if (err) {
9713 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9714 		tg3_free_rings(tp);
9715 	} else {
9716 		if (tg3_flag(tp, TAGGED_STATUS) &&
9717 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9718 		    !tg3_flag(tp, 57765_CLASS))
9719 			tp->timer_offset = HZ;
9720 		else
9721 			tp->timer_offset = HZ / 10;
9722 
9723 		BUG_ON(tp->timer_offset > HZ);
9724 		tp->timer_counter = tp->timer_multiplier =
9725 			(HZ / tp->timer_offset);
9726 		tp->asf_counter = tp->asf_multiplier =
9727 			((HZ / tp->timer_offset) * 2);
9728 
9729 		init_timer(&tp->timer);
9730 		tp->timer.expires = jiffies + tp->timer_offset;
9731 		tp->timer.data = (unsigned long) tp;
9732 		tp->timer.function = tg3_timer;
9733 	}
9734 
9735 	tg3_full_unlock(tp);
9736 
9737 	if (err)
9738 		goto err_out3;
9739 
9740 	if (tg3_flag(tp, USING_MSI)) {
9741 		err = tg3_test_msi(tp);
9742 
9743 		if (err) {
9744 			tg3_full_lock(tp, 0);
9745 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9746 			tg3_free_rings(tp);
9747 			tg3_full_unlock(tp);
9748 
9749 			goto err_out2;
9750 		}
9751 
9752 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9753 			u32 val = tr32(PCIE_TRANSACTION_CFG);
9754 
9755 			tw32(PCIE_TRANSACTION_CFG,
9756 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
9757 		}
9758 	}
9759 
9760 	tg3_phy_start(tp);
9761 
9762 	tg3_full_lock(tp, 0);
9763 
9764 	add_timer(&tp->timer);
9765 	tg3_flag_set(tp, INIT_COMPLETE);
9766 	tg3_enable_ints(tp);
9767 
9768 	tg3_full_unlock(tp);
9769 
9770 	netif_tx_start_all_queues(dev);
9771 
9772 	/*
9773 	 * Reset loopback feature if it was turned on while the device was down
9774 	 * make sure that it's installed properly now.
9775 	 */
9776 	if (dev->features & NETIF_F_LOOPBACK)
9777 		tg3_set_loopback(dev, dev->features);
9778 
9779 	return 0;
9780 
9781 err_out3:
9782 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9783 		struct tg3_napi *tnapi = &tp->napi[i];
9784 		free_irq(tnapi->irq_vec, tnapi);
9785 	}
9786 
9787 err_out2:
9788 	tg3_napi_disable(tp);
9789 	tg3_napi_fini(tp);
9790 	tg3_free_consistent(tp);
9791 
9792 err_out1:
9793 	tg3_ints_fini(tp);
9794 	tg3_frob_aux_power(tp, false);
9795 	pci_set_power_state(tp->pdev, PCI_D3hot);
9796 	return err;
9797 }
9798 
9799 static int tg3_close(struct net_device *dev)
9800 {
9801 	int i;
9802 	struct tg3 *tp = netdev_priv(dev);
9803 
9804 	tg3_napi_disable(tp);
9805 	tg3_reset_task_cancel(tp);
9806 
9807 	netif_tx_stop_all_queues(dev);
9808 
9809 	del_timer_sync(&tp->timer);
9810 
9811 	tg3_phy_stop(tp);
9812 
9813 	tg3_full_lock(tp, 1);
9814 
9815 	tg3_disable_ints(tp);
9816 
9817 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9818 	tg3_free_rings(tp);
9819 	tg3_flag_clear(tp, INIT_COMPLETE);
9820 
9821 	tg3_full_unlock(tp);
9822 
9823 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9824 		struct tg3_napi *tnapi = &tp->napi[i];
9825 		free_irq(tnapi->irq_vec, tnapi);
9826 	}
9827 
9828 	tg3_ints_fini(tp);
9829 
9830 	/* Clear stats across close / open calls */
9831 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9832 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9833 
9834 	tg3_napi_fini(tp);
9835 
9836 	tg3_free_consistent(tp);
9837 
9838 	tg3_power_down(tp);
9839 
9840 	netif_carrier_off(tp->dev);
9841 
9842 	return 0;
9843 }
9844 
9845 static inline u64 get_stat64(tg3_stat64_t *val)
9846 {
9847        return ((u64)val->high << 32) | ((u64)val->low);
9848 }
9849 
9850 static u64 calc_crc_errors(struct tg3 *tp)
9851 {
9852 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9853 
9854 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9855 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9856 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9857 		u32 val;
9858 
9859 		spin_lock_bh(&tp->lock);
9860 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9861 			tg3_writephy(tp, MII_TG3_TEST1,
9862 				     val | MII_TG3_TEST1_CRC_EN);
9863 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9864 		} else
9865 			val = 0;
9866 		spin_unlock_bh(&tp->lock);
9867 
9868 		tp->phy_crc_errors += val;
9869 
9870 		return tp->phy_crc_errors;
9871 	}
9872 
9873 	return get_stat64(&hw_stats->rx_fcs_errors);
9874 }
9875 
9876 #define ESTAT_ADD(member) \
9877 	estats->member =	old_estats->member + \
9878 				get_stat64(&hw_stats->member)
9879 
9880 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
9881 					       struct tg3_ethtool_stats *estats)
9882 {
9883 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9884 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9885 
9886 	if (!hw_stats)
9887 		return old_estats;
9888 
9889 	ESTAT_ADD(rx_octets);
9890 	ESTAT_ADD(rx_fragments);
9891 	ESTAT_ADD(rx_ucast_packets);
9892 	ESTAT_ADD(rx_mcast_packets);
9893 	ESTAT_ADD(rx_bcast_packets);
9894 	ESTAT_ADD(rx_fcs_errors);
9895 	ESTAT_ADD(rx_align_errors);
9896 	ESTAT_ADD(rx_xon_pause_rcvd);
9897 	ESTAT_ADD(rx_xoff_pause_rcvd);
9898 	ESTAT_ADD(rx_mac_ctrl_rcvd);
9899 	ESTAT_ADD(rx_xoff_entered);
9900 	ESTAT_ADD(rx_frame_too_long_errors);
9901 	ESTAT_ADD(rx_jabbers);
9902 	ESTAT_ADD(rx_undersize_packets);
9903 	ESTAT_ADD(rx_in_length_errors);
9904 	ESTAT_ADD(rx_out_length_errors);
9905 	ESTAT_ADD(rx_64_or_less_octet_packets);
9906 	ESTAT_ADD(rx_65_to_127_octet_packets);
9907 	ESTAT_ADD(rx_128_to_255_octet_packets);
9908 	ESTAT_ADD(rx_256_to_511_octet_packets);
9909 	ESTAT_ADD(rx_512_to_1023_octet_packets);
9910 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9911 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9912 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9913 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9914 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
9915 
9916 	ESTAT_ADD(tx_octets);
9917 	ESTAT_ADD(tx_collisions);
9918 	ESTAT_ADD(tx_xon_sent);
9919 	ESTAT_ADD(tx_xoff_sent);
9920 	ESTAT_ADD(tx_flow_control);
9921 	ESTAT_ADD(tx_mac_errors);
9922 	ESTAT_ADD(tx_single_collisions);
9923 	ESTAT_ADD(tx_mult_collisions);
9924 	ESTAT_ADD(tx_deferred);
9925 	ESTAT_ADD(tx_excessive_collisions);
9926 	ESTAT_ADD(tx_late_collisions);
9927 	ESTAT_ADD(tx_collide_2times);
9928 	ESTAT_ADD(tx_collide_3times);
9929 	ESTAT_ADD(tx_collide_4times);
9930 	ESTAT_ADD(tx_collide_5times);
9931 	ESTAT_ADD(tx_collide_6times);
9932 	ESTAT_ADD(tx_collide_7times);
9933 	ESTAT_ADD(tx_collide_8times);
9934 	ESTAT_ADD(tx_collide_9times);
9935 	ESTAT_ADD(tx_collide_10times);
9936 	ESTAT_ADD(tx_collide_11times);
9937 	ESTAT_ADD(tx_collide_12times);
9938 	ESTAT_ADD(tx_collide_13times);
9939 	ESTAT_ADD(tx_collide_14times);
9940 	ESTAT_ADD(tx_collide_15times);
9941 	ESTAT_ADD(tx_ucast_packets);
9942 	ESTAT_ADD(tx_mcast_packets);
9943 	ESTAT_ADD(tx_bcast_packets);
9944 	ESTAT_ADD(tx_carrier_sense_errors);
9945 	ESTAT_ADD(tx_discards);
9946 	ESTAT_ADD(tx_errors);
9947 
9948 	ESTAT_ADD(dma_writeq_full);
9949 	ESTAT_ADD(dma_write_prioq_full);
9950 	ESTAT_ADD(rxbds_empty);
9951 	ESTAT_ADD(rx_discards);
9952 	ESTAT_ADD(rx_errors);
9953 	ESTAT_ADD(rx_threshold_hit);
9954 
9955 	ESTAT_ADD(dma_readq_full);
9956 	ESTAT_ADD(dma_read_prioq_full);
9957 	ESTAT_ADD(tx_comp_queue_full);
9958 
9959 	ESTAT_ADD(ring_set_send_prod_index);
9960 	ESTAT_ADD(ring_status_update);
9961 	ESTAT_ADD(nic_irqs);
9962 	ESTAT_ADD(nic_avoided_irqs);
9963 	ESTAT_ADD(nic_tx_threshold_hit);
9964 
9965 	ESTAT_ADD(mbuf_lwm_thresh_hit);
9966 
9967 	return estats;
9968 }
9969 
9970 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9971 						 struct rtnl_link_stats64 *stats)
9972 {
9973 	struct tg3 *tp = netdev_priv(dev);
9974 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9975 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9976 
9977 	if (!hw_stats)
9978 		return old_stats;
9979 
9980 	stats->rx_packets = old_stats->rx_packets +
9981 		get_stat64(&hw_stats->rx_ucast_packets) +
9982 		get_stat64(&hw_stats->rx_mcast_packets) +
9983 		get_stat64(&hw_stats->rx_bcast_packets);
9984 
9985 	stats->tx_packets = old_stats->tx_packets +
9986 		get_stat64(&hw_stats->tx_ucast_packets) +
9987 		get_stat64(&hw_stats->tx_mcast_packets) +
9988 		get_stat64(&hw_stats->tx_bcast_packets);
9989 
9990 	stats->rx_bytes = old_stats->rx_bytes +
9991 		get_stat64(&hw_stats->rx_octets);
9992 	stats->tx_bytes = old_stats->tx_bytes +
9993 		get_stat64(&hw_stats->tx_octets);
9994 
9995 	stats->rx_errors = old_stats->rx_errors +
9996 		get_stat64(&hw_stats->rx_errors);
9997 	stats->tx_errors = old_stats->tx_errors +
9998 		get_stat64(&hw_stats->tx_errors) +
9999 		get_stat64(&hw_stats->tx_mac_errors) +
10000 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10001 		get_stat64(&hw_stats->tx_discards);
10002 
10003 	stats->multicast = old_stats->multicast +
10004 		get_stat64(&hw_stats->rx_mcast_packets);
10005 	stats->collisions = old_stats->collisions +
10006 		get_stat64(&hw_stats->tx_collisions);
10007 
10008 	stats->rx_length_errors = old_stats->rx_length_errors +
10009 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10010 		get_stat64(&hw_stats->rx_undersize_packets);
10011 
10012 	stats->rx_over_errors = old_stats->rx_over_errors +
10013 		get_stat64(&hw_stats->rxbds_empty);
10014 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10015 		get_stat64(&hw_stats->rx_align_errors);
10016 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10017 		get_stat64(&hw_stats->tx_discards);
10018 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10019 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10020 
10021 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10022 		calc_crc_errors(tp);
10023 
10024 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10025 		get_stat64(&hw_stats->rx_discards);
10026 
10027 	stats->rx_dropped = tp->rx_dropped;
10028 	stats->tx_dropped = tp->tx_dropped;
10029 
10030 	return stats;
10031 }
10032 
10033 static inline u32 calc_crc(unsigned char *buf, int len)
10034 {
10035 	u32 reg;
10036 	u32 tmp;
10037 	int j, k;
10038 
10039 	reg = 0xffffffff;
10040 
10041 	for (j = 0; j < len; j++) {
10042 		reg ^= buf[j];
10043 
10044 		for (k = 0; k < 8; k++) {
10045 			tmp = reg & 0x01;
10046 
10047 			reg >>= 1;
10048 
10049 			if (tmp)
10050 				reg ^= 0xedb88320;
10051 		}
10052 	}
10053 
10054 	return ~reg;
10055 }
10056 
10057 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10058 {
10059 	/* accept or reject all multicast frames */
10060 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10061 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10062 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10063 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10064 }
10065 
10066 static void __tg3_set_rx_mode(struct net_device *dev)
10067 {
10068 	struct tg3 *tp = netdev_priv(dev);
10069 	u32 rx_mode;
10070 
10071 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10072 				  RX_MODE_KEEP_VLAN_TAG);
10073 
10074 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10075 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10076 	 * flag clear.
10077 	 */
10078 	if (!tg3_flag(tp, ENABLE_ASF))
10079 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10080 #endif
10081 
10082 	if (dev->flags & IFF_PROMISC) {
10083 		/* Promiscuous mode. */
10084 		rx_mode |= RX_MODE_PROMISC;
10085 	} else if (dev->flags & IFF_ALLMULTI) {
10086 		/* Accept all multicast. */
10087 		tg3_set_multi(tp, 1);
10088 	} else if (netdev_mc_empty(dev)) {
10089 		/* Reject all multicast. */
10090 		tg3_set_multi(tp, 0);
10091 	} else {
10092 		/* Accept one or more multicast(s). */
10093 		struct netdev_hw_addr *ha;
10094 		u32 mc_filter[4] = { 0, };
10095 		u32 regidx;
10096 		u32 bit;
10097 		u32 crc;
10098 
10099 		netdev_for_each_mc_addr(ha, dev) {
10100 			crc = calc_crc(ha->addr, ETH_ALEN);
10101 			bit = ~crc & 0x7f;
10102 			regidx = (bit & 0x60) >> 5;
10103 			bit &= 0x1f;
10104 			mc_filter[regidx] |= (1 << bit);
10105 		}
10106 
10107 		tw32(MAC_HASH_REG_0, mc_filter[0]);
10108 		tw32(MAC_HASH_REG_1, mc_filter[1]);
10109 		tw32(MAC_HASH_REG_2, mc_filter[2]);
10110 		tw32(MAC_HASH_REG_3, mc_filter[3]);
10111 	}
10112 
10113 	if (rx_mode != tp->rx_mode) {
10114 		tp->rx_mode = rx_mode;
10115 		tw32_f(MAC_RX_MODE, rx_mode);
10116 		udelay(10);
10117 	}
10118 }
10119 
10120 static void tg3_set_rx_mode(struct net_device *dev)
10121 {
10122 	struct tg3 *tp = netdev_priv(dev);
10123 
10124 	if (!netif_running(dev))
10125 		return;
10126 
10127 	tg3_full_lock(tp, 0);
10128 	__tg3_set_rx_mode(dev);
10129 	tg3_full_unlock(tp);
10130 }
10131 
10132 static int tg3_get_regs_len(struct net_device *dev)
10133 {
10134 	return TG3_REG_BLK_SIZE;
10135 }
10136 
10137 static void tg3_get_regs(struct net_device *dev,
10138 		struct ethtool_regs *regs, void *_p)
10139 {
10140 	struct tg3 *tp = netdev_priv(dev);
10141 
10142 	regs->version = 0;
10143 
10144 	memset(_p, 0, TG3_REG_BLK_SIZE);
10145 
10146 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10147 		return;
10148 
10149 	tg3_full_lock(tp, 0);
10150 
10151 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10152 
10153 	tg3_full_unlock(tp);
10154 }
10155 
10156 static int tg3_get_eeprom_len(struct net_device *dev)
10157 {
10158 	struct tg3 *tp = netdev_priv(dev);
10159 
10160 	return tp->nvram_size;
10161 }
10162 
10163 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10164 {
10165 	struct tg3 *tp = netdev_priv(dev);
10166 	int ret;
10167 	u8  *pd;
10168 	u32 i, offset, len, b_offset, b_count;
10169 	__be32 val;
10170 
10171 	if (tg3_flag(tp, NO_NVRAM))
10172 		return -EINVAL;
10173 
10174 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10175 		return -EAGAIN;
10176 
10177 	offset = eeprom->offset;
10178 	len = eeprom->len;
10179 	eeprom->len = 0;
10180 
10181 	eeprom->magic = TG3_EEPROM_MAGIC;
10182 
10183 	if (offset & 3) {
10184 		/* adjustments to start on required 4 byte boundary */
10185 		b_offset = offset & 3;
10186 		b_count = 4 - b_offset;
10187 		if (b_count > len) {
10188 			/* i.e. offset=1 len=2 */
10189 			b_count = len;
10190 		}
10191 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10192 		if (ret)
10193 			return ret;
10194 		memcpy(data, ((char *)&val) + b_offset, b_count);
10195 		len -= b_count;
10196 		offset += b_count;
10197 		eeprom->len += b_count;
10198 	}
10199 
10200 	/* read bytes up to the last 4 byte boundary */
10201 	pd = &data[eeprom->len];
10202 	for (i = 0; i < (len - (len & 3)); i += 4) {
10203 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10204 		if (ret) {
10205 			eeprom->len += i;
10206 			return ret;
10207 		}
10208 		memcpy(pd + i, &val, 4);
10209 	}
10210 	eeprom->len += i;
10211 
10212 	if (len & 3) {
10213 		/* read last bytes not ending on 4 byte boundary */
10214 		pd = &data[eeprom->len];
10215 		b_count = len & 3;
10216 		b_offset = offset + len - b_count;
10217 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10218 		if (ret)
10219 			return ret;
10220 		memcpy(pd, &val, b_count);
10221 		eeprom->len += b_count;
10222 	}
10223 	return 0;
10224 }
10225 
10226 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10227 
10228 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10229 {
10230 	struct tg3 *tp = netdev_priv(dev);
10231 	int ret;
10232 	u32 offset, len, b_offset, odd_len;
10233 	u8 *buf;
10234 	__be32 start, end;
10235 
10236 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10237 		return -EAGAIN;
10238 
10239 	if (tg3_flag(tp, NO_NVRAM) ||
10240 	    eeprom->magic != TG3_EEPROM_MAGIC)
10241 		return -EINVAL;
10242 
10243 	offset = eeprom->offset;
10244 	len = eeprom->len;
10245 
10246 	if ((b_offset = (offset & 3))) {
10247 		/* adjustments to start on required 4 byte boundary */
10248 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10249 		if (ret)
10250 			return ret;
10251 		len += b_offset;
10252 		offset &= ~3;
10253 		if (len < 4)
10254 			len = 4;
10255 	}
10256 
10257 	odd_len = 0;
10258 	if (len & 3) {
10259 		/* adjustments to end on required 4 byte boundary */
10260 		odd_len = 1;
10261 		len = (len + 3) & ~3;
10262 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10263 		if (ret)
10264 			return ret;
10265 	}
10266 
10267 	buf = data;
10268 	if (b_offset || odd_len) {
10269 		buf = kmalloc(len, GFP_KERNEL);
10270 		if (!buf)
10271 			return -ENOMEM;
10272 		if (b_offset)
10273 			memcpy(buf, &start, 4);
10274 		if (odd_len)
10275 			memcpy(buf+len-4, &end, 4);
10276 		memcpy(buf + b_offset, data, eeprom->len);
10277 	}
10278 
10279 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10280 
10281 	if (buf != data)
10282 		kfree(buf);
10283 
10284 	return ret;
10285 }
10286 
10287 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10288 {
10289 	struct tg3 *tp = netdev_priv(dev);
10290 
10291 	if (tg3_flag(tp, USE_PHYLIB)) {
10292 		struct phy_device *phydev;
10293 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10294 			return -EAGAIN;
10295 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10296 		return phy_ethtool_gset(phydev, cmd);
10297 	}
10298 
10299 	cmd->supported = (SUPPORTED_Autoneg);
10300 
10301 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10302 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10303 				   SUPPORTED_1000baseT_Full);
10304 
10305 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10306 		cmd->supported |= (SUPPORTED_100baseT_Half |
10307 				  SUPPORTED_100baseT_Full |
10308 				  SUPPORTED_10baseT_Half |
10309 				  SUPPORTED_10baseT_Full |
10310 				  SUPPORTED_TP);
10311 		cmd->port = PORT_TP;
10312 	} else {
10313 		cmd->supported |= SUPPORTED_FIBRE;
10314 		cmd->port = PORT_FIBRE;
10315 	}
10316 
10317 	cmd->advertising = tp->link_config.advertising;
10318 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10319 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10320 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10321 				cmd->advertising |= ADVERTISED_Pause;
10322 			} else {
10323 				cmd->advertising |= ADVERTISED_Pause |
10324 						    ADVERTISED_Asym_Pause;
10325 			}
10326 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10327 			cmd->advertising |= ADVERTISED_Asym_Pause;
10328 		}
10329 	}
10330 	if (netif_running(dev) && netif_carrier_ok(dev)) {
10331 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10332 		cmd->duplex = tp->link_config.active_duplex;
10333 		cmd->lp_advertising = tp->link_config.rmt_adv;
10334 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10335 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10336 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10337 			else
10338 				cmd->eth_tp_mdix = ETH_TP_MDI;
10339 		}
10340 	} else {
10341 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10342 		cmd->duplex = DUPLEX_INVALID;
10343 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10344 	}
10345 	cmd->phy_address = tp->phy_addr;
10346 	cmd->transceiver = XCVR_INTERNAL;
10347 	cmd->autoneg = tp->link_config.autoneg;
10348 	cmd->maxtxpkt = 0;
10349 	cmd->maxrxpkt = 0;
10350 	return 0;
10351 }
10352 
10353 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10354 {
10355 	struct tg3 *tp = netdev_priv(dev);
10356 	u32 speed = ethtool_cmd_speed(cmd);
10357 
10358 	if (tg3_flag(tp, USE_PHYLIB)) {
10359 		struct phy_device *phydev;
10360 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10361 			return -EAGAIN;
10362 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10363 		return phy_ethtool_sset(phydev, cmd);
10364 	}
10365 
10366 	if (cmd->autoneg != AUTONEG_ENABLE &&
10367 	    cmd->autoneg != AUTONEG_DISABLE)
10368 		return -EINVAL;
10369 
10370 	if (cmd->autoneg == AUTONEG_DISABLE &&
10371 	    cmd->duplex != DUPLEX_FULL &&
10372 	    cmd->duplex != DUPLEX_HALF)
10373 		return -EINVAL;
10374 
10375 	if (cmd->autoneg == AUTONEG_ENABLE) {
10376 		u32 mask = ADVERTISED_Autoneg |
10377 			   ADVERTISED_Pause |
10378 			   ADVERTISED_Asym_Pause;
10379 
10380 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10381 			mask |= ADVERTISED_1000baseT_Half |
10382 				ADVERTISED_1000baseT_Full;
10383 
10384 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10385 			mask |= ADVERTISED_100baseT_Half |
10386 				ADVERTISED_100baseT_Full |
10387 				ADVERTISED_10baseT_Half |
10388 				ADVERTISED_10baseT_Full |
10389 				ADVERTISED_TP;
10390 		else
10391 			mask |= ADVERTISED_FIBRE;
10392 
10393 		if (cmd->advertising & ~mask)
10394 			return -EINVAL;
10395 
10396 		mask &= (ADVERTISED_1000baseT_Half |
10397 			 ADVERTISED_1000baseT_Full |
10398 			 ADVERTISED_100baseT_Half |
10399 			 ADVERTISED_100baseT_Full |
10400 			 ADVERTISED_10baseT_Half |
10401 			 ADVERTISED_10baseT_Full);
10402 
10403 		cmd->advertising &= mask;
10404 	} else {
10405 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10406 			if (speed != SPEED_1000)
10407 				return -EINVAL;
10408 
10409 			if (cmd->duplex != DUPLEX_FULL)
10410 				return -EINVAL;
10411 		} else {
10412 			if (speed != SPEED_100 &&
10413 			    speed != SPEED_10)
10414 				return -EINVAL;
10415 		}
10416 	}
10417 
10418 	tg3_full_lock(tp, 0);
10419 
10420 	tp->link_config.autoneg = cmd->autoneg;
10421 	if (cmd->autoneg == AUTONEG_ENABLE) {
10422 		tp->link_config.advertising = (cmd->advertising |
10423 					      ADVERTISED_Autoneg);
10424 		tp->link_config.speed = SPEED_INVALID;
10425 		tp->link_config.duplex = DUPLEX_INVALID;
10426 	} else {
10427 		tp->link_config.advertising = 0;
10428 		tp->link_config.speed = speed;
10429 		tp->link_config.duplex = cmd->duplex;
10430 	}
10431 
10432 	tp->link_config.orig_speed = tp->link_config.speed;
10433 	tp->link_config.orig_duplex = tp->link_config.duplex;
10434 	tp->link_config.orig_autoneg = tp->link_config.autoneg;
10435 
10436 	if (netif_running(dev))
10437 		tg3_setup_phy(tp, 1);
10438 
10439 	tg3_full_unlock(tp);
10440 
10441 	return 0;
10442 }
10443 
10444 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10445 {
10446 	struct tg3 *tp = netdev_priv(dev);
10447 
10448 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10449 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10450 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10451 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10452 }
10453 
10454 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10455 {
10456 	struct tg3 *tp = netdev_priv(dev);
10457 
10458 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10459 		wol->supported = WAKE_MAGIC;
10460 	else
10461 		wol->supported = 0;
10462 	wol->wolopts = 0;
10463 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10464 		wol->wolopts = WAKE_MAGIC;
10465 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10466 }
10467 
10468 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10469 {
10470 	struct tg3 *tp = netdev_priv(dev);
10471 	struct device *dp = &tp->pdev->dev;
10472 
10473 	if (wol->wolopts & ~WAKE_MAGIC)
10474 		return -EINVAL;
10475 	if ((wol->wolopts & WAKE_MAGIC) &&
10476 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10477 		return -EINVAL;
10478 
10479 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10480 
10481 	spin_lock_bh(&tp->lock);
10482 	if (device_may_wakeup(dp))
10483 		tg3_flag_set(tp, WOL_ENABLE);
10484 	else
10485 		tg3_flag_clear(tp, WOL_ENABLE);
10486 	spin_unlock_bh(&tp->lock);
10487 
10488 	return 0;
10489 }
10490 
10491 static u32 tg3_get_msglevel(struct net_device *dev)
10492 {
10493 	struct tg3 *tp = netdev_priv(dev);
10494 	return tp->msg_enable;
10495 }
10496 
10497 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10498 {
10499 	struct tg3 *tp = netdev_priv(dev);
10500 	tp->msg_enable = value;
10501 }
10502 
10503 static int tg3_nway_reset(struct net_device *dev)
10504 {
10505 	struct tg3 *tp = netdev_priv(dev);
10506 	int r;
10507 
10508 	if (!netif_running(dev))
10509 		return -EAGAIN;
10510 
10511 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10512 		return -EINVAL;
10513 
10514 	if (tg3_flag(tp, USE_PHYLIB)) {
10515 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10516 			return -EAGAIN;
10517 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10518 	} else {
10519 		u32 bmcr;
10520 
10521 		spin_lock_bh(&tp->lock);
10522 		r = -EINVAL;
10523 		tg3_readphy(tp, MII_BMCR, &bmcr);
10524 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10525 		    ((bmcr & BMCR_ANENABLE) ||
10526 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10527 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10528 						   BMCR_ANENABLE);
10529 			r = 0;
10530 		}
10531 		spin_unlock_bh(&tp->lock);
10532 	}
10533 
10534 	return r;
10535 }
10536 
10537 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10538 {
10539 	struct tg3 *tp = netdev_priv(dev);
10540 
10541 	ering->rx_max_pending = tp->rx_std_ring_mask;
10542 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10543 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10544 	else
10545 		ering->rx_jumbo_max_pending = 0;
10546 
10547 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10548 
10549 	ering->rx_pending = tp->rx_pending;
10550 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10551 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10552 	else
10553 		ering->rx_jumbo_pending = 0;
10554 
10555 	ering->tx_pending = tp->napi[0].tx_pending;
10556 }
10557 
10558 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10559 {
10560 	struct tg3 *tp = netdev_priv(dev);
10561 	int i, irq_sync = 0, err = 0;
10562 
10563 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10564 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10565 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10566 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10567 	    (tg3_flag(tp, TSO_BUG) &&
10568 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10569 		return -EINVAL;
10570 
10571 	if (netif_running(dev)) {
10572 		tg3_phy_stop(tp);
10573 		tg3_netif_stop(tp);
10574 		irq_sync = 1;
10575 	}
10576 
10577 	tg3_full_lock(tp, irq_sync);
10578 
10579 	tp->rx_pending = ering->rx_pending;
10580 
10581 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10582 	    tp->rx_pending > 63)
10583 		tp->rx_pending = 63;
10584 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10585 
10586 	for (i = 0; i < tp->irq_max; i++)
10587 		tp->napi[i].tx_pending = ering->tx_pending;
10588 
10589 	if (netif_running(dev)) {
10590 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10591 		err = tg3_restart_hw(tp, 1);
10592 		if (!err)
10593 			tg3_netif_start(tp);
10594 	}
10595 
10596 	tg3_full_unlock(tp);
10597 
10598 	if (irq_sync && !err)
10599 		tg3_phy_start(tp);
10600 
10601 	return err;
10602 }
10603 
10604 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10605 {
10606 	struct tg3 *tp = netdev_priv(dev);
10607 
10608 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10609 
10610 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10611 		epause->rx_pause = 1;
10612 	else
10613 		epause->rx_pause = 0;
10614 
10615 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10616 		epause->tx_pause = 1;
10617 	else
10618 		epause->tx_pause = 0;
10619 }
10620 
10621 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10622 {
10623 	struct tg3 *tp = netdev_priv(dev);
10624 	int err = 0;
10625 
10626 	if (tg3_flag(tp, USE_PHYLIB)) {
10627 		u32 newadv;
10628 		struct phy_device *phydev;
10629 
10630 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10631 
10632 		if (!(phydev->supported & SUPPORTED_Pause) ||
10633 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10634 		     (epause->rx_pause != epause->tx_pause)))
10635 			return -EINVAL;
10636 
10637 		tp->link_config.flowctrl = 0;
10638 		if (epause->rx_pause) {
10639 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10640 
10641 			if (epause->tx_pause) {
10642 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10643 				newadv = ADVERTISED_Pause;
10644 			} else
10645 				newadv = ADVERTISED_Pause |
10646 					 ADVERTISED_Asym_Pause;
10647 		} else if (epause->tx_pause) {
10648 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10649 			newadv = ADVERTISED_Asym_Pause;
10650 		} else
10651 			newadv = 0;
10652 
10653 		if (epause->autoneg)
10654 			tg3_flag_set(tp, PAUSE_AUTONEG);
10655 		else
10656 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10657 
10658 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10659 			u32 oldadv = phydev->advertising &
10660 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10661 			if (oldadv != newadv) {
10662 				phydev->advertising &=
10663 					~(ADVERTISED_Pause |
10664 					  ADVERTISED_Asym_Pause);
10665 				phydev->advertising |= newadv;
10666 				if (phydev->autoneg) {
10667 					/*
10668 					 * Always renegotiate the link to
10669 					 * inform our link partner of our
10670 					 * flow control settings, even if the
10671 					 * flow control is forced.  Let
10672 					 * tg3_adjust_link() do the final
10673 					 * flow control setup.
10674 					 */
10675 					return phy_start_aneg(phydev);
10676 				}
10677 			}
10678 
10679 			if (!epause->autoneg)
10680 				tg3_setup_flow_control(tp, 0, 0);
10681 		} else {
10682 			tp->link_config.orig_advertising &=
10683 					~(ADVERTISED_Pause |
10684 					  ADVERTISED_Asym_Pause);
10685 			tp->link_config.orig_advertising |= newadv;
10686 		}
10687 	} else {
10688 		int irq_sync = 0;
10689 
10690 		if (netif_running(dev)) {
10691 			tg3_netif_stop(tp);
10692 			irq_sync = 1;
10693 		}
10694 
10695 		tg3_full_lock(tp, irq_sync);
10696 
10697 		if (epause->autoneg)
10698 			tg3_flag_set(tp, PAUSE_AUTONEG);
10699 		else
10700 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10701 		if (epause->rx_pause)
10702 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10703 		else
10704 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10705 		if (epause->tx_pause)
10706 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10707 		else
10708 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10709 
10710 		if (netif_running(dev)) {
10711 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10712 			err = tg3_restart_hw(tp, 1);
10713 			if (!err)
10714 				tg3_netif_start(tp);
10715 		}
10716 
10717 		tg3_full_unlock(tp);
10718 	}
10719 
10720 	return err;
10721 }
10722 
10723 static int tg3_get_sset_count(struct net_device *dev, int sset)
10724 {
10725 	switch (sset) {
10726 	case ETH_SS_TEST:
10727 		return TG3_NUM_TEST;
10728 	case ETH_SS_STATS:
10729 		return TG3_NUM_STATS;
10730 	default:
10731 		return -EOPNOTSUPP;
10732 	}
10733 }
10734 
10735 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10736 			 u32 *rules __always_unused)
10737 {
10738 	struct tg3 *tp = netdev_priv(dev);
10739 
10740 	if (!tg3_flag(tp, SUPPORT_MSIX))
10741 		return -EOPNOTSUPP;
10742 
10743 	switch (info->cmd) {
10744 	case ETHTOOL_GRXRINGS:
10745 		if (netif_running(tp->dev))
10746 			info->data = tp->irq_cnt;
10747 		else {
10748 			info->data = num_online_cpus();
10749 			if (info->data > TG3_IRQ_MAX_VECS_RSS)
10750 				info->data = TG3_IRQ_MAX_VECS_RSS;
10751 		}
10752 
10753 		/* The first interrupt vector only
10754 		 * handles link interrupts.
10755 		 */
10756 		info->data -= 1;
10757 		return 0;
10758 
10759 	default:
10760 		return -EOPNOTSUPP;
10761 	}
10762 }
10763 
10764 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10765 {
10766 	u32 size = 0;
10767 	struct tg3 *tp = netdev_priv(dev);
10768 
10769 	if (tg3_flag(tp, SUPPORT_MSIX))
10770 		size = TG3_RSS_INDIR_TBL_SIZE;
10771 
10772 	return size;
10773 }
10774 
10775 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10776 {
10777 	struct tg3 *tp = netdev_priv(dev);
10778 	int i;
10779 
10780 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10781 		indir[i] = tp->rss_ind_tbl[i];
10782 
10783 	return 0;
10784 }
10785 
10786 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10787 {
10788 	struct tg3 *tp = netdev_priv(dev);
10789 	size_t i;
10790 
10791 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10792 		tp->rss_ind_tbl[i] = indir[i];
10793 
10794 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10795 		return 0;
10796 
10797 	/* It is legal to write the indirection
10798 	 * table while the device is running.
10799 	 */
10800 	tg3_full_lock(tp, 0);
10801 	tg3_rss_write_indir_tbl(tp);
10802 	tg3_full_unlock(tp);
10803 
10804 	return 0;
10805 }
10806 
10807 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10808 {
10809 	switch (stringset) {
10810 	case ETH_SS_STATS:
10811 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10812 		break;
10813 	case ETH_SS_TEST:
10814 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10815 		break;
10816 	default:
10817 		WARN_ON(1);	/* we need a WARN() */
10818 		break;
10819 	}
10820 }
10821 
10822 static int tg3_set_phys_id(struct net_device *dev,
10823 			    enum ethtool_phys_id_state state)
10824 {
10825 	struct tg3 *tp = netdev_priv(dev);
10826 
10827 	if (!netif_running(tp->dev))
10828 		return -EAGAIN;
10829 
10830 	switch (state) {
10831 	case ETHTOOL_ID_ACTIVE:
10832 		return 1;	/* cycle on/off once per second */
10833 
10834 	case ETHTOOL_ID_ON:
10835 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10836 		     LED_CTRL_1000MBPS_ON |
10837 		     LED_CTRL_100MBPS_ON |
10838 		     LED_CTRL_10MBPS_ON |
10839 		     LED_CTRL_TRAFFIC_OVERRIDE |
10840 		     LED_CTRL_TRAFFIC_BLINK |
10841 		     LED_CTRL_TRAFFIC_LED);
10842 		break;
10843 
10844 	case ETHTOOL_ID_OFF:
10845 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10846 		     LED_CTRL_TRAFFIC_OVERRIDE);
10847 		break;
10848 
10849 	case ETHTOOL_ID_INACTIVE:
10850 		tw32(MAC_LED_CTRL, tp->led_ctrl);
10851 		break;
10852 	}
10853 
10854 	return 0;
10855 }
10856 
10857 static void tg3_get_ethtool_stats(struct net_device *dev,
10858 				   struct ethtool_stats *estats, u64 *tmp_stats)
10859 {
10860 	struct tg3 *tp = netdev_priv(dev);
10861 
10862 	tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10863 }
10864 
10865 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10866 {
10867 	int i;
10868 	__be32 *buf;
10869 	u32 offset = 0, len = 0;
10870 	u32 magic, val;
10871 
10872 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10873 		return NULL;
10874 
10875 	if (magic == TG3_EEPROM_MAGIC) {
10876 		for (offset = TG3_NVM_DIR_START;
10877 		     offset < TG3_NVM_DIR_END;
10878 		     offset += TG3_NVM_DIRENT_SIZE) {
10879 			if (tg3_nvram_read(tp, offset, &val))
10880 				return NULL;
10881 
10882 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10883 			    TG3_NVM_DIRTYPE_EXTVPD)
10884 				break;
10885 		}
10886 
10887 		if (offset != TG3_NVM_DIR_END) {
10888 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10889 			if (tg3_nvram_read(tp, offset + 4, &offset))
10890 				return NULL;
10891 
10892 			offset = tg3_nvram_logical_addr(tp, offset);
10893 		}
10894 	}
10895 
10896 	if (!offset || !len) {
10897 		offset = TG3_NVM_VPD_OFF;
10898 		len = TG3_NVM_VPD_LEN;
10899 	}
10900 
10901 	buf = kmalloc(len, GFP_KERNEL);
10902 	if (buf == NULL)
10903 		return NULL;
10904 
10905 	if (magic == TG3_EEPROM_MAGIC) {
10906 		for (i = 0; i < len; i += 4) {
10907 			/* The data is in little-endian format in NVRAM.
10908 			 * Use the big-endian read routines to preserve
10909 			 * the byte order as it exists in NVRAM.
10910 			 */
10911 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10912 				goto error;
10913 		}
10914 	} else {
10915 		u8 *ptr;
10916 		ssize_t cnt;
10917 		unsigned int pos = 0;
10918 
10919 		ptr = (u8 *)&buf[0];
10920 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10921 			cnt = pci_read_vpd(tp->pdev, pos,
10922 					   len - pos, ptr);
10923 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
10924 				cnt = 0;
10925 			else if (cnt < 0)
10926 				goto error;
10927 		}
10928 		if (pos != len)
10929 			goto error;
10930 	}
10931 
10932 	*vpdlen = len;
10933 
10934 	return buf;
10935 
10936 error:
10937 	kfree(buf);
10938 	return NULL;
10939 }
10940 
10941 #define NVRAM_TEST_SIZE 0x100
10942 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
10943 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
10944 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
10945 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
10946 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
10947 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
10948 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10949 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10950 
10951 static int tg3_test_nvram(struct tg3 *tp)
10952 {
10953 	u32 csum, magic, len;
10954 	__be32 *buf;
10955 	int i, j, k, err = 0, size;
10956 
10957 	if (tg3_flag(tp, NO_NVRAM))
10958 		return 0;
10959 
10960 	if (tg3_nvram_read(tp, 0, &magic) != 0)
10961 		return -EIO;
10962 
10963 	if (magic == TG3_EEPROM_MAGIC)
10964 		size = NVRAM_TEST_SIZE;
10965 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10966 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10967 		    TG3_EEPROM_SB_FORMAT_1) {
10968 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10969 			case TG3_EEPROM_SB_REVISION_0:
10970 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10971 				break;
10972 			case TG3_EEPROM_SB_REVISION_2:
10973 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10974 				break;
10975 			case TG3_EEPROM_SB_REVISION_3:
10976 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10977 				break;
10978 			case TG3_EEPROM_SB_REVISION_4:
10979 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10980 				break;
10981 			case TG3_EEPROM_SB_REVISION_5:
10982 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10983 				break;
10984 			case TG3_EEPROM_SB_REVISION_6:
10985 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10986 				break;
10987 			default:
10988 				return -EIO;
10989 			}
10990 		} else
10991 			return 0;
10992 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10993 		size = NVRAM_SELFBOOT_HW_SIZE;
10994 	else
10995 		return -EIO;
10996 
10997 	buf = kmalloc(size, GFP_KERNEL);
10998 	if (buf == NULL)
10999 		return -ENOMEM;
11000 
11001 	err = -EIO;
11002 	for (i = 0, j = 0; i < size; i += 4, j++) {
11003 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11004 		if (err)
11005 			break;
11006 	}
11007 	if (i < size)
11008 		goto out;
11009 
11010 	/* Selfboot format */
11011 	magic = be32_to_cpu(buf[0]);
11012 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11013 	    TG3_EEPROM_MAGIC_FW) {
11014 		u8 *buf8 = (u8 *) buf, csum8 = 0;
11015 
11016 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11017 		    TG3_EEPROM_SB_REVISION_2) {
11018 			/* For rev 2, the csum doesn't include the MBA. */
11019 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11020 				csum8 += buf8[i];
11021 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11022 				csum8 += buf8[i];
11023 		} else {
11024 			for (i = 0; i < size; i++)
11025 				csum8 += buf8[i];
11026 		}
11027 
11028 		if (csum8 == 0) {
11029 			err = 0;
11030 			goto out;
11031 		}
11032 
11033 		err = -EIO;
11034 		goto out;
11035 	}
11036 
11037 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11038 	    TG3_EEPROM_MAGIC_HW) {
11039 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11040 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11041 		u8 *buf8 = (u8 *) buf;
11042 
11043 		/* Separate the parity bits and the data bytes.  */
11044 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11045 			if ((i == 0) || (i == 8)) {
11046 				int l;
11047 				u8 msk;
11048 
11049 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11050 					parity[k++] = buf8[i] & msk;
11051 				i++;
11052 			} else if (i == 16) {
11053 				int l;
11054 				u8 msk;
11055 
11056 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11057 					parity[k++] = buf8[i] & msk;
11058 				i++;
11059 
11060 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11061 					parity[k++] = buf8[i] & msk;
11062 				i++;
11063 			}
11064 			data[j++] = buf8[i];
11065 		}
11066 
11067 		err = -EIO;
11068 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11069 			u8 hw8 = hweight8(data[i]);
11070 
11071 			if ((hw8 & 0x1) && parity[i])
11072 				goto out;
11073 			else if (!(hw8 & 0x1) && !parity[i])
11074 				goto out;
11075 		}
11076 		err = 0;
11077 		goto out;
11078 	}
11079 
11080 	err = -EIO;
11081 
11082 	/* Bootstrap checksum at offset 0x10 */
11083 	csum = calc_crc((unsigned char *) buf, 0x10);
11084 	if (csum != le32_to_cpu(buf[0x10/4]))
11085 		goto out;
11086 
11087 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11088 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11089 	if (csum != le32_to_cpu(buf[0xfc/4]))
11090 		goto out;
11091 
11092 	kfree(buf);
11093 
11094 	buf = tg3_vpd_readblock(tp, &len);
11095 	if (!buf)
11096 		return -ENOMEM;
11097 
11098 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11099 	if (i > 0) {
11100 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11101 		if (j < 0)
11102 			goto out;
11103 
11104 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11105 			goto out;
11106 
11107 		i += PCI_VPD_LRDT_TAG_SIZE;
11108 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11109 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11110 		if (j > 0) {
11111 			u8 csum8 = 0;
11112 
11113 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11114 
11115 			for (i = 0; i <= j; i++)
11116 				csum8 += ((u8 *)buf)[i];
11117 
11118 			if (csum8)
11119 				goto out;
11120 		}
11121 	}
11122 
11123 	err = 0;
11124 
11125 out:
11126 	kfree(buf);
11127 	return err;
11128 }
11129 
11130 #define TG3_SERDES_TIMEOUT_SEC	2
11131 #define TG3_COPPER_TIMEOUT_SEC	6
11132 
11133 static int tg3_test_link(struct tg3 *tp)
11134 {
11135 	int i, max;
11136 
11137 	if (!netif_running(tp->dev))
11138 		return -ENODEV;
11139 
11140 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11141 		max = TG3_SERDES_TIMEOUT_SEC;
11142 	else
11143 		max = TG3_COPPER_TIMEOUT_SEC;
11144 
11145 	for (i = 0; i < max; i++) {
11146 		if (netif_carrier_ok(tp->dev))
11147 			return 0;
11148 
11149 		if (msleep_interruptible(1000))
11150 			break;
11151 	}
11152 
11153 	return -EIO;
11154 }
11155 
11156 /* Only test the commonly used registers */
11157 static int tg3_test_registers(struct tg3 *tp)
11158 {
11159 	int i, is_5705, is_5750;
11160 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11161 	static struct {
11162 		u16 offset;
11163 		u16 flags;
11164 #define TG3_FL_5705	0x1
11165 #define TG3_FL_NOT_5705	0x2
11166 #define TG3_FL_NOT_5788	0x4
11167 #define TG3_FL_NOT_5750	0x8
11168 		u32 read_mask;
11169 		u32 write_mask;
11170 	} reg_tbl[] = {
11171 		/* MAC Control Registers */
11172 		{ MAC_MODE, TG3_FL_NOT_5705,
11173 			0x00000000, 0x00ef6f8c },
11174 		{ MAC_MODE, TG3_FL_5705,
11175 			0x00000000, 0x01ef6b8c },
11176 		{ MAC_STATUS, TG3_FL_NOT_5705,
11177 			0x03800107, 0x00000000 },
11178 		{ MAC_STATUS, TG3_FL_5705,
11179 			0x03800100, 0x00000000 },
11180 		{ MAC_ADDR_0_HIGH, 0x0000,
11181 			0x00000000, 0x0000ffff },
11182 		{ MAC_ADDR_0_LOW, 0x0000,
11183 			0x00000000, 0xffffffff },
11184 		{ MAC_RX_MTU_SIZE, 0x0000,
11185 			0x00000000, 0x0000ffff },
11186 		{ MAC_TX_MODE, 0x0000,
11187 			0x00000000, 0x00000070 },
11188 		{ MAC_TX_LENGTHS, 0x0000,
11189 			0x00000000, 0x00003fff },
11190 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11191 			0x00000000, 0x000007fc },
11192 		{ MAC_RX_MODE, TG3_FL_5705,
11193 			0x00000000, 0x000007dc },
11194 		{ MAC_HASH_REG_0, 0x0000,
11195 			0x00000000, 0xffffffff },
11196 		{ MAC_HASH_REG_1, 0x0000,
11197 			0x00000000, 0xffffffff },
11198 		{ MAC_HASH_REG_2, 0x0000,
11199 			0x00000000, 0xffffffff },
11200 		{ MAC_HASH_REG_3, 0x0000,
11201 			0x00000000, 0xffffffff },
11202 
11203 		/* Receive Data and Receive BD Initiator Control Registers. */
11204 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11205 			0x00000000, 0xffffffff },
11206 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11207 			0x00000000, 0xffffffff },
11208 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11209 			0x00000000, 0x00000003 },
11210 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11211 			0x00000000, 0xffffffff },
11212 		{ RCVDBDI_STD_BD+0, 0x0000,
11213 			0x00000000, 0xffffffff },
11214 		{ RCVDBDI_STD_BD+4, 0x0000,
11215 			0x00000000, 0xffffffff },
11216 		{ RCVDBDI_STD_BD+8, 0x0000,
11217 			0x00000000, 0xffff0002 },
11218 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11219 			0x00000000, 0xffffffff },
11220 
11221 		/* Receive BD Initiator Control Registers. */
11222 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11223 			0x00000000, 0xffffffff },
11224 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11225 			0x00000000, 0x000003ff },
11226 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11227 			0x00000000, 0xffffffff },
11228 
11229 		/* Host Coalescing Control Registers. */
11230 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11231 			0x00000000, 0x00000004 },
11232 		{ HOSTCC_MODE, TG3_FL_5705,
11233 			0x00000000, 0x000000f6 },
11234 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11235 			0x00000000, 0xffffffff },
11236 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11237 			0x00000000, 0x000003ff },
11238 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11239 			0x00000000, 0xffffffff },
11240 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11241 			0x00000000, 0x000003ff },
11242 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11243 			0x00000000, 0xffffffff },
11244 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11245 			0x00000000, 0x000000ff },
11246 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11247 			0x00000000, 0xffffffff },
11248 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11249 			0x00000000, 0x000000ff },
11250 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11251 			0x00000000, 0xffffffff },
11252 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11253 			0x00000000, 0xffffffff },
11254 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11255 			0x00000000, 0xffffffff },
11256 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11257 			0x00000000, 0x000000ff },
11258 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11259 			0x00000000, 0xffffffff },
11260 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11261 			0x00000000, 0x000000ff },
11262 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11263 			0x00000000, 0xffffffff },
11264 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11265 			0x00000000, 0xffffffff },
11266 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11267 			0x00000000, 0xffffffff },
11268 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11269 			0x00000000, 0xffffffff },
11270 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11271 			0x00000000, 0xffffffff },
11272 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11273 			0xffffffff, 0x00000000 },
11274 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11275 			0xffffffff, 0x00000000 },
11276 
11277 		/* Buffer Manager Control Registers. */
11278 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11279 			0x00000000, 0x007fff80 },
11280 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11281 			0x00000000, 0x007fffff },
11282 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11283 			0x00000000, 0x0000003f },
11284 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11285 			0x00000000, 0x000001ff },
11286 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11287 			0x00000000, 0x000001ff },
11288 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11289 			0xffffffff, 0x00000000 },
11290 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11291 			0xffffffff, 0x00000000 },
11292 
11293 		/* Mailbox Registers */
11294 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11295 			0x00000000, 0x000001ff },
11296 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11297 			0x00000000, 0x000001ff },
11298 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11299 			0x00000000, 0x000007ff },
11300 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11301 			0x00000000, 0x000001ff },
11302 
11303 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11304 	};
11305 
11306 	is_5705 = is_5750 = 0;
11307 	if (tg3_flag(tp, 5705_PLUS)) {
11308 		is_5705 = 1;
11309 		if (tg3_flag(tp, 5750_PLUS))
11310 			is_5750 = 1;
11311 	}
11312 
11313 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11314 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11315 			continue;
11316 
11317 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11318 			continue;
11319 
11320 		if (tg3_flag(tp, IS_5788) &&
11321 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11322 			continue;
11323 
11324 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11325 			continue;
11326 
11327 		offset = (u32) reg_tbl[i].offset;
11328 		read_mask = reg_tbl[i].read_mask;
11329 		write_mask = reg_tbl[i].write_mask;
11330 
11331 		/* Save the original register content */
11332 		save_val = tr32(offset);
11333 
11334 		/* Determine the read-only value. */
11335 		read_val = save_val & read_mask;
11336 
11337 		/* Write zero to the register, then make sure the read-only bits
11338 		 * are not changed and the read/write bits are all zeros.
11339 		 */
11340 		tw32(offset, 0);
11341 
11342 		val = tr32(offset);
11343 
11344 		/* Test the read-only and read/write bits. */
11345 		if (((val & read_mask) != read_val) || (val & write_mask))
11346 			goto out;
11347 
11348 		/* Write ones to all the bits defined by RdMask and WrMask, then
11349 		 * make sure the read-only bits are not changed and the
11350 		 * read/write bits are all ones.
11351 		 */
11352 		tw32(offset, read_mask | write_mask);
11353 
11354 		val = tr32(offset);
11355 
11356 		/* Test the read-only bits. */
11357 		if ((val & read_mask) != read_val)
11358 			goto out;
11359 
11360 		/* Test the read/write bits. */
11361 		if ((val & write_mask) != write_mask)
11362 			goto out;
11363 
11364 		tw32(offset, save_val);
11365 	}
11366 
11367 	return 0;
11368 
11369 out:
11370 	if (netif_msg_hw(tp))
11371 		netdev_err(tp->dev,
11372 			   "Register test failed at offset %x\n", offset);
11373 	tw32(offset, save_val);
11374 	return -EIO;
11375 }
11376 
11377 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11378 {
11379 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11380 	int i;
11381 	u32 j;
11382 
11383 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11384 		for (j = 0; j < len; j += 4) {
11385 			u32 val;
11386 
11387 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11388 			tg3_read_mem(tp, offset + j, &val);
11389 			if (val != test_pattern[i])
11390 				return -EIO;
11391 		}
11392 	}
11393 	return 0;
11394 }
11395 
11396 static int tg3_test_memory(struct tg3 *tp)
11397 {
11398 	static struct mem_entry {
11399 		u32 offset;
11400 		u32 len;
11401 	} mem_tbl_570x[] = {
11402 		{ 0x00000000, 0x00b50},
11403 		{ 0x00002000, 0x1c000},
11404 		{ 0xffffffff, 0x00000}
11405 	}, mem_tbl_5705[] = {
11406 		{ 0x00000100, 0x0000c},
11407 		{ 0x00000200, 0x00008},
11408 		{ 0x00004000, 0x00800},
11409 		{ 0x00006000, 0x01000},
11410 		{ 0x00008000, 0x02000},
11411 		{ 0x00010000, 0x0e000},
11412 		{ 0xffffffff, 0x00000}
11413 	}, mem_tbl_5755[] = {
11414 		{ 0x00000200, 0x00008},
11415 		{ 0x00004000, 0x00800},
11416 		{ 0x00006000, 0x00800},
11417 		{ 0x00008000, 0x02000},
11418 		{ 0x00010000, 0x0c000},
11419 		{ 0xffffffff, 0x00000}
11420 	}, mem_tbl_5906[] = {
11421 		{ 0x00000200, 0x00008},
11422 		{ 0x00004000, 0x00400},
11423 		{ 0x00006000, 0x00400},
11424 		{ 0x00008000, 0x01000},
11425 		{ 0x00010000, 0x01000},
11426 		{ 0xffffffff, 0x00000}
11427 	}, mem_tbl_5717[] = {
11428 		{ 0x00000200, 0x00008},
11429 		{ 0x00010000, 0x0a000},
11430 		{ 0x00020000, 0x13c00},
11431 		{ 0xffffffff, 0x00000}
11432 	}, mem_tbl_57765[] = {
11433 		{ 0x00000200, 0x00008},
11434 		{ 0x00004000, 0x00800},
11435 		{ 0x00006000, 0x09800},
11436 		{ 0x00010000, 0x0a000},
11437 		{ 0xffffffff, 0x00000}
11438 	};
11439 	struct mem_entry *mem_tbl;
11440 	int err = 0;
11441 	int i;
11442 
11443 	if (tg3_flag(tp, 5717_PLUS))
11444 		mem_tbl = mem_tbl_5717;
11445 	else if (tg3_flag(tp, 57765_CLASS))
11446 		mem_tbl = mem_tbl_57765;
11447 	else if (tg3_flag(tp, 5755_PLUS))
11448 		mem_tbl = mem_tbl_5755;
11449 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11450 		mem_tbl = mem_tbl_5906;
11451 	else if (tg3_flag(tp, 5705_PLUS))
11452 		mem_tbl = mem_tbl_5705;
11453 	else
11454 		mem_tbl = mem_tbl_570x;
11455 
11456 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11457 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11458 		if (err)
11459 			break;
11460 	}
11461 
11462 	return err;
11463 }
11464 
11465 #define TG3_TSO_MSS		500
11466 
11467 #define TG3_TSO_IP_HDR_LEN	20
11468 #define TG3_TSO_TCP_HDR_LEN	20
11469 #define TG3_TSO_TCP_OPT_LEN	12
11470 
11471 static const u8 tg3_tso_header[] = {
11472 0x08, 0x00,
11473 0x45, 0x00, 0x00, 0x00,
11474 0x00, 0x00, 0x40, 0x00,
11475 0x40, 0x06, 0x00, 0x00,
11476 0x0a, 0x00, 0x00, 0x01,
11477 0x0a, 0x00, 0x00, 0x02,
11478 0x0d, 0x00, 0xe0, 0x00,
11479 0x00, 0x00, 0x01, 0x00,
11480 0x00, 0x00, 0x02, 0x00,
11481 0x80, 0x10, 0x10, 0x00,
11482 0x14, 0x09, 0x00, 0x00,
11483 0x01, 0x01, 0x08, 0x0a,
11484 0x11, 0x11, 0x11, 0x11,
11485 0x11, 0x11, 0x11, 0x11,
11486 };
11487 
11488 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11489 {
11490 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11491 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11492 	u32 budget;
11493 	struct sk_buff *skb;
11494 	u8 *tx_data, *rx_data;
11495 	dma_addr_t map;
11496 	int num_pkts, tx_len, rx_len, i, err;
11497 	struct tg3_rx_buffer_desc *desc;
11498 	struct tg3_napi *tnapi, *rnapi;
11499 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11500 
11501 	tnapi = &tp->napi[0];
11502 	rnapi = &tp->napi[0];
11503 	if (tp->irq_cnt > 1) {
11504 		if (tg3_flag(tp, ENABLE_RSS))
11505 			rnapi = &tp->napi[1];
11506 		if (tg3_flag(tp, ENABLE_TSS))
11507 			tnapi = &tp->napi[1];
11508 	}
11509 	coal_now = tnapi->coal_now | rnapi->coal_now;
11510 
11511 	err = -EIO;
11512 
11513 	tx_len = pktsz;
11514 	skb = netdev_alloc_skb(tp->dev, tx_len);
11515 	if (!skb)
11516 		return -ENOMEM;
11517 
11518 	tx_data = skb_put(skb, tx_len);
11519 	memcpy(tx_data, tp->dev->dev_addr, 6);
11520 	memset(tx_data + 6, 0x0, 8);
11521 
11522 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11523 
11524 	if (tso_loopback) {
11525 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11526 
11527 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11528 			      TG3_TSO_TCP_OPT_LEN;
11529 
11530 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11531 		       sizeof(tg3_tso_header));
11532 		mss = TG3_TSO_MSS;
11533 
11534 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11535 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11536 
11537 		/* Set the total length field in the IP header */
11538 		iph->tot_len = htons((u16)(mss + hdr_len));
11539 
11540 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11541 			      TXD_FLAG_CPU_POST_DMA);
11542 
11543 		if (tg3_flag(tp, HW_TSO_1) ||
11544 		    tg3_flag(tp, HW_TSO_2) ||
11545 		    tg3_flag(tp, HW_TSO_3)) {
11546 			struct tcphdr *th;
11547 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11548 			th = (struct tcphdr *)&tx_data[val];
11549 			th->check = 0;
11550 		} else
11551 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11552 
11553 		if (tg3_flag(tp, HW_TSO_3)) {
11554 			mss |= (hdr_len & 0xc) << 12;
11555 			if (hdr_len & 0x10)
11556 				base_flags |= 0x00000010;
11557 			base_flags |= (hdr_len & 0x3e0) << 5;
11558 		} else if (tg3_flag(tp, HW_TSO_2))
11559 			mss |= hdr_len << 9;
11560 		else if (tg3_flag(tp, HW_TSO_1) ||
11561 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11562 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11563 		} else {
11564 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11565 		}
11566 
11567 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11568 	} else {
11569 		num_pkts = 1;
11570 		data_off = ETH_HLEN;
11571 	}
11572 
11573 	for (i = data_off; i < tx_len; i++)
11574 		tx_data[i] = (u8) (i & 0xff);
11575 
11576 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11577 	if (pci_dma_mapping_error(tp->pdev, map)) {
11578 		dev_kfree_skb(skb);
11579 		return -EIO;
11580 	}
11581 
11582 	val = tnapi->tx_prod;
11583 	tnapi->tx_buffers[val].skb = skb;
11584 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11585 
11586 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11587 	       rnapi->coal_now);
11588 
11589 	udelay(10);
11590 
11591 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11592 
11593 	budget = tg3_tx_avail(tnapi);
11594 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11595 			    base_flags | TXD_FLAG_END, mss, 0)) {
11596 		tnapi->tx_buffers[val].skb = NULL;
11597 		dev_kfree_skb(skb);
11598 		return -EIO;
11599 	}
11600 
11601 	tnapi->tx_prod++;
11602 
11603 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11604 	tr32_mailbox(tnapi->prodmbox);
11605 
11606 	udelay(10);
11607 
11608 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11609 	for (i = 0; i < 35; i++) {
11610 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11611 		       coal_now);
11612 
11613 		udelay(10);
11614 
11615 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11616 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11617 		if ((tx_idx == tnapi->tx_prod) &&
11618 		    (rx_idx == (rx_start_idx + num_pkts)))
11619 			break;
11620 	}
11621 
11622 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11623 	dev_kfree_skb(skb);
11624 
11625 	if (tx_idx != tnapi->tx_prod)
11626 		goto out;
11627 
11628 	if (rx_idx != rx_start_idx + num_pkts)
11629 		goto out;
11630 
11631 	val = data_off;
11632 	while (rx_idx != rx_start_idx) {
11633 		desc = &rnapi->rx_rcb[rx_start_idx++];
11634 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11635 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11636 
11637 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11638 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11639 			goto out;
11640 
11641 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11642 			 - ETH_FCS_LEN;
11643 
11644 		if (!tso_loopback) {
11645 			if (rx_len != tx_len)
11646 				goto out;
11647 
11648 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11649 				if (opaque_key != RXD_OPAQUE_RING_STD)
11650 					goto out;
11651 			} else {
11652 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11653 					goto out;
11654 			}
11655 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11656 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11657 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11658 			goto out;
11659 		}
11660 
11661 		if (opaque_key == RXD_OPAQUE_RING_STD) {
11662 			rx_data = tpr->rx_std_buffers[desc_idx].data;
11663 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11664 					     mapping);
11665 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11666 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11667 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11668 					     mapping);
11669 		} else
11670 			goto out;
11671 
11672 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11673 					    PCI_DMA_FROMDEVICE);
11674 
11675 		rx_data += TG3_RX_OFFSET(tp);
11676 		for (i = data_off; i < rx_len; i++, val++) {
11677 			if (*(rx_data + i) != (u8) (val & 0xff))
11678 				goto out;
11679 		}
11680 	}
11681 
11682 	err = 0;
11683 
11684 	/* tg3_free_rings will unmap and free the rx_data */
11685 out:
11686 	return err;
11687 }
11688 
11689 #define TG3_STD_LOOPBACK_FAILED		1
11690 #define TG3_JMB_LOOPBACK_FAILED		2
11691 #define TG3_TSO_LOOPBACK_FAILED		4
11692 #define TG3_LOOPBACK_FAILED \
11693 	(TG3_STD_LOOPBACK_FAILED | \
11694 	 TG3_JMB_LOOPBACK_FAILED | \
11695 	 TG3_TSO_LOOPBACK_FAILED)
11696 
11697 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11698 {
11699 	int err = -EIO;
11700 	u32 eee_cap;
11701 
11702 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11703 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11704 
11705 	if (!netif_running(tp->dev)) {
11706 		data[0] = TG3_LOOPBACK_FAILED;
11707 		data[1] = TG3_LOOPBACK_FAILED;
11708 		if (do_extlpbk)
11709 			data[2] = TG3_LOOPBACK_FAILED;
11710 		goto done;
11711 	}
11712 
11713 	err = tg3_reset_hw(tp, 1);
11714 	if (err) {
11715 		data[0] = TG3_LOOPBACK_FAILED;
11716 		data[1] = TG3_LOOPBACK_FAILED;
11717 		if (do_extlpbk)
11718 			data[2] = TG3_LOOPBACK_FAILED;
11719 		goto done;
11720 	}
11721 
11722 	if (tg3_flag(tp, ENABLE_RSS)) {
11723 		int i;
11724 
11725 		/* Reroute all rx packets to the 1st queue */
11726 		for (i = MAC_RSS_INDIR_TBL_0;
11727 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11728 			tw32(i, 0x0);
11729 	}
11730 
11731 	/* HW errata - mac loopback fails in some cases on 5780.
11732 	 * Normal traffic and PHY loopback are not affected by
11733 	 * errata.  Also, the MAC loopback test is deprecated for
11734 	 * all newer ASIC revisions.
11735 	 */
11736 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11737 	    !tg3_flag(tp, CPMU_PRESENT)) {
11738 		tg3_mac_loopback(tp, true);
11739 
11740 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11741 			data[0] |= TG3_STD_LOOPBACK_FAILED;
11742 
11743 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11744 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11745 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11746 
11747 		tg3_mac_loopback(tp, false);
11748 	}
11749 
11750 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11751 	    !tg3_flag(tp, USE_PHYLIB)) {
11752 		int i;
11753 
11754 		tg3_phy_lpbk_set(tp, 0, false);
11755 
11756 		/* Wait for link */
11757 		for (i = 0; i < 100; i++) {
11758 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11759 				break;
11760 			mdelay(1);
11761 		}
11762 
11763 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11764 			data[1] |= TG3_STD_LOOPBACK_FAILED;
11765 		if (tg3_flag(tp, TSO_CAPABLE) &&
11766 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11767 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
11768 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11769 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11770 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
11771 
11772 		if (do_extlpbk) {
11773 			tg3_phy_lpbk_set(tp, 0, true);
11774 
11775 			/* All link indications report up, but the hardware
11776 			 * isn't really ready for about 20 msec.  Double it
11777 			 * to be sure.
11778 			 */
11779 			mdelay(40);
11780 
11781 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11782 				data[2] |= TG3_STD_LOOPBACK_FAILED;
11783 			if (tg3_flag(tp, TSO_CAPABLE) &&
11784 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11785 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
11786 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11787 			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11788 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
11789 		}
11790 
11791 		/* Re-enable gphy autopowerdown. */
11792 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11793 			tg3_phy_toggle_apd(tp, true);
11794 	}
11795 
11796 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11797 
11798 done:
11799 	tp->phy_flags |= eee_cap;
11800 
11801 	return err;
11802 }
11803 
11804 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11805 			  u64 *data)
11806 {
11807 	struct tg3 *tp = netdev_priv(dev);
11808 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11809 
11810 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11811 	    tg3_power_up(tp)) {
11812 		etest->flags |= ETH_TEST_FL_FAILED;
11813 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11814 		return;
11815 	}
11816 
11817 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11818 
11819 	if (tg3_test_nvram(tp) != 0) {
11820 		etest->flags |= ETH_TEST_FL_FAILED;
11821 		data[0] = 1;
11822 	}
11823 	if (!doextlpbk && tg3_test_link(tp)) {
11824 		etest->flags |= ETH_TEST_FL_FAILED;
11825 		data[1] = 1;
11826 	}
11827 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
11828 		int err, err2 = 0, irq_sync = 0;
11829 
11830 		if (netif_running(dev)) {
11831 			tg3_phy_stop(tp);
11832 			tg3_netif_stop(tp);
11833 			irq_sync = 1;
11834 		}
11835 
11836 		tg3_full_lock(tp, irq_sync);
11837 
11838 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11839 		err = tg3_nvram_lock(tp);
11840 		tg3_halt_cpu(tp, RX_CPU_BASE);
11841 		if (!tg3_flag(tp, 5705_PLUS))
11842 			tg3_halt_cpu(tp, TX_CPU_BASE);
11843 		if (!err)
11844 			tg3_nvram_unlock(tp);
11845 
11846 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11847 			tg3_phy_reset(tp);
11848 
11849 		if (tg3_test_registers(tp) != 0) {
11850 			etest->flags |= ETH_TEST_FL_FAILED;
11851 			data[2] = 1;
11852 		}
11853 
11854 		if (tg3_test_memory(tp) != 0) {
11855 			etest->flags |= ETH_TEST_FL_FAILED;
11856 			data[3] = 1;
11857 		}
11858 
11859 		if (doextlpbk)
11860 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11861 
11862 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
11863 			etest->flags |= ETH_TEST_FL_FAILED;
11864 
11865 		tg3_full_unlock(tp);
11866 
11867 		if (tg3_test_interrupt(tp) != 0) {
11868 			etest->flags |= ETH_TEST_FL_FAILED;
11869 			data[7] = 1;
11870 		}
11871 
11872 		tg3_full_lock(tp, 0);
11873 
11874 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11875 		if (netif_running(dev)) {
11876 			tg3_flag_set(tp, INIT_COMPLETE);
11877 			err2 = tg3_restart_hw(tp, 1);
11878 			if (!err2)
11879 				tg3_netif_start(tp);
11880 		}
11881 
11882 		tg3_full_unlock(tp);
11883 
11884 		if (irq_sync && !err2)
11885 			tg3_phy_start(tp);
11886 	}
11887 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11888 		tg3_power_down(tp);
11889 
11890 }
11891 
11892 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11893 {
11894 	struct mii_ioctl_data *data = if_mii(ifr);
11895 	struct tg3 *tp = netdev_priv(dev);
11896 	int err;
11897 
11898 	if (tg3_flag(tp, USE_PHYLIB)) {
11899 		struct phy_device *phydev;
11900 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11901 			return -EAGAIN;
11902 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11903 		return phy_mii_ioctl(phydev, ifr, cmd);
11904 	}
11905 
11906 	switch (cmd) {
11907 	case SIOCGMIIPHY:
11908 		data->phy_id = tp->phy_addr;
11909 
11910 		/* fallthru */
11911 	case SIOCGMIIREG: {
11912 		u32 mii_regval;
11913 
11914 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11915 			break;			/* We have no PHY */
11916 
11917 		if (!netif_running(dev))
11918 			return -EAGAIN;
11919 
11920 		spin_lock_bh(&tp->lock);
11921 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11922 		spin_unlock_bh(&tp->lock);
11923 
11924 		data->val_out = mii_regval;
11925 
11926 		return err;
11927 	}
11928 
11929 	case SIOCSMIIREG:
11930 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11931 			break;			/* We have no PHY */
11932 
11933 		if (!netif_running(dev))
11934 			return -EAGAIN;
11935 
11936 		spin_lock_bh(&tp->lock);
11937 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11938 		spin_unlock_bh(&tp->lock);
11939 
11940 		return err;
11941 
11942 	default:
11943 		/* do nothing */
11944 		break;
11945 	}
11946 	return -EOPNOTSUPP;
11947 }
11948 
11949 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11950 {
11951 	struct tg3 *tp = netdev_priv(dev);
11952 
11953 	memcpy(ec, &tp->coal, sizeof(*ec));
11954 	return 0;
11955 }
11956 
11957 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11958 {
11959 	struct tg3 *tp = netdev_priv(dev);
11960 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11961 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11962 
11963 	if (!tg3_flag(tp, 5705_PLUS)) {
11964 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11965 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11966 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11967 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11968 	}
11969 
11970 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11971 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11972 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11973 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11974 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11975 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11976 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11977 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11978 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11979 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11980 		return -EINVAL;
11981 
11982 	/* No rx interrupts will be generated if both are zero */
11983 	if ((ec->rx_coalesce_usecs == 0) &&
11984 	    (ec->rx_max_coalesced_frames == 0))
11985 		return -EINVAL;
11986 
11987 	/* No tx interrupts will be generated if both are zero */
11988 	if ((ec->tx_coalesce_usecs == 0) &&
11989 	    (ec->tx_max_coalesced_frames == 0))
11990 		return -EINVAL;
11991 
11992 	/* Only copy relevant parameters, ignore all others. */
11993 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11994 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11995 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11996 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11997 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11998 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11999 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12000 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12001 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12002 
12003 	if (netif_running(dev)) {
12004 		tg3_full_lock(tp, 0);
12005 		__tg3_set_coalesce(tp, &tp->coal);
12006 		tg3_full_unlock(tp);
12007 	}
12008 	return 0;
12009 }
12010 
12011 static const struct ethtool_ops tg3_ethtool_ops = {
12012 	.get_settings		= tg3_get_settings,
12013 	.set_settings		= tg3_set_settings,
12014 	.get_drvinfo		= tg3_get_drvinfo,
12015 	.get_regs_len		= tg3_get_regs_len,
12016 	.get_regs		= tg3_get_regs,
12017 	.get_wol		= tg3_get_wol,
12018 	.set_wol		= tg3_set_wol,
12019 	.get_msglevel		= tg3_get_msglevel,
12020 	.set_msglevel		= tg3_set_msglevel,
12021 	.nway_reset		= tg3_nway_reset,
12022 	.get_link		= ethtool_op_get_link,
12023 	.get_eeprom_len		= tg3_get_eeprom_len,
12024 	.get_eeprom		= tg3_get_eeprom,
12025 	.set_eeprom		= tg3_set_eeprom,
12026 	.get_ringparam		= tg3_get_ringparam,
12027 	.set_ringparam		= tg3_set_ringparam,
12028 	.get_pauseparam		= tg3_get_pauseparam,
12029 	.set_pauseparam		= tg3_set_pauseparam,
12030 	.self_test		= tg3_self_test,
12031 	.get_strings		= tg3_get_strings,
12032 	.set_phys_id		= tg3_set_phys_id,
12033 	.get_ethtool_stats	= tg3_get_ethtool_stats,
12034 	.get_coalesce		= tg3_get_coalesce,
12035 	.set_coalesce		= tg3_set_coalesce,
12036 	.get_sset_count		= tg3_get_sset_count,
12037 	.get_rxnfc		= tg3_get_rxnfc,
12038 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12039 	.get_rxfh_indir		= tg3_get_rxfh_indir,
12040 	.set_rxfh_indir		= tg3_set_rxfh_indir,
12041 };
12042 
12043 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12044 {
12045 	u32 cursize, val, magic;
12046 
12047 	tp->nvram_size = EEPROM_CHIP_SIZE;
12048 
12049 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12050 		return;
12051 
12052 	if ((magic != TG3_EEPROM_MAGIC) &&
12053 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12054 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12055 		return;
12056 
12057 	/*
12058 	 * Size the chip by reading offsets at increasing powers of two.
12059 	 * When we encounter our validation signature, we know the addressing
12060 	 * has wrapped around, and thus have our chip size.
12061 	 */
12062 	cursize = 0x10;
12063 
12064 	while (cursize < tp->nvram_size) {
12065 		if (tg3_nvram_read(tp, cursize, &val) != 0)
12066 			return;
12067 
12068 		if (val == magic)
12069 			break;
12070 
12071 		cursize <<= 1;
12072 	}
12073 
12074 	tp->nvram_size = cursize;
12075 }
12076 
12077 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12078 {
12079 	u32 val;
12080 
12081 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12082 		return;
12083 
12084 	/* Selfboot format */
12085 	if (val != TG3_EEPROM_MAGIC) {
12086 		tg3_get_eeprom_size(tp);
12087 		return;
12088 	}
12089 
12090 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12091 		if (val != 0) {
12092 			/* This is confusing.  We want to operate on the
12093 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12094 			 * call will read from NVRAM and byteswap the data
12095 			 * according to the byteswapping settings for all
12096 			 * other register accesses.  This ensures the data we
12097 			 * want will always reside in the lower 16-bits.
12098 			 * However, the data in NVRAM is in LE format, which
12099 			 * means the data from the NVRAM read will always be
12100 			 * opposite the endianness of the CPU.  The 16-bit
12101 			 * byteswap then brings the data to CPU endianness.
12102 			 */
12103 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12104 			return;
12105 		}
12106 	}
12107 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12108 }
12109 
12110 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12111 {
12112 	u32 nvcfg1;
12113 
12114 	nvcfg1 = tr32(NVRAM_CFG1);
12115 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12116 		tg3_flag_set(tp, FLASH);
12117 	} else {
12118 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12119 		tw32(NVRAM_CFG1, nvcfg1);
12120 	}
12121 
12122 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12123 	    tg3_flag(tp, 5780_CLASS)) {
12124 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12125 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12126 			tp->nvram_jedecnum = JEDEC_ATMEL;
12127 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12128 			tg3_flag_set(tp, NVRAM_BUFFERED);
12129 			break;
12130 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12131 			tp->nvram_jedecnum = JEDEC_ATMEL;
12132 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12133 			break;
12134 		case FLASH_VENDOR_ATMEL_EEPROM:
12135 			tp->nvram_jedecnum = JEDEC_ATMEL;
12136 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12137 			tg3_flag_set(tp, NVRAM_BUFFERED);
12138 			break;
12139 		case FLASH_VENDOR_ST:
12140 			tp->nvram_jedecnum = JEDEC_ST;
12141 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12142 			tg3_flag_set(tp, NVRAM_BUFFERED);
12143 			break;
12144 		case FLASH_VENDOR_SAIFUN:
12145 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12146 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12147 			break;
12148 		case FLASH_VENDOR_SST_SMALL:
12149 		case FLASH_VENDOR_SST_LARGE:
12150 			tp->nvram_jedecnum = JEDEC_SST;
12151 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12152 			break;
12153 		}
12154 	} else {
12155 		tp->nvram_jedecnum = JEDEC_ATMEL;
12156 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12157 		tg3_flag_set(tp, NVRAM_BUFFERED);
12158 	}
12159 }
12160 
12161 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12162 {
12163 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12164 	case FLASH_5752PAGE_SIZE_256:
12165 		tp->nvram_pagesize = 256;
12166 		break;
12167 	case FLASH_5752PAGE_SIZE_512:
12168 		tp->nvram_pagesize = 512;
12169 		break;
12170 	case FLASH_5752PAGE_SIZE_1K:
12171 		tp->nvram_pagesize = 1024;
12172 		break;
12173 	case FLASH_5752PAGE_SIZE_2K:
12174 		tp->nvram_pagesize = 2048;
12175 		break;
12176 	case FLASH_5752PAGE_SIZE_4K:
12177 		tp->nvram_pagesize = 4096;
12178 		break;
12179 	case FLASH_5752PAGE_SIZE_264:
12180 		tp->nvram_pagesize = 264;
12181 		break;
12182 	case FLASH_5752PAGE_SIZE_528:
12183 		tp->nvram_pagesize = 528;
12184 		break;
12185 	}
12186 }
12187 
12188 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12189 {
12190 	u32 nvcfg1;
12191 
12192 	nvcfg1 = tr32(NVRAM_CFG1);
12193 
12194 	/* NVRAM protection for TPM */
12195 	if (nvcfg1 & (1 << 27))
12196 		tg3_flag_set(tp, PROTECTED_NVRAM);
12197 
12198 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12199 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12200 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12201 		tp->nvram_jedecnum = JEDEC_ATMEL;
12202 		tg3_flag_set(tp, NVRAM_BUFFERED);
12203 		break;
12204 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12205 		tp->nvram_jedecnum = JEDEC_ATMEL;
12206 		tg3_flag_set(tp, NVRAM_BUFFERED);
12207 		tg3_flag_set(tp, FLASH);
12208 		break;
12209 	case FLASH_5752VENDOR_ST_M45PE10:
12210 	case FLASH_5752VENDOR_ST_M45PE20:
12211 	case FLASH_5752VENDOR_ST_M45PE40:
12212 		tp->nvram_jedecnum = JEDEC_ST;
12213 		tg3_flag_set(tp, NVRAM_BUFFERED);
12214 		tg3_flag_set(tp, FLASH);
12215 		break;
12216 	}
12217 
12218 	if (tg3_flag(tp, FLASH)) {
12219 		tg3_nvram_get_pagesize(tp, nvcfg1);
12220 	} else {
12221 		/* For eeprom, set pagesize to maximum eeprom size */
12222 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12223 
12224 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12225 		tw32(NVRAM_CFG1, nvcfg1);
12226 	}
12227 }
12228 
12229 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12230 {
12231 	u32 nvcfg1, protect = 0;
12232 
12233 	nvcfg1 = tr32(NVRAM_CFG1);
12234 
12235 	/* NVRAM protection for TPM */
12236 	if (nvcfg1 & (1 << 27)) {
12237 		tg3_flag_set(tp, PROTECTED_NVRAM);
12238 		protect = 1;
12239 	}
12240 
12241 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12242 	switch (nvcfg1) {
12243 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12244 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12245 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12246 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12247 		tp->nvram_jedecnum = JEDEC_ATMEL;
12248 		tg3_flag_set(tp, NVRAM_BUFFERED);
12249 		tg3_flag_set(tp, FLASH);
12250 		tp->nvram_pagesize = 264;
12251 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12252 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12253 			tp->nvram_size = (protect ? 0x3e200 :
12254 					  TG3_NVRAM_SIZE_512KB);
12255 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12256 			tp->nvram_size = (protect ? 0x1f200 :
12257 					  TG3_NVRAM_SIZE_256KB);
12258 		else
12259 			tp->nvram_size = (protect ? 0x1f200 :
12260 					  TG3_NVRAM_SIZE_128KB);
12261 		break;
12262 	case FLASH_5752VENDOR_ST_M45PE10:
12263 	case FLASH_5752VENDOR_ST_M45PE20:
12264 	case FLASH_5752VENDOR_ST_M45PE40:
12265 		tp->nvram_jedecnum = JEDEC_ST;
12266 		tg3_flag_set(tp, NVRAM_BUFFERED);
12267 		tg3_flag_set(tp, FLASH);
12268 		tp->nvram_pagesize = 256;
12269 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12270 			tp->nvram_size = (protect ?
12271 					  TG3_NVRAM_SIZE_64KB :
12272 					  TG3_NVRAM_SIZE_128KB);
12273 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12274 			tp->nvram_size = (protect ?
12275 					  TG3_NVRAM_SIZE_64KB :
12276 					  TG3_NVRAM_SIZE_256KB);
12277 		else
12278 			tp->nvram_size = (protect ?
12279 					  TG3_NVRAM_SIZE_128KB :
12280 					  TG3_NVRAM_SIZE_512KB);
12281 		break;
12282 	}
12283 }
12284 
12285 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12286 {
12287 	u32 nvcfg1;
12288 
12289 	nvcfg1 = tr32(NVRAM_CFG1);
12290 
12291 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12292 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12293 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12294 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12295 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12296 		tp->nvram_jedecnum = JEDEC_ATMEL;
12297 		tg3_flag_set(tp, NVRAM_BUFFERED);
12298 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12299 
12300 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12301 		tw32(NVRAM_CFG1, nvcfg1);
12302 		break;
12303 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12304 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12305 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12306 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12307 		tp->nvram_jedecnum = JEDEC_ATMEL;
12308 		tg3_flag_set(tp, NVRAM_BUFFERED);
12309 		tg3_flag_set(tp, FLASH);
12310 		tp->nvram_pagesize = 264;
12311 		break;
12312 	case FLASH_5752VENDOR_ST_M45PE10:
12313 	case FLASH_5752VENDOR_ST_M45PE20:
12314 	case FLASH_5752VENDOR_ST_M45PE40:
12315 		tp->nvram_jedecnum = JEDEC_ST;
12316 		tg3_flag_set(tp, NVRAM_BUFFERED);
12317 		tg3_flag_set(tp, FLASH);
12318 		tp->nvram_pagesize = 256;
12319 		break;
12320 	}
12321 }
12322 
12323 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12324 {
12325 	u32 nvcfg1, protect = 0;
12326 
12327 	nvcfg1 = tr32(NVRAM_CFG1);
12328 
12329 	/* NVRAM protection for TPM */
12330 	if (nvcfg1 & (1 << 27)) {
12331 		tg3_flag_set(tp, PROTECTED_NVRAM);
12332 		protect = 1;
12333 	}
12334 
12335 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12336 	switch (nvcfg1) {
12337 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12338 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12339 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12340 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12341 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12342 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12343 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12344 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12345 		tp->nvram_jedecnum = JEDEC_ATMEL;
12346 		tg3_flag_set(tp, NVRAM_BUFFERED);
12347 		tg3_flag_set(tp, FLASH);
12348 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12349 		tp->nvram_pagesize = 256;
12350 		break;
12351 	case FLASH_5761VENDOR_ST_A_M45PE20:
12352 	case FLASH_5761VENDOR_ST_A_M45PE40:
12353 	case FLASH_5761VENDOR_ST_A_M45PE80:
12354 	case FLASH_5761VENDOR_ST_A_M45PE16:
12355 	case FLASH_5761VENDOR_ST_M_M45PE20:
12356 	case FLASH_5761VENDOR_ST_M_M45PE40:
12357 	case FLASH_5761VENDOR_ST_M_M45PE80:
12358 	case FLASH_5761VENDOR_ST_M_M45PE16:
12359 		tp->nvram_jedecnum = JEDEC_ST;
12360 		tg3_flag_set(tp, NVRAM_BUFFERED);
12361 		tg3_flag_set(tp, FLASH);
12362 		tp->nvram_pagesize = 256;
12363 		break;
12364 	}
12365 
12366 	if (protect) {
12367 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12368 	} else {
12369 		switch (nvcfg1) {
12370 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12371 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12372 		case FLASH_5761VENDOR_ST_A_M45PE16:
12373 		case FLASH_5761VENDOR_ST_M_M45PE16:
12374 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12375 			break;
12376 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12377 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12378 		case FLASH_5761VENDOR_ST_A_M45PE80:
12379 		case FLASH_5761VENDOR_ST_M_M45PE80:
12380 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12381 			break;
12382 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12383 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12384 		case FLASH_5761VENDOR_ST_A_M45PE40:
12385 		case FLASH_5761VENDOR_ST_M_M45PE40:
12386 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12387 			break;
12388 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12389 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12390 		case FLASH_5761VENDOR_ST_A_M45PE20:
12391 		case FLASH_5761VENDOR_ST_M_M45PE20:
12392 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12393 			break;
12394 		}
12395 	}
12396 }
12397 
12398 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12399 {
12400 	tp->nvram_jedecnum = JEDEC_ATMEL;
12401 	tg3_flag_set(tp, NVRAM_BUFFERED);
12402 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12403 }
12404 
12405 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12406 {
12407 	u32 nvcfg1;
12408 
12409 	nvcfg1 = tr32(NVRAM_CFG1);
12410 
12411 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12412 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12413 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12414 		tp->nvram_jedecnum = JEDEC_ATMEL;
12415 		tg3_flag_set(tp, NVRAM_BUFFERED);
12416 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12417 
12418 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12419 		tw32(NVRAM_CFG1, nvcfg1);
12420 		return;
12421 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12422 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12423 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12424 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12425 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12426 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12427 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12428 		tp->nvram_jedecnum = JEDEC_ATMEL;
12429 		tg3_flag_set(tp, NVRAM_BUFFERED);
12430 		tg3_flag_set(tp, FLASH);
12431 
12432 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12433 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12434 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12435 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12436 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12437 			break;
12438 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12439 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12440 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12441 			break;
12442 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12443 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12444 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12445 			break;
12446 		}
12447 		break;
12448 	case FLASH_5752VENDOR_ST_M45PE10:
12449 	case FLASH_5752VENDOR_ST_M45PE20:
12450 	case FLASH_5752VENDOR_ST_M45PE40:
12451 		tp->nvram_jedecnum = JEDEC_ST;
12452 		tg3_flag_set(tp, NVRAM_BUFFERED);
12453 		tg3_flag_set(tp, FLASH);
12454 
12455 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12456 		case FLASH_5752VENDOR_ST_M45PE10:
12457 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12458 			break;
12459 		case FLASH_5752VENDOR_ST_M45PE20:
12460 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12461 			break;
12462 		case FLASH_5752VENDOR_ST_M45PE40:
12463 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12464 			break;
12465 		}
12466 		break;
12467 	default:
12468 		tg3_flag_set(tp, NO_NVRAM);
12469 		return;
12470 	}
12471 
12472 	tg3_nvram_get_pagesize(tp, nvcfg1);
12473 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12474 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12475 }
12476 
12477 
12478 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12479 {
12480 	u32 nvcfg1;
12481 
12482 	nvcfg1 = tr32(NVRAM_CFG1);
12483 
12484 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12485 	case FLASH_5717VENDOR_ATMEL_EEPROM:
12486 	case FLASH_5717VENDOR_MICRO_EEPROM:
12487 		tp->nvram_jedecnum = JEDEC_ATMEL;
12488 		tg3_flag_set(tp, NVRAM_BUFFERED);
12489 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12490 
12491 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12492 		tw32(NVRAM_CFG1, nvcfg1);
12493 		return;
12494 	case FLASH_5717VENDOR_ATMEL_MDB011D:
12495 	case FLASH_5717VENDOR_ATMEL_ADB011B:
12496 	case FLASH_5717VENDOR_ATMEL_ADB011D:
12497 	case FLASH_5717VENDOR_ATMEL_MDB021D:
12498 	case FLASH_5717VENDOR_ATMEL_ADB021B:
12499 	case FLASH_5717VENDOR_ATMEL_ADB021D:
12500 	case FLASH_5717VENDOR_ATMEL_45USPT:
12501 		tp->nvram_jedecnum = JEDEC_ATMEL;
12502 		tg3_flag_set(tp, NVRAM_BUFFERED);
12503 		tg3_flag_set(tp, FLASH);
12504 
12505 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12506 		case FLASH_5717VENDOR_ATMEL_MDB021D:
12507 			/* Detect size with tg3_nvram_get_size() */
12508 			break;
12509 		case FLASH_5717VENDOR_ATMEL_ADB021B:
12510 		case FLASH_5717VENDOR_ATMEL_ADB021D:
12511 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12512 			break;
12513 		default:
12514 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12515 			break;
12516 		}
12517 		break;
12518 	case FLASH_5717VENDOR_ST_M_M25PE10:
12519 	case FLASH_5717VENDOR_ST_A_M25PE10:
12520 	case FLASH_5717VENDOR_ST_M_M45PE10:
12521 	case FLASH_5717VENDOR_ST_A_M45PE10:
12522 	case FLASH_5717VENDOR_ST_M_M25PE20:
12523 	case FLASH_5717VENDOR_ST_A_M25PE20:
12524 	case FLASH_5717VENDOR_ST_M_M45PE20:
12525 	case FLASH_5717VENDOR_ST_A_M45PE20:
12526 	case FLASH_5717VENDOR_ST_25USPT:
12527 	case FLASH_5717VENDOR_ST_45USPT:
12528 		tp->nvram_jedecnum = JEDEC_ST;
12529 		tg3_flag_set(tp, NVRAM_BUFFERED);
12530 		tg3_flag_set(tp, FLASH);
12531 
12532 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12533 		case FLASH_5717VENDOR_ST_M_M25PE20:
12534 		case FLASH_5717VENDOR_ST_M_M45PE20:
12535 			/* Detect size with tg3_nvram_get_size() */
12536 			break;
12537 		case FLASH_5717VENDOR_ST_A_M25PE20:
12538 		case FLASH_5717VENDOR_ST_A_M45PE20:
12539 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12540 			break;
12541 		default:
12542 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12543 			break;
12544 		}
12545 		break;
12546 	default:
12547 		tg3_flag_set(tp, NO_NVRAM);
12548 		return;
12549 	}
12550 
12551 	tg3_nvram_get_pagesize(tp, nvcfg1);
12552 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12553 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12554 }
12555 
12556 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12557 {
12558 	u32 nvcfg1, nvmpinstrp;
12559 
12560 	nvcfg1 = tr32(NVRAM_CFG1);
12561 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12562 
12563 	switch (nvmpinstrp) {
12564 	case FLASH_5720_EEPROM_HD:
12565 	case FLASH_5720_EEPROM_LD:
12566 		tp->nvram_jedecnum = JEDEC_ATMEL;
12567 		tg3_flag_set(tp, NVRAM_BUFFERED);
12568 
12569 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12570 		tw32(NVRAM_CFG1, nvcfg1);
12571 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12572 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12573 		else
12574 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12575 		return;
12576 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12577 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12578 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12579 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12580 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12581 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12582 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12583 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12584 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12585 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12586 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12587 	case FLASH_5720VENDOR_ATMEL_45USPT:
12588 		tp->nvram_jedecnum = JEDEC_ATMEL;
12589 		tg3_flag_set(tp, NVRAM_BUFFERED);
12590 		tg3_flag_set(tp, FLASH);
12591 
12592 		switch (nvmpinstrp) {
12593 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12594 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12595 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12596 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12597 			break;
12598 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12599 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12600 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12601 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12602 			break;
12603 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12604 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12605 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12606 			break;
12607 		default:
12608 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12609 			break;
12610 		}
12611 		break;
12612 	case FLASH_5720VENDOR_M_ST_M25PE10:
12613 	case FLASH_5720VENDOR_M_ST_M45PE10:
12614 	case FLASH_5720VENDOR_A_ST_M25PE10:
12615 	case FLASH_5720VENDOR_A_ST_M45PE10:
12616 	case FLASH_5720VENDOR_M_ST_M25PE20:
12617 	case FLASH_5720VENDOR_M_ST_M45PE20:
12618 	case FLASH_5720VENDOR_A_ST_M25PE20:
12619 	case FLASH_5720VENDOR_A_ST_M45PE20:
12620 	case FLASH_5720VENDOR_M_ST_M25PE40:
12621 	case FLASH_5720VENDOR_M_ST_M45PE40:
12622 	case FLASH_5720VENDOR_A_ST_M25PE40:
12623 	case FLASH_5720VENDOR_A_ST_M45PE40:
12624 	case FLASH_5720VENDOR_M_ST_M25PE80:
12625 	case FLASH_5720VENDOR_M_ST_M45PE80:
12626 	case FLASH_5720VENDOR_A_ST_M25PE80:
12627 	case FLASH_5720VENDOR_A_ST_M45PE80:
12628 	case FLASH_5720VENDOR_ST_25USPT:
12629 	case FLASH_5720VENDOR_ST_45USPT:
12630 		tp->nvram_jedecnum = JEDEC_ST;
12631 		tg3_flag_set(tp, NVRAM_BUFFERED);
12632 		tg3_flag_set(tp, FLASH);
12633 
12634 		switch (nvmpinstrp) {
12635 		case FLASH_5720VENDOR_M_ST_M25PE20:
12636 		case FLASH_5720VENDOR_M_ST_M45PE20:
12637 		case FLASH_5720VENDOR_A_ST_M25PE20:
12638 		case FLASH_5720VENDOR_A_ST_M45PE20:
12639 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12640 			break;
12641 		case FLASH_5720VENDOR_M_ST_M25PE40:
12642 		case FLASH_5720VENDOR_M_ST_M45PE40:
12643 		case FLASH_5720VENDOR_A_ST_M25PE40:
12644 		case FLASH_5720VENDOR_A_ST_M45PE40:
12645 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12646 			break;
12647 		case FLASH_5720VENDOR_M_ST_M25PE80:
12648 		case FLASH_5720VENDOR_M_ST_M45PE80:
12649 		case FLASH_5720VENDOR_A_ST_M25PE80:
12650 		case FLASH_5720VENDOR_A_ST_M45PE80:
12651 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12652 			break;
12653 		default:
12654 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12655 			break;
12656 		}
12657 		break;
12658 	default:
12659 		tg3_flag_set(tp, NO_NVRAM);
12660 		return;
12661 	}
12662 
12663 	tg3_nvram_get_pagesize(tp, nvcfg1);
12664 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12665 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12666 }
12667 
12668 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12669 static void __devinit tg3_nvram_init(struct tg3 *tp)
12670 {
12671 	tw32_f(GRC_EEPROM_ADDR,
12672 	     (EEPROM_ADDR_FSM_RESET |
12673 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
12674 	       EEPROM_ADDR_CLKPERD_SHIFT)));
12675 
12676 	msleep(1);
12677 
12678 	/* Enable seeprom accesses. */
12679 	tw32_f(GRC_LOCAL_CTRL,
12680 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12681 	udelay(100);
12682 
12683 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12684 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12685 		tg3_flag_set(tp, NVRAM);
12686 
12687 		if (tg3_nvram_lock(tp)) {
12688 			netdev_warn(tp->dev,
12689 				    "Cannot get nvram lock, %s failed\n",
12690 				    __func__);
12691 			return;
12692 		}
12693 		tg3_enable_nvram_access(tp);
12694 
12695 		tp->nvram_size = 0;
12696 
12697 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12698 			tg3_get_5752_nvram_info(tp);
12699 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12700 			tg3_get_5755_nvram_info(tp);
12701 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12702 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12703 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12704 			tg3_get_5787_nvram_info(tp);
12705 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12706 			tg3_get_5761_nvram_info(tp);
12707 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12708 			tg3_get_5906_nvram_info(tp);
12709 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12710 			 tg3_flag(tp, 57765_CLASS))
12711 			tg3_get_57780_nvram_info(tp);
12712 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12713 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12714 			tg3_get_5717_nvram_info(tp);
12715 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12716 			tg3_get_5720_nvram_info(tp);
12717 		else
12718 			tg3_get_nvram_info(tp);
12719 
12720 		if (tp->nvram_size == 0)
12721 			tg3_get_nvram_size(tp);
12722 
12723 		tg3_disable_nvram_access(tp);
12724 		tg3_nvram_unlock(tp);
12725 
12726 	} else {
12727 		tg3_flag_clear(tp, NVRAM);
12728 		tg3_flag_clear(tp, NVRAM_BUFFERED);
12729 
12730 		tg3_get_eeprom_size(tp);
12731 	}
12732 }
12733 
12734 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12735 				    u32 offset, u32 len, u8 *buf)
12736 {
12737 	int i, j, rc = 0;
12738 	u32 val;
12739 
12740 	for (i = 0; i < len; i += 4) {
12741 		u32 addr;
12742 		__be32 data;
12743 
12744 		addr = offset + i;
12745 
12746 		memcpy(&data, buf + i, 4);
12747 
12748 		/*
12749 		 * The SEEPROM interface expects the data to always be opposite
12750 		 * the native endian format.  We accomplish this by reversing
12751 		 * all the operations that would have been performed on the
12752 		 * data from a call to tg3_nvram_read_be32().
12753 		 */
12754 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12755 
12756 		val = tr32(GRC_EEPROM_ADDR);
12757 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12758 
12759 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12760 			EEPROM_ADDR_READ);
12761 		tw32(GRC_EEPROM_ADDR, val |
12762 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
12763 			(addr & EEPROM_ADDR_ADDR_MASK) |
12764 			EEPROM_ADDR_START |
12765 			EEPROM_ADDR_WRITE);
12766 
12767 		for (j = 0; j < 1000; j++) {
12768 			val = tr32(GRC_EEPROM_ADDR);
12769 
12770 			if (val & EEPROM_ADDR_COMPLETE)
12771 				break;
12772 			msleep(1);
12773 		}
12774 		if (!(val & EEPROM_ADDR_COMPLETE)) {
12775 			rc = -EBUSY;
12776 			break;
12777 		}
12778 	}
12779 
12780 	return rc;
12781 }
12782 
12783 /* offset and length are dword aligned */
12784 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12785 		u8 *buf)
12786 {
12787 	int ret = 0;
12788 	u32 pagesize = tp->nvram_pagesize;
12789 	u32 pagemask = pagesize - 1;
12790 	u32 nvram_cmd;
12791 	u8 *tmp;
12792 
12793 	tmp = kmalloc(pagesize, GFP_KERNEL);
12794 	if (tmp == NULL)
12795 		return -ENOMEM;
12796 
12797 	while (len) {
12798 		int j;
12799 		u32 phy_addr, page_off, size;
12800 
12801 		phy_addr = offset & ~pagemask;
12802 
12803 		for (j = 0; j < pagesize; j += 4) {
12804 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
12805 						  (__be32 *) (tmp + j));
12806 			if (ret)
12807 				break;
12808 		}
12809 		if (ret)
12810 			break;
12811 
12812 		page_off = offset & pagemask;
12813 		size = pagesize;
12814 		if (len < size)
12815 			size = len;
12816 
12817 		len -= size;
12818 
12819 		memcpy(tmp + page_off, buf, size);
12820 
12821 		offset = offset + (pagesize - page_off);
12822 
12823 		tg3_enable_nvram_access(tp);
12824 
12825 		/*
12826 		 * Before we can erase the flash page, we need
12827 		 * to issue a special "write enable" command.
12828 		 */
12829 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12830 
12831 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12832 			break;
12833 
12834 		/* Erase the target page */
12835 		tw32(NVRAM_ADDR, phy_addr);
12836 
12837 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12838 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12839 
12840 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12841 			break;
12842 
12843 		/* Issue another write enable to start the write. */
12844 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12845 
12846 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12847 			break;
12848 
12849 		for (j = 0; j < pagesize; j += 4) {
12850 			__be32 data;
12851 
12852 			data = *((__be32 *) (tmp + j));
12853 
12854 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
12855 
12856 			tw32(NVRAM_ADDR, phy_addr + j);
12857 
12858 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12859 				NVRAM_CMD_WR;
12860 
12861 			if (j == 0)
12862 				nvram_cmd |= NVRAM_CMD_FIRST;
12863 			else if (j == (pagesize - 4))
12864 				nvram_cmd |= NVRAM_CMD_LAST;
12865 
12866 			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12867 				break;
12868 		}
12869 		if (ret)
12870 			break;
12871 	}
12872 
12873 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12874 	tg3_nvram_exec_cmd(tp, nvram_cmd);
12875 
12876 	kfree(tmp);
12877 
12878 	return ret;
12879 }
12880 
12881 /* offset and length are dword aligned */
12882 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12883 		u8 *buf)
12884 {
12885 	int i, ret = 0;
12886 
12887 	for (i = 0; i < len; i += 4, offset += 4) {
12888 		u32 page_off, phy_addr, nvram_cmd;
12889 		__be32 data;
12890 
12891 		memcpy(&data, buf + i, 4);
12892 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
12893 
12894 		page_off = offset % tp->nvram_pagesize;
12895 
12896 		phy_addr = tg3_nvram_phys_addr(tp, offset);
12897 
12898 		tw32(NVRAM_ADDR, phy_addr);
12899 
12900 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12901 
12902 		if (page_off == 0 || i == 0)
12903 			nvram_cmd |= NVRAM_CMD_FIRST;
12904 		if (page_off == (tp->nvram_pagesize - 4))
12905 			nvram_cmd |= NVRAM_CMD_LAST;
12906 
12907 		if (i == (len - 4))
12908 			nvram_cmd |= NVRAM_CMD_LAST;
12909 
12910 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12911 		    !tg3_flag(tp, 5755_PLUS) &&
12912 		    (tp->nvram_jedecnum == JEDEC_ST) &&
12913 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
12914 
12915 			if ((ret = tg3_nvram_exec_cmd(tp,
12916 				NVRAM_CMD_WREN | NVRAM_CMD_GO |
12917 				NVRAM_CMD_DONE)))
12918 
12919 				break;
12920 		}
12921 		if (!tg3_flag(tp, FLASH)) {
12922 			/* We always do complete word writes to eeprom. */
12923 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12924 		}
12925 
12926 		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12927 			break;
12928 	}
12929 	return ret;
12930 }
12931 
12932 /* offset and length are dword aligned */
12933 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12934 {
12935 	int ret;
12936 
12937 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12938 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12939 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
12940 		udelay(40);
12941 	}
12942 
12943 	if (!tg3_flag(tp, NVRAM)) {
12944 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12945 	} else {
12946 		u32 grc_mode;
12947 
12948 		ret = tg3_nvram_lock(tp);
12949 		if (ret)
12950 			return ret;
12951 
12952 		tg3_enable_nvram_access(tp);
12953 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12954 			tw32(NVRAM_WRITE1, 0x406);
12955 
12956 		grc_mode = tr32(GRC_MODE);
12957 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12958 
12959 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12960 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
12961 				buf);
12962 		} else {
12963 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12964 				buf);
12965 		}
12966 
12967 		grc_mode = tr32(GRC_MODE);
12968 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12969 
12970 		tg3_disable_nvram_access(tp);
12971 		tg3_nvram_unlock(tp);
12972 	}
12973 
12974 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12975 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12976 		udelay(40);
12977 	}
12978 
12979 	return ret;
12980 }
12981 
12982 struct subsys_tbl_ent {
12983 	u16 subsys_vendor, subsys_devid;
12984 	u32 phy_id;
12985 };
12986 
12987 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12988 	/* Broadcom boards. */
12989 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12990 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12991 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12992 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12993 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12994 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12995 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12996 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12997 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12998 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12999 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13000 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13001 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13002 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13003 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13004 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13005 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13006 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13007 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13008 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13009 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13010 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13011 
13012 	/* 3com boards. */
13013 	{ TG3PCI_SUBVENDOR_ID_3COM,
13014 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13015 	{ TG3PCI_SUBVENDOR_ID_3COM,
13016 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13017 	{ TG3PCI_SUBVENDOR_ID_3COM,
13018 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13019 	{ TG3PCI_SUBVENDOR_ID_3COM,
13020 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13021 	{ TG3PCI_SUBVENDOR_ID_3COM,
13022 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13023 
13024 	/* DELL boards. */
13025 	{ TG3PCI_SUBVENDOR_ID_DELL,
13026 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13027 	{ TG3PCI_SUBVENDOR_ID_DELL,
13028 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13029 	{ TG3PCI_SUBVENDOR_ID_DELL,
13030 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13031 	{ TG3PCI_SUBVENDOR_ID_DELL,
13032 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13033 
13034 	/* Compaq boards. */
13035 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13036 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13037 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13038 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13039 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13040 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13041 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13042 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13043 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13044 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13045 
13046 	/* IBM boards. */
13047 	{ TG3PCI_SUBVENDOR_ID_IBM,
13048 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13049 };
13050 
13051 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13052 {
13053 	int i;
13054 
13055 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13056 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13057 		     tp->pdev->subsystem_vendor) &&
13058 		    (subsys_id_to_phy_id[i].subsys_devid ==
13059 		     tp->pdev->subsystem_device))
13060 			return &subsys_id_to_phy_id[i];
13061 	}
13062 	return NULL;
13063 }
13064 
13065 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13066 {
13067 	u32 val;
13068 
13069 	tp->phy_id = TG3_PHY_ID_INVALID;
13070 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13071 
13072 	/* Assume an onboard device and WOL capable by default.  */
13073 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13074 	tg3_flag_set(tp, WOL_CAP);
13075 
13076 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13077 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13078 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13079 			tg3_flag_set(tp, IS_NIC);
13080 		}
13081 		val = tr32(VCPU_CFGSHDW);
13082 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13083 			tg3_flag_set(tp, ASPM_WORKAROUND);
13084 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13085 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13086 			tg3_flag_set(tp, WOL_ENABLE);
13087 			device_set_wakeup_enable(&tp->pdev->dev, true);
13088 		}
13089 		goto done;
13090 	}
13091 
13092 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13093 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13094 		u32 nic_cfg, led_cfg;
13095 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13096 		int eeprom_phy_serdes = 0;
13097 
13098 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13099 		tp->nic_sram_data_cfg = nic_cfg;
13100 
13101 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13102 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13103 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13104 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13105 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13106 		    (ver > 0) && (ver < 0x100))
13107 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13108 
13109 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13110 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13111 
13112 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13113 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13114 			eeprom_phy_serdes = 1;
13115 
13116 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13117 		if (nic_phy_id != 0) {
13118 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13119 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13120 
13121 			eeprom_phy_id  = (id1 >> 16) << 10;
13122 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13123 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13124 		} else
13125 			eeprom_phy_id = 0;
13126 
13127 		tp->phy_id = eeprom_phy_id;
13128 		if (eeprom_phy_serdes) {
13129 			if (!tg3_flag(tp, 5705_PLUS))
13130 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13131 			else
13132 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13133 		}
13134 
13135 		if (tg3_flag(tp, 5750_PLUS))
13136 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13137 				    SHASTA_EXT_LED_MODE_MASK);
13138 		else
13139 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13140 
13141 		switch (led_cfg) {
13142 		default:
13143 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13144 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13145 			break;
13146 
13147 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13148 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13149 			break;
13150 
13151 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13152 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13153 
13154 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13155 			 * read on some older 5700/5701 bootcode.
13156 			 */
13157 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13158 			    ASIC_REV_5700 ||
13159 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13160 			    ASIC_REV_5701)
13161 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13162 
13163 			break;
13164 
13165 		case SHASTA_EXT_LED_SHARED:
13166 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13167 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13168 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13169 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13170 						 LED_CTRL_MODE_PHY_2);
13171 			break;
13172 
13173 		case SHASTA_EXT_LED_MAC:
13174 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13175 			break;
13176 
13177 		case SHASTA_EXT_LED_COMBO:
13178 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13179 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13180 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13181 						 LED_CTRL_MODE_PHY_2);
13182 			break;
13183 
13184 		}
13185 
13186 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13187 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13188 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13189 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13190 
13191 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13192 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13193 
13194 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13195 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13196 			if ((tp->pdev->subsystem_vendor ==
13197 			     PCI_VENDOR_ID_ARIMA) &&
13198 			    (tp->pdev->subsystem_device == 0x205a ||
13199 			     tp->pdev->subsystem_device == 0x2063))
13200 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13201 		} else {
13202 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13203 			tg3_flag_set(tp, IS_NIC);
13204 		}
13205 
13206 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13207 			tg3_flag_set(tp, ENABLE_ASF);
13208 			if (tg3_flag(tp, 5750_PLUS))
13209 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13210 		}
13211 
13212 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13213 		    tg3_flag(tp, 5750_PLUS))
13214 			tg3_flag_set(tp, ENABLE_APE);
13215 
13216 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13217 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13218 			tg3_flag_clear(tp, WOL_CAP);
13219 
13220 		if (tg3_flag(tp, WOL_CAP) &&
13221 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13222 			tg3_flag_set(tp, WOL_ENABLE);
13223 			device_set_wakeup_enable(&tp->pdev->dev, true);
13224 		}
13225 
13226 		if (cfg2 & (1 << 17))
13227 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13228 
13229 		/* serdes signal pre-emphasis in register 0x590 set by */
13230 		/* bootcode if bit 18 is set */
13231 		if (cfg2 & (1 << 18))
13232 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13233 
13234 		if ((tg3_flag(tp, 57765_PLUS) ||
13235 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13236 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13237 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13238 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13239 
13240 		if (tg3_flag(tp, PCI_EXPRESS) &&
13241 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13242 		    !tg3_flag(tp, 57765_PLUS)) {
13243 			u32 cfg3;
13244 
13245 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13246 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13247 				tg3_flag_set(tp, ASPM_WORKAROUND);
13248 		}
13249 
13250 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13251 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13252 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13253 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13254 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13255 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13256 	}
13257 done:
13258 	if (tg3_flag(tp, WOL_CAP))
13259 		device_set_wakeup_enable(&tp->pdev->dev,
13260 					 tg3_flag(tp, WOL_ENABLE));
13261 	else
13262 		device_set_wakeup_capable(&tp->pdev->dev, false);
13263 }
13264 
13265 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13266 {
13267 	int i;
13268 	u32 val;
13269 
13270 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13271 	tw32(OTP_CTRL, cmd);
13272 
13273 	/* Wait for up to 1 ms for command to execute. */
13274 	for (i = 0; i < 100; i++) {
13275 		val = tr32(OTP_STATUS);
13276 		if (val & OTP_STATUS_CMD_DONE)
13277 			break;
13278 		udelay(10);
13279 	}
13280 
13281 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13282 }
13283 
13284 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13285  * configuration is a 32-bit value that straddles the alignment boundary.
13286  * We do two 32-bit reads and then shift and merge the results.
13287  */
13288 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13289 {
13290 	u32 bhalf_otp, thalf_otp;
13291 
13292 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13293 
13294 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13295 		return 0;
13296 
13297 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13298 
13299 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13300 		return 0;
13301 
13302 	thalf_otp = tr32(OTP_READ_DATA);
13303 
13304 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13305 
13306 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13307 		return 0;
13308 
13309 	bhalf_otp = tr32(OTP_READ_DATA);
13310 
13311 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13312 }
13313 
13314 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13315 {
13316 	u32 adv = ADVERTISED_Autoneg;
13317 
13318 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13319 		adv |= ADVERTISED_1000baseT_Half |
13320 		       ADVERTISED_1000baseT_Full;
13321 
13322 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13323 		adv |= ADVERTISED_100baseT_Half |
13324 		       ADVERTISED_100baseT_Full |
13325 		       ADVERTISED_10baseT_Half |
13326 		       ADVERTISED_10baseT_Full |
13327 		       ADVERTISED_TP;
13328 	else
13329 		adv |= ADVERTISED_FIBRE;
13330 
13331 	tp->link_config.advertising = adv;
13332 	tp->link_config.speed = SPEED_INVALID;
13333 	tp->link_config.duplex = DUPLEX_INVALID;
13334 	tp->link_config.autoneg = AUTONEG_ENABLE;
13335 	tp->link_config.active_speed = SPEED_INVALID;
13336 	tp->link_config.active_duplex = DUPLEX_INVALID;
13337 	tp->link_config.orig_speed = SPEED_INVALID;
13338 	tp->link_config.orig_duplex = DUPLEX_INVALID;
13339 	tp->link_config.orig_autoneg = AUTONEG_INVALID;
13340 }
13341 
13342 static int __devinit tg3_phy_probe(struct tg3 *tp)
13343 {
13344 	u32 hw_phy_id_1, hw_phy_id_2;
13345 	u32 hw_phy_id, hw_phy_id_masked;
13346 	int err;
13347 
13348 	/* flow control autonegotiation is default behavior */
13349 	tg3_flag_set(tp, PAUSE_AUTONEG);
13350 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13351 
13352 	if (tg3_flag(tp, USE_PHYLIB))
13353 		return tg3_phy_init(tp);
13354 
13355 	/* Reading the PHY ID register can conflict with ASF
13356 	 * firmware access to the PHY hardware.
13357 	 */
13358 	err = 0;
13359 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13360 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13361 	} else {
13362 		/* Now read the physical PHY_ID from the chip and verify
13363 		 * that it is sane.  If it doesn't look good, we fall back
13364 		 * to either the hard-coded table based PHY_ID and failing
13365 		 * that the value found in the eeprom area.
13366 		 */
13367 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13368 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13369 
13370 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13371 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13372 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13373 
13374 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13375 	}
13376 
13377 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13378 		tp->phy_id = hw_phy_id;
13379 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13380 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13381 		else
13382 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13383 	} else {
13384 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13385 			/* Do nothing, phy ID already set up in
13386 			 * tg3_get_eeprom_hw_cfg().
13387 			 */
13388 		} else {
13389 			struct subsys_tbl_ent *p;
13390 
13391 			/* No eeprom signature?  Try the hardcoded
13392 			 * subsys device table.
13393 			 */
13394 			p = tg3_lookup_by_subsys(tp);
13395 			if (!p)
13396 				return -ENODEV;
13397 
13398 			tp->phy_id = p->phy_id;
13399 			if (!tp->phy_id ||
13400 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13401 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13402 		}
13403 	}
13404 
13405 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13406 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13407 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13408 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13409 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13410 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13411 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13412 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13413 
13414 	tg3_phy_init_link_config(tp);
13415 
13416 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13417 	    !tg3_flag(tp, ENABLE_APE) &&
13418 	    !tg3_flag(tp, ENABLE_ASF)) {
13419 		u32 bmsr, dummy;
13420 
13421 		tg3_readphy(tp, MII_BMSR, &bmsr);
13422 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13423 		    (bmsr & BMSR_LSTATUS))
13424 			goto skip_phy_reset;
13425 
13426 		err = tg3_phy_reset(tp);
13427 		if (err)
13428 			return err;
13429 
13430 		tg3_phy_set_wirespeed(tp);
13431 
13432 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13433 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13434 					    tp->link_config.flowctrl);
13435 
13436 			tg3_writephy(tp, MII_BMCR,
13437 				     BMCR_ANENABLE | BMCR_ANRESTART);
13438 		}
13439 	}
13440 
13441 skip_phy_reset:
13442 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13443 		err = tg3_init_5401phy_dsp(tp);
13444 		if (err)
13445 			return err;
13446 
13447 		err = tg3_init_5401phy_dsp(tp);
13448 	}
13449 
13450 	return err;
13451 }
13452 
13453 static void __devinit tg3_read_vpd(struct tg3 *tp)
13454 {
13455 	u8 *vpd_data;
13456 	unsigned int block_end, rosize, len;
13457 	u32 vpdlen;
13458 	int j, i = 0;
13459 
13460 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13461 	if (!vpd_data)
13462 		goto out_no_vpd;
13463 
13464 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13465 	if (i < 0)
13466 		goto out_not_found;
13467 
13468 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13469 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13470 	i += PCI_VPD_LRDT_TAG_SIZE;
13471 
13472 	if (block_end > vpdlen)
13473 		goto out_not_found;
13474 
13475 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13476 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13477 	if (j > 0) {
13478 		len = pci_vpd_info_field_size(&vpd_data[j]);
13479 
13480 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13481 		if (j + len > block_end || len != 4 ||
13482 		    memcmp(&vpd_data[j], "1028", 4))
13483 			goto partno;
13484 
13485 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13486 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13487 		if (j < 0)
13488 			goto partno;
13489 
13490 		len = pci_vpd_info_field_size(&vpd_data[j]);
13491 
13492 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13493 		if (j + len > block_end)
13494 			goto partno;
13495 
13496 		memcpy(tp->fw_ver, &vpd_data[j], len);
13497 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13498 	}
13499 
13500 partno:
13501 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13502 				      PCI_VPD_RO_KEYWORD_PARTNO);
13503 	if (i < 0)
13504 		goto out_not_found;
13505 
13506 	len = pci_vpd_info_field_size(&vpd_data[i]);
13507 
13508 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13509 	if (len > TG3_BPN_SIZE ||
13510 	    (len + i) > vpdlen)
13511 		goto out_not_found;
13512 
13513 	memcpy(tp->board_part_number, &vpd_data[i], len);
13514 
13515 out_not_found:
13516 	kfree(vpd_data);
13517 	if (tp->board_part_number[0])
13518 		return;
13519 
13520 out_no_vpd:
13521 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13522 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13523 			strcpy(tp->board_part_number, "BCM5717");
13524 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13525 			strcpy(tp->board_part_number, "BCM5718");
13526 		else
13527 			goto nomatch;
13528 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13529 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13530 			strcpy(tp->board_part_number, "BCM57780");
13531 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13532 			strcpy(tp->board_part_number, "BCM57760");
13533 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13534 			strcpy(tp->board_part_number, "BCM57790");
13535 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13536 			strcpy(tp->board_part_number, "BCM57788");
13537 		else
13538 			goto nomatch;
13539 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13540 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13541 			strcpy(tp->board_part_number, "BCM57761");
13542 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13543 			strcpy(tp->board_part_number, "BCM57765");
13544 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13545 			strcpy(tp->board_part_number, "BCM57781");
13546 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13547 			strcpy(tp->board_part_number, "BCM57785");
13548 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13549 			strcpy(tp->board_part_number, "BCM57791");
13550 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13551 			strcpy(tp->board_part_number, "BCM57795");
13552 		else
13553 			goto nomatch;
13554 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13555 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13556 			strcpy(tp->board_part_number, "BCM57762");
13557 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13558 			strcpy(tp->board_part_number, "BCM57766");
13559 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13560 			strcpy(tp->board_part_number, "BCM57782");
13561 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13562 			strcpy(tp->board_part_number, "BCM57786");
13563 		else
13564 			goto nomatch;
13565 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13566 		strcpy(tp->board_part_number, "BCM95906");
13567 	} else {
13568 nomatch:
13569 		strcpy(tp->board_part_number, "none");
13570 	}
13571 }
13572 
13573 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13574 {
13575 	u32 val;
13576 
13577 	if (tg3_nvram_read(tp, offset, &val) ||
13578 	    (val & 0xfc000000) != 0x0c000000 ||
13579 	    tg3_nvram_read(tp, offset + 4, &val) ||
13580 	    val != 0)
13581 		return 0;
13582 
13583 	return 1;
13584 }
13585 
13586 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13587 {
13588 	u32 val, offset, start, ver_offset;
13589 	int i, dst_off;
13590 	bool newver = false;
13591 
13592 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13593 	    tg3_nvram_read(tp, 0x4, &start))
13594 		return;
13595 
13596 	offset = tg3_nvram_logical_addr(tp, offset);
13597 
13598 	if (tg3_nvram_read(tp, offset, &val))
13599 		return;
13600 
13601 	if ((val & 0xfc000000) == 0x0c000000) {
13602 		if (tg3_nvram_read(tp, offset + 4, &val))
13603 			return;
13604 
13605 		if (val == 0)
13606 			newver = true;
13607 	}
13608 
13609 	dst_off = strlen(tp->fw_ver);
13610 
13611 	if (newver) {
13612 		if (TG3_VER_SIZE - dst_off < 16 ||
13613 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13614 			return;
13615 
13616 		offset = offset + ver_offset - start;
13617 		for (i = 0; i < 16; i += 4) {
13618 			__be32 v;
13619 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13620 				return;
13621 
13622 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13623 		}
13624 	} else {
13625 		u32 major, minor;
13626 
13627 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13628 			return;
13629 
13630 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13631 			TG3_NVM_BCVER_MAJSFT;
13632 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13633 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13634 			 "v%d.%02d", major, minor);
13635 	}
13636 }
13637 
13638 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13639 {
13640 	u32 val, major, minor;
13641 
13642 	/* Use native endian representation */
13643 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13644 		return;
13645 
13646 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13647 		TG3_NVM_HWSB_CFG1_MAJSFT;
13648 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13649 		TG3_NVM_HWSB_CFG1_MINSFT;
13650 
13651 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13652 }
13653 
13654 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13655 {
13656 	u32 offset, major, minor, build;
13657 
13658 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13659 
13660 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13661 		return;
13662 
13663 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13664 	case TG3_EEPROM_SB_REVISION_0:
13665 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13666 		break;
13667 	case TG3_EEPROM_SB_REVISION_2:
13668 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13669 		break;
13670 	case TG3_EEPROM_SB_REVISION_3:
13671 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13672 		break;
13673 	case TG3_EEPROM_SB_REVISION_4:
13674 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13675 		break;
13676 	case TG3_EEPROM_SB_REVISION_5:
13677 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13678 		break;
13679 	case TG3_EEPROM_SB_REVISION_6:
13680 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13681 		break;
13682 	default:
13683 		return;
13684 	}
13685 
13686 	if (tg3_nvram_read(tp, offset, &val))
13687 		return;
13688 
13689 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13690 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13691 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13692 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13693 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13694 
13695 	if (minor > 99 || build > 26)
13696 		return;
13697 
13698 	offset = strlen(tp->fw_ver);
13699 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13700 		 " v%d.%02d", major, minor);
13701 
13702 	if (build > 0) {
13703 		offset = strlen(tp->fw_ver);
13704 		if (offset < TG3_VER_SIZE - 1)
13705 			tp->fw_ver[offset] = 'a' + build - 1;
13706 	}
13707 }
13708 
13709 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13710 {
13711 	u32 val, offset, start;
13712 	int i, vlen;
13713 
13714 	for (offset = TG3_NVM_DIR_START;
13715 	     offset < TG3_NVM_DIR_END;
13716 	     offset += TG3_NVM_DIRENT_SIZE) {
13717 		if (tg3_nvram_read(tp, offset, &val))
13718 			return;
13719 
13720 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13721 			break;
13722 	}
13723 
13724 	if (offset == TG3_NVM_DIR_END)
13725 		return;
13726 
13727 	if (!tg3_flag(tp, 5705_PLUS))
13728 		start = 0x08000000;
13729 	else if (tg3_nvram_read(tp, offset - 4, &start))
13730 		return;
13731 
13732 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13733 	    !tg3_fw_img_is_valid(tp, offset) ||
13734 	    tg3_nvram_read(tp, offset + 8, &val))
13735 		return;
13736 
13737 	offset += val - start;
13738 
13739 	vlen = strlen(tp->fw_ver);
13740 
13741 	tp->fw_ver[vlen++] = ',';
13742 	tp->fw_ver[vlen++] = ' ';
13743 
13744 	for (i = 0; i < 4; i++) {
13745 		__be32 v;
13746 		if (tg3_nvram_read_be32(tp, offset, &v))
13747 			return;
13748 
13749 		offset += sizeof(v);
13750 
13751 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13752 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13753 			break;
13754 		}
13755 
13756 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13757 		vlen += sizeof(v);
13758 	}
13759 }
13760 
13761 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13762 {
13763 	int vlen;
13764 	u32 apedata;
13765 	char *fwtype;
13766 
13767 	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13768 		return;
13769 
13770 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13771 	if (apedata != APE_SEG_SIG_MAGIC)
13772 		return;
13773 
13774 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13775 	if (!(apedata & APE_FW_STATUS_READY))
13776 		return;
13777 
13778 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13779 
13780 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13781 		tg3_flag_set(tp, APE_HAS_NCSI);
13782 		fwtype = "NCSI";
13783 	} else {
13784 		fwtype = "DASH";
13785 	}
13786 
13787 	vlen = strlen(tp->fw_ver);
13788 
13789 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13790 		 fwtype,
13791 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13792 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13793 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13794 		 (apedata & APE_FW_VERSION_BLDMSK));
13795 }
13796 
13797 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13798 {
13799 	u32 val;
13800 	bool vpd_vers = false;
13801 
13802 	if (tp->fw_ver[0] != 0)
13803 		vpd_vers = true;
13804 
13805 	if (tg3_flag(tp, NO_NVRAM)) {
13806 		strcat(tp->fw_ver, "sb");
13807 		return;
13808 	}
13809 
13810 	if (tg3_nvram_read(tp, 0, &val))
13811 		return;
13812 
13813 	if (val == TG3_EEPROM_MAGIC)
13814 		tg3_read_bc_ver(tp);
13815 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13816 		tg3_read_sb_ver(tp, val);
13817 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13818 		tg3_read_hwsb_ver(tp);
13819 	else
13820 		return;
13821 
13822 	if (vpd_vers)
13823 		goto done;
13824 
13825 	if (tg3_flag(tp, ENABLE_APE)) {
13826 		if (tg3_flag(tp, ENABLE_ASF))
13827 			tg3_read_dash_ver(tp);
13828 	} else if (tg3_flag(tp, ENABLE_ASF)) {
13829 		tg3_read_mgmtfw_ver(tp);
13830 	}
13831 
13832 done:
13833 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13834 }
13835 
13836 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13837 
13838 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13839 {
13840 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13841 		return TG3_RX_RET_MAX_SIZE_5717;
13842 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13843 		return TG3_RX_RET_MAX_SIZE_5700;
13844 	else
13845 		return TG3_RX_RET_MAX_SIZE_5705;
13846 }
13847 
13848 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13849 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13850 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13851 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13852 	{ },
13853 };
13854 
13855 static int __devinit tg3_get_invariants(struct tg3 *tp)
13856 {
13857 	u32 misc_ctrl_reg;
13858 	u32 pci_state_reg, grc_misc_cfg;
13859 	u32 val;
13860 	u16 pci_cmd;
13861 	int err;
13862 
13863 	/* Force memory write invalidate off.  If we leave it on,
13864 	 * then on 5700_BX chips we have to enable a workaround.
13865 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13866 	 * to match the cacheline size.  The Broadcom driver have this
13867 	 * workaround but turns MWI off all the times so never uses
13868 	 * it.  This seems to suggest that the workaround is insufficient.
13869 	 */
13870 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13871 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13872 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13873 
13874 	/* Important! -- Make sure register accesses are byteswapped
13875 	 * correctly.  Also, for those chips that require it, make
13876 	 * sure that indirect register accesses are enabled before
13877 	 * the first operation.
13878 	 */
13879 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13880 			      &misc_ctrl_reg);
13881 	tp->misc_host_ctrl |= (misc_ctrl_reg &
13882 			       MISC_HOST_CTRL_CHIPREV);
13883 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13884 			       tp->misc_host_ctrl);
13885 
13886 	tp->pci_chip_rev_id = (misc_ctrl_reg >>
13887 			       MISC_HOST_CTRL_CHIPREV_SHIFT);
13888 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13889 		u32 prod_id_asic_rev;
13890 
13891 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13892 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13893 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13894 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13895 			pci_read_config_dword(tp->pdev,
13896 					      TG3PCI_GEN2_PRODID_ASICREV,
13897 					      &prod_id_asic_rev);
13898 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13899 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13900 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13901 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13902 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13903 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13904 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13905 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13906 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13907 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13908 			pci_read_config_dword(tp->pdev,
13909 					      TG3PCI_GEN15_PRODID_ASICREV,
13910 					      &prod_id_asic_rev);
13911 		else
13912 			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13913 					      &prod_id_asic_rev);
13914 
13915 		tp->pci_chip_rev_id = prod_id_asic_rev;
13916 	}
13917 
13918 	/* Wrong chip ID in 5752 A0. This code can be removed later
13919 	 * as A0 is not in production.
13920 	 */
13921 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13922 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13923 
13924 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13925 	 * we need to disable memory and use config. cycles
13926 	 * only to access all registers. The 5702/03 chips
13927 	 * can mistakenly decode the special cycles from the
13928 	 * ICH chipsets as memory write cycles, causing corruption
13929 	 * of register and memory space. Only certain ICH bridges
13930 	 * will drive special cycles with non-zero data during the
13931 	 * address phase which can fall within the 5703's address
13932 	 * range. This is not an ICH bug as the PCI spec allows
13933 	 * non-zero address during special cycles. However, only
13934 	 * these ICH bridges are known to drive non-zero addresses
13935 	 * during special cycles.
13936 	 *
13937 	 * Since special cycles do not cross PCI bridges, we only
13938 	 * enable this workaround if the 5703 is on the secondary
13939 	 * bus of these ICH bridges.
13940 	 */
13941 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13942 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13943 		static struct tg3_dev_id {
13944 			u32	vendor;
13945 			u32	device;
13946 			u32	rev;
13947 		} ich_chipsets[] = {
13948 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13949 			  PCI_ANY_ID },
13950 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13951 			  PCI_ANY_ID },
13952 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13953 			  0xa },
13954 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13955 			  PCI_ANY_ID },
13956 			{ },
13957 		};
13958 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
13959 		struct pci_dev *bridge = NULL;
13960 
13961 		while (pci_id->vendor != 0) {
13962 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
13963 						bridge);
13964 			if (!bridge) {
13965 				pci_id++;
13966 				continue;
13967 			}
13968 			if (pci_id->rev != PCI_ANY_ID) {
13969 				if (bridge->revision > pci_id->rev)
13970 					continue;
13971 			}
13972 			if (bridge->subordinate &&
13973 			    (bridge->subordinate->number ==
13974 			     tp->pdev->bus->number)) {
13975 				tg3_flag_set(tp, ICH_WORKAROUND);
13976 				pci_dev_put(bridge);
13977 				break;
13978 			}
13979 		}
13980 	}
13981 
13982 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13983 		static struct tg3_dev_id {
13984 			u32	vendor;
13985 			u32	device;
13986 		} bridge_chipsets[] = {
13987 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13988 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13989 			{ },
13990 		};
13991 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13992 		struct pci_dev *bridge = NULL;
13993 
13994 		while (pci_id->vendor != 0) {
13995 			bridge = pci_get_device(pci_id->vendor,
13996 						pci_id->device,
13997 						bridge);
13998 			if (!bridge) {
13999 				pci_id++;
14000 				continue;
14001 			}
14002 			if (bridge->subordinate &&
14003 			    (bridge->subordinate->number <=
14004 			     tp->pdev->bus->number) &&
14005 			    (bridge->subordinate->subordinate >=
14006 			     tp->pdev->bus->number)) {
14007 				tg3_flag_set(tp, 5701_DMA_BUG);
14008 				pci_dev_put(bridge);
14009 				break;
14010 			}
14011 		}
14012 	}
14013 
14014 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14015 	 * DMA addresses > 40-bit. This bridge may have other additional
14016 	 * 57xx devices behind it in some 4-port NIC designs for example.
14017 	 * Any tg3 device found behind the bridge will also need the 40-bit
14018 	 * DMA workaround.
14019 	 */
14020 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14021 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14022 		tg3_flag_set(tp, 5780_CLASS);
14023 		tg3_flag_set(tp, 40BIT_DMA_BUG);
14024 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14025 	} else {
14026 		struct pci_dev *bridge = NULL;
14027 
14028 		do {
14029 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14030 						PCI_DEVICE_ID_SERVERWORKS_EPB,
14031 						bridge);
14032 			if (bridge && bridge->subordinate &&
14033 			    (bridge->subordinate->number <=
14034 			     tp->pdev->bus->number) &&
14035 			    (bridge->subordinate->subordinate >=
14036 			     tp->pdev->bus->number)) {
14037 				tg3_flag_set(tp, 40BIT_DMA_BUG);
14038 				pci_dev_put(bridge);
14039 				break;
14040 			}
14041 		} while (bridge);
14042 	}
14043 
14044 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14045 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14046 		tp->pdev_peer = tg3_find_peer(tp);
14047 
14048 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14049 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14050 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14051 		tg3_flag_set(tp, 5717_PLUS);
14052 
14053 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14054 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14055 		tg3_flag_set(tp, 57765_CLASS);
14056 
14057 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14058 		tg3_flag_set(tp, 57765_PLUS);
14059 
14060 	/* Intentionally exclude ASIC_REV_5906 */
14061 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14062 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14063 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14064 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14065 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14066 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14067 	    tg3_flag(tp, 57765_PLUS))
14068 		tg3_flag_set(tp, 5755_PLUS);
14069 
14070 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14071 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14072 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14073 	    tg3_flag(tp, 5755_PLUS) ||
14074 	    tg3_flag(tp, 5780_CLASS))
14075 		tg3_flag_set(tp, 5750_PLUS);
14076 
14077 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14078 	    tg3_flag(tp, 5750_PLUS))
14079 		tg3_flag_set(tp, 5705_PLUS);
14080 
14081 	/* Determine TSO capabilities */
14082 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14083 		; /* Do nothing. HW bug. */
14084 	else if (tg3_flag(tp, 57765_PLUS))
14085 		tg3_flag_set(tp, HW_TSO_3);
14086 	else if (tg3_flag(tp, 5755_PLUS) ||
14087 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14088 		tg3_flag_set(tp, HW_TSO_2);
14089 	else if (tg3_flag(tp, 5750_PLUS)) {
14090 		tg3_flag_set(tp, HW_TSO_1);
14091 		tg3_flag_set(tp, TSO_BUG);
14092 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14093 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14094 			tg3_flag_clear(tp, TSO_BUG);
14095 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14096 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14097 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14098 			tg3_flag_set(tp, TSO_BUG);
14099 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14100 			tp->fw_needed = FIRMWARE_TG3TSO5;
14101 		else
14102 			tp->fw_needed = FIRMWARE_TG3TSO;
14103 	}
14104 
14105 	/* Selectively allow TSO based on operating conditions */
14106 	if (tg3_flag(tp, HW_TSO_1) ||
14107 	    tg3_flag(tp, HW_TSO_2) ||
14108 	    tg3_flag(tp, HW_TSO_3) ||
14109 	    tp->fw_needed) {
14110 		/* For firmware TSO, assume ASF is disabled.
14111 		 * We'll disable TSO later if we discover ASF
14112 		 * is enabled in tg3_get_eeprom_hw_cfg().
14113 		 */
14114 		tg3_flag_set(tp, TSO_CAPABLE);
14115 	} else {
14116 		tg3_flag_clear(tp, TSO_CAPABLE);
14117 		tg3_flag_clear(tp, TSO_BUG);
14118 		tp->fw_needed = NULL;
14119 	}
14120 
14121 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14122 		tp->fw_needed = FIRMWARE_TG3;
14123 
14124 	tp->irq_max = 1;
14125 
14126 	if (tg3_flag(tp, 5750_PLUS)) {
14127 		tg3_flag_set(tp, SUPPORT_MSI);
14128 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14129 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14130 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14131 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14132 		     tp->pdev_peer == tp->pdev))
14133 			tg3_flag_clear(tp, SUPPORT_MSI);
14134 
14135 		if (tg3_flag(tp, 5755_PLUS) ||
14136 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14137 			tg3_flag_set(tp, 1SHOT_MSI);
14138 		}
14139 
14140 		if (tg3_flag(tp, 57765_PLUS)) {
14141 			tg3_flag_set(tp, SUPPORT_MSIX);
14142 			tp->irq_max = TG3_IRQ_MAX_VECS;
14143 			tg3_rss_init_dflt_indir_tbl(tp);
14144 		}
14145 	}
14146 
14147 	if (tg3_flag(tp, 5755_PLUS))
14148 		tg3_flag_set(tp, SHORT_DMA_BUG);
14149 
14150 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14151 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14152 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14153 		tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14154 
14155 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14156 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14157 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14158 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14159 
14160 	if (tg3_flag(tp, 57765_PLUS) &&
14161 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14162 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14163 
14164 	if (!tg3_flag(tp, 5705_PLUS) ||
14165 	    tg3_flag(tp, 5780_CLASS) ||
14166 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14167 		tg3_flag_set(tp, JUMBO_CAPABLE);
14168 
14169 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14170 			      &pci_state_reg);
14171 
14172 	if (pci_is_pcie(tp->pdev)) {
14173 		u16 lnkctl;
14174 
14175 		tg3_flag_set(tp, PCI_EXPRESS);
14176 
14177 		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14178 			int readrq = pcie_get_readrq(tp->pdev);
14179 			if (readrq > 2048)
14180 				pcie_set_readrq(tp->pdev, 2048);
14181 		}
14182 
14183 		pci_read_config_word(tp->pdev,
14184 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14185 				     &lnkctl);
14186 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14187 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14188 			    ASIC_REV_5906) {
14189 				tg3_flag_clear(tp, HW_TSO_2);
14190 				tg3_flag_clear(tp, TSO_CAPABLE);
14191 			}
14192 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14193 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14194 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14195 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14196 				tg3_flag_set(tp, CLKREQ_BUG);
14197 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14198 			tg3_flag_set(tp, L1PLLPD_EN);
14199 		}
14200 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14201 		/* BCM5785 devices are effectively PCIe devices, and should
14202 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14203 		 * section.
14204 		 */
14205 		tg3_flag_set(tp, PCI_EXPRESS);
14206 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14207 		   tg3_flag(tp, 5780_CLASS)) {
14208 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14209 		if (!tp->pcix_cap) {
14210 			dev_err(&tp->pdev->dev,
14211 				"Cannot find PCI-X capability, aborting\n");
14212 			return -EIO;
14213 		}
14214 
14215 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14216 			tg3_flag_set(tp, PCIX_MODE);
14217 	}
14218 
14219 	/* If we have an AMD 762 or VIA K8T800 chipset, write
14220 	 * reordering to the mailbox registers done by the host
14221 	 * controller can cause major troubles.  We read back from
14222 	 * every mailbox register write to force the writes to be
14223 	 * posted to the chip in order.
14224 	 */
14225 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14226 	    !tg3_flag(tp, PCI_EXPRESS))
14227 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14228 
14229 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14230 			     &tp->pci_cacheline_sz);
14231 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14232 			     &tp->pci_lat_timer);
14233 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14234 	    tp->pci_lat_timer < 64) {
14235 		tp->pci_lat_timer = 64;
14236 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14237 				      tp->pci_lat_timer);
14238 	}
14239 
14240 	/* Important! -- It is critical that the PCI-X hw workaround
14241 	 * situation is decided before the first MMIO register access.
14242 	 */
14243 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14244 		/* 5700 BX chips need to have their TX producer index
14245 		 * mailboxes written twice to workaround a bug.
14246 		 */
14247 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14248 
14249 		/* If we are in PCI-X mode, enable register write workaround.
14250 		 *
14251 		 * The workaround is to use indirect register accesses
14252 		 * for all chip writes not to mailbox registers.
14253 		 */
14254 		if (tg3_flag(tp, PCIX_MODE)) {
14255 			u32 pm_reg;
14256 
14257 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14258 
14259 			/* The chip can have it's power management PCI config
14260 			 * space registers clobbered due to this bug.
14261 			 * So explicitly force the chip into D0 here.
14262 			 */
14263 			pci_read_config_dword(tp->pdev,
14264 					      tp->pm_cap + PCI_PM_CTRL,
14265 					      &pm_reg);
14266 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14267 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14268 			pci_write_config_dword(tp->pdev,
14269 					       tp->pm_cap + PCI_PM_CTRL,
14270 					       pm_reg);
14271 
14272 			/* Also, force SERR#/PERR# in PCI command. */
14273 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14274 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14275 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14276 		}
14277 	}
14278 
14279 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14280 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14281 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14282 		tg3_flag_set(tp, PCI_32BIT);
14283 
14284 	/* Chip-specific fixup from Broadcom driver */
14285 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14286 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14287 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14288 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14289 	}
14290 
14291 	/* Default fast path register access methods */
14292 	tp->read32 = tg3_read32;
14293 	tp->write32 = tg3_write32;
14294 	tp->read32_mbox = tg3_read32;
14295 	tp->write32_mbox = tg3_write32;
14296 	tp->write32_tx_mbox = tg3_write32;
14297 	tp->write32_rx_mbox = tg3_write32;
14298 
14299 	/* Various workaround register access methods */
14300 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14301 		tp->write32 = tg3_write_indirect_reg32;
14302 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14303 		 (tg3_flag(tp, PCI_EXPRESS) &&
14304 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14305 		/*
14306 		 * Back to back register writes can cause problems on these
14307 		 * chips, the workaround is to read back all reg writes
14308 		 * except those to mailbox regs.
14309 		 *
14310 		 * See tg3_write_indirect_reg32().
14311 		 */
14312 		tp->write32 = tg3_write_flush_reg32;
14313 	}
14314 
14315 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14316 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14317 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14318 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14319 	}
14320 
14321 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14322 		tp->read32 = tg3_read_indirect_reg32;
14323 		tp->write32 = tg3_write_indirect_reg32;
14324 		tp->read32_mbox = tg3_read_indirect_mbox;
14325 		tp->write32_mbox = tg3_write_indirect_mbox;
14326 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14327 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14328 
14329 		iounmap(tp->regs);
14330 		tp->regs = NULL;
14331 
14332 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14333 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14334 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14335 	}
14336 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14337 		tp->read32_mbox = tg3_read32_mbox_5906;
14338 		tp->write32_mbox = tg3_write32_mbox_5906;
14339 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14340 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14341 	}
14342 
14343 	if (tp->write32 == tg3_write_indirect_reg32 ||
14344 	    (tg3_flag(tp, PCIX_MODE) &&
14345 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14346 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14347 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14348 
14349 	/* The memory arbiter has to be enabled in order for SRAM accesses
14350 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14351 	 * sure it is enabled, but other entities such as system netboot
14352 	 * code might disable it.
14353 	 */
14354 	val = tr32(MEMARB_MODE);
14355 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14356 
14357 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14358 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14359 	    tg3_flag(tp, 5780_CLASS)) {
14360 		if (tg3_flag(tp, PCIX_MODE)) {
14361 			pci_read_config_dword(tp->pdev,
14362 					      tp->pcix_cap + PCI_X_STATUS,
14363 					      &val);
14364 			tp->pci_fn = val & 0x7;
14365 		}
14366 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14367 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14368 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14369 		    NIC_SRAM_CPMUSTAT_SIG) {
14370 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14371 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14372 		}
14373 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14374 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14375 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14376 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14377 		    NIC_SRAM_CPMUSTAT_SIG) {
14378 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14379 				     TG3_CPMU_STATUS_FSHFT_5719;
14380 		}
14381 	}
14382 
14383 	/* Get eeprom hw config before calling tg3_set_power_state().
14384 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14385 	 * determined before calling tg3_set_power_state() so that
14386 	 * we know whether or not to switch out of Vaux power.
14387 	 * When the flag is set, it means that GPIO1 is used for eeprom
14388 	 * write protect and also implies that it is a LOM where GPIOs
14389 	 * are not used to switch power.
14390 	 */
14391 	tg3_get_eeprom_hw_cfg(tp);
14392 
14393 	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14394 		tg3_flag_clear(tp, TSO_CAPABLE);
14395 		tg3_flag_clear(tp, TSO_BUG);
14396 		tp->fw_needed = NULL;
14397 	}
14398 
14399 	if (tg3_flag(tp, ENABLE_APE)) {
14400 		/* Allow reads and writes to the
14401 		 * APE register and memory space.
14402 		 */
14403 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14404 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14405 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14406 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14407 				       pci_state_reg);
14408 
14409 		tg3_ape_lock_init(tp);
14410 	}
14411 
14412 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14413 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14414 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14415 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14416 	    tg3_flag(tp, 57765_PLUS))
14417 		tg3_flag_set(tp, CPMU_PRESENT);
14418 
14419 	/* Set up tp->grc_local_ctrl before calling
14420 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14421 	 * will bring 5700's external PHY out of reset.
14422 	 * It is also used as eeprom write protect on LOMs.
14423 	 */
14424 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14425 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14426 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14427 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14428 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14429 	/* Unused GPIO3 must be driven as output on 5752 because there
14430 	 * are no pull-up resistors on unused GPIO pins.
14431 	 */
14432 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14433 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14434 
14435 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14436 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14437 	    tg3_flag(tp, 57765_CLASS))
14438 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14439 
14440 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14441 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14442 		/* Turn off the debug UART. */
14443 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14444 		if (tg3_flag(tp, IS_NIC))
14445 			/* Keep VMain power. */
14446 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14447 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14448 	}
14449 
14450 	/* Switch out of Vaux if it is a NIC */
14451 	tg3_pwrsrc_switch_to_vmain(tp);
14452 
14453 	/* Derive initial jumbo mode from MTU assigned in
14454 	 * ether_setup() via the alloc_etherdev() call
14455 	 */
14456 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14457 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14458 
14459 	/* Determine WakeOnLan speed to use. */
14460 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14461 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14462 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14463 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14464 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14465 	} else {
14466 		tg3_flag_set(tp, WOL_SPEED_100MB);
14467 	}
14468 
14469 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14470 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14471 
14472 	/* A few boards don't want Ethernet@WireSpeed phy feature */
14473 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14474 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14475 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14476 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14477 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14478 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14479 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14480 
14481 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14482 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14483 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14484 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14485 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14486 
14487 	if (tg3_flag(tp, 5705_PLUS) &&
14488 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14489 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14490 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14491 	    !tg3_flag(tp, 57765_PLUS)) {
14492 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14493 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14494 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14495 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14496 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14497 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14498 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14499 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14500 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14501 		} else
14502 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14503 	}
14504 
14505 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14506 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14507 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14508 		if (tp->phy_otp == 0)
14509 			tp->phy_otp = TG3_OTP_DEFAULT;
14510 	}
14511 
14512 	if (tg3_flag(tp, CPMU_PRESENT))
14513 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14514 	else
14515 		tp->mi_mode = MAC_MI_MODE_BASE;
14516 
14517 	tp->coalesce_mode = 0;
14518 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14519 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14520 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14521 
14522 	/* Set these bits to enable statistics workaround. */
14523 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14524 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14525 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14526 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14527 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14528 	}
14529 
14530 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14531 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14532 		tg3_flag_set(tp, USE_PHYLIB);
14533 
14534 	err = tg3_mdio_init(tp);
14535 	if (err)
14536 		return err;
14537 
14538 	/* Initialize data/descriptor byte/word swapping. */
14539 	val = tr32(GRC_MODE);
14540 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14541 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14542 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14543 			GRC_MODE_B2HRX_ENABLE |
14544 			GRC_MODE_HTX2B_ENABLE |
14545 			GRC_MODE_HOST_STACKUP);
14546 	else
14547 		val &= GRC_MODE_HOST_STACKUP;
14548 
14549 	tw32(GRC_MODE, val | tp->grc_mode);
14550 
14551 	tg3_switch_clocks(tp);
14552 
14553 	/* Clear this out for sanity. */
14554 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14555 
14556 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14557 			      &pci_state_reg);
14558 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14559 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14560 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14561 
14562 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14563 		    chiprevid == CHIPREV_ID_5701_B0 ||
14564 		    chiprevid == CHIPREV_ID_5701_B2 ||
14565 		    chiprevid == CHIPREV_ID_5701_B5) {
14566 			void __iomem *sram_base;
14567 
14568 			/* Write some dummy words into the SRAM status block
14569 			 * area, see if it reads back correctly.  If the return
14570 			 * value is bad, force enable the PCIX workaround.
14571 			 */
14572 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14573 
14574 			writel(0x00000000, sram_base);
14575 			writel(0x00000000, sram_base + 4);
14576 			writel(0xffffffff, sram_base + 4);
14577 			if (readl(sram_base) != 0x00000000)
14578 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14579 		}
14580 	}
14581 
14582 	udelay(50);
14583 	tg3_nvram_init(tp);
14584 
14585 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14586 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14587 
14588 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14589 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14590 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14591 		tg3_flag_set(tp, IS_5788);
14592 
14593 	if (!tg3_flag(tp, IS_5788) &&
14594 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14595 		tg3_flag_set(tp, TAGGED_STATUS);
14596 	if (tg3_flag(tp, TAGGED_STATUS)) {
14597 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14598 				      HOSTCC_MODE_CLRTICK_TXBD);
14599 
14600 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14601 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14602 				       tp->misc_host_ctrl);
14603 	}
14604 
14605 	/* Preserve the APE MAC_MODE bits */
14606 	if (tg3_flag(tp, ENABLE_APE))
14607 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14608 	else
14609 		tp->mac_mode = 0;
14610 
14611 	/* these are limited to 10/100 only */
14612 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14613 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14614 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14615 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14616 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14617 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14618 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14619 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14620 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14621 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14622 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14623 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14624 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14625 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14626 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14627 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14628 
14629 	err = tg3_phy_probe(tp);
14630 	if (err) {
14631 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14632 		/* ... but do not return immediately ... */
14633 		tg3_mdio_fini(tp);
14634 	}
14635 
14636 	tg3_read_vpd(tp);
14637 	tg3_read_fw_ver(tp);
14638 
14639 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14640 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14641 	} else {
14642 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14643 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14644 		else
14645 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14646 	}
14647 
14648 	/* 5700 {AX,BX} chips have a broken status block link
14649 	 * change bit implementation, so we must use the
14650 	 * status register in those cases.
14651 	 */
14652 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14653 		tg3_flag_set(tp, USE_LINKCHG_REG);
14654 	else
14655 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14656 
14657 	/* The led_ctrl is set during tg3_phy_probe, here we might
14658 	 * have to force the link status polling mechanism based
14659 	 * upon subsystem IDs.
14660 	 */
14661 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14662 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14663 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14664 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14665 		tg3_flag_set(tp, USE_LINKCHG_REG);
14666 	}
14667 
14668 	/* For all SERDES we poll the MAC status register. */
14669 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14670 		tg3_flag_set(tp, POLL_SERDES);
14671 	else
14672 		tg3_flag_clear(tp, POLL_SERDES);
14673 
14674 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14675 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14676 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14677 	    tg3_flag(tp, PCIX_MODE)) {
14678 		tp->rx_offset = NET_SKB_PAD;
14679 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14680 		tp->rx_copy_thresh = ~(u16)0;
14681 #endif
14682 	}
14683 
14684 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14685 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14686 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14687 
14688 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14689 
14690 	/* Increment the rx prod index on the rx std ring by at most
14691 	 * 8 for these chips to workaround hw errata.
14692 	 */
14693 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14694 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14695 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14696 		tp->rx_std_max_post = 8;
14697 
14698 	if (tg3_flag(tp, ASPM_WORKAROUND))
14699 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14700 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14701 
14702 	return err;
14703 }
14704 
14705 #ifdef CONFIG_SPARC
14706 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14707 {
14708 	struct net_device *dev = tp->dev;
14709 	struct pci_dev *pdev = tp->pdev;
14710 	struct device_node *dp = pci_device_to_OF_node(pdev);
14711 	const unsigned char *addr;
14712 	int len;
14713 
14714 	addr = of_get_property(dp, "local-mac-address", &len);
14715 	if (addr && len == 6) {
14716 		memcpy(dev->dev_addr, addr, 6);
14717 		memcpy(dev->perm_addr, dev->dev_addr, 6);
14718 		return 0;
14719 	}
14720 	return -ENODEV;
14721 }
14722 
14723 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14724 {
14725 	struct net_device *dev = tp->dev;
14726 
14727 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14728 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14729 	return 0;
14730 }
14731 #endif
14732 
14733 static int __devinit tg3_get_device_address(struct tg3 *tp)
14734 {
14735 	struct net_device *dev = tp->dev;
14736 	u32 hi, lo, mac_offset;
14737 	int addr_ok = 0;
14738 
14739 #ifdef CONFIG_SPARC
14740 	if (!tg3_get_macaddr_sparc(tp))
14741 		return 0;
14742 #endif
14743 
14744 	mac_offset = 0x7c;
14745 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14746 	    tg3_flag(tp, 5780_CLASS)) {
14747 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14748 			mac_offset = 0xcc;
14749 		if (tg3_nvram_lock(tp))
14750 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14751 		else
14752 			tg3_nvram_unlock(tp);
14753 	} else if (tg3_flag(tp, 5717_PLUS)) {
14754 		if (tp->pci_fn & 1)
14755 			mac_offset = 0xcc;
14756 		if (tp->pci_fn > 1)
14757 			mac_offset += 0x18c;
14758 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14759 		mac_offset = 0x10;
14760 
14761 	/* First try to get it from MAC address mailbox. */
14762 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14763 	if ((hi >> 16) == 0x484b) {
14764 		dev->dev_addr[0] = (hi >>  8) & 0xff;
14765 		dev->dev_addr[1] = (hi >>  0) & 0xff;
14766 
14767 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14768 		dev->dev_addr[2] = (lo >> 24) & 0xff;
14769 		dev->dev_addr[3] = (lo >> 16) & 0xff;
14770 		dev->dev_addr[4] = (lo >>  8) & 0xff;
14771 		dev->dev_addr[5] = (lo >>  0) & 0xff;
14772 
14773 		/* Some old bootcode may report a 0 MAC address in SRAM */
14774 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14775 	}
14776 	if (!addr_ok) {
14777 		/* Next, try NVRAM. */
14778 		if (!tg3_flag(tp, NO_NVRAM) &&
14779 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14780 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14781 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14782 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14783 		}
14784 		/* Finally just fetch it out of the MAC control regs. */
14785 		else {
14786 			hi = tr32(MAC_ADDR_0_HIGH);
14787 			lo = tr32(MAC_ADDR_0_LOW);
14788 
14789 			dev->dev_addr[5] = lo & 0xff;
14790 			dev->dev_addr[4] = (lo >> 8) & 0xff;
14791 			dev->dev_addr[3] = (lo >> 16) & 0xff;
14792 			dev->dev_addr[2] = (lo >> 24) & 0xff;
14793 			dev->dev_addr[1] = hi & 0xff;
14794 			dev->dev_addr[0] = (hi >> 8) & 0xff;
14795 		}
14796 	}
14797 
14798 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14799 #ifdef CONFIG_SPARC
14800 		if (!tg3_get_default_macaddr_sparc(tp))
14801 			return 0;
14802 #endif
14803 		return -EINVAL;
14804 	}
14805 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14806 	return 0;
14807 }
14808 
14809 #define BOUNDARY_SINGLE_CACHELINE	1
14810 #define BOUNDARY_MULTI_CACHELINE	2
14811 
14812 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14813 {
14814 	int cacheline_size;
14815 	u8 byte;
14816 	int goal;
14817 
14818 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14819 	if (byte == 0)
14820 		cacheline_size = 1024;
14821 	else
14822 		cacheline_size = (int) byte * 4;
14823 
14824 	/* On 5703 and later chips, the boundary bits have no
14825 	 * effect.
14826 	 */
14827 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14828 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14829 	    !tg3_flag(tp, PCI_EXPRESS))
14830 		goto out;
14831 
14832 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14833 	goal = BOUNDARY_MULTI_CACHELINE;
14834 #else
14835 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14836 	goal = BOUNDARY_SINGLE_CACHELINE;
14837 #else
14838 	goal = 0;
14839 #endif
14840 #endif
14841 
14842 	if (tg3_flag(tp, 57765_PLUS)) {
14843 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14844 		goto out;
14845 	}
14846 
14847 	if (!goal)
14848 		goto out;
14849 
14850 	/* PCI controllers on most RISC systems tend to disconnect
14851 	 * when a device tries to burst across a cache-line boundary.
14852 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14853 	 *
14854 	 * Unfortunately, for PCI-E there are only limited
14855 	 * write-side controls for this, and thus for reads
14856 	 * we will still get the disconnects.  We'll also waste
14857 	 * these PCI cycles for both read and write for chips
14858 	 * other than 5700 and 5701 which do not implement the
14859 	 * boundary bits.
14860 	 */
14861 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14862 		switch (cacheline_size) {
14863 		case 16:
14864 		case 32:
14865 		case 64:
14866 		case 128:
14867 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14868 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14869 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14870 			} else {
14871 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14872 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14873 			}
14874 			break;
14875 
14876 		case 256:
14877 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14878 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14879 			break;
14880 
14881 		default:
14882 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14883 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14884 			break;
14885 		}
14886 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
14887 		switch (cacheline_size) {
14888 		case 16:
14889 		case 32:
14890 		case 64:
14891 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14892 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14893 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14894 				break;
14895 			}
14896 			/* fallthrough */
14897 		case 128:
14898 		default:
14899 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14900 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14901 			break;
14902 		}
14903 	} else {
14904 		switch (cacheline_size) {
14905 		case 16:
14906 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14907 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
14908 					DMA_RWCTRL_WRITE_BNDRY_16);
14909 				break;
14910 			}
14911 			/* fallthrough */
14912 		case 32:
14913 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
14915 					DMA_RWCTRL_WRITE_BNDRY_32);
14916 				break;
14917 			}
14918 			/* fallthrough */
14919 		case 64:
14920 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14921 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
14922 					DMA_RWCTRL_WRITE_BNDRY_64);
14923 				break;
14924 			}
14925 			/* fallthrough */
14926 		case 128:
14927 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14928 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
14929 					DMA_RWCTRL_WRITE_BNDRY_128);
14930 				break;
14931 			}
14932 			/* fallthrough */
14933 		case 256:
14934 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
14935 				DMA_RWCTRL_WRITE_BNDRY_256);
14936 			break;
14937 		case 512:
14938 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
14939 				DMA_RWCTRL_WRITE_BNDRY_512);
14940 			break;
14941 		case 1024:
14942 		default:
14943 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14944 				DMA_RWCTRL_WRITE_BNDRY_1024);
14945 			break;
14946 		}
14947 	}
14948 
14949 out:
14950 	return val;
14951 }
14952 
14953 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14954 {
14955 	struct tg3_internal_buffer_desc test_desc;
14956 	u32 sram_dma_descs;
14957 	int i, ret;
14958 
14959 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14960 
14961 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14962 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14963 	tw32(RDMAC_STATUS, 0);
14964 	tw32(WDMAC_STATUS, 0);
14965 
14966 	tw32(BUFMGR_MODE, 0);
14967 	tw32(FTQ_RESET, 0);
14968 
14969 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
14970 	test_desc.addr_lo = buf_dma & 0xffffffff;
14971 	test_desc.nic_mbuf = 0x00002100;
14972 	test_desc.len = size;
14973 
14974 	/*
14975 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14976 	 * the *second* time the tg3 driver was getting loaded after an
14977 	 * initial scan.
14978 	 *
14979 	 * Broadcom tells me:
14980 	 *   ...the DMA engine is connected to the GRC block and a DMA
14981 	 *   reset may affect the GRC block in some unpredictable way...
14982 	 *   The behavior of resets to individual blocks has not been tested.
14983 	 *
14984 	 * Broadcom noted the GRC reset will also reset all sub-components.
14985 	 */
14986 	if (to_device) {
14987 		test_desc.cqid_sqid = (13 << 8) | 2;
14988 
14989 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14990 		udelay(40);
14991 	} else {
14992 		test_desc.cqid_sqid = (16 << 8) | 7;
14993 
14994 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14995 		udelay(40);
14996 	}
14997 	test_desc.flags = 0x00000005;
14998 
14999 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15000 		u32 val;
15001 
15002 		val = *(((u32 *)&test_desc) + i);
15003 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15004 				       sram_dma_descs + (i * sizeof(u32)));
15005 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15006 	}
15007 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15008 
15009 	if (to_device)
15010 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15011 	else
15012 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15013 
15014 	ret = -ENODEV;
15015 	for (i = 0; i < 40; i++) {
15016 		u32 val;
15017 
15018 		if (to_device)
15019 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15020 		else
15021 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15022 		if ((val & 0xffff) == sram_dma_descs) {
15023 			ret = 0;
15024 			break;
15025 		}
15026 
15027 		udelay(100);
15028 	}
15029 
15030 	return ret;
15031 }
15032 
15033 #define TEST_BUFFER_SIZE	0x2000
15034 
15035 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15036 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15037 	{ },
15038 };
15039 
15040 static int __devinit tg3_test_dma(struct tg3 *tp)
15041 {
15042 	dma_addr_t buf_dma;
15043 	u32 *buf, saved_dma_rwctrl;
15044 	int ret = 0;
15045 
15046 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15047 				 &buf_dma, GFP_KERNEL);
15048 	if (!buf) {
15049 		ret = -ENOMEM;
15050 		goto out_nofree;
15051 	}
15052 
15053 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15054 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15055 
15056 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15057 
15058 	if (tg3_flag(tp, 57765_PLUS))
15059 		goto out;
15060 
15061 	if (tg3_flag(tp, PCI_EXPRESS)) {
15062 		/* DMA read watermark not used on PCIE */
15063 		tp->dma_rwctrl |= 0x00180000;
15064 	} else if (!tg3_flag(tp, PCIX_MODE)) {
15065 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15066 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15067 			tp->dma_rwctrl |= 0x003f0000;
15068 		else
15069 			tp->dma_rwctrl |= 0x003f000f;
15070 	} else {
15071 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15072 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15073 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15074 			u32 read_water = 0x7;
15075 
15076 			/* If the 5704 is behind the EPB bridge, we can
15077 			 * do the less restrictive ONE_DMA workaround for
15078 			 * better performance.
15079 			 */
15080 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15081 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15082 				tp->dma_rwctrl |= 0x8000;
15083 			else if (ccval == 0x6 || ccval == 0x7)
15084 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15085 
15086 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15087 				read_water = 4;
15088 			/* Set bit 23 to enable PCIX hw bug fix */
15089 			tp->dma_rwctrl |=
15090 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15091 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15092 				(1 << 23);
15093 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15094 			/* 5780 always in PCIX mode */
15095 			tp->dma_rwctrl |= 0x00144000;
15096 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15097 			/* 5714 always in PCIX mode */
15098 			tp->dma_rwctrl |= 0x00148000;
15099 		} else {
15100 			tp->dma_rwctrl |= 0x001b000f;
15101 		}
15102 	}
15103 
15104 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15105 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15106 		tp->dma_rwctrl &= 0xfffffff0;
15107 
15108 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15109 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15110 		/* Remove this if it causes problems for some boards. */
15111 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15112 
15113 		/* On 5700/5701 chips, we need to set this bit.
15114 		 * Otherwise the chip will issue cacheline transactions
15115 		 * to streamable DMA memory with not all the byte
15116 		 * enables turned on.  This is an error on several
15117 		 * RISC PCI controllers, in particular sparc64.
15118 		 *
15119 		 * On 5703/5704 chips, this bit has been reassigned
15120 		 * a different meaning.  In particular, it is used
15121 		 * on those chips to enable a PCI-X workaround.
15122 		 */
15123 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15124 	}
15125 
15126 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15127 
15128 #if 0
15129 	/* Unneeded, already done by tg3_get_invariants.  */
15130 	tg3_switch_clocks(tp);
15131 #endif
15132 
15133 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15134 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15135 		goto out;
15136 
15137 	/* It is best to perform DMA test with maximum write burst size
15138 	 * to expose the 5700/5701 write DMA bug.
15139 	 */
15140 	saved_dma_rwctrl = tp->dma_rwctrl;
15141 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15142 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15143 
15144 	while (1) {
15145 		u32 *p = buf, i;
15146 
15147 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15148 			p[i] = i;
15149 
15150 		/* Send the buffer to the chip. */
15151 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15152 		if (ret) {
15153 			dev_err(&tp->pdev->dev,
15154 				"%s: Buffer write failed. err = %d\n",
15155 				__func__, ret);
15156 			break;
15157 		}
15158 
15159 #if 0
15160 		/* validate data reached card RAM correctly. */
15161 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15162 			u32 val;
15163 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15164 			if (le32_to_cpu(val) != p[i]) {
15165 				dev_err(&tp->pdev->dev,
15166 					"%s: Buffer corrupted on device! "
15167 					"(%d != %d)\n", __func__, val, i);
15168 				/* ret = -ENODEV here? */
15169 			}
15170 			p[i] = 0;
15171 		}
15172 #endif
15173 		/* Now read it back. */
15174 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15175 		if (ret) {
15176 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15177 				"err = %d\n", __func__, ret);
15178 			break;
15179 		}
15180 
15181 		/* Verify it. */
15182 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15183 			if (p[i] == i)
15184 				continue;
15185 
15186 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15187 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15188 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15189 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15190 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15191 				break;
15192 			} else {
15193 				dev_err(&tp->pdev->dev,
15194 					"%s: Buffer corrupted on read back! "
15195 					"(%d != %d)\n", __func__, p[i], i);
15196 				ret = -ENODEV;
15197 				goto out;
15198 			}
15199 		}
15200 
15201 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15202 			/* Success. */
15203 			ret = 0;
15204 			break;
15205 		}
15206 	}
15207 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15208 	    DMA_RWCTRL_WRITE_BNDRY_16) {
15209 		/* DMA test passed without adjusting DMA boundary,
15210 		 * now look for chipsets that are known to expose the
15211 		 * DMA bug without failing the test.
15212 		 */
15213 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15214 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15215 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15216 		} else {
15217 			/* Safe to use the calculated DMA boundary. */
15218 			tp->dma_rwctrl = saved_dma_rwctrl;
15219 		}
15220 
15221 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15222 	}
15223 
15224 out:
15225 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15226 out_nofree:
15227 	return ret;
15228 }
15229 
15230 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15231 {
15232 	if (tg3_flag(tp, 57765_PLUS)) {
15233 		tp->bufmgr_config.mbuf_read_dma_low_water =
15234 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15235 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15236 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15237 		tp->bufmgr_config.mbuf_high_water =
15238 			DEFAULT_MB_HIGH_WATER_57765;
15239 
15240 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15241 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15242 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15243 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15244 		tp->bufmgr_config.mbuf_high_water_jumbo =
15245 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15246 	} else if (tg3_flag(tp, 5705_PLUS)) {
15247 		tp->bufmgr_config.mbuf_read_dma_low_water =
15248 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15249 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15250 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15251 		tp->bufmgr_config.mbuf_high_water =
15252 			DEFAULT_MB_HIGH_WATER_5705;
15253 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15254 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15255 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15256 			tp->bufmgr_config.mbuf_high_water =
15257 				DEFAULT_MB_HIGH_WATER_5906;
15258 		}
15259 
15260 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15261 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15262 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15263 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15264 		tp->bufmgr_config.mbuf_high_water_jumbo =
15265 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15266 	} else {
15267 		tp->bufmgr_config.mbuf_read_dma_low_water =
15268 			DEFAULT_MB_RDMA_LOW_WATER;
15269 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15270 			DEFAULT_MB_MACRX_LOW_WATER;
15271 		tp->bufmgr_config.mbuf_high_water =
15272 			DEFAULT_MB_HIGH_WATER;
15273 
15274 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15275 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15276 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15277 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15278 		tp->bufmgr_config.mbuf_high_water_jumbo =
15279 			DEFAULT_MB_HIGH_WATER_JUMBO;
15280 	}
15281 
15282 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15283 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15284 }
15285 
15286 static char * __devinit tg3_phy_string(struct tg3 *tp)
15287 {
15288 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15289 	case TG3_PHY_ID_BCM5400:	return "5400";
15290 	case TG3_PHY_ID_BCM5401:	return "5401";
15291 	case TG3_PHY_ID_BCM5411:	return "5411";
15292 	case TG3_PHY_ID_BCM5701:	return "5701";
15293 	case TG3_PHY_ID_BCM5703:	return "5703";
15294 	case TG3_PHY_ID_BCM5704:	return "5704";
15295 	case TG3_PHY_ID_BCM5705:	return "5705";
15296 	case TG3_PHY_ID_BCM5750:	return "5750";
15297 	case TG3_PHY_ID_BCM5752:	return "5752";
15298 	case TG3_PHY_ID_BCM5714:	return "5714";
15299 	case TG3_PHY_ID_BCM5780:	return "5780";
15300 	case TG3_PHY_ID_BCM5755:	return "5755";
15301 	case TG3_PHY_ID_BCM5787:	return "5787";
15302 	case TG3_PHY_ID_BCM5784:	return "5784";
15303 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15304 	case TG3_PHY_ID_BCM5906:	return "5906";
15305 	case TG3_PHY_ID_BCM5761:	return "5761";
15306 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15307 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15308 	case TG3_PHY_ID_BCM57765:	return "57765";
15309 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15310 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15311 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15312 	case 0:			return "serdes";
15313 	default:		return "unknown";
15314 	}
15315 }
15316 
15317 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15318 {
15319 	if (tg3_flag(tp, PCI_EXPRESS)) {
15320 		strcpy(str, "PCI Express");
15321 		return str;
15322 	} else if (tg3_flag(tp, PCIX_MODE)) {
15323 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15324 
15325 		strcpy(str, "PCIX:");
15326 
15327 		if ((clock_ctrl == 7) ||
15328 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15329 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15330 			strcat(str, "133MHz");
15331 		else if (clock_ctrl == 0)
15332 			strcat(str, "33MHz");
15333 		else if (clock_ctrl == 2)
15334 			strcat(str, "50MHz");
15335 		else if (clock_ctrl == 4)
15336 			strcat(str, "66MHz");
15337 		else if (clock_ctrl == 6)
15338 			strcat(str, "100MHz");
15339 	} else {
15340 		strcpy(str, "PCI:");
15341 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15342 			strcat(str, "66MHz");
15343 		else
15344 			strcat(str, "33MHz");
15345 	}
15346 	if (tg3_flag(tp, PCI_32BIT))
15347 		strcat(str, ":32-bit");
15348 	else
15349 		strcat(str, ":64-bit");
15350 	return str;
15351 }
15352 
15353 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15354 {
15355 	struct pci_dev *peer;
15356 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15357 
15358 	for (func = 0; func < 8; func++) {
15359 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15360 		if (peer && peer != tp->pdev)
15361 			break;
15362 		pci_dev_put(peer);
15363 	}
15364 	/* 5704 can be configured in single-port mode, set peer to
15365 	 * tp->pdev in that case.
15366 	 */
15367 	if (!peer) {
15368 		peer = tp->pdev;
15369 		return peer;
15370 	}
15371 
15372 	/*
15373 	 * We don't need to keep the refcount elevated; there's no way
15374 	 * to remove one half of this device without removing the other
15375 	 */
15376 	pci_dev_put(peer);
15377 
15378 	return peer;
15379 }
15380 
15381 static void __devinit tg3_init_coal(struct tg3 *tp)
15382 {
15383 	struct ethtool_coalesce *ec = &tp->coal;
15384 
15385 	memset(ec, 0, sizeof(*ec));
15386 	ec->cmd = ETHTOOL_GCOALESCE;
15387 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15388 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15389 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15390 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15391 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15392 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15393 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15394 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15395 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15396 
15397 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15398 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15399 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15400 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15401 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15402 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15403 	}
15404 
15405 	if (tg3_flag(tp, 5705_PLUS)) {
15406 		ec->rx_coalesce_usecs_irq = 0;
15407 		ec->tx_coalesce_usecs_irq = 0;
15408 		ec->stats_block_coalesce_usecs = 0;
15409 	}
15410 }
15411 
15412 static const struct net_device_ops tg3_netdev_ops = {
15413 	.ndo_open		= tg3_open,
15414 	.ndo_stop		= tg3_close,
15415 	.ndo_start_xmit		= tg3_start_xmit,
15416 	.ndo_get_stats64	= tg3_get_stats64,
15417 	.ndo_validate_addr	= eth_validate_addr,
15418 	.ndo_set_rx_mode	= tg3_set_rx_mode,
15419 	.ndo_set_mac_address	= tg3_set_mac_addr,
15420 	.ndo_do_ioctl		= tg3_ioctl,
15421 	.ndo_tx_timeout		= tg3_tx_timeout,
15422 	.ndo_change_mtu		= tg3_change_mtu,
15423 	.ndo_fix_features	= tg3_fix_features,
15424 	.ndo_set_features	= tg3_set_features,
15425 #ifdef CONFIG_NET_POLL_CONTROLLER
15426 	.ndo_poll_controller	= tg3_poll_controller,
15427 #endif
15428 };
15429 
15430 static int __devinit tg3_init_one(struct pci_dev *pdev,
15431 				  const struct pci_device_id *ent)
15432 {
15433 	struct net_device *dev;
15434 	struct tg3 *tp;
15435 	int i, err, pm_cap;
15436 	u32 sndmbx, rcvmbx, intmbx;
15437 	char str[40];
15438 	u64 dma_mask, persist_dma_mask;
15439 	netdev_features_t features = 0;
15440 
15441 	printk_once(KERN_INFO "%s\n", version);
15442 
15443 	err = pci_enable_device(pdev);
15444 	if (err) {
15445 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15446 		return err;
15447 	}
15448 
15449 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15450 	if (err) {
15451 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15452 		goto err_out_disable_pdev;
15453 	}
15454 
15455 	pci_set_master(pdev);
15456 
15457 	/* Find power-management capability. */
15458 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15459 	if (pm_cap == 0) {
15460 		dev_err(&pdev->dev,
15461 			"Cannot find Power Management capability, aborting\n");
15462 		err = -EIO;
15463 		goto err_out_free_res;
15464 	}
15465 
15466 	err = pci_set_power_state(pdev, PCI_D0);
15467 	if (err) {
15468 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15469 		goto err_out_free_res;
15470 	}
15471 
15472 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15473 	if (!dev) {
15474 		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15475 		err = -ENOMEM;
15476 		goto err_out_power_down;
15477 	}
15478 
15479 	SET_NETDEV_DEV(dev, &pdev->dev);
15480 
15481 	tp = netdev_priv(dev);
15482 	tp->pdev = pdev;
15483 	tp->dev = dev;
15484 	tp->pm_cap = pm_cap;
15485 	tp->rx_mode = TG3_DEF_RX_MODE;
15486 	tp->tx_mode = TG3_DEF_TX_MODE;
15487 
15488 	if (tg3_debug > 0)
15489 		tp->msg_enable = tg3_debug;
15490 	else
15491 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15492 
15493 	/* The word/byte swap controls here control register access byte
15494 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15495 	 * setting below.
15496 	 */
15497 	tp->misc_host_ctrl =
15498 		MISC_HOST_CTRL_MASK_PCI_INT |
15499 		MISC_HOST_CTRL_WORD_SWAP |
15500 		MISC_HOST_CTRL_INDIR_ACCESS |
15501 		MISC_HOST_CTRL_PCISTATE_RW;
15502 
15503 	/* The NONFRM (non-frame) byte/word swap controls take effect
15504 	 * on descriptor entries, anything which isn't packet data.
15505 	 *
15506 	 * The StrongARM chips on the board (one for tx, one for rx)
15507 	 * are running in big-endian mode.
15508 	 */
15509 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15510 			GRC_MODE_WSWAP_NONFRM_DATA);
15511 #ifdef __BIG_ENDIAN
15512 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15513 #endif
15514 	spin_lock_init(&tp->lock);
15515 	spin_lock_init(&tp->indirect_lock);
15516 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15517 
15518 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15519 	if (!tp->regs) {
15520 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15521 		err = -ENOMEM;
15522 		goto err_out_free_dev;
15523 	}
15524 
15525 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15526 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15527 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15528 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15529 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15530 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15531 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15532 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15533 		tg3_flag_set(tp, ENABLE_APE);
15534 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15535 		if (!tp->aperegs) {
15536 			dev_err(&pdev->dev,
15537 				"Cannot map APE registers, aborting\n");
15538 			err = -ENOMEM;
15539 			goto err_out_iounmap;
15540 		}
15541 	}
15542 
15543 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15544 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15545 
15546 	dev->ethtool_ops = &tg3_ethtool_ops;
15547 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15548 	dev->netdev_ops = &tg3_netdev_ops;
15549 	dev->irq = pdev->irq;
15550 
15551 	err = tg3_get_invariants(tp);
15552 	if (err) {
15553 		dev_err(&pdev->dev,
15554 			"Problem fetching invariants of chip, aborting\n");
15555 		goto err_out_apeunmap;
15556 	}
15557 
15558 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15559 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15560 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15561 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15562 	 * do DMA address check in tg3_start_xmit().
15563 	 */
15564 	if (tg3_flag(tp, IS_5788))
15565 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15566 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15567 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15568 #ifdef CONFIG_HIGHMEM
15569 		dma_mask = DMA_BIT_MASK(64);
15570 #endif
15571 	} else
15572 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15573 
15574 	/* Configure DMA attributes. */
15575 	if (dma_mask > DMA_BIT_MASK(32)) {
15576 		err = pci_set_dma_mask(pdev, dma_mask);
15577 		if (!err) {
15578 			features |= NETIF_F_HIGHDMA;
15579 			err = pci_set_consistent_dma_mask(pdev,
15580 							  persist_dma_mask);
15581 			if (err < 0) {
15582 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15583 					"DMA for consistent allocations\n");
15584 				goto err_out_apeunmap;
15585 			}
15586 		}
15587 	}
15588 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15589 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15590 		if (err) {
15591 			dev_err(&pdev->dev,
15592 				"No usable DMA configuration, aborting\n");
15593 			goto err_out_apeunmap;
15594 		}
15595 	}
15596 
15597 	tg3_init_bufmgr_config(tp);
15598 
15599 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15600 
15601 	/* 5700 B0 chips do not support checksumming correctly due
15602 	 * to hardware bugs.
15603 	 */
15604 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15605 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15606 
15607 		if (tg3_flag(tp, 5755_PLUS))
15608 			features |= NETIF_F_IPV6_CSUM;
15609 	}
15610 
15611 	/* TSO is on by default on chips that support hardware TSO.
15612 	 * Firmware TSO on older chips gives lower performance, so it
15613 	 * is off by default, but can be enabled using ethtool.
15614 	 */
15615 	if ((tg3_flag(tp, HW_TSO_1) ||
15616 	     tg3_flag(tp, HW_TSO_2) ||
15617 	     tg3_flag(tp, HW_TSO_3)) &&
15618 	    (features & NETIF_F_IP_CSUM))
15619 		features |= NETIF_F_TSO;
15620 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15621 		if (features & NETIF_F_IPV6_CSUM)
15622 			features |= NETIF_F_TSO6;
15623 		if (tg3_flag(tp, HW_TSO_3) ||
15624 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15625 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15626 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15627 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15628 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15629 			features |= NETIF_F_TSO_ECN;
15630 	}
15631 
15632 	dev->features |= features;
15633 	dev->vlan_features |= features;
15634 
15635 	/*
15636 	 * Add loopback capability only for a subset of devices that support
15637 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15638 	 * loopback for the remaining devices.
15639 	 */
15640 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15641 	    !tg3_flag(tp, CPMU_PRESENT))
15642 		/* Add the loopback capability */
15643 		features |= NETIF_F_LOOPBACK;
15644 
15645 	dev->hw_features |= features;
15646 
15647 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15648 	    !tg3_flag(tp, TSO_CAPABLE) &&
15649 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15650 		tg3_flag_set(tp, MAX_RXPEND_64);
15651 		tp->rx_pending = 63;
15652 	}
15653 
15654 	err = tg3_get_device_address(tp);
15655 	if (err) {
15656 		dev_err(&pdev->dev,
15657 			"Could not obtain valid ethernet address, aborting\n");
15658 		goto err_out_apeunmap;
15659 	}
15660 
15661 	/*
15662 	 * Reset chip in case UNDI or EFI driver did not shutdown
15663 	 * DMA self test will enable WDMAC and we'll see (spurious)
15664 	 * pending DMA on the PCI bus at that point.
15665 	 */
15666 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15667 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15668 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15669 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15670 	}
15671 
15672 	err = tg3_test_dma(tp);
15673 	if (err) {
15674 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15675 		goto err_out_apeunmap;
15676 	}
15677 
15678 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15679 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15680 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15681 	for (i = 0; i < tp->irq_max; i++) {
15682 		struct tg3_napi *tnapi = &tp->napi[i];
15683 
15684 		tnapi->tp = tp;
15685 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15686 
15687 		tnapi->int_mbox = intmbx;
15688 		if (i <= 4)
15689 			intmbx += 0x8;
15690 		else
15691 			intmbx += 0x4;
15692 
15693 		tnapi->consmbox = rcvmbx;
15694 		tnapi->prodmbox = sndmbx;
15695 
15696 		if (i)
15697 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15698 		else
15699 			tnapi->coal_now = HOSTCC_MODE_NOW;
15700 
15701 		if (!tg3_flag(tp, SUPPORT_MSIX))
15702 			break;
15703 
15704 		/*
15705 		 * If we support MSIX, we'll be using RSS.  If we're using
15706 		 * RSS, the first vector only handles link interrupts and the
15707 		 * remaining vectors handle rx and tx interrupts.  Reuse the
15708 		 * mailbox values for the next iteration.  The values we setup
15709 		 * above are still useful for the single vectored mode.
15710 		 */
15711 		if (!i)
15712 			continue;
15713 
15714 		rcvmbx += 0x8;
15715 
15716 		if (sndmbx & 0x4)
15717 			sndmbx -= 0x4;
15718 		else
15719 			sndmbx += 0xc;
15720 	}
15721 
15722 	tg3_init_coal(tp);
15723 
15724 	pci_set_drvdata(pdev, dev);
15725 
15726 	if (tg3_flag(tp, 5717_PLUS)) {
15727 		/* Resume a low-power mode */
15728 		tg3_frob_aux_power(tp, false);
15729 	}
15730 
15731 	err = register_netdev(dev);
15732 	if (err) {
15733 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15734 		goto err_out_apeunmap;
15735 	}
15736 
15737 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15738 		    tp->board_part_number,
15739 		    tp->pci_chip_rev_id,
15740 		    tg3_bus_string(tp, str),
15741 		    dev->dev_addr);
15742 
15743 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15744 		struct phy_device *phydev;
15745 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15746 		netdev_info(dev,
15747 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15748 			    phydev->drv->name, dev_name(&phydev->dev));
15749 	} else {
15750 		char *ethtype;
15751 
15752 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15753 			ethtype = "10/100Base-TX";
15754 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15755 			ethtype = "1000Base-SX";
15756 		else
15757 			ethtype = "10/100/1000Base-T";
15758 
15759 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15760 			    "(WireSpeed[%d], EEE[%d])\n",
15761 			    tg3_phy_string(tp), ethtype,
15762 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15763 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15764 	}
15765 
15766 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15767 		    (dev->features & NETIF_F_RXCSUM) != 0,
15768 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15769 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15770 		    tg3_flag(tp, ENABLE_ASF) != 0,
15771 		    tg3_flag(tp, TSO_CAPABLE) != 0);
15772 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15773 		    tp->dma_rwctrl,
15774 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15775 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15776 
15777 	pci_save_state(pdev);
15778 
15779 	return 0;
15780 
15781 err_out_apeunmap:
15782 	if (tp->aperegs) {
15783 		iounmap(tp->aperegs);
15784 		tp->aperegs = NULL;
15785 	}
15786 
15787 err_out_iounmap:
15788 	if (tp->regs) {
15789 		iounmap(tp->regs);
15790 		tp->regs = NULL;
15791 	}
15792 
15793 err_out_free_dev:
15794 	free_netdev(dev);
15795 
15796 err_out_power_down:
15797 	pci_set_power_state(pdev, PCI_D3hot);
15798 
15799 err_out_free_res:
15800 	pci_release_regions(pdev);
15801 
15802 err_out_disable_pdev:
15803 	pci_disable_device(pdev);
15804 	pci_set_drvdata(pdev, NULL);
15805 	return err;
15806 }
15807 
15808 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15809 {
15810 	struct net_device *dev = pci_get_drvdata(pdev);
15811 
15812 	if (dev) {
15813 		struct tg3 *tp = netdev_priv(dev);
15814 
15815 		if (tp->fw)
15816 			release_firmware(tp->fw);
15817 
15818 		tg3_reset_task_cancel(tp);
15819 
15820 		if (tg3_flag(tp, USE_PHYLIB)) {
15821 			tg3_phy_fini(tp);
15822 			tg3_mdio_fini(tp);
15823 		}
15824 
15825 		unregister_netdev(dev);
15826 		if (tp->aperegs) {
15827 			iounmap(tp->aperegs);
15828 			tp->aperegs = NULL;
15829 		}
15830 		if (tp->regs) {
15831 			iounmap(tp->regs);
15832 			tp->regs = NULL;
15833 		}
15834 		free_netdev(dev);
15835 		pci_release_regions(pdev);
15836 		pci_disable_device(pdev);
15837 		pci_set_drvdata(pdev, NULL);
15838 	}
15839 }
15840 
15841 #ifdef CONFIG_PM_SLEEP
15842 static int tg3_suspend(struct device *device)
15843 {
15844 	struct pci_dev *pdev = to_pci_dev(device);
15845 	struct net_device *dev = pci_get_drvdata(pdev);
15846 	struct tg3 *tp = netdev_priv(dev);
15847 	int err;
15848 
15849 	if (!netif_running(dev))
15850 		return 0;
15851 
15852 	tg3_reset_task_cancel(tp);
15853 	tg3_phy_stop(tp);
15854 	tg3_netif_stop(tp);
15855 
15856 	del_timer_sync(&tp->timer);
15857 
15858 	tg3_full_lock(tp, 1);
15859 	tg3_disable_ints(tp);
15860 	tg3_full_unlock(tp);
15861 
15862 	netif_device_detach(dev);
15863 
15864 	tg3_full_lock(tp, 0);
15865 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15866 	tg3_flag_clear(tp, INIT_COMPLETE);
15867 	tg3_full_unlock(tp);
15868 
15869 	err = tg3_power_down_prepare(tp);
15870 	if (err) {
15871 		int err2;
15872 
15873 		tg3_full_lock(tp, 0);
15874 
15875 		tg3_flag_set(tp, INIT_COMPLETE);
15876 		err2 = tg3_restart_hw(tp, 1);
15877 		if (err2)
15878 			goto out;
15879 
15880 		tp->timer.expires = jiffies + tp->timer_offset;
15881 		add_timer(&tp->timer);
15882 
15883 		netif_device_attach(dev);
15884 		tg3_netif_start(tp);
15885 
15886 out:
15887 		tg3_full_unlock(tp);
15888 
15889 		if (!err2)
15890 			tg3_phy_start(tp);
15891 	}
15892 
15893 	return err;
15894 }
15895 
15896 static int tg3_resume(struct device *device)
15897 {
15898 	struct pci_dev *pdev = to_pci_dev(device);
15899 	struct net_device *dev = pci_get_drvdata(pdev);
15900 	struct tg3 *tp = netdev_priv(dev);
15901 	int err;
15902 
15903 	if (!netif_running(dev))
15904 		return 0;
15905 
15906 	netif_device_attach(dev);
15907 
15908 	tg3_full_lock(tp, 0);
15909 
15910 	tg3_flag_set(tp, INIT_COMPLETE);
15911 	err = tg3_restart_hw(tp, 1);
15912 	if (err)
15913 		goto out;
15914 
15915 	tp->timer.expires = jiffies + tp->timer_offset;
15916 	add_timer(&tp->timer);
15917 
15918 	tg3_netif_start(tp);
15919 
15920 out:
15921 	tg3_full_unlock(tp);
15922 
15923 	if (!err)
15924 		tg3_phy_start(tp);
15925 
15926 	return err;
15927 }
15928 
15929 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15930 #define TG3_PM_OPS (&tg3_pm_ops)
15931 
15932 #else
15933 
15934 #define TG3_PM_OPS NULL
15935 
15936 #endif /* CONFIG_PM_SLEEP */
15937 
15938 /**
15939  * tg3_io_error_detected - called when PCI error is detected
15940  * @pdev: Pointer to PCI device
15941  * @state: The current pci connection state
15942  *
15943  * This function is called after a PCI bus error affecting
15944  * this device has been detected.
15945  */
15946 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15947 					      pci_channel_state_t state)
15948 {
15949 	struct net_device *netdev = pci_get_drvdata(pdev);
15950 	struct tg3 *tp = netdev_priv(netdev);
15951 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15952 
15953 	netdev_info(netdev, "PCI I/O error detected\n");
15954 
15955 	rtnl_lock();
15956 
15957 	if (!netif_running(netdev))
15958 		goto done;
15959 
15960 	tg3_phy_stop(tp);
15961 
15962 	tg3_netif_stop(tp);
15963 
15964 	del_timer_sync(&tp->timer);
15965 
15966 	/* Want to make sure that the reset task doesn't run */
15967 	tg3_reset_task_cancel(tp);
15968 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15969 
15970 	netif_device_detach(netdev);
15971 
15972 	/* Clean up software state, even if MMIO is blocked */
15973 	tg3_full_lock(tp, 0);
15974 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15975 	tg3_full_unlock(tp);
15976 
15977 done:
15978 	if (state == pci_channel_io_perm_failure)
15979 		err = PCI_ERS_RESULT_DISCONNECT;
15980 	else
15981 		pci_disable_device(pdev);
15982 
15983 	rtnl_unlock();
15984 
15985 	return err;
15986 }
15987 
15988 /**
15989  * tg3_io_slot_reset - called after the pci bus has been reset.
15990  * @pdev: Pointer to PCI device
15991  *
15992  * Restart the card from scratch, as if from a cold-boot.
15993  * At this point, the card has exprienced a hard reset,
15994  * followed by fixups by BIOS, and has its config space
15995  * set up identically to what it was at cold boot.
15996  */
15997 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15998 {
15999 	struct net_device *netdev = pci_get_drvdata(pdev);
16000 	struct tg3 *tp = netdev_priv(netdev);
16001 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16002 	int err;
16003 
16004 	rtnl_lock();
16005 
16006 	if (pci_enable_device(pdev)) {
16007 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16008 		goto done;
16009 	}
16010 
16011 	pci_set_master(pdev);
16012 	pci_restore_state(pdev);
16013 	pci_save_state(pdev);
16014 
16015 	if (!netif_running(netdev)) {
16016 		rc = PCI_ERS_RESULT_RECOVERED;
16017 		goto done;
16018 	}
16019 
16020 	err = tg3_power_up(tp);
16021 	if (err)
16022 		goto done;
16023 
16024 	rc = PCI_ERS_RESULT_RECOVERED;
16025 
16026 done:
16027 	rtnl_unlock();
16028 
16029 	return rc;
16030 }
16031 
16032 /**
16033  * tg3_io_resume - called when traffic can start flowing again.
16034  * @pdev: Pointer to PCI device
16035  *
16036  * This callback is called when the error recovery driver tells
16037  * us that its OK to resume normal operation.
16038  */
16039 static void tg3_io_resume(struct pci_dev *pdev)
16040 {
16041 	struct net_device *netdev = pci_get_drvdata(pdev);
16042 	struct tg3 *tp = netdev_priv(netdev);
16043 	int err;
16044 
16045 	rtnl_lock();
16046 
16047 	if (!netif_running(netdev))
16048 		goto done;
16049 
16050 	tg3_full_lock(tp, 0);
16051 	tg3_flag_set(tp, INIT_COMPLETE);
16052 	err = tg3_restart_hw(tp, 1);
16053 	tg3_full_unlock(tp);
16054 	if (err) {
16055 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16056 		goto done;
16057 	}
16058 
16059 	netif_device_attach(netdev);
16060 
16061 	tp->timer.expires = jiffies + tp->timer_offset;
16062 	add_timer(&tp->timer);
16063 
16064 	tg3_netif_start(tp);
16065 
16066 	tg3_phy_start(tp);
16067 
16068 done:
16069 	rtnl_unlock();
16070 }
16071 
16072 static struct pci_error_handlers tg3_err_handler = {
16073 	.error_detected	= tg3_io_error_detected,
16074 	.slot_reset	= tg3_io_slot_reset,
16075 	.resume		= tg3_io_resume
16076 };
16077 
16078 static struct pci_driver tg3_driver = {
16079 	.name		= DRV_MODULE_NAME,
16080 	.id_table	= tg3_pci_tbl,
16081 	.probe		= tg3_init_one,
16082 	.remove		= __devexit_p(tg3_remove_one),
16083 	.err_handler	= &tg3_err_handler,
16084 	.driver.pm	= TG3_PM_OPS,
16085 };
16086 
16087 static int __init tg3_init(void)
16088 {
16089 	return pci_register_driver(&tg3_driver);
16090 }
16091 
16092 static void __exit tg3_cleanup(void)
16093 {
16094 	pci_unregister_driver(&tg3_driver);
16095 }
16096 
16097 module_init(tg3_init);
16098 module_exit(tg3_cleanup);
16099